/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
 */


#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/debug.h>
#include <sys/bofi.h>
#include <sys/dvma.h>
#include <sys/bofi_impl.h>

/*
 * Testing the resilience of a hardened device driver requires a suitably wide
 * range of different types of "typical" hardware faults to be injected,
 * preferably in a controlled and repeatable fashion. This is not in general
 * possible via hardware, so the "fault injection test harness" is provided.
 * This works by intercepting calls from the driver to various DDI routines,
 * and then corrupting the result of those DDI routine calls as if the
 * hardware had caused the corruption.
 *
 * Conceptually, the bofi driver consists of two parts:
 *
 * A driver interface that supports a number of ioctls which allow error
 * definitions ("errdefs") to be defined and subsequently managed. The
 * driver is a clone driver, so each open will create a separate
 * invocation. Any errdefs created by using ioctls to that invocation
 * will automatically be deleted when that invocation is closed.
 *
 * Intercept routines: When the bofi driver is attached, it edits the
 * bus_ops structure of the bus nexus specified by the "bofi-nexus"
 * field in the "bofi.conf" file, thus allowing the
 * bofi driver to intercept various ddi functions. These intercept
 * routines primarily carry out fault injections based on the errdefs
 * created for that device.
 *
 * Faults can be injected into:
 *
 * DMA (corrupting data for DMA to/from memory areas defined by
 * ddi_dma_setup(), ddi_dma_bind_handle(), etc)
 *
 * Physical IO (corrupting data sent/received via ddi_get8(), ddi_put8(),
 * etc),
 *
 * Interrupts (generating spurious interrupts, losing interrupts,
 * delaying interrupts).
 *
 * By default, ddi routines called from all drivers will be intercepted
 * and faults potentially injected. However, the "bofi-to-test" field in
 * the "bofi.conf" file can be set to a space-separated list of drivers to
 * test (or by preceding each driver name in the list with an "!", a list
 * of drivers not to test).
 *
 * In addition to fault injection, the bofi driver does a number of static
 * checks which are controlled by properties in the "bofi.conf" file.
 *
 * "bofi-ddi-check" - if set, will validate that there are no PIO accesses
 * other than those using the DDI routines (ddi_get8(), ddi_put8(), etc).
 *
 * "bofi-range-check" - if set to "warn" (warning) or "panic" (panic), will
 * validate that calls to ddi_get8(), ddi_put8(), etc are not made
 * specifying addresses outside the range of the access_handle.
 *
 * "bofi-sync-check" - if set, will validate that calls to ddi_dma_sync()
 * are being made correctly.
 */
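
/*
 * For illustration only, a bofi.conf using the properties described above
 * might look like the following - the nexus and driver names and the
 * property values here are hypothetical examples, not defaults:
 *
 *	name="bofi" parent="pseudo" instance=0;
 *	bofi-nexus="pci";
 *	bofi-to-test="xyzdrv";
 *	bofi-range-check="warn";
 *	bofi-ddi-check="on";
 *	bofi-sync-check="on";
 */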

extern void *bp_mapin_common(struct buf *, int);

static int bofi_ddi_check;
static int bofi_sync_check;
static int bofi_range_check;

static struct bofi_link bofi_link_array[BOFI_NLINKS], *bofi_link_freelist;

#define LLSZMASK (sizeof (uint64_t)-1)

#define HDL_HASH_TBL_SIZE 64
static struct bofi_shadow hhash_table[HDL_HASH_TBL_SIZE];
static struct bofi_shadow dhash_table[HDL_HASH_TBL_SIZE];
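/*
 * the hash macros below index by handle address - the shifts discard
 * low-order address bits, which are largely constant for aligned
 * handle allocations, so that handles spread across the buckets
 */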
#define HDL_DHASH(x) \
    (&dhash_table[((uintptr_t)(x) >> 3) & (HDL_HASH_TBL_SIZE-1)])
#define HDL_HHASH(x) \
    (&hhash_table[((uintptr_t)(x) >> 5) & (HDL_HASH_TBL_SIZE-1)])

static struct bofi_shadow shadow_list;
static struct bofi_errent *errent_listp;

static char driver_list[NAMESIZE];
static int driver_list_size;
static int driver_list_neg;
static char nexus_name[NAMESIZE];

static int initialized = 0;

#define NCLONES 2560
static int clone_tab[NCLONES];

static dev_info_t *our_dip;

static kmutex_t bofi_mutex;
static kmutex_t clone_tab_mutex;
static kmutex_t bofi_low_mutex;
static ddi_iblock_cookie_t bofi_low_cookie;
static uint_t bofi_signal(caddr_t arg);
static int bofi_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int bofi_attach(dev_info_t *, ddi_attach_cmd_t);
static int bofi_detach(dev_info_t *, ddi_detach_cmd_t);
static int bofi_open(dev_t *, int, int, cred_t *);
static int bofi_close(dev_t, int, int, cred_t *);
static int bofi_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int bofi_errdef_alloc(struct bofi_errdef *, char *,
    struct bofi_errent *);
static int bofi_errdef_free(struct bofi_errent *);
static void bofi_start(struct bofi_errctl *, char *);
static void bofi_stop(struct bofi_errctl *, char *);
static void bofi_broadcast(struct bofi_errctl *, char *);
static void bofi_clear_acc_chk(struct bofi_errctl *, char *);
static void bofi_clear_errors(struct bofi_errctl *, char *);
static void bofi_clear_errdefs(struct bofi_errctl *, char *);
static int bofi_errdef_check(struct bofi_errstate *,
    struct acc_log_elem **);
static int bofi_errdef_check_w(struct bofi_errstate *,
    struct acc_log_elem **);
static int bofi_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
    off_t, off_t, caddr_t *);
static int bofi_dma_allochdl(dev_info_t *, dev_info_t *,
    ddi_dma_attr_t *, int (*)(caddr_t), caddr_t,
    ddi_dma_handle_t *);
static int bofi_dma_freehdl(dev_info_t *, dev_info_t *,
    ddi_dma_handle_t);
static int bofi_dma_bindhdl(dev_info_t *, dev_info_t *,
    ddi_dma_handle_t, struct ddi_dma_req *, ddi_dma_cookie_t *,
    uint_t *);
static int bofi_dma_unbindhdl(dev_info_t *, dev_info_t *,
    ddi_dma_handle_t);
static int bofi_dma_flush(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
    off_t, size_t, uint_t);
static int bofi_dma_ctl(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
    enum ddi_dma_ctlops, off_t *, size_t *, caddr_t *, uint_t);
static int bofi_dma_win(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
static int bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp,
    void *result);
static int bofi_fm_ereport_callback(sysevent_t *ev, void *cookie);

evchan_t *bofi_error_chan;

#define FM_SIMULATED_DMA "simulated.dma"
#define FM_SIMULATED_PIO "simulated.pio"

#if defined(__sparc)
static void bofi_dvma_kaddr_load(ddi_dma_handle_t, caddr_t, uint_t,
    uint_t, ddi_dma_cookie_t *);
static void bofi_dvma_unload(ddi_dma_handle_t, uint_t, uint_t);
static void bofi_dvma_sync(ddi_dma_handle_t, uint_t, uint_t);
static void bofi_dvma_reserve(dev_info_t *, ddi_dma_handle_t);
#endif
static int driver_under_test(dev_info_t *);
static int bofi_check_acc_hdl(ddi_acc_impl_t *);
static int bofi_check_dma_hdl(ddi_dma_impl_t *);
static int bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
    ddi_eventcookie_t eventhdl, void *impl_data);

static struct bus_ops bofi_bus_ops = {
    BUSO_REV,
    bofi_map,
    NULL,
    NULL,
    NULL,
    i_ddi_map_fault,
    NULL,
    bofi_dma_allochdl,
    bofi_dma_freehdl,
    bofi_dma_bindhdl,
    bofi_dma_unbindhdl,
    bofi_dma_flush,
    bofi_dma_win,
    bofi_dma_ctl,
    NULL,
    ddi_bus_prop_op,
    ndi_busop_get_eventcookie,
    ndi_busop_add_eventcall,
    ndi_busop_remove_eventcall,
    bofi_post_event,
    NULL,
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    bofi_intr_ops
};

static struct cb_ops bofi_cb_ops = {
    bofi_open, /* open */
    bofi_close, /* close */
    nodev, /* strategy */
    nodev, /* print */
    nodev, /* dump */
    nodev, /* read */
    nodev, /* write */
    bofi_ioctl, /* ioctl */
    nodev, /* devmap */
    nodev, /* mmap */
    nodev, /* segmap */
    nochpoll, /* chpoll */
    ddi_prop_op, /* prop_op */
    NULL, /* for STREAMS drivers */
    D_MP, /* driver compatibility flag */
    CB_REV, /* cb_ops revision */
    nodev, /* aread */
    nodev /* awrite */
};

static struct dev_ops bofi_ops = {
    DEVO_REV, /* driver build version */
    0, /* device reference count */
    bofi_getinfo,
    nulldev,
    nulldev, /* probe */
    bofi_attach,
    bofi_detach,
    nulldev, /* reset */
    &bofi_cb_ops,
    (struct bus_ops *)NULL,
    nulldev, /* power */
    ddi_quiesce_not_needed, /* quiesce */
};

/* module configuration stuff */
static void *statep;

static struct modldrv modldrv = {
    &mod_driverops,
    "bofi driver",
    &bofi_ops
};

static struct modlinkage modlinkage = {
    MODREV_1,
    &modldrv,
    0
};

static struct bus_ops save_bus_ops;

#if defined(__sparc)
static struct dvma_ops bofi_dvma_ops = {
    DVMAO_REV,
    bofi_dvma_kaddr_load,
    bofi_dvma_unload,
    bofi_dvma_sync
};
#endif

/*
 * support routine - map user page into kernel virtual
 */
static caddr_t
dmareq_mapin(offset_t len, caddr_t addr, struct as *as, int flag)
{
    struct buf buf;
    struct proc proc;

    /*
     * mock up a buf structure so we can call bp_mapin_common()
     */
    buf.b_flags = B_PHYS;
    buf.b_un.b_addr = (caddr_t)addr;
    buf.b_bcount = (size_t)len;
    proc.p_as = as;
    buf.b_proc = &proc;
    return (bp_mapin_common(&buf, flag));
}


/*
 * support routine - map page chain into kernel virtual
 */
static caddr_t
dmareq_pp_mapin(offset_t len, uint_t offset, page_t *pp, int flag)
{
    struct buf buf;

    /*
     * mock up a buf structure so we can call bp_mapin_common()
     */
    buf.b_flags = B_PAGEIO;
    buf.b_un.b_addr = (caddr_t)(uintptr_t)offset;
    buf.b_bcount = (size_t)len;
    buf.b_pages = pp;
    return (bp_mapin_common(&buf, flag));
}


/*
 * support routine - map page array into kernel virtual
 */
static caddr_t
dmareq_pplist_mapin(uint_t len, caddr_t addr, page_t **pplist, struct as *as,
    int flag)
{
    struct buf buf;
    struct proc proc;

    /*
     * mock up a buf structure so we can call bp_mapin_common()
     */
    buf.b_flags = B_PHYS|B_SHADOW;
    buf.b_un.b_addr = addr;
    buf.b_bcount = len;
    buf.b_shadow = pplist;
    proc.p_as = as;
    buf.b_proc = &proc;
    return (bp_mapin_common(&buf, flag));
}


/*
 * support routine - map dmareq into kernel virtual if not already
 * fills in *lenp with length
 * *mapaddr will be new kernel virtual address - or null if no mapping needed
 */
static caddr_t
ddi_dmareq_mapin(struct ddi_dma_req *dmareqp, caddr_t *mapaddrp,
    offset_t *lenp)
{
    int sleep = (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? VM_SLEEP: VM_NOSLEEP;

    *lenp = dmareqp->dmar_object.dmao_size;
    if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
        *mapaddrp = dmareq_pp_mapin(dmareqp->dmar_object.dmao_size,
            dmareqp->dmar_object.dmao_obj.pp_obj.pp_offset,
            dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp, sleep);
        return (*mapaddrp);
    } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
        *mapaddrp = dmareq_pplist_mapin(dmareqp->dmar_object.dmao_size,
            dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
            dmareqp->dmar_object.dmao_obj.virt_obj.v_priv,
            dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
        return (*mapaddrp);
    } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == &kas) {
        *mapaddrp = NULL;
        return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
    } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == NULL) {
        *mapaddrp = NULL;
        return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
    } else {
        *mapaddrp = dmareq_mapin(dmareqp->dmar_object.dmao_size,
            dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
            dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
        return (*mapaddrp);
    }
}


/*
 * support routine - free off kernel virtual mapping as allocated by
 * ddi_dmareq_mapin()
 */
static void
ddi_dmareq_mapout(caddr_t addr, offset_t len, int map_flags, page_t *pp,
    page_t **pplist)
{
    struct buf buf;

    if (addr == NULL)
        return;
    /*
     * mock up a buf structure
     */
    buf.b_flags = B_REMAPPED | map_flags;
    buf.b_un.b_addr = addr;
    buf.b_bcount = (size_t)len;
    buf.b_pages = pp;
    buf.b_shadow = pplist;
    bp_mapout(&buf);
}

static time_t
bofi_gettime()
{
    timestruc_t ts;

    gethrestime(&ts);
    return (ts.tv_sec);
}

/*
 * reset the bus_ops structure of the specified nexus to point to
 * the original values in the save_bus_ops structure.
 *
 * Note that both this routine and modify_bus_ops() rely on the current
 * behavior of the framework in that nexus drivers are not unloadable
 *
 */

static int
reset_bus_ops(char *name, struct bus_ops *bop)
{
    struct modctl *modp;
    struct modldrv *mp;
    struct bus_ops *bp;
    struct dev_ops *ops;

    mutex_enter(&mod_lock);
    /*
     * find specified module
     */
    modp = &modules;
    do {
        if (strcmp(name, modp->mod_modname) == 0) {
            if (!modp->mod_linkage) {
                mutex_exit(&mod_lock);
                return (0);
            }
            mp = modp->mod_linkage->ml_linkage[0];
            if (!mp || !mp->drv_dev_ops) {
                mutex_exit(&mod_lock);
                return (0);
            }
            ops = mp->drv_dev_ops;
            bp = ops->devo_bus_ops;
            if (!bp) {
                mutex_exit(&mod_lock);
                return (0);
            }
            if (ops->devo_refcnt > 0) {
                /*
                 * As long as devices are active with modified
                 * bus ops bofi must not go away. There may be
                 * drivers with modified access or dma handles.
                 */
                mutex_exit(&mod_lock);
                return (0);
            }
            cmn_err(CE_NOTE, "bofi reset bus_ops for %s",
                mp->drv_linkinfo);
            bp->bus_intr_op = bop->bus_intr_op;
            bp->bus_post_event = bop->bus_post_event;
            bp->bus_map = bop->bus_map;
            bp->bus_dma_map = bop->bus_dma_map;
            bp->bus_dma_allochdl = bop->bus_dma_allochdl;
            bp->bus_dma_freehdl = bop->bus_dma_freehdl;
            bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
            bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
            bp->bus_dma_flush = bop->bus_dma_flush;
            bp->bus_dma_win = bop->bus_dma_win;
            bp->bus_dma_ctl = bop->bus_dma_ctl;
            mutex_exit(&mod_lock);
            return (1);
        }
    } while ((modp = modp->mod_next) != &modules);
    mutex_exit(&mod_lock);
    return (0);
}

/*
 * modify the bus_ops structure of the specified nexus to point to bofi
 * routines, saving the original values in the save_bus_ops structure
 */

static int
modify_bus_ops(char *name, struct bus_ops *bop)
{
    struct modctl *modp;
    struct modldrv *mp;
    struct bus_ops *bp;
    struct dev_ops *ops;

    if (ddi_name_to_major(name) == -1)
        return (0);

    mutex_enter(&mod_lock);
    /*
     * find specified module
     */
    modp = &modules;
    do {
        if (strcmp(name, modp->mod_modname) == 0) {
            if (!modp->mod_linkage) {
                mutex_exit(&mod_lock);
                return (0);
            }
            mp = modp->mod_linkage->ml_linkage[0];
            if (!mp || !mp->drv_dev_ops) {
                mutex_exit(&mod_lock);
                return (0);
            }
            ops = mp->drv_dev_ops;
            bp = ops->devo_bus_ops;
            if (!bp) {
                mutex_exit(&mod_lock);
                return (0);
            }
            if (ops->devo_refcnt == 0) {
                /*
                 * If there is no device active for this
                 * module then there is nothing to do for bofi.
                 */
                mutex_exit(&mod_lock);
                return (0);
            }
            cmn_err(CE_NOTE, "bofi modify bus_ops for %s",
                mp->drv_linkinfo);
            save_bus_ops = *bp;
            bp->bus_intr_op = bop->bus_intr_op;
            bp->bus_post_event = bop->bus_post_event;
            bp->bus_map = bop->bus_map;
            bp->bus_dma_map = bop->bus_dma_map;
            bp->bus_dma_allochdl = bop->bus_dma_allochdl;
            bp->bus_dma_freehdl = bop->bus_dma_freehdl;
            bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
            bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
            bp->bus_dma_flush = bop->bus_dma_flush;
            bp->bus_dma_win = bop->bus_dma_win;
            bp->bus_dma_ctl = bop->bus_dma_ctl;
            mutex_exit(&mod_lock);
            return (1);
        }
    } while ((modp = modp->mod_next) != &modules);
    mutex_exit(&mod_lock);
    return (0);
}


int
_init(void)
{
    int e;

    e = ddi_soft_state_init(&statep, sizeof (struct bofi_errent), 1);
    if (e != 0)
        return (e);
    if ((e = mod_install(&modlinkage)) != 0)
        ddi_soft_state_fini(&statep);
    return (e);
}


int
_fini(void)
{
    int e;

    if ((e = mod_remove(&modlinkage)) != 0)
        return (e);
    ddi_soft_state_fini(&statep);
    return (e);
}


int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}


static int
bofi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    char *name;
    char buf[80];
    int i;
    int s, ss;
    int size = NAMESIZE;
    int new_string;
    char *ptr;

    if (cmd != DDI_ATTACH)
        return (DDI_FAILURE);
    /*
     * only one instance - but we clone using the open routine
     */
    if (ddi_get_instance(dip) > 0)
        return (DDI_FAILURE);

    if (!initialized) {
        if ((name = ddi_get_name(dip)) == NULL)
            return (DDI_FAILURE);
        (void) snprintf(buf, sizeof (buf), "%s,ctl", name);
        if (ddi_create_minor_node(dip, buf, S_IFCHR, 0,
            DDI_PSEUDO, NULL) == DDI_FAILURE)
            return (DDI_FAILURE);

        if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_MED,
            &bofi_low_cookie) != DDI_SUCCESS) {
            ddi_remove_minor_node(dip, buf);
            return (DDI_FAILURE); /* fail attach */
        }
        /*
         * get nexus name (from conf file)
         */
        if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
            "bofi-nexus", nexus_name, &size) != DDI_PROP_SUCCESS) {
            ddi_remove_minor_node(dip, buf);
            return (DDI_FAILURE);
        }
        /*
         * get whether to do dma map kmem private checking
         */
        if ((bofi_range_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
            dip, 0, "bofi-range-check", &ptr)) != DDI_PROP_SUCCESS)
            bofi_range_check = 0;
        else if (strcmp(ptr, "panic") == 0)
            bofi_range_check = 2;
        else if (strcmp(ptr, "warn") == 0)
            bofi_range_check = 1;
        else
            bofi_range_check = 0;
        ddi_prop_free(ptr);

        /*
         * get whether to prevent direct access to register
         */
        if ((bofi_ddi_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
            dip, 0, "bofi-ddi-check", &ptr)) != DDI_PROP_SUCCESS)
            bofi_ddi_check = 0;
        else if (strcmp(ptr, "on") == 0)
            bofi_ddi_check = 1;
        else
            bofi_ddi_check = 0;
        ddi_prop_free(ptr);

        /*
         * get whether to do copy on ddi_dma_sync
         */
        if ((bofi_sync_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
            dip, 0, "bofi-sync-check", &ptr)) != DDI_PROP_SUCCESS)
            bofi_sync_check = 0;
        else if (strcmp(ptr, "on") == 0)
            bofi_sync_check = 1;
        else
            bofi_sync_check = 0;
        ddi_prop_free(ptr);

        /*
         * get driver-under-test names (from conf file)
         */
        size = NAMESIZE;
        if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
            "bofi-to-test", driver_list, &size) != DDI_PROP_SUCCESS)
            driver_list[0] = 0;
        /*
         * and convert into a sequence of strings
         */
        driver_list_neg = 1;
        new_string = 1;
        driver_list_size = strlen(driver_list);
        for (i = 0; i < driver_list_size; i++) {
            if (driver_list[i] == ' ') {
                driver_list[i] = '\0';
                new_string = 1;
            } else if (new_string) {
                if (driver_list[i] != '!')
                    driver_list_neg = 0;
                new_string = 0;
            }
        }
        /*
         * initialize mutex, lists
         */
        mutex_init(&clone_tab_mutex, NULL, MUTEX_DRIVER,
            NULL);
        /*
         * fake up iblock cookie - need to protect ourselves
         * against drivers that use hilevel interrupts
         */
        ss = spl8();
        s = spl8();
        splx(ss);
        mutex_init(&bofi_mutex, NULL, MUTEX_SPIN, (void *)(uintptr_t)s);
        mutex_init(&bofi_low_mutex, NULL, MUTEX_DRIVER,
            (void *)bofi_low_cookie);
        shadow_list.next = &shadow_list;
        shadow_list.prev = &shadow_list;
        for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
            hhash_table[i].hnext = &hhash_table[i];
            hhash_table[i].hprev = &hhash_table[i];
            dhash_table[i].dnext = &dhash_table[i];
            dhash_table[i].dprev = &dhash_table[i];
        }
        for (i = 1; i < BOFI_NLINKS; i++)
            bofi_link_array[i].link = &bofi_link_array[i-1];
        bofi_link_freelist = &bofi_link_array[BOFI_NLINKS - 1];
        /*
         * overlay bus_ops structure
         */
        if (modify_bus_ops(nexus_name, &bofi_bus_ops) == 0) {
            ddi_remove_minor_node(dip, buf);
            mutex_destroy(&clone_tab_mutex);
            mutex_destroy(&bofi_mutex);
            mutex_destroy(&bofi_low_mutex);
            return (DDI_FAILURE);
        }
        if (sysevent_evc_bind(FM_ERROR_CHAN, &bofi_error_chan, 0) == 0)
            (void) sysevent_evc_subscribe(bofi_error_chan, "bofi",
                EC_FM, bofi_fm_ereport_callback, NULL, 0);

        /*
         * save dip for getinfo
         */
        our_dip = dip;
        ddi_report_dev(dip);
        initialized = 1;
    }
    return (DDI_SUCCESS);
}


static int
bofi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    char *name;
    char buf[80];

    if (cmd != DDI_DETACH)
        return (DDI_FAILURE);
    if (ddi_get_instance(dip) > 0)
        return (DDI_FAILURE);
    if ((name = ddi_get_name(dip)) == NULL)
        return (DDI_FAILURE);
    (void) snprintf(buf, sizeof (buf), "%s,ctl", name);
    mutex_enter(&bofi_low_mutex);
    mutex_enter(&bofi_mutex);
    /*
     * make sure test bofi is no longer in use
     */
    if (shadow_list.next != &shadow_list || errent_listp != NULL) {
        mutex_exit(&bofi_mutex);
        mutex_exit(&bofi_low_mutex);
        return (DDI_FAILURE);
    }
    mutex_exit(&bofi_mutex);
    mutex_exit(&bofi_low_mutex);

    /*
     * restore bus_ops structure
     */
    if (reset_bus_ops(nexus_name, &save_bus_ops) == 0)
        return (DDI_FAILURE);

    (void) sysevent_evc_unbind(bofi_error_chan);

    mutex_destroy(&clone_tab_mutex);
    mutex_destroy(&bofi_mutex);
    mutex_destroy(&bofi_low_mutex);
    ddi_remove_minor_node(dip, buf);
    our_dip = NULL;
    initialized = 0;
    return (DDI_SUCCESS);
}


/* ARGSUSED */
static int
bofi_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
    dev_t dev = (dev_t)arg;
    int minor = (int)getminor(dev);
    int retval;

    switch (cmd) {
    case DDI_INFO_DEVT2DEVINFO:
        if (minor != 0 || our_dip == NULL) {
            *result = (void *)NULL;
            retval = DDI_FAILURE;
        } else {
            *result = (void *)our_dip;
            retval = DDI_SUCCESS;
        }
        break;
    case DDI_INFO_DEVT2INSTANCE:
        *result = (void *)0;
        retval = DDI_SUCCESS;
        break;
    default:
        retval = DDI_FAILURE;
    }
    return (retval);
}


/* ARGSUSED */
static int
bofi_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
    int minor = (int)getminor(*devp);
    struct bofi_errent *softc;

    /*
     * only allow open on minor=0 - the clone device
     */
    if (minor != 0)
        return (ENXIO);
    /*
     * fail if not attached
     */
    if (!initialized)
        return (ENXIO);
    /*
     * find a free slot and grab it
     */
    mutex_enter(&clone_tab_mutex);
    for (minor = 1; minor < NCLONES; minor++) {
        if (clone_tab[minor] == 0) {
            clone_tab[minor] = 1;
            break;
        }
    }
    mutex_exit(&clone_tab_mutex);
    if (minor == NCLONES)
        return (EAGAIN);
    /*
     * soft state structure for this clone is used to maintain a list
     * of allocated errdefs so they can be freed on close
     */
    if (ddi_soft_state_zalloc(statep, minor) != DDI_SUCCESS) {
        mutex_enter(&clone_tab_mutex);
        clone_tab[minor] = 0;
        mutex_exit(&clone_tab_mutex);
        return (EAGAIN);
    }
    softc = ddi_get_soft_state(statep, minor);
    softc->cnext = softc;
    softc->cprev = softc;

    *devp = makedevice(getmajor(*devp), minor);
    return (0);
}


/* ARGSUSED */
static int
bofi_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
    int minor = (int)getminor(dev);
    struct bofi_errent *softc;
    struct bofi_errent *ep, *next_ep;

    softc = ddi_get_soft_state(statep, minor);
    if (softc == NULL)
        return (ENXIO);
    /*
     * find list of errdefs and free them off
     */
    for (ep = softc->cnext; ep != softc; ) {
        next_ep = ep->cnext;
        (void) bofi_errdef_free(ep);
        ep = next_ep;
    }
    /*
     * free clone tab slot
     */
    mutex_enter(&clone_tab_mutex);
    clone_tab[minor] = 0;
    mutex_exit(&clone_tab_mutex);

    ddi_soft_state_free(statep, minor);
    return (0);
}


/* ARGSUSED */
static int
bofi_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
    struct bofi_errent *softc;
    int minor = (int)getminor(dev);
    struct bofi_errdef errdef;
    struct bofi_errctl errctl;
    struct bofi_errstate errstate;
    void *ed_handle;
    struct bofi_get_handles get_handles;
    struct bofi_get_hdl_info hdl_info;
    struct handle_info *hdlip;
    struct handle_info *hib;

    char *buffer;
    char *bufptr;
    char *endbuf;
    int req_count, count, err;
    char *namep;
    struct bofi_shadow *hp;
    int retval;
    struct bofi_shadow *hhashp;
    int i;

    switch (cmd) {
    case BOFI_ADD_DEF:
        /*
         * add a new error definition
         */
#ifdef _MULTI_DATAMODEL
        switch (ddi_model_convert_from(mode & FMODELS)) {
        case DDI_MODEL_ILP32:
        {
            /*
             * For use when a 32 bit app makes a call into a
             * 64 bit ioctl
             */
            struct bofi_errdef32 errdef_32;

            if (ddi_copyin((void *)arg, &errdef_32,
                sizeof (struct bofi_errdef32), mode)) {
                return (EFAULT);
            }
            errdef.namesize = errdef_32.namesize;
            (void) strncpy(errdef.name, errdef_32.name, NAMESIZE);
            errdef.instance = errdef_32.instance;
            errdef.rnumber = errdef_32.rnumber;
            errdef.offset = errdef_32.offset;
            errdef.len = errdef_32.len;
            errdef.access_type = errdef_32.access_type;
            errdef.access_count = errdef_32.access_count;
            errdef.fail_count = errdef_32.fail_count;
            errdef.acc_chk = errdef_32.acc_chk;
            errdef.optype = errdef_32.optype;
            errdef.operand = errdef_32.operand;
            errdef.log.logsize = errdef_32.log.logsize;
            errdef.log.entries = errdef_32.log.entries;
            errdef.log.flags = errdef_32.log.flags;
            errdef.log.wrapcnt = errdef_32.log.wrapcnt;
            errdef.log.start_time = errdef_32.log.start_time;
            errdef.log.stop_time = errdef_32.log.stop_time;
            errdef.log.logbase =
                (caddr_t)(uintptr_t)errdef_32.log.logbase;
            errdef.errdef_handle = errdef_32.errdef_handle;
            break;
        }
        case DDI_MODEL_NONE:
            if (ddi_copyin((void *)arg, &errdef,
                sizeof (struct bofi_errdef), mode))
                return (EFAULT);
            break;
        }
#else /* ! _MULTI_DATAMODEL */
        if (ddi_copyin((void *)arg, &errdef,
            sizeof (struct bofi_errdef), mode) != 0)
            return (EFAULT);
#endif /* _MULTI_DATAMODEL */
        /*
         * do some validation
         */
        if (errdef.fail_count == 0)
            errdef.optype = 0;
        if (errdef.optype != 0) {
            if (errdef.access_type & BOFI_INTR &&
                errdef.optype != BOFI_DELAY_INTR &&
                errdef.optype != BOFI_LOSE_INTR &&
                errdef.optype != BOFI_EXTRA_INTR)
                return (EINVAL);
            if ((errdef.access_type & (BOFI_DMA_RW|BOFI_PIO_R)) &&
                errdef.optype == BOFI_NO_TRANSFER)
                return (EINVAL);
            if ((errdef.access_type & (BOFI_PIO_RW)) &&
                errdef.optype != BOFI_EQUAL &&
                errdef.optype != BOFI_OR &&
                errdef.optype != BOFI_XOR &&
                errdef.optype != BOFI_AND &&
                errdef.optype != BOFI_NO_TRANSFER)
                return (EINVAL);
        }
        /*
         * find softstate for this clone, so we can tag
         * new errdef on to it
         */
        softc = ddi_get_soft_state(statep, minor);
        if (softc == NULL)
            return (ENXIO);
        /*
         * read in name
         */
        if (errdef.namesize > NAMESIZE)
            return (EINVAL);
        namep = kmem_zalloc(errdef.namesize+1, KM_SLEEP);
        (void) strncpy(namep, errdef.name, errdef.namesize);

        if (bofi_errdef_alloc(&errdef, namep, softc) != DDI_SUCCESS) {
            (void) bofi_errdef_free((struct bofi_errent *)
                (uintptr_t)errdef.errdef_handle);
            kmem_free(namep, errdef.namesize+1);
            return (EINVAL);
        }
        /*
         * copy out errdef again, including filled in errdef_handle
         */
#ifdef _MULTI_DATAMODEL
        switch (ddi_model_convert_from(mode & FMODELS)) {
        case DDI_MODEL_ILP32:
        {
            /*
             * For use when a 32 bit app makes a call into a
             * 64 bit ioctl
             */
            struct bofi_errdef32 errdef_32;

            errdef_32.namesize = errdef.namesize;
            (void) strncpy(errdef_32.name, errdef.name, NAMESIZE);
            errdef_32.instance = errdef.instance;
            errdef_32.rnumber = errdef.rnumber;
            errdef_32.offset = errdef.offset;
            errdef_32.len = errdef.len;
            errdef_32.access_type = errdef.access_type;
            errdef_32.access_count = errdef.access_count;
            errdef_32.fail_count = errdef.fail_count;
            errdef_32.acc_chk = errdef.acc_chk;
            errdef_32.optype = errdef.optype;
            errdef_32.operand = errdef.operand;
            errdef_32.log.logsize = errdef.log.logsize;
            errdef_32.log.entries = errdef.log.entries;
            errdef_32.log.flags = errdef.log.flags;
            errdef_32.log.wrapcnt = errdef.log.wrapcnt;
            errdef_32.log.start_time = errdef.log.start_time;
            errdef_32.log.stop_time = errdef.log.stop_time;
            errdef_32.log.logbase =
                (caddr32_t)(uintptr_t)errdef.log.logbase;
            errdef_32.errdef_handle = errdef.errdef_handle;
            if (ddi_copyout(&errdef_32, (void *)arg,
                sizeof (struct bofi_errdef32), mode) != 0) {
                (void) bofi_errdef_free((struct bofi_errent *)
                    errdef.errdef_handle);
                kmem_free(namep, errdef.namesize+1);
                return (EFAULT);
            }
            break;
        }
        case DDI_MODEL_NONE:
            if (ddi_copyout(&errdef, (void *)arg,
                sizeof (struct bofi_errdef), mode) != 0) {
                (void) bofi_errdef_free((struct bofi_errent *)
                    errdef.errdef_handle);
                kmem_free(namep, errdef.namesize+1);
                return (EFAULT);
            }
            break;
        }
#else /* ! _MULTI_DATAMODEL */
        if (ddi_copyout(&errdef, (void *)arg,
            sizeof (struct bofi_errdef), mode) != 0) {
            (void) bofi_errdef_free((struct bofi_errent *)
                (uintptr_t)errdef.errdef_handle);
            kmem_free(namep, errdef.namesize+1);
            return (EFAULT);
        }
#endif /* _MULTI_DATAMODEL */
        return (0);
    case BOFI_DEL_DEF:
        /*
         * delete existing errdef
         */
        if (ddi_copyin((void *)arg, &ed_handle,
            sizeof (void *), mode) != 0)
            return (EFAULT);
        return (bofi_errdef_free((struct bofi_errent *)ed_handle));
    case BOFI_START:
        /*
         * start all errdefs corresponding to
         * this name and instance
         */
        if (ddi_copyin((void *)arg, &errctl,
            sizeof (struct bofi_errctl), mode) != 0)
            return (EFAULT);
        /*
         * copy in name
         */
        if (errctl.namesize > NAMESIZE)
            return (EINVAL);
        namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
        (void) strncpy(namep, errctl.name, errctl.namesize);
        bofi_start(&errctl, namep);
        kmem_free(namep, errctl.namesize+1);
        return (0);
    case BOFI_STOP:
        /*
         * stop all errdefs corresponding to
         * this name and instance
         */
        if (ddi_copyin((void *)arg, &errctl,
            sizeof (struct bofi_errctl), mode) != 0)
            return (EFAULT);
        /*
         * copy in name
         */
        if (errctl.namesize > NAMESIZE)
            return (EINVAL);
        namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
        (void) strncpy(namep, errctl.name, errctl.namesize);
        bofi_stop(&errctl, namep);
        kmem_free(namep, errctl.namesize+1);
        return (0);
    case BOFI_BROADCAST:
        /*
         * wakeup all errdefs corresponding to
         * this name and instance
         */
        if (ddi_copyin((void *)arg, &errctl,
            sizeof (struct bofi_errctl), mode) != 0)
            return (EFAULT);
        /*
         * copy in name
         */
        if (errctl.namesize > NAMESIZE)
            return (EINVAL);
        namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
        (void) strncpy(namep, errctl.name, errctl.namesize);
        bofi_broadcast(&errctl, namep);
        kmem_free(namep, errctl.namesize+1);
        return (0);
    case BOFI_CLEAR_ACC_CHK:
        /*
         * clear "acc_chk" for all errdefs corresponding to
         * this name and instance
         */
        if (ddi_copyin((void *)arg, &errctl,
            sizeof (struct bofi_errctl), mode) != 0)
            return (EFAULT);
        /*
         * copy in name
         */
        if (errctl.namesize > NAMESIZE)
            return (EINVAL);
        namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
        (void) strncpy(namep, errctl.name, errctl.namesize);
        bofi_clear_acc_chk(&errctl, namep);
        kmem_free(namep, errctl.namesize+1);
        return (0);
    case BOFI_CLEAR_ERRORS:
        /*
         * set "fail_count" to 0 for all errdefs corresponding to
         * this name and instance whose "access_count"
         * has expired.
         */
        if (ddi_copyin((void *)arg, &errctl,
            sizeof (struct bofi_errctl), mode) != 0)
            return (EFAULT);
        /*
         * copy in name
         */
        if (errctl.namesize > NAMESIZE)
            return (EINVAL);
        namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
        (void) strncpy(namep, errctl.name, errctl.namesize);
        bofi_clear_errors(&errctl, namep);
        kmem_free(namep, errctl.namesize+1);
        return (0);
    case BOFI_CLEAR_ERRDEFS:
        /*
         * set "access_count" and "fail_count" to 0 for all errdefs
         * corresponding to this name and instance
         */
        if (ddi_copyin((void *)arg, &errctl,
            sizeof (struct bofi_errctl), mode) != 0)
            return (EFAULT);
        /*
         * copy in name
         */
        if (errctl.namesize > NAMESIZE)
            return (EINVAL);
        namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
        (void) strncpy(namep, errctl.name, errctl.namesize);
        bofi_clear_errdefs(&errctl, namep);
        kmem_free(namep, errctl.namesize+1);
        return (0);
    case BOFI_CHK_STATE:
    {
        struct acc_log_elem *klg;
        size_t uls;
        /*
         * get state for this errdef - read in dummy errstate
         * with just the errdef_handle filled in
         */
#ifdef _MULTI_DATAMODEL
        switch (ddi_model_convert_from(mode & FMODELS)) {
        case DDI_MODEL_ILP32:
        {
            /*
             * For use when a 32 bit app makes a call into a
             * 64 bit ioctl
             */
            struct bofi_errstate32 errstate_32;

            if (ddi_copyin((void *)arg, &errstate_32,
                sizeof (struct bofi_errstate32), mode) != 0) {
                return (EFAULT);
            }
            errstate.fail_time = errstate_32.fail_time;
            errstate.msg_time = errstate_32.msg_time;
            errstate.access_count = errstate_32.access_count;
            errstate.fail_count = errstate_32.fail_count;
            errstate.acc_chk = errstate_32.acc_chk;
            errstate.errmsg_count = errstate_32.errmsg_count;
            (void) strncpy(errstate.buffer, errstate_32.buffer,
                ERRMSGSIZE);
            errstate.severity = errstate_32.severity;
            errstate.log.logsize = errstate_32.log.logsize;
            errstate.log.entries = errstate_32.log.entries;
            errstate.log.flags = errstate_32.log.flags;
            errstate.log.wrapcnt = errstate_32.log.wrapcnt;
            errstate.log.start_time = errstate_32.log.start_time;
            errstate.log.stop_time = errstate_32.log.stop_time;
            errstate.log.logbase =
                (caddr_t)(uintptr_t)errstate_32.log.logbase;
            errstate.errdef_handle = errstate_32.errdef_handle;
            break;
        }
        case DDI_MODEL_NONE:
            if (ddi_copyin((void *)arg, &errstate,
                sizeof (struct bofi_errstate), mode) != 0)
                return (EFAULT);
            break;
        }
#else /* ! _MULTI_DATAMODEL */
        if (ddi_copyin((void *)arg, &errstate,
            sizeof (struct bofi_errstate), mode) != 0)
            return (EFAULT);
#endif /* _MULTI_DATAMODEL */
        if ((retval = bofi_errdef_check(&errstate, &klg)) == EINVAL)
            return (EINVAL);
        /*
         * copy out real errstate structure
         */
        uls = errstate.log.logsize;
        if (errstate.log.entries > uls && uls)
            /* insufficient user memory */
            errstate.log.entries = uls;
        /* always pass back a time */
        if (errstate.log.stop_time == 0ul)
            (void) drv_getparm(TIME, &(errstate.log.stop_time));

#ifdef _MULTI_DATAMODEL
        switch (ddi_model_convert_from(mode & FMODELS)) {
        case DDI_MODEL_ILP32:
        {
            /*
             * For use when a 32 bit app makes a call into a
             * 64 bit ioctl
             */
            struct bofi_errstate32 errstate_32;

            errstate_32.fail_time = errstate.fail_time;
            errstate_32.msg_time = errstate.msg_time;
            errstate_32.access_count = errstate.access_count;
            errstate_32.fail_count = errstate.fail_count;
            errstate_32.acc_chk = errstate.acc_chk;
            errstate_32.errmsg_count = errstate.errmsg_count;
            (void) strncpy(errstate_32.buffer, errstate.buffer,
                ERRMSGSIZE);
            errstate_32.severity = errstate.severity;
            errstate_32.log.logsize = errstate.log.logsize;
            errstate_32.log.entries = errstate.log.entries;
            errstate_32.log.flags = errstate.log.flags;
            errstate_32.log.wrapcnt = errstate.log.wrapcnt;
            errstate_32.log.start_time = errstate.log.start_time;
            errstate_32.log.stop_time = errstate.log.stop_time;
            errstate_32.log.logbase =
                (caddr32_t)(uintptr_t)errstate.log.logbase;
            errstate_32.errdef_handle = errstate.errdef_handle;
            if (ddi_copyout(&errstate_32, (void *)arg,
                sizeof (struct bofi_errstate32), mode) != 0)
                return (EFAULT);
            break;
        }
        case DDI_MODEL_NONE:
            if (ddi_copyout(&errstate, (void *)arg,
                sizeof (struct bofi_errstate), mode) != 0)
                return (EFAULT);
            break;
        }
#else /* ! _MULTI_DATAMODEL */
        if (ddi_copyout(&errstate, (void *)arg,
            sizeof (struct bofi_errstate), mode) != 0)
            return (EFAULT);
#endif /* _MULTI_DATAMODEL */
        if (uls && errstate.log.entries &&
            ddi_copyout(klg, errstate.log.logbase,
            errstate.log.entries * sizeof (struct acc_log_elem),
            mode) != 0) {
            return (EFAULT);
        }
        return (retval);
    }
    case BOFI_CHK_STATE_W:
    {
        struct acc_log_elem *klg;
        size_t uls;
        /*
         * get state for this errdef - read in dummy errstate
         * with just the errdef_handle filled in. Then wait for
         * a ddi_report_fault message to come back
         */
#ifdef _MULTI_DATAMODEL
        switch (ddi_model_convert_from(mode & FMODELS)) {
        case DDI_MODEL_ILP32:
        {
            /*
             * For use when a 32 bit app makes a call into a
             * 64 bit ioctl
             */
            struct bofi_errstate32 errstate_32;

            if (ddi_copyin((void *)arg, &errstate_32,
                sizeof (struct bofi_errstate32), mode) != 0) {
                return (EFAULT);
            }
            errstate.fail_time = errstate_32.fail_time;
            errstate.msg_time = errstate_32.msg_time;
            errstate.access_count = errstate_32.access_count;
            errstate.fail_count = errstate_32.fail_count;
            errstate.acc_chk = errstate_32.acc_chk;
            errstate.errmsg_count = errstate_32.errmsg_count;
            (void) strncpy(errstate.buffer, errstate_32.buffer,
                ERRMSGSIZE);
            errstate.severity = errstate_32.severity;
            errstate.log.logsize = errstate_32.log.logsize;
            errstate.log.entries = errstate_32.log.entries;
            errstate.log.flags = errstate_32.log.flags;
            errstate.log.wrapcnt = errstate_32.log.wrapcnt;
            errstate.log.start_time = errstate_32.log.start_time;
            errstate.log.stop_time = errstate_32.log.stop_time;
            errstate.log.logbase =
                (caddr_t)(uintptr_t)errstate_32.log.logbase;
            errstate.errdef_handle = errstate_32.errdef_handle;
            break;
        }
        case DDI_MODEL_NONE:
            if (ddi_copyin((void *)arg, &errstate,
                sizeof (struct bofi_errstate), mode) != 0)
                return (EFAULT);
            break;
        }
#else /* ! _MULTI_DATAMODEL */
        if (ddi_copyin((void *)arg, &errstate,
            sizeof (struct bofi_errstate), mode) != 0)
            return (EFAULT);
#endif /* _MULTI_DATAMODEL */
        if ((retval = bofi_errdef_check_w(&errstate, &klg)) == EINVAL)
            return (EINVAL);
        /*
         * copy out real errstate structure
         */
        uls = errstate.log.logsize;
        if (errstate.log.entries > uls && uls)
            /* insufficient user memory */
            errstate.log.entries = uls;
        /* always pass back a time */
        if (errstate.log.stop_time == 0ul)
            (void) drv_getparm(TIME, &(errstate.log.stop_time));

#ifdef _MULTI_DATAMODEL
        switch (ddi_model_convert_from(mode & FMODELS)) {
        case DDI_MODEL_ILP32:
        {
            /*
             * For use when a 32 bit app makes a call into a
             * 64 bit ioctl
             */
            struct bofi_errstate32 errstate_32;

            errstate_32.fail_time = errstate.fail_time;
            errstate_32.msg_time = errstate.msg_time;
            errstate_32.access_count = errstate.access_count;
            errstate_32.fail_count = errstate.fail_count;
            errstate_32.acc_chk = errstate.acc_chk;
            errstate_32.errmsg_count = errstate.errmsg_count;
            (void) strncpy(errstate_32.buffer, errstate.buffer,
                ERRMSGSIZE);
            errstate_32.severity = errstate.severity;
            errstate_32.log.logsize = errstate.log.logsize;
            errstate_32.log.entries = errstate.log.entries;
            errstate_32.log.flags = errstate.log.flags;
            errstate_32.log.wrapcnt = errstate.log.wrapcnt;
            errstate_32.log.start_time = errstate.log.start_time;
            errstate_32.log.stop_time = errstate.log.stop_time;
            errstate_32.log.logbase =
                (caddr32_t)(uintptr_t)errstate.log.logbase;
            errstate_32.errdef_handle = errstate.errdef_handle;
            if (ddi_copyout(&errstate_32, (void *)arg,
                sizeof (struct bofi_errstate32), mode) != 0)
                return (EFAULT);
            break;
        }
        case DDI_MODEL_NONE:
            if (ddi_copyout(&errstate, (void *)arg,
                sizeof (struct bofi_errstate), mode) != 0)
                return (EFAULT);
            break;
        }
#else /* ! _MULTI_DATAMODEL */
        if (ddi_copyout(&errstate, (void *)arg,
            sizeof (struct bofi_errstate), mode) != 0)
            return (EFAULT);
#endif /* _MULTI_DATAMODEL */

        if (uls && errstate.log.entries &&
            ddi_copyout(klg, errstate.log.logbase,
            errstate.log.entries * sizeof (struct acc_log_elem),
            mode) != 0) {
            return (EFAULT);
        }
        return (retval);
    }
    case BOFI_GET_HANDLES:
        /*
         * display existing handles
         */
#ifdef _MULTI_DATAMODEL
        switch (ddi_model_convert_from(mode & FMODELS)) {
        case DDI_MODEL_ILP32:
        {
            /*
             * For use when a 32 bit app makes a call into a
             * 64 bit ioctl
             */
            struct bofi_get_handles32 get_handles_32;

            if (ddi_copyin((void *)arg, &get_handles_32,
                sizeof (get_handles_32), mode) != 0) {
                return (EFAULT);
            }
            get_handles.namesize = get_handles_32.namesize;
            (void) strncpy(get_handles.name, get_handles_32.name,
                NAMESIZE);
            get_handles.instance = get_handles_32.instance;
            get_handles.count = get_handles_32.count;
            get_handles.buffer =
                (caddr_t)(uintptr_t)get_handles_32.buffer;
            break;
        }
        case DDI_MODEL_NONE:
            if (ddi_copyin((void *)arg, &get_handles,
                sizeof (get_handles), mode) != 0)
                return (EFAULT);
            break;
        }
#else /* ! _MULTI_DATAMODEL */
        if (ddi_copyin((void *)arg, &get_handles,
            sizeof (get_handles), mode) != 0)
            return (EFAULT);
#endif /* _MULTI_DATAMODEL */
        /*
         * read in name
         */
        if (get_handles.namesize > NAMESIZE)
            return (EINVAL);
        namep = kmem_zalloc(get_handles.namesize+1, KM_SLEEP);
        (void) strncpy(namep, get_handles.name, get_handles.namesize);
        req_count = get_handles.count;
        bufptr = buffer = kmem_zalloc(req_count, KM_SLEEP);
        endbuf = bufptr + req_count;
        /*
         * display existing handles
         */
        mutex_enter(&bofi_low_mutex);
        mutex_enter(&bofi_mutex);
        for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
            hhashp = &hhash_table[i];
            for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
                if (!driver_under_test(hp->dip))
                    continue;
                if (ddi_name_to_major(ddi_get_name(hp->dip)) !=
                    ddi_name_to_major(namep))
                    continue;
                if (hp->instance != get_handles.instance)
                    continue;
                /*
                 * print information per handle - note that
                 * DMA* means an unbound DMA handle
                 */
                (void) snprintf(bufptr, (size_t)(endbuf-bufptr),
                    " %s %d %s ", hp->name, hp->instance,
                    (hp->type == BOFI_INT_HDL) ? "INTR" :
                    (hp->type == BOFI_ACC_HDL) ? "PIO" :
                    (hp->type == BOFI_DMA_HDL) ? "DMA" :
                    (hp->hparrayp != NULL) ? "DVMA" : "DMA*");
                bufptr += strlen(bufptr);
                if (hp->type == BOFI_ACC_HDL) {
                    if (hp->len == INT_MAX - hp->offset)
                        (void) snprintf(bufptr,
                            (size_t)(endbuf-bufptr),
                            "reg set %d off 0x%llx\n",
                            hp->rnumber, hp->offset);
                    else
                        (void) snprintf(bufptr,
                            (size_t)(endbuf-bufptr),
                            "reg set %d off 0x%llx"
                            " len 0x%llx\n",
                            hp->rnumber, hp->offset,
                            hp->len);
                } else if (hp->type == BOFI_DMA_HDL)
                    (void) snprintf(bufptr,
                        (size_t)(endbuf-bufptr),
                        "handle no %d len 0x%llx"
                        " addr 0x%p\n", hp->rnumber,
                        hp->len, (void *)hp->addr);
                else if (hp->type == BOFI_NULL &&
                    hp->hparrayp == NULL)
                    (void) snprintf(bufptr,
                        (size_t)(endbuf-bufptr),
                        "handle no %d\n", hp->rnumber);
                else
                    (void) snprintf(bufptr,
                        (size_t)(endbuf-bufptr), "\n");
                bufptr += strlen(bufptr);
            }
        }
        mutex_exit(&bofi_mutex);
        mutex_exit(&bofi_low_mutex);
        err = ddi_copyout(buffer, get_handles.buffer, req_count, mode);
        kmem_free(namep, get_handles.namesize+1);
        kmem_free(buffer, req_count);
        if (err != 0)
            return (EFAULT);
        else
            return (0);
    case BOFI_GET_HANDLE_INFO:
        /*
         * display existing handles
         */
#ifdef _MULTI_DATAMODEL
        switch (ddi_model_convert_from(mode & FMODELS)) {
        case DDI_MODEL_ILP32:
        {
            /*
             * For use when a 32 bit app makes a call into a
             * 64 bit ioctl
             */
            struct bofi_get_hdl_info32 hdl_info_32;

            if (ddi_copyin((void *)arg, &hdl_info_32,
                sizeof (hdl_info_32), mode)) {
                return (EFAULT);
            }
            hdl_info.namesize = hdl_info_32.namesize;
            (void) strncpy(hdl_info.name, hdl_info_32.name,
                NAMESIZE);
            hdl_info.count = hdl_info_32.count;
            hdl_info.hdli = (caddr_t)(uintptr_t)hdl_info_32.hdli;
            break;
        }
        case DDI_MODEL_NONE:
            if (ddi_copyin((void *)arg, &hdl_info,
                sizeof (hdl_info), mode))
                return (EFAULT);
            break;
        }
#else /* ! _MULTI_DATAMODEL */
        if (ddi_copyin((void *)arg, &hdl_info,
            sizeof (hdl_info), mode))
            return (EFAULT);
#endif /* _MULTI_DATAMODEL */
        if (hdl_info.namesize > NAMESIZE)
            return (EINVAL);
        namep = kmem_zalloc(hdl_info.namesize + 1, KM_SLEEP);
        (void) strncpy(namep, hdl_info.name, hdl_info.namesize);
        req_count = hdl_info.count;
        count = hdl_info.count = 0; /* the actual no of handles */
        if (req_count > 0) {
            hib = hdlip =
                kmem_zalloc(req_count * sizeof (struct handle_info),
                KM_SLEEP);
        } else {
            hib = hdlip = 0;
            req_count = hdl_info.count = 0;
        }

        /*
         * display existing handles
         */
        mutex_enter(&bofi_low_mutex);
        mutex_enter(&bofi_mutex);
        for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
            hhashp = &hhash_table[i];
            for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
                if (!driver_under_test(hp->dip) ||
                    ddi_name_to_major(ddi_get_name(hp->dip)) !=
                    ddi_name_to_major(namep) ||
                    ++(hdl_info.count) > req_count ||
                    count == req_count)
                    continue;

                hdlip->instance = hp->instance;
                hdlip->rnumber = hp->rnumber;
                switch (hp->type) {
                case BOFI_ACC_HDL:
                    hdlip->access_type = BOFI_PIO_RW;
                    hdlip->offset = hp->offset;
                    hdlip->len = hp->len;
                    break;
                case BOFI_DMA_HDL:
                    hdlip->access_type = 0;
                    if (hp->flags & DDI_DMA_WRITE)
                        hdlip->access_type |=
                            BOFI_DMA_W;
                    if (hp->flags & DDI_DMA_READ)
                        hdlip->access_type |=
                            BOFI_DMA_R;
                    hdlip->len = hp->len;
                    hdlip->addr_cookie =
                        (uint64_t)(uintptr_t)hp->addr;
                    break;
                case BOFI_INT_HDL:
                    hdlip->access_type = BOFI_INTR;
                    break;
                default:
                    hdlip->access_type = 0;
                    break;
                }
                hdlip++;
                count++;
            }
        }
        mutex_exit(&bofi_mutex);
        mutex_exit(&bofi_low_mutex);
        err = 0;
#ifdef _MULTI_DATAMODEL
        switch (ddi_model_convert_from(mode & FMODELS)) {
        case DDI_MODEL_ILP32:
        {
            /*
             * For use when a 32 bit app makes a call into a
             * 64 bit ioctl
             */
            struct bofi_get_hdl_info32 hdl_info_32;

            hdl_info_32.namesize = hdl_info.namesize;
            (void) strncpy(hdl_info_32.name, hdl_info.name,
                NAMESIZE);
            hdl_info_32.count = hdl_info.count;
            hdl_info_32.hdli = (caddr32_t)(uintptr_t)hdl_info.hdli;
            if (ddi_copyout(&hdl_info_32, (void *)arg,
                sizeof (hdl_info_32), mode) != 0) {
                kmem_free(namep, hdl_info.namesize+1);
                if (req_count > 0)
                    kmem_free(hib,
                        req_count * sizeof (*hib));
                return (EFAULT);
            }
            break;
        }
        case DDI_MODEL_NONE:
            if (ddi_copyout(&hdl_info, (void *)arg,
                sizeof (hdl_info), mode) != 0) {
                kmem_free(namep, hdl_info.namesize+1);
                if (req_count > 0)
                    kmem_free(hib,
                        req_count * sizeof (*hib));
                return (EFAULT);
            }
            break;
        }
#else /* ! _MULTI_DATAMODEL */
        if (ddi_copyout(&hdl_info, (void *)arg,
            sizeof (hdl_info), mode) != 0) {
            kmem_free(namep, hdl_info.namesize+1);
            if (req_count > 0)
                kmem_free(hib, req_count * sizeof (*hib));
            return (EFAULT);
        }
#endif /* ! _MULTI_DATAMODEL */
        if (count > 0) {
            if (ddi_copyout(hib, hdl_info.hdli,
                count * sizeof (*hib), mode) != 0) {
                kmem_free(namep, hdl_info.namesize+1);
                if (req_count > 0)
                    kmem_free(hib,
                        req_count * sizeof (*hib));
                return (EFAULT);
            }
        }
        kmem_free(namep, hdl_info.namesize+1);
        if (req_count > 0)
            kmem_free(hib, req_count * sizeof (*hib));
        return (err);
    default:
        return (ENOTTY);
    }
}
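
/*
 * For illustration, a user-level harness might drive the ioctls above
 * roughly as follows - this is only a sketch: the device path, driver
 * name and errdef values are hypothetical, and in practice th_define(1M)
 * constructs and manages errdefs:
 *
 *	struct bofi_errdef ed;
 *	int fd;
 *
 *	fd = open("/devices/pseudo/bofi@0:bofi,ctl", O_RDWR);
 *	(void) memset(&ed, 0, sizeof (ed));
 *	(void) strcpy(ed.name, "xyzdrv");	(driver under test)
 *	ed.namesize = strlen(ed.name);
 *	ed.instance = 0;
 *	ed.rnumber = -1;			(match all register sets)
 *	ed.len = 0;				(match whole handle range)
 *	ed.access_type = BOFI_PIO_R;
 *	ed.optype = BOFI_XOR;			(corrupt reads by XOR ...)
 *	ed.operand = 0xff;			(... with this operand)
 *	ed.access_count = 1;
 *	ed.fail_count = 1;
 *	if (ioctl(fd, BOFI_ADD_DEF, &ed) == 0) {
 *		BOFI_ADD_DEF fills in ed.errdef_handle; a struct
 *		bofi_errctl naming "xyzdrv" can then be passed to
 *		BOFI_START to arm the errdef, and BOFI_CHK_STATE
 *		polls its progress.
 *	}
 */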


/*
 * add a new error definition
 */
static int
bofi_errdef_alloc(struct bofi_errdef *errdefp, char *namep,
    struct bofi_errent *softc)
{
    struct bofi_errent *ep;
    struct bofi_shadow *hp;
    struct bofi_link *lp;

    /*
     * allocate errdef structure and put on in-use list
     */
    ep = kmem_zalloc(sizeof (struct bofi_errent), KM_SLEEP);
    ep->errdef = *errdefp;
    ep->name = namep;
    ep->errdef.errdef_handle = (uint64_t)(uintptr_t)ep;
    ep->errstate.severity = DDI_SERVICE_RESTORED;
    ep->errstate.errdef_handle = (uint64_t)(uintptr_t)ep;
    cv_init(&ep->cv, NULL, CV_DRIVER, NULL);
    /*
     * allocate space for logging
     */
    ep->errdef.log.entries = 0;
    ep->errdef.log.wrapcnt = 0;
    if (ep->errdef.access_type & BOFI_LOG)
        ep->logbase = kmem_alloc(sizeof (struct acc_log_elem) *
            ep->errdef.log.logsize, KM_SLEEP);
    else
        ep->logbase = NULL;
    /*
     * put on in-use list
     */
    mutex_enter(&bofi_low_mutex);
    mutex_enter(&bofi_mutex);
    ep->next = errent_listp;
    errent_listp = ep;
    /*
     * and add it to the per-clone list
     */
    ep->cnext = softc->cnext;
    softc->cnext->cprev = ep;
    ep->cprev = softc;
    softc->cnext = ep;

    /*
     * look for corresponding shadow handle structures and if we find any
     * tag this errdef structure on to their link lists.
     */
    for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
        if (ddi_name_to_major(hp->name) == ddi_name_to_major(namep) &&
            hp->instance == errdefp->instance &&
            (((errdefp->access_type & BOFI_DMA_RW) &&
            (ep->errdef.rnumber == -1 ||
            hp->rnumber == ep->errdef.rnumber) &&
            hp->type == BOFI_DMA_HDL &&
            (((uintptr_t)(hp->addr + ep->errdef.offset +
            ep->errdef.len) & ~LLSZMASK) >
            ((uintptr_t)((hp->addr + ep->errdef.offset) +
            LLSZMASK) & ~LLSZMASK))) ||
            ((errdefp->access_type & BOFI_INTR) &&
            hp->type == BOFI_INT_HDL) ||
            ((errdefp->access_type & BOFI_PIO_RW) &&
            hp->type == BOFI_ACC_HDL &&
            (errdefp->rnumber == -1 ||
            hp->rnumber == errdefp->rnumber) &&
            (errdefp->len == 0 ||
            hp->offset < errdefp->offset + errdefp->len) &&
            hp->offset + hp->len > errdefp->offset))) {
            lp = bofi_link_freelist;
            if (lp != NULL) {
                bofi_link_freelist = lp->link;
                lp->errentp = ep;
                lp->link = hp->link;
                hp->link = lp;
            }
        }
    }
    errdefp->errdef_handle = (uint64_t)(uintptr_t)ep;
    mutex_exit(&bofi_mutex);
    mutex_exit(&bofi_low_mutex);
    ep->softintr_id = NULL;
    return (ddi_add_softintr(our_dip, DDI_SOFTINT_MED, &ep->softintr_id,
        NULL, NULL, bofi_signal, (caddr_t)&ep->errdef));
}


/*
 * delete existing errdef
 */
static int
bofi_errdef_free(struct bofi_errent *ep)
{
    struct bofi_errent *hep, *prev_hep;
    struct bofi_link *lp, *prev_lp, *next_lp;
    struct bofi_shadow *hp;

    mutex_enter(&bofi_low_mutex);
    mutex_enter(&bofi_mutex);
    /*
     * don't just assume it's a valid ep - check that it's on the
     * in-use list
     */
    prev_hep = NULL;
    for (hep = errent_listp; hep != NULL; ) {
        if (hep == ep)
            break;
        prev_hep = hep;
        hep = hep->next;
    }
    if (hep == NULL) {
        mutex_exit(&bofi_mutex);
        mutex_exit(&bofi_low_mutex);
        return (EINVAL);
    }
    /*
     * found it - delete from in-use list
     */

    if (prev_hep)
        prev_hep->next = hep->next;
    else
        errent_listp = hep->next;
    /*
     * and take it off the per-clone list
     */
    hep->cnext->cprev = hep->cprev;
    hep->cprev->cnext = hep->cnext;
    /*
     * see if we are on any shadow handle link lists - and if we
     * are then take us off
     */
    for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
        prev_lp = NULL;
        for (lp = hp->link; lp != NULL; ) {
            if (lp->errentp == ep) {
                if (prev_lp)
                    prev_lp->link = lp->link;
                else
                    hp->link = lp->link;
                next_lp = lp->link;
                lp->link = bofi_link_freelist;
                bofi_link_freelist = lp;
                lp = next_lp;
            } else {
                prev_lp = lp;
                lp = lp->link;
            }
        }
    }
    mutex_exit(&bofi_mutex);
    mutex_exit(&bofi_low_mutex);

    cv_destroy(&ep->cv);
    kmem_free(ep->name, ep->errdef.namesize+1);
    if ((ep->errdef.access_type & BOFI_LOG) &&
        ep->errdef.log.logsize && ep->logbase) /* double check */
        kmem_free(ep->logbase,
            sizeof (struct acc_log_elem) * ep->errdef.log.logsize);

    if (ep->softintr_id)
        ddi_remove_softintr(ep->softintr_id);
    kmem_free(ep, sizeof (struct bofi_errent));
    return (0);
}
1894
1895
1896 /*
1897 * start all errdefs corresponding to this name and instance
1898 */
1899 static void
1900 bofi_start(struct bofi_errctl *errctlp, char *namep)
1901 {
1902 struct bofi_errent *ep;
1903
1904 /*
1905 * look for any errdefs with matching name and instance
1906 */
1907 mutex_enter(&bofi_low_mutex);
1908 for (ep = errent_listp; ep != NULL; ep = ep->next)
1909 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1910 errctlp->instance == ep->errdef.instance) {
1911 ep->state |= BOFI_DEV_ACTIVE;
1912 (void) drv_getparm(TIME, &(ep->errdef.log.start_time));
1913 ep->errdef.log.stop_time = 0ul;
1914 }
1915 mutex_exit(&bofi_low_mutex);
1916 }
1917
1918
1919 /*
1920 * stop all errdefs corresponding to this name and instance
1921 */
1922 static void
1923 bofi_stop(struct bofi_errctl *errctlp, char *namep)
1924 {
1925 struct bofi_errent *ep;
1926
1927 /*
1928 * look for any errdefs with matching name and instance
1929 */
1930 mutex_enter(&bofi_low_mutex);
1931 for (ep = errent_listp; ep != NULL; ep = ep->next)
1932 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1933 errctlp->instance == ep->errdef.instance) {
1934 ep->state &= ~BOFI_DEV_ACTIVE;
1935 if (ep->errdef.log.stop_time == 0ul)
1936 (void) drv_getparm(TIME,
1937 &(ep->errdef.log.stop_time));
1938 }
1939 mutex_exit(&bofi_low_mutex);
1940 }
1941
1942
1943 /*
1944  * wake up any thread waiting on this errdef
1945 */
1946 static uint_t
1947 bofi_signal(caddr_t arg)
1948 {
1949 struct bofi_errdef *edp = (struct bofi_errdef *)arg;
1950 struct bofi_errent *hep;
1951 struct bofi_errent *ep =
1952 (struct bofi_errent *)(uintptr_t)edp->errdef_handle;
1953
1954 mutex_enter(&bofi_low_mutex);
1955 for (hep = errent_listp; hep != NULL; ) {
1956 if (hep == ep)
1957 break;
1958 hep = hep->next;
1959 }
1960 if (hep == NULL) {
1961 mutex_exit(&bofi_low_mutex);
1962 return (DDI_INTR_UNCLAIMED);
1963 }
1964 if ((ep->errdef.access_type & BOFI_LOG) &&
1965 (edp->log.flags & BOFI_LOG_FULL)) {
1966 edp->log.stop_time = bofi_gettime();
1967 ep->state |= BOFI_NEW_MESSAGE;
1968 if (ep->state & BOFI_MESSAGE_WAIT)
1969 cv_broadcast(&ep->cv);
1970 ep->state &= ~BOFI_MESSAGE_WAIT;
1971 }
1972 if (ep->errstate.msg_time != 0) {
1973 ep->state |= BOFI_NEW_MESSAGE;
1974 if (ep->state & BOFI_MESSAGE_WAIT)
1975 cv_broadcast(&ep->cv);
1976 ep->state &= ~BOFI_MESSAGE_WAIT;
1977 }
1978 mutex_exit(&bofi_low_mutex);
1979 return (DDI_INTR_CLAIMED);
1980 }
1981
1982
1983 /*
1984 * wake up all errdefs corresponding to this name and instance
1985 */
1986 static void
1987 bofi_broadcast(struct bofi_errctl *errctlp, char *namep)
1988 {
1989 struct bofi_errent *ep;
1990
1991 /*
1992 * look for any errdefs with matching name and instance
1993 */
1994 mutex_enter(&bofi_low_mutex);
1995 for (ep = errent_listp; ep != NULL; ep = ep->next)
1996 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1997 errctlp->instance == ep->errdef.instance) {
1998 /*
1999 * wake up sleepers
2000 */
2001 ep->state |= BOFI_NEW_MESSAGE;
2002 if (ep->state & BOFI_MESSAGE_WAIT)
2003 cv_broadcast(&ep->cv);
2004 ep->state &= ~BOFI_MESSAGE_WAIT;
2005 }
2006 mutex_exit(&bofi_low_mutex);
2007 }
2008
2009
2010 /*
2011 * clear "acc_chk" for all errdefs corresponding to this name and instance
2012 * and wake them up.
2013 */
2014 static void
2015 bofi_clear_acc_chk(struct bofi_errctl *errctlp, char *namep)
2016 {
2017 struct bofi_errent *ep;
2018
2019 /*
2020 * look for any errdefs with matching name and instance
2021 */
2022 mutex_enter(&bofi_low_mutex);
2023 for (ep = errent_listp; ep != NULL; ep = ep->next)
2024 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2025 errctlp->instance == ep->errdef.instance) {
2026 mutex_enter(&bofi_mutex);
2027 if (ep->errdef.access_count == 0 &&
2028 ep->errdef.fail_count == 0)
2029 ep->errdef.acc_chk = 0;
2030 mutex_exit(&bofi_mutex);
2031 /*
2032 * wake up sleepers
2033 */
2034 ep->state |= BOFI_NEW_MESSAGE;
2035 if (ep->state & BOFI_MESSAGE_WAIT)
2036 cv_broadcast(&ep->cv);
2037 ep->state &= ~BOFI_MESSAGE_WAIT;
2038 }
2039 mutex_exit(&bofi_low_mutex);
2040 }
2041
2042
2043 /*
2044 * set "fail_count" to 0 for all errdefs corresponding to this name and instance
2045 * whose "access_count" has expired, set "acc_chk" to 0 and wake them up.
2046 */
2047 static void
2048 bofi_clear_errors(struct bofi_errctl *errctlp, char *namep)
2049 {
2050 struct bofi_errent *ep;
2051
2052 /*
2053 * look for any errdefs with matching name and instance
2054 */
2055 mutex_enter(&bofi_low_mutex);
2056 for (ep = errent_listp; ep != NULL; ep = ep->next)
2057 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2058 errctlp->instance == ep->errdef.instance) {
2059 mutex_enter(&bofi_mutex);
2060 if (ep->errdef.access_count == 0) {
2061 ep->errdef.acc_chk = 0;
2062 ep->errdef.fail_count = 0;
2063 mutex_exit(&bofi_mutex);
2064 if (ep->errdef.log.stop_time == 0ul)
2065 (void) drv_getparm(TIME,
2066 &(ep->errdef.log.stop_time));
2067 } else
2068 mutex_exit(&bofi_mutex);
2069 /*
2070 * wake up sleepers
2071 */
2072 ep->state |= BOFI_NEW_MESSAGE;
2073 if (ep->state & BOFI_MESSAGE_WAIT)
2074 cv_broadcast(&ep->cv);
2075 ep->state &= ~BOFI_MESSAGE_WAIT;
2076 }
2077 mutex_exit(&bofi_low_mutex);
2078 }
2079
2080
2081 /*
2082 * set "access_count" and "fail_count" to 0 for all errdefs corresponding to
2083 * this name and instance, set "acc_chk" to 0, and wake them up.
2084 */
2085 static void
2086 bofi_clear_errdefs(struct bofi_errctl *errctlp, char *namep)
2087 {
2088 struct bofi_errent *ep;
2089
2090 /*
2091 * look for any errdefs with matching name and instance
2092 */
2093 mutex_enter(&bofi_low_mutex);
2094 for (ep = errent_listp; ep != NULL; ep = ep->next)
2095 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2096 errctlp->instance == ep->errdef.instance) {
2097 mutex_enter(&bofi_mutex);
2098 ep->errdef.acc_chk = 0;
2099 ep->errdef.access_count = 0;
2100 ep->errdef.fail_count = 0;
2101 mutex_exit(&bofi_mutex);
2102 if (ep->errdef.log.stop_time == 0ul)
2103 (void) drv_getparm(TIME,
2104 &(ep->errdef.log.stop_time));
2105 /*
2106 * wake up sleepers
2107 */
2108 ep->state |= BOFI_NEW_MESSAGE;
2109 if (ep->state & BOFI_MESSAGE_WAIT)
2110 cv_broadcast(&ep->cv);
2111 ep->state &= ~BOFI_MESSAGE_WAIT;
2112 }
2113 mutex_exit(&bofi_low_mutex);
2114 }
2115
2116
2117 /*
2118 * get state for this errdef
2119 */
2120 static int
2121 bofi_errdef_check(struct bofi_errstate *errstatep, struct acc_log_elem **logpp)
2122 {
2123 struct bofi_errent *hep;
2124 struct bofi_errent *ep;
2125
2126 ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2127 mutex_enter(&bofi_low_mutex);
2128 /*
2129  * don't just assume it's a valid ep - check that it's on the
2130 * in-use list
2131 */
2132 for (hep = errent_listp; hep != NULL; hep = hep->next)
2133 if (hep == ep)
2134 break;
2135 if (hep == NULL) {
2136 mutex_exit(&bofi_low_mutex);
2137 return (EINVAL);
2138 }
2139 mutex_enter(&bofi_mutex);
2140 ep->errstate.access_count = ep->errdef.access_count;
2141 ep->errstate.fail_count = ep->errdef.fail_count;
2142 ep->errstate.acc_chk = ep->errdef.acc_chk;
2143 ep->errstate.log = ep->errdef.log;
2144 *logpp = ep->logbase;
2145 *errstatep = ep->errstate;
2146 mutex_exit(&bofi_mutex);
2147 mutex_exit(&bofi_low_mutex);
2148 return (0);
2149 }
2150
2151
2152 /*
2153  * Wait for a ddi_report_fault message to come back for this errdef,
2154  * then return the state for this errdef.
2155  * The fault report is intercepted by bofi_post_event, which triggers
2156  * bofi_signal via a softint, which will wake up this routine if
2157  * we are waiting.
2158 */
2159 static int
2160 bofi_errdef_check_w(struct bofi_errstate *errstatep,
2161 struct acc_log_elem **logpp)
2162 {
2163 struct bofi_errent *hep;
2164 struct bofi_errent *ep;
2165 int rval = 0;
2166
2167 ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2168 mutex_enter(&bofi_low_mutex);
2169 retry:
2170 /*
2171  * don't just assume it's a valid ep - check that it's on the
2172 * in-use list
2173 */
2174 for (hep = errent_listp; hep != NULL; hep = hep->next)
2175 if (hep == ep)
2176 break;
2177 if (hep == NULL) {
2178 mutex_exit(&bofi_low_mutex);
2179 return (EINVAL);
2180 }
2181 /*
2182 * wait for ddi_report_fault for the devinfo corresponding
2183 * to this errdef
2184 */
2185 if (rval == 0 && !(ep->state & BOFI_NEW_MESSAGE)) {
2186 ep->state |= BOFI_MESSAGE_WAIT;
2187 if (cv_wait_sig(&ep->cv, &bofi_low_mutex) == 0) {
2188 if (!(ep->state & BOFI_NEW_MESSAGE))
2189 rval = EINTR;
2190 }
2191 goto retry;
2192 }
2193 ep->state &= ~BOFI_NEW_MESSAGE;
2194 /*
2195  * we either didn't need to sleep, we've been woken up, or we've been
2196  * signaled - either way return the state now
2197 */
2198 mutex_enter(&bofi_mutex);
2199 ep->errstate.access_count = ep->errdef.access_count;
2200 ep->errstate.fail_count = ep->errdef.fail_count;
2201 ep->errstate.acc_chk = ep->errdef.acc_chk;
2202 ep->errstate.log = ep->errdef.log;
2203 *logpp = ep->logbase;
2204 *errstatep = ep->errstate;
2205 mutex_exit(&bofi_mutex);
2206 mutex_exit(&bofi_low_mutex);
2207 return (rval);
2208 }
2209
2210
2211 /*
2212 * support routine - check if requested driver is defined as under test in the
2213 * conf file.
2214 */
2215 static int
2216 driver_under_test(dev_info_t *rdip)
2217 {
2218 int i;
2219 char *rname;
2220 major_t rmaj;
2221
2222 rname = ddi_get_name(rdip);
2223 rmaj = ddi_name_to_major(rname);
2224
2225 /*
2226  * Require the user to specifically request the drivers to be tested.
2227 */
2228 for (i = 0; i < driver_list_size; i += (1 + strlen(&driver_list[i]))) {
2229 if (driver_list_neg == 0) {
2230 if (rmaj == ddi_name_to_major(&driver_list[i]))
2231 return (1);
2232 } else {
2233 if (rmaj == ddi_name_to_major(&driver_list[i+1]))
2234 return (0);
2235 }
2236 }
2237 if (driver_list_neg == 0)
2238 return (0);
2239 else
2240 return (1);
2241
2242 }
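/*
 * Illustrative sketch (not driver code): driver_list is a packed
 * buffer of NUL-separated driver names, which is why the loop above
 * can step from one name to the next with
 * i += 1 + strlen(&driver_list[i]). With "bofi-to-test" set to
 * "foo bar" the buffer would look like:
 *
 *	'f' 'o' 'o' '\0' 'b' 'a' 'r' '\0'
 *	 ^ i == 0         ^ i == 4
 *
 * In the negated case (driver_list_neg != 0) the lookup skips one
 * leading character per name with &driver_list[i+1] - presumably a
 * marker byte left by the attach-time parsing - and the sense of the
 * return values is inverted: listed drivers are excluded and
 * everything else is tested.
 */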
2243
2244
2245 static void
2246 log_acc_event(struct bofi_errent *ep, uint_t at, offset_t offset, off_t len,
2247 size_t repcount, uint64_t *valuep)
2248 {
2249 struct bofi_errdef *edp = &(ep->errdef);
2250 struct acc_log *log = &edp->log;
2251
2252 ASSERT(log != NULL);
2253 ASSERT(MUTEX_HELD(&bofi_mutex));
2254
2255 if (log->flags & BOFI_LOG_REPIO)
2256 repcount = 1;
2257 else if (repcount == 0 && edp->access_count > 0 &&
2258 (log->flags & BOFI_LOG_FULL) == 0)
2259 edp->access_count += 1;
2260
2261 if (repcount && log->entries < log->logsize) {
2262 struct acc_log_elem *elem = ep->logbase + log->entries;
2263
2264 if (log->flags & BOFI_LOG_TIMESTAMP)
2265 elem->access_time = bofi_gettime();
2266 elem->access_type = at;
2267 elem->offset = offset;
2268 elem->value = valuep ? *valuep : 0ll;
2269 elem->size = len;
2270 elem->repcount = repcount;
2271 ++log->entries;
2272 if (log->entries == log->logsize) {
2273 log->flags |= BOFI_LOG_FULL;
2274 ddi_trigger_softintr(((struct bofi_errent *)
2275 (uintptr_t)edp->errdef_handle)->softintr_id);
2276 }
2277 }
2278 if ((log->flags & BOFI_LOG_WRAP) && edp->access_count <= 1) {
2279 log->wrapcnt++;
2280 edp->access_count = log->logsize;
2281 log->entries = 0; /* wrap back to the start */
2282 }
2283 }
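/*
 * Sketch of the resulting log layout (illustrative only): records fill
 * logbase[0 .. logsize-1]; when BOFI_LOG_WRAP is set the write index
 * resets to 0 and wrapcnt is bumped, so a consumer that wants the
 * records in arrival order could do something like:
 *
 *	struct acc_log_elem *base;	(as returned via *logpp)
 *	struct acc_log *log;		(the errdef's log header)
 *	uint_t first, i;
 *
 *	first = (log->wrapcnt == 0) ? 0 : log->entries;
 *	for (i = 0; i < log->logsize; i++)
 *		consume(&base[(first + i) % log->logsize]);
 *
 * where consume() is a placeholder, not a routine in this file.
 */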
2284
2285
2286 /*
2287 * got a condition match on dma read/write - check counts and corrupt
2288 * data if necessary
2289 *
2290 * bofi_mutex always held when this is called.
2291 */
2292 static void
2293 do_dma_corrupt(struct bofi_shadow *hp, struct bofi_errent *ep,
2294 uint_t synctype, off_t off, off_t length)
2295 {
2296 uint64_t operand;
2297 int i;
2298 off_t len;
2299 caddr_t logaddr;
2300 uint64_t *addr;
2301 uint64_t *endaddr;
2302 ddi_dma_impl_t *hdlp;
2303 ndi_err_t *errp;
2304
2305 ASSERT(MUTEX_HELD(&bofi_mutex));
2306 if ((ep->errdef.access_count ||
2307 ep->errdef.fail_count) &&
2308 (ep->errdef.access_type & BOFI_LOG)) {
2309 uint_t atype;
2310
2311 if (synctype == DDI_DMA_SYNC_FORDEV)
2312 atype = BOFI_DMA_W;
2313 else if (synctype == DDI_DMA_SYNC_FORCPU ||
2314 synctype == DDI_DMA_SYNC_FORKERNEL)
2315 atype = BOFI_DMA_R;
2316 else
2317 atype = 0;
2318 if ((off <= ep->errdef.offset &&
2319 off + length > ep->errdef.offset) ||
2320 (off > ep->errdef.offset &&
2321 off < ep->errdef.offset + ep->errdef.len)) {
2322 logaddr = (caddr_t)((uintptr_t)(hp->addr +
2323 off + LLSZMASK) & ~LLSZMASK);
2324
2325 log_acc_event(ep, atype, logaddr - hp->addr,
2326 length, 1, 0);
2327 }
2328 }
2329 if (ep->errdef.access_count > 1) {
2330 ep->errdef.access_count--;
2331 } else if (ep->errdef.fail_count > 0) {
2332 ep->errdef.fail_count--;
2333 ep->errdef.access_count = 0;
2334 /*
2335 * OK do the corruption
2336 */
2337 if (ep->errstate.fail_time == 0)
2338 ep->errstate.fail_time = bofi_gettime();
2339 /*
2340 * work out how much to corrupt
2341 *
2342 * Make sure endaddr isn't greater than hp->addr + hp->len.
2343  * If endaddr becomes less than addr, len becomes negative
2344 * and the following loop isn't entered.
2345 */
2346 addr = (uint64_t *)((uintptr_t)((hp->addr +
2347 ep->errdef.offset) + LLSZMASK) & ~LLSZMASK);
2348 endaddr = (uint64_t *)((uintptr_t)(hp->addr + min(hp->len,
2349 ep->errdef.offset + ep->errdef.len)) & ~LLSZMASK);
2350 len = endaddr - addr;
2351 operand = ep->errdef.operand;
2352 hdlp = (ddi_dma_impl_t *)(hp->hdl.dma_handle);
2353 errp = &hdlp->dmai_error;
2354 if (ep->errdef.acc_chk & 2) {
2355 uint64_t ena;
2356 char buf[FM_MAX_CLASS];
2357
2358 errp->err_status = DDI_FM_NONFATAL;
2359 (void) snprintf(buf, FM_MAX_CLASS, FM_SIMULATED_DMA);
2360 ena = fm_ena_generate(0, FM_ENA_FMT1);
2361 ddi_fm_ereport_post(hp->dip, buf, ena,
2362 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
2363 FM_EREPORT_VERS0, NULL);
2364 }
2365 switch (ep->errdef.optype) {
2366 case BOFI_EQUAL :
2367 for (i = 0; i < len; i++)
2368 *(addr + i) = operand;
2369 break;
2370 case BOFI_AND :
2371 for (i = 0; i < len; i++)
2372 *(addr + i) &= operand;
2373 break;
2374 case BOFI_OR :
2375 for (i = 0; i < len; i++)
2376 *(addr + i) |= operand;
2377 break;
2378 case BOFI_XOR :
2379 for (i = 0; i < len; i++)
2380 *(addr + i) ^= operand;
2381 break;
2382 default:
2383 /* do nothing */
2384 break;
2385 }
2386 }
2387 }
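/*
 * Worked example of the rounding above, assuming LLSZMASK == 7 (i.e.
 * 64-bit words): with hp->addr == 0x1000, errdef.offset == 3 and
 * errdef.len == 20, the requested byte range is [0x1003, 0x1017), so
 *
 *	addr    = (0x1003 + 7) & ~7 = 0x1008	(start rounded up)
 *	endaddr =  0x1017      & ~7 = 0x1010	(end rounded down)
 *	len     = endaddr - addr    = 1 word
 *
 * Only 64-bit words lying wholly inside the range are corrupted; if
 * the range covers no whole word, len is zero or negative and the
 * corruption loop never runs.
 */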
2388
2389
2390 static uint64_t do_bofi_rd8(struct bofi_shadow *, caddr_t);
2391 static uint64_t do_bofi_rd16(struct bofi_shadow *, caddr_t);
2392 static uint64_t do_bofi_rd32(struct bofi_shadow *, caddr_t);
2393 static uint64_t do_bofi_rd64(struct bofi_shadow *, caddr_t);
2394
2395
2396 /*
2397 * check all errdefs linked to this shadow handle. If we've got a condition
2398 * match check counts and corrupt data if necessary
2399 *
2400 * bofi_mutex always held when this is called.
2401 *
2402  * because of the possibility of BOFI_NO_TRANSFER, we can't get data
2403 * from io-space before calling this, so we pass in the func to do the
2404 * transfer as a parameter.
2405 */
2406 static uint64_t
2407 do_pior_corrupt(struct bofi_shadow *hp, caddr_t addr,
2408 uint64_t (*func)(), size_t repcount, size_t accsize)
2409 {
2410 struct bofi_errent *ep;
2411 struct bofi_link *lp;
2412 uint64_t operand;
2413 uintptr_t minlen;
2414 intptr_t base;
2415 int done_get = 0;
2416 uint64_t get_val, gv;
2417 ddi_acc_impl_t *hdlp;
2418 ndi_err_t *errp;
2419
2420 ASSERT(MUTEX_HELD(&bofi_mutex));
2421 /*
2422 * check through all errdefs associated with this shadow handle
2423 */
2424 for (lp = hp->link; lp != NULL; lp = lp->link) {
2425 ep = lp->errentp;
2426 if (ep->errdef.len == 0)
2427 minlen = hp->len;
2428 else
2429 minlen = min(hp->len, ep->errdef.len);
2430 base = addr - hp->addr - ep->errdef.offset + hp->offset;
2431 if ((ep->errdef.access_type & BOFI_PIO_R) &&
2432 (ep->state & BOFI_DEV_ACTIVE) &&
2433 base >= 0 && base < minlen) {
2434 /*
2435 * condition match for pio read
2436 */
2437 if (ep->errdef.access_count > 1) {
2438 ep->errdef.access_count--;
2439 if (done_get == 0) {
2440 done_get = 1;
2441 gv = get_val = func(hp, addr);
2442 }
2443 if (ep->errdef.access_type & BOFI_LOG) {
2444 log_acc_event(ep, BOFI_PIO_R,
2445 addr - hp->addr,
2446 accsize, repcount, &gv);
2447 }
2448 } else if (ep->errdef.fail_count > 0) {
2449 ep->errdef.fail_count--;
2450 ep->errdef.access_count = 0;
2451 /*
2452 * OK do corruption
2453 */
2454 if (ep->errstate.fail_time == 0)
2455 ep->errstate.fail_time = bofi_gettime();
2456 operand = ep->errdef.operand;
2457 if (done_get == 0) {
2458 if (ep->errdef.optype ==
2459 BOFI_NO_TRANSFER)
2460 /*
2461 * no transfer - bomb out
2462 */
2463 return (operand);
2464 done_get = 1;
2465 gv = get_val = func(hp, addr);
2466
2467 }
2468 if (ep->errdef.access_type & BOFI_LOG) {
2469 log_acc_event(ep, BOFI_PIO_R,
2470 addr - hp->addr,
2471 accsize, repcount, &gv);
2472 }
2473 hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle);
2474 errp = hdlp->ahi_err;
2475 if (ep->errdef.acc_chk & 1) {
2476 uint64_t ena;
2477 char buf[FM_MAX_CLASS];
2478
2479 errp->err_status = DDI_FM_NONFATAL;
2480 (void) snprintf(buf, FM_MAX_CLASS,
2481 FM_SIMULATED_PIO);
2482 ena = fm_ena_generate(0, FM_ENA_FMT1);
2483 ddi_fm_ereport_post(hp->dip, buf, ena,
2484 DDI_NOSLEEP, FM_VERSION,
2485 DATA_TYPE_UINT8, FM_EREPORT_VERS0,
2486 NULL);
2487 }
2488 switch (ep->errdef.optype) {
2489 case BOFI_EQUAL :
2490 get_val = operand;
2491 break;
2492 case BOFI_AND :
2493 get_val &= operand;
2494 break;
2495 case BOFI_OR :
2496 get_val |= operand;
2497 break;
2498 case BOFI_XOR :
2499 get_val ^= operand;
2500 break;
2501 default:
2502 /* do nothing */
2503 break;
2504 }
2505 }
2506 }
2507 }
2508 if (done_get == 0)
2509 return (func(hp, addr));
2510 else
2511 return (get_val);
2512 }
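/*
 * Illustrative example of the operator handling above (not driver
 * code): with optype BOFI_XOR and operand 0xff, a register that really
 * reads back as 0x12345678 is returned to the driver as 0x12345687;
 * BOFI_EQUAL returns the operand itself; BOFI_NO_TRANSFER returns the
 * operand without touching io-space at all. As a plain function:
 *
 *	uint64_t
 *	apply_optype(uint_t optype, uint64_t val, uint64_t operand)
 *	{
 *		switch (optype) {
 *		case BOFI_EQUAL:
 *			return (operand);
 *		case BOFI_AND:
 *			return (val & operand);
 *		case BOFI_OR:
 *			return (val | operand);
 *		case BOFI_XOR:
 *			return (val ^ operand);
 *		default:
 *			return (val);
 *		}
 *	}
 */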
2513
2514
2515 /*
2516 * check all errdefs linked to this shadow handle. If we've got a condition
2517 * match check counts and corrupt data if necessary
2518 *
2519 * bofi_mutex always held when this is called.
2520 *
2521  * because of the possibility of BOFI_NO_TRANSFER, we return 0 if no data
2522 * is to be written out to io-space, 1 otherwise
2523 */
2524 static int
2525 do_piow_corrupt(struct bofi_shadow *hp, caddr_t addr, uint64_t *valuep,
2526 size_t size, size_t repcount)
2527 {
2528 struct bofi_errent *ep;
2529 struct bofi_link *lp;
2530 uintptr_t minlen;
2531 intptr_t base;
2532 uint64_t v = *valuep;
2533 ddi_acc_impl_t *hdlp;
2534 ndi_err_t *errp;
2535
2536 ASSERT(MUTEX_HELD(&bofi_mutex));
2537 /*
2538 * check through all errdefs associated with this shadow handle
2539 */
2540 for (lp = hp->link; lp != NULL; lp = lp->link) {
2541 ep = lp->errentp;
2542 if (ep->errdef.len == 0)
2543 minlen = hp->len;
2544 else
2545 minlen = min(hp->len, ep->errdef.len);
2546 		base = (caddr_t)addr - hp->addr - ep->errdef.offset + hp->offset;
2547 if ((ep->errdef.access_type & BOFI_PIO_W) &&
2548 (ep->state & BOFI_DEV_ACTIVE) &&
2549 base >= 0 && base < minlen) {
2550 /*
2551 * condition match for pio write
2552 */
2553
2554 if (ep->errdef.access_count > 1) {
2555 ep->errdef.access_count--;
2556 if (ep->errdef.access_type & BOFI_LOG)
2557 log_acc_event(ep, BOFI_PIO_W,
2558 addr - hp->addr, size,
2559 repcount, &v);
2560 } else if (ep->errdef.fail_count > 0) {
2561 ep->errdef.fail_count--;
2562 ep->errdef.access_count = 0;
2563 if (ep->errdef.access_type & BOFI_LOG)
2564 log_acc_event(ep, BOFI_PIO_W,
2565 addr - hp->addr, size,
2566 repcount, &v);
2567 /*
2568 * OK do corruption
2569 */
2570 if (ep->errstate.fail_time == 0)
2571 ep->errstate.fail_time = bofi_gettime();
2572 hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle);
2573 errp = hdlp->ahi_err;
2574 if (ep->errdef.acc_chk & 1) {
2575 uint64_t ena;
2576 char buf[FM_MAX_CLASS];
2577
2578 errp->err_status = DDI_FM_NONFATAL;
2579 (void) snprintf(buf, FM_MAX_CLASS,
2580 FM_SIMULATED_PIO);
2581 ena = fm_ena_generate(0, FM_ENA_FMT1);
2582 ddi_fm_ereport_post(hp->dip, buf, ena,
2583 DDI_NOSLEEP, FM_VERSION,
2584 DATA_TYPE_UINT8, FM_EREPORT_VERS0,
2585 NULL);
2586 }
2587 switch (ep->errdef.optype) {
2588 case BOFI_EQUAL :
2589 *valuep = ep->errdef.operand;
2590 break;
2591 case BOFI_AND :
2592 *valuep &= ep->errdef.operand;
2593 break;
2594 case BOFI_OR :
2595 *valuep |= ep->errdef.operand;
2596 break;
2597 case BOFI_XOR :
2598 *valuep ^= ep->errdef.operand;
2599 break;
2600 case BOFI_NO_TRANSFER :
2601 /*
2602 * no transfer - bomb out
2603 */
2604 return (0);
2605 default:
2606 /* do nothing */
2607 break;
2608 }
2609 }
2610 }
2611 }
2612 return (1);
2613 }
2614
2615
2616 static uint64_t
2617 do_bofi_rd8(struct bofi_shadow *hp, caddr_t addr)
2618 {
2619 return (hp->save.acc.ahi_get8(&hp->save.acc, (uint8_t *)addr));
2620 }
2621
2622 #define BOFI_READ_CHECKS(type) \
2623 if (bofi_ddi_check) \
2624 addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
2625 if (bofi_range_check && ((caddr_t)addr < hp->addr || \
2626 (caddr_t)addr - hp->addr >= hp->len)) { \
2627 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2628 "ddi_get() out of range addr %p not in %p/%llx", \
2629 (void *)addr, (void *)hp->addr, hp->len); \
2630 return (0); \
2631 }
2632
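/*
 * A note on the bofi_ddi_check test in the macro above: when
 * bofi_ddi_check is set, bofi_map() hands the driver the spurious
 * mapped address 64 rather than the real one, so an access that
 * correctly goes through the handle arrives here with
 *
 *	addr == 64 + (intended offset into the mapping)
 *
 * and the macro recovers the real address as
 *
 *	(uintptr_t)addr - 64 + hp->addr
 *
 * A driver that dereferences the "mapped" address directly instead of
 * using ddi_get/ddi_put faults on the bogus pointer - exactly the
 * misuse this mode is designed to catch.
 */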
2633 /*
2634 * our getb() routine - use tryenter
2635 */
2636 static uint8_t
2637 bofi_rd8(ddi_acc_impl_t *handle, uint8_t *addr)
2638 {
2639 struct bofi_shadow *hp;
2640 uint8_t retval;
2641
2642 hp = handle->ahi_common.ah_bus_private;
2643 BOFI_READ_CHECKS(uint8_t)
2644 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2645 return (hp->save.acc.ahi_get8(&hp->save.acc, addr));
2646 retval = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd8, 1,
2647 1);
2648 mutex_exit(&bofi_mutex);
2649 return (retval);
2650 }
2651
2652
2653 static uint64_t
2654 do_bofi_rd16(struct bofi_shadow *hp, caddr_t addr)
2655 {
2656 return (hp->save.acc.ahi_get16(&hp->save.acc, (uint16_t *)addr));
2657 }
2658
2659
2660 /*
2661 * our getw() routine - use tryenter
2662 */
2663 static uint16_t
2664 bofi_rd16(ddi_acc_impl_t *handle, uint16_t *addr)
2665 {
2666 struct bofi_shadow *hp;
2667 uint16_t retval;
2668
2669 hp = handle->ahi_common.ah_bus_private;
2670 BOFI_READ_CHECKS(uint16_t)
2671 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2672 return (hp->save.acc.ahi_get16(&hp->save.acc, addr));
2673 retval = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd16, 1,
2674 2);
2675 mutex_exit(&bofi_mutex);
2676 return (retval);
2677 }
2678
2679
2680 static uint64_t
2681 do_bofi_rd32(struct bofi_shadow *hp, caddr_t addr)
2682 {
2683 return (hp->save.acc.ahi_get32(&hp->save.acc, (uint32_t *)addr));
2684 }
2685
2686
2687 /*
2688 * our getl() routine - use tryenter
2689 */
2690 static uint32_t
2691 bofi_rd32(ddi_acc_impl_t *handle, uint32_t *addr)
2692 {
2693 struct bofi_shadow *hp;
2694 uint32_t retval;
2695
2696 hp = handle->ahi_common.ah_bus_private;
2697 BOFI_READ_CHECKS(uint32_t)
2698 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2699 return (hp->save.acc.ahi_get32(&hp->save.acc, addr));
2700 retval = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd32, 1,
2701 4);
2702 mutex_exit(&bofi_mutex);
2703 return (retval);
2704 }
2705
2706
2707 static uint64_t
2708 do_bofi_rd64(struct bofi_shadow *hp, caddr_t addr)
2709 {
2710 return (hp->save.acc.ahi_get64(&hp->save.acc, (uint64_t *)addr));
2711 }
2712
2713
2714 /*
2715 * our getll() routine - use tryenter
2716 */
2717 static uint64_t
2718 bofi_rd64(ddi_acc_impl_t *handle, uint64_t *addr)
2719 {
2720 struct bofi_shadow *hp;
2721 uint64_t retval;
2722
2723 hp = handle->ahi_common.ah_bus_private;
2724 BOFI_READ_CHECKS(uint64_t)
2725 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2726 return (hp->save.acc.ahi_get64(&hp->save.acc, addr));
2727 retval = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd64, 1,
2728 8);
2729 mutex_exit(&bofi_mutex);
2730 return (retval);
2731 }
2732
2733 #define BOFI_WRITE_TESTS(type) \
2734 if (bofi_ddi_check) \
2735 addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
2736 if (bofi_range_check && ((caddr_t)addr < hp->addr || \
2737 (caddr_t)addr - hp->addr >= hp->len)) { \
2738 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2739 "ddi_put() out of range addr %p not in %p/%llx\n", \
2740 (void *)addr, (void *)hp->addr, hp->len); \
2741 return; \
2742 }
2743
2744 /*
2745 * our putb() routine - use tryenter
2746 */
2747 static void
2748 bofi_wr8(ddi_acc_impl_t *handle, uint8_t *addr, uint8_t value)
2749 {
2750 struct bofi_shadow *hp;
2751 uint64_t llvalue = value;
2752
2753 hp = handle->ahi_common.ah_bus_private;
2754 BOFI_WRITE_TESTS(uint8_t)
2755 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2756 hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2757 return;
2758 }
2759 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, 1))
2760 hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2761 mutex_exit(&bofi_mutex);
2762 }
2763
2764
2765 /*
2766 * our putw() routine - use tryenter
2767 */
2768 static void
2769 bofi_wr16(ddi_acc_impl_t *handle, uint16_t *addr, uint16_t value)
2770 {
2771 struct bofi_shadow *hp;
2772 uint64_t llvalue = value;
2773
2774 hp = handle->ahi_common.ah_bus_private;
2775 BOFI_WRITE_TESTS(uint16_t)
2776 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2777 hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2778 return;
2779 }
2780 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, 1))
2781 hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2782 mutex_exit(&bofi_mutex);
2783 }
2784
2785
2786 /*
2787 * our putl() routine - use tryenter
2788 */
2789 static void
2790 bofi_wr32(ddi_acc_impl_t *handle, uint32_t *addr, uint32_t value)
2791 {
2792 struct bofi_shadow *hp;
2793 uint64_t llvalue = value;
2794
2795 hp = handle->ahi_common.ah_bus_private;
2796 BOFI_WRITE_TESTS(uint32_t)
2797 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2798 hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2799 return;
2800 }
2801 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, 1))
2802 hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2803 mutex_exit(&bofi_mutex);
2804 }
2805
2806
2807 /*
2808 * our putll() routine - use tryenter
2809 */
2810 static void
2811 bofi_wr64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t value)
2812 {
2813 struct bofi_shadow *hp;
2814 uint64_t llvalue = value;
2815
2816 hp = handle->ahi_common.ah_bus_private;
2817 BOFI_WRITE_TESTS(uint64_t)
2818 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2819 hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2820 return;
2821 }
2822 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, 1))
2823 hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2824 mutex_exit(&bofi_mutex);
2825 }
2826
2827 #define BOFI_REP_READ_TESTS(type) \
2828 if (bofi_ddi_check) \
2829 dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
2830 if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
2831 (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
2832 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2833 "ddi_rep_get() out of range addr %p not in %p/%llx\n", \
2834 (void *)dev_addr, (void *)hp->addr, hp->len); \
2835 if ((caddr_t)dev_addr < hp->addr || \
2836 (caddr_t)dev_addr - hp->addr >= hp->len) \
2837 return; \
2838 repcount = (type *)(hp->addr + hp->len) - dev_addr; \
2839 }
2840
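/*
 * Worked example of the clamping above (illustrative): for a handle
 * mapping hp->len == 16 bytes at hp->addr, a ddi_rep_get32() with
 * repcount == 8 starting at dev_addr == hp->addr + 8 would overrun by
 * six words, so the macro warns and clamps:
 *
 *	repcount = (uint32_t *)(hp->addr + 16) - dev_addr = 2
 *
 * and only the two in-range words are transferred. If dev_addr itself
 * lies outside the mapping, the access is abandoned altogether.
 */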
2841 /*
2842 * our rep_getb() routine - use tryenter
2843 */
2844 static void
2845 bofi_rep_rd8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
2846 size_t repcount, uint_t flags)
2847 {
2848 struct bofi_shadow *hp;
2849 int i;
2850 uint8_t *addr;
2851
2852 hp = handle->ahi_common.ah_bus_private;
2853 BOFI_REP_READ_TESTS(uint8_t)
2854 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2855 hp->save.acc.ahi_rep_get8(&hp->save.acc, host_addr, dev_addr,
2856 repcount, flags);
2857 return;
2858 }
2859 for (i = 0; i < repcount; i++) {
2860 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2861 *(host_addr + i) = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr,
2862 do_bofi_rd8, i ? 0 : repcount, 1);
2863 }
2864 mutex_exit(&bofi_mutex);
2865 }
2866
2867
2868 /*
2869 * our rep_getw() routine - use tryenter
2870 */
2871 static void
2872 bofi_rep_rd16(ddi_acc_impl_t *handle, uint16_t *host_addr,
2873 uint16_t *dev_addr, size_t repcount, uint_t flags)
2874 {
2875 struct bofi_shadow *hp;
2876 int i;
2877 uint16_t *addr;
2878
2879 hp = handle->ahi_common.ah_bus_private;
2880 BOFI_REP_READ_TESTS(uint16_t)
2881 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2882 hp->save.acc.ahi_rep_get16(&hp->save.acc, host_addr, dev_addr,
2883 repcount, flags);
2884 return;
2885 }
2886 for (i = 0; i < repcount; i++) {
2887 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2888 *(host_addr + i) = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr,
2889 do_bofi_rd16, i ? 0 : repcount, 2);
2890 }
2891 mutex_exit(&bofi_mutex);
2892 }
2893
2894
2895 /*
2896 * our rep_getl() routine - use tryenter
2897 */
2898 static void
2899 bofi_rep_rd32(ddi_acc_impl_t *handle, uint32_t *host_addr,
2900 uint32_t *dev_addr, size_t repcount, uint_t flags)
2901 {
2902 struct bofi_shadow *hp;
2903 int i;
2904 uint32_t *addr;
2905
2906 hp = handle->ahi_common.ah_bus_private;
2907 BOFI_REP_READ_TESTS(uint32_t)
2908 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2909 hp->save.acc.ahi_rep_get32(&hp->save.acc, host_addr, dev_addr,
2910 repcount, flags);
2911 return;
2912 }
2913 for (i = 0; i < repcount; i++) {
2914 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2915 *(host_addr + i) = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr,
2916 do_bofi_rd32, i ? 0 : repcount, 4);
2917 }
2918 mutex_exit(&bofi_mutex);
2919 }
2920
2921
2922 /*
2923 * our rep_getll() routine - use tryenter
2924 */
2925 static void
2926 bofi_rep_rd64(ddi_acc_impl_t *handle, uint64_t *host_addr,
2927 uint64_t *dev_addr, size_t repcount, uint_t flags)
2928 {
2929 struct bofi_shadow *hp;
2930 int i;
2931 uint64_t *addr;
2932
2933 hp = handle->ahi_common.ah_bus_private;
2934 BOFI_REP_READ_TESTS(uint64_t)
2935 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2936 hp->save.acc.ahi_rep_get64(&hp->save.acc, host_addr, dev_addr,
2937 repcount, flags);
2938 return;
2939 }
2940 for (i = 0; i < repcount; i++) {
2941 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2942 *(host_addr + i) = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr,
2943 do_bofi_rd64, i ? 0 : repcount, 8);
2944 }
2945 mutex_exit(&bofi_mutex);
2946 }
2947
2948 #define BOFI_REP_WRITE_TESTS(type) \
2949 if (bofi_ddi_check) \
2950 dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
2951 if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
2952 (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
2953 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2954 "ddi_rep_put() out of range addr %p not in %p/%llx\n", \
2955 (void *)dev_addr, (void *)hp->addr, hp->len); \
2956 if ((caddr_t)dev_addr < hp->addr || \
2957 (caddr_t)dev_addr - hp->addr >= hp->len) \
2958 return; \
2959 repcount = (type *)(hp->addr + hp->len) - dev_addr; \
2960 }
2961
2962 /*
2963 * our rep_putb() routine - use tryenter
2964 */
2965 static void
2966 bofi_rep_wr8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
2967 size_t repcount, uint_t flags)
2968 {
2969 struct bofi_shadow *hp;
2970 int i;
2971 uint64_t llvalue;
2972 uint8_t *addr;
2973
2974 hp = handle->ahi_common.ah_bus_private;
2975 BOFI_REP_WRITE_TESTS(uint8_t)
2976 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2977 hp->save.acc.ahi_rep_put8(&hp->save.acc, host_addr, dev_addr,
2978 repcount, flags);
2979 return;
2980 }
2981 for (i = 0; i < repcount; i++) {
2982 llvalue = *(host_addr + i);
2983 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2984 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, i ? 0 :
2985 repcount))
2986 hp->save.acc.ahi_put8(&hp->save.acc, addr,
2987 (uint8_t)llvalue);
2988 }
2989 mutex_exit(&bofi_mutex);
2990 }
2991
2992
2993 /*
2994 * our rep_putw() routine - use tryenter
2995 */
2996 static void
2997 bofi_rep_wr16(ddi_acc_impl_t *handle, uint16_t *host_addr,
2998 uint16_t *dev_addr, size_t repcount, uint_t flags)
2999 {
3000 struct bofi_shadow *hp;
3001 int i;
3002 uint64_t llvalue;
3003 uint16_t *addr;
3004
3005 hp = handle->ahi_common.ah_bus_private;
3006 BOFI_REP_WRITE_TESTS(uint16_t)
3007 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
3008 hp->save.acc.ahi_rep_put16(&hp->save.acc, host_addr, dev_addr,
3009 repcount, flags);
3010 return;
3011 }
3012 for (i = 0; i < repcount; i++) {
3013 llvalue = *(host_addr + i);
3014 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3015 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, i ? 0 :
3016 repcount))
3017 hp->save.acc.ahi_put16(&hp->save.acc, addr,
3018 (uint16_t)llvalue);
3019 }
3020 mutex_exit(&bofi_mutex);
3021 }
3022
3023
3024 /*
3025 * our rep_putl() routine - use tryenter
3026 */
3027 static void
3028 bofi_rep_wr32(ddi_acc_impl_t *handle, uint32_t *host_addr,
3029 uint32_t *dev_addr, size_t repcount, uint_t flags)
3030 {
3031 struct bofi_shadow *hp;
3032 int i;
3033 uint64_t llvalue;
3034 uint32_t *addr;
3035
3036 hp = handle->ahi_common.ah_bus_private;
3037 BOFI_REP_WRITE_TESTS(uint32_t)
3038 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
3039 hp->save.acc.ahi_rep_put32(&hp->save.acc, host_addr, dev_addr,
3040 repcount, flags);
3041 return;
3042 }
3043 for (i = 0; i < repcount; i++) {
3044 llvalue = *(host_addr + i);
3045 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3046 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, i ? 0 :
3047 repcount))
3048 hp->save.acc.ahi_put32(&hp->save.acc, addr,
3049 (uint32_t)llvalue);
3050 }
3051 mutex_exit(&bofi_mutex);
3052 }
3053
3054
3055 /*
3056 * our rep_putll() routine - use tryenter
3057 */
3058 static void
3059 bofi_rep_wr64(ddi_acc_impl_t *handle, uint64_t *host_addr,
3060 uint64_t *dev_addr, size_t repcount, uint_t flags)
3061 {
3062 struct bofi_shadow *hp;
3063 int i;
3064 uint64_t llvalue;
3065 uint64_t *addr;
3066
3067 hp = handle->ahi_common.ah_bus_private;
3068 BOFI_REP_WRITE_TESTS(uint64_t)
3069 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
3070 hp->save.acc.ahi_rep_put64(&hp->save.acc, host_addr, dev_addr,
3071 repcount, flags);
3072 return;
3073 }
3074 for (i = 0; i < repcount; i++) {
3075 llvalue = *(host_addr + i);
3076 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3077 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, i ? 0 :
3078 repcount))
3079 hp->save.acc.ahi_put64(&hp->save.acc, addr,
3080 (uint64_t)llvalue);
3081 }
3082 mutex_exit(&bofi_mutex);
3083 }
3084
3085
3086 /*
3087 * our ddi_map routine
3088 */
3089 static int
3090 bofi_map(dev_info_t *dip, dev_info_t *rdip,
3091 ddi_map_req_t *reqp, off_t offset, off_t len, caddr_t *vaddrp)
3092 {
3093 ddi_acc_impl_t *ap;
3094 struct bofi_shadow *hp;
3095 struct bofi_errent *ep;
3096 struct bofi_link *lp, *next_lp;
3097 int retval;
3098 struct bofi_shadow *dhashp;
3099 struct bofi_shadow *hhashp;
3100
3101 switch (reqp->map_op) {
3102 case DDI_MO_MAP_LOCKED:
3103 /*
3104 * for this case get nexus to do real work first
3105 */
3106 retval = save_bus_ops.bus_map(dip, rdip, reqp, offset, len,
3107 vaddrp);
3108 if (retval != DDI_SUCCESS)
3109 return (retval);
3110
3111 ap = (ddi_acc_impl_t *)reqp->map_handlep;
3112 if (ap == NULL)
3113 return (DDI_SUCCESS);
3114 /*
3115 * if driver_list is set, only intercept those drivers
3116 */
3117 if (!driver_under_test(ap->ahi_common.ah_dip))
3118 return (DDI_SUCCESS);
3119
3120 /*
3121 * support for ddi_regs_map_setup()
3122 * - allocate shadow handle structure and fill it in
3123 */
3124 hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
3125 (void) strncpy(hp->name, ddi_get_name(ap->ahi_common.ah_dip),
3126 NAMESIZE);
3127 hp->instance = ddi_get_instance(ap->ahi_common.ah_dip);
3128 hp->dip = ap->ahi_common.ah_dip;
3129 hp->addr = *vaddrp;
3130 /*
3131 * return spurious value to catch direct access to registers
3132 */
3133 if (bofi_ddi_check)
3134 *vaddrp = (caddr_t)64;
3135 hp->rnumber = ((ddi_acc_hdl_t *)ap)->ah_rnumber;
3136 hp->offset = offset;
3137 if (len == 0)
3138 hp->len = INT_MAX - offset;
3139 else
3140 hp->len = min(len, INT_MAX - offset);
3141 hp->hdl.acc_handle = (ddi_acc_handle_t)ap;
3142 hp->link = NULL;
3143 hp->type = BOFI_ACC_HDL;
3144 /*
3145 * save existing function pointers and plug in our own
3146 */
3147 hp->save.acc = *ap;
3148 ap->ahi_get8 = bofi_rd8;
3149 ap->ahi_get16 = bofi_rd16;
3150 ap->ahi_get32 = bofi_rd32;
3151 ap->ahi_get64 = bofi_rd64;
3152 ap->ahi_put8 = bofi_wr8;
3153 ap->ahi_put16 = bofi_wr16;
3154 ap->ahi_put32 = bofi_wr32;
3155 ap->ahi_put64 = bofi_wr64;
3156 ap->ahi_rep_get8 = bofi_rep_rd8;
3157 ap->ahi_rep_get16 = bofi_rep_rd16;
3158 ap->ahi_rep_get32 = bofi_rep_rd32;
3159 ap->ahi_rep_get64 = bofi_rep_rd64;
3160 ap->ahi_rep_put8 = bofi_rep_wr8;
3161 ap->ahi_rep_put16 = bofi_rep_wr16;
3162 ap->ahi_rep_put32 = bofi_rep_wr32;
3163 ap->ahi_rep_put64 = bofi_rep_wr64;
3164 ap->ahi_fault_check = bofi_check_acc_hdl;
3165 #if !defined(__sparc)
3167 ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
3168 #endif
3169 /*
3170 * stick in a pointer to our shadow handle
3171 */
3172 ap->ahi_common.ah_bus_private = hp;
3173 /*
3174 * add to dhash, hhash and inuse lists
3175 */
3176 mutex_enter(&bofi_low_mutex);
3177 mutex_enter(&bofi_mutex);
3178 hp->next = shadow_list.next;
3179 shadow_list.next->prev = hp;
3180 hp->prev = &shadow_list;
3181 shadow_list.next = hp;
3182 hhashp = HDL_HHASH(ap);
3183 hp->hnext = hhashp->hnext;
3184 hhashp->hnext->hprev = hp;
3185 hp->hprev = hhashp;
3186 hhashp->hnext = hp;
3187 dhashp = HDL_DHASH(hp->dip);
3188 hp->dnext = dhashp->dnext;
3189 dhashp->dnext->dprev = hp;
3190 hp->dprev = dhashp;
3191 dhashp->dnext = hp;
3192 /*
3193 * chain on any pre-existing errdefs that apply to this
3194 * acc_handle
3195 */
3196 for (ep = errent_listp; ep != NULL; ep = ep->next) {
3197 if (ddi_name_to_major(hp->name) ==
3198 ddi_name_to_major(ep->name) &&
3199 hp->instance == ep->errdef.instance &&
3200 (ep->errdef.access_type & BOFI_PIO_RW) &&
3201 (ep->errdef.rnumber == -1 ||
3202 hp->rnumber == ep->errdef.rnumber) &&
3203 (ep->errdef.len == 0 ||
3204 offset < ep->errdef.offset + ep->errdef.len) &&
3205 offset + hp->len > ep->errdef.offset) {
3206 lp = bofi_link_freelist;
3207 if (lp != NULL) {
3208 bofi_link_freelist = lp->link;
3209 lp->errentp = ep;
3210 lp->link = hp->link;
3211 hp->link = lp;
3212 }
3213 }
3214 }
3215 mutex_exit(&bofi_mutex);
3216 mutex_exit(&bofi_low_mutex);
3217 return (DDI_SUCCESS);
3218 case DDI_MO_UNMAP:
3219
3220 ap = (ddi_acc_impl_t *)reqp->map_handlep;
3221 if (ap == NULL)
3222 break;
3223 /*
3224 * support for ddi_regs_map_free()
3225 * - check we really have a shadow handle for this one
3226 */
3227 mutex_enter(&bofi_low_mutex);
3228 mutex_enter(&bofi_mutex);
3229 hhashp = HDL_HHASH(ap);
3230 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3231 if (hp->hdl.acc_handle == (ddi_acc_handle_t)ap)
3232 break;
3233 if (hp == hhashp) {
3234 mutex_exit(&bofi_mutex);
3235 mutex_exit(&bofi_low_mutex);
3236 break;
3237 }
3238 /*
3239 * got a shadow handle - restore original pointers
3240 */
3241 *ap = hp->save.acc;
3242 *vaddrp = hp->addr;
3243 /*
3244 * remove from dhash, hhash and inuse lists
3245 */
3246 hp->hnext->hprev = hp->hprev;
3247 hp->hprev->hnext = hp->hnext;
3248 hp->dnext->dprev = hp->dprev;
3249 hp->dprev->dnext = hp->dnext;
3250 hp->next->prev = hp->prev;
3251 hp->prev->next = hp->next;
3252 /*
3253 * free any errdef link structures tagged onto the shadow handle
3254 */
3255 for (lp = hp->link; lp != NULL; ) {
3256 next_lp = lp->link;
3257 lp->link = bofi_link_freelist;
3258 bofi_link_freelist = lp;
3259 lp = next_lp;
3260 }
3261 hp->link = NULL;
3262 mutex_exit(&bofi_mutex);
3263 mutex_exit(&bofi_low_mutex);
3264 /*
3265 * finally delete shadow handle
3266 */
3267 kmem_free(hp, sizeof (struct bofi_shadow));
3268 break;
3269 default:
3270 break;
3271 }
3272 return (save_bus_ops.bus_map(dip, rdip, reqp, offset, len, vaddrp));
3273 }
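/*
 * End-to-end sketch of the interception set up above, from the point
 * of view of a driver under test (illustrative; CSR_OFFSET, base, attr
 * and h are placeholders, not names from this file):
 *
 *	ddi_acc_handle_t h;
 *	caddr_t base;
 *	uint32_t csr;
 *
 *	(void) ddi_regs_map_setup(dip, 1, &base, 0, 0, &attr, &h);
 *	csr = ddi_get32(h, (uint32_t *)(base + CSR_OFFSET));
 *
 * The ddi_regs_map_setup() is routed through bofi_map(), which points
 * the handle's ops vector at the bofi_rdN/bofi_wrN routines, so the
 * ddi_get32() lands in bofi_rd32() where any active errdefs for this
 * instance and register set may corrupt the value before the driver
 * sees it.
 */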
3274
3275
3276 /*
3277 * chain any pre-existing errdefs on to newly created dma handle
3278 * if required call do_dma_corrupt() to corrupt data
3279 */
3280 static void
3281 chain_on_errdefs(struct bofi_shadow *hp)
3282 {
3283 struct bofi_errent *ep;
3284 struct bofi_link *lp;
3285
3286 ASSERT(MUTEX_HELD(&bofi_mutex));
3287 /*
3288 * chain on any pre-existing errdefs that apply to this dma_handle
3289 */
3290 for (ep = errent_listp; ep != NULL; ep = ep->next) {
3291 if (ddi_name_to_major(hp->name) ==
3292 ddi_name_to_major(ep->name) &&
3293 hp->instance == ep->errdef.instance &&
3294 (ep->errdef.rnumber == -1 ||
3295 hp->rnumber == ep->errdef.rnumber) &&
3296 ((ep->errdef.access_type & BOFI_DMA_RW) &&
3297 (((uintptr_t)(hp->addr + ep->errdef.offset +
3298 ep->errdef.len) & ~LLSZMASK) >
3299 ((uintptr_t)((hp->addr + ep->errdef.offset) +
3300 LLSZMASK) & ~LLSZMASK)))) {
3301 /*
3302 * got a match - link it on
3303 */
3304 lp = bofi_link_freelist;
3305 if (lp != NULL) {
3306 bofi_link_freelist = lp->link;
3307 lp->errentp = ep;
3308 lp->link = hp->link;
3309 hp->link = lp;
3310 if ((ep->errdef.access_type & BOFI_DMA_W) &&
3311 (hp->flags & DDI_DMA_WRITE) &&
3312 (ep->state & BOFI_DEV_ACTIVE)) {
3313 do_dma_corrupt(hp, ep,
3314 DDI_DMA_SYNC_FORDEV,
3315 0, hp->len);
3316 }
3317 }
3318 }
3319 }
3320 }
3321
3322
3323 /*
3324  * need to do the copy byte-by-byte in case one of the pages is little-endian
3325 */
3326 static void
3327 xbcopy(void *from, void *to, u_longlong_t len)
3328 {
3329 uchar_t *f = from;
3330 uchar_t *t = to;
3331
3332 while (len--)
3333 *t++ = *f++;
3334 }
3335
3336
3337 /*
3338 * our ddi_dma_allochdl routine
3339 */
3340 static int
3341 bofi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
3342 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
3343 {
3344 int retval = DDI_DMA_NORESOURCES;
3345 struct bofi_shadow *hp, *xhp;
3346 int maxrnumber = 0;
3347 struct bofi_shadow *dhashp;
3348 struct bofi_shadow *hhashp;
3349 ddi_dma_impl_t *mp;
3350
3351 /*
3352 * if driver_list is set, only intercept those drivers
3353 */
3354 if (!driver_under_test(rdip))
3355 return (save_bus_ops.bus_dma_allochdl(dip, rdip, attrp,
3356 waitfp, arg, handlep));
3357
3358 /*
3359 * allocate shadow handle structure and fill it in
3360 */
3361 hp = kmem_zalloc(sizeof (struct bofi_shadow),
3362 ((waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP));
3363 if (hp == NULL) {
3364 /*
3365 * what to do here? Wait a bit and try again
3366 */
3367 if (waitfp != DDI_DMA_DONTWAIT)
3368 (void) timeout((void (*)())waitfp, arg, 10);
3369 return (retval);
3370 }
3371 (void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
3372 hp->instance = ddi_get_instance(rdip);
3373 hp->dip = rdip;
3374 hp->link = NULL;
3375 hp->type = BOFI_NULL;
3376 /*
3377 * call nexus to do the real work
3378 */
3379 retval = save_bus_ops.bus_dma_allochdl(dip, rdip, attrp, waitfp, arg,
3380 handlep);
3381 if (retval != DDI_SUCCESS) {
3382 kmem_free(hp, sizeof (struct bofi_shadow));
3383 return (retval);
3384 }
3385 /*
3386  * now set dma_handle to point to the real handle
3387 */
3388 hp->hdl.dma_handle = *handlep;
3389 mp = (ddi_dma_impl_t *)*handlep;
3390 mp->dmai_fault_check = bofi_check_dma_hdl;
3391 /*
3392 * bind and unbind are cached in devinfo - must overwrite them
3393 * - note that our bind and unbind are quite happy dealing with
3394 * any handles for this devinfo that were previously allocated
3395 */
3396 if (save_bus_ops.bus_dma_bindhdl == DEVI(rdip)->devi_bus_dma_bindfunc)
3397 DEVI(rdip)->devi_bus_dma_bindfunc = bofi_dma_bindhdl;
3398 if (save_bus_ops.bus_dma_unbindhdl ==
3399 DEVI(rdip)->devi_bus_dma_unbindfunc)
3400 DEVI(rdip)->devi_bus_dma_unbindfunc = bofi_dma_unbindhdl;
3401 mutex_enter(&bofi_low_mutex);
3402 mutex_enter(&bofi_mutex);
3403 /*
3404 * get an "rnumber" for this handle - really just seeking to
3405  * get a unique number - generally we only care about early allocated
3406  * handles - if we get as far as INT_MAX, just stay there
3407 */
3408 dhashp = HDL_DHASH(hp->dip);
3409 for (xhp = dhashp->dnext; xhp != dhashp; xhp = xhp->dnext)
3410 if (ddi_name_to_major(xhp->name) ==
3411 ddi_name_to_major(hp->name) &&
3412 xhp->instance == hp->instance &&
3413 (xhp->type == BOFI_DMA_HDL ||
3414 xhp->type == BOFI_NULL))
3415 if (xhp->rnumber >= maxrnumber) {
3416 if (xhp->rnumber == INT_MAX)
3417 maxrnumber = INT_MAX;
3418 else
3419 maxrnumber = xhp->rnumber + 1;
3420 }
3421 hp->rnumber = maxrnumber;
3422 /*
3423 * add to dhash, hhash and inuse lists
3424 */
3425 hp->next = shadow_list.next;
3426 shadow_list.next->prev = hp;
3427 hp->prev = &shadow_list;
3428 shadow_list.next = hp;
3429 hhashp = HDL_HHASH(*handlep);
3430 hp->hnext = hhashp->hnext;
3431 hhashp->hnext->hprev = hp;
3432 hp->hprev = hhashp;
3433 hhashp->hnext = hp;
3434 dhashp = HDL_DHASH(hp->dip);
3435 hp->dnext = dhashp->dnext;
3436 dhashp->dnext->dprev = hp;
3437 hp->dprev = dhashp;
3438 dhashp->dnext = hp;
3439 mutex_exit(&bofi_mutex);
3440 mutex_exit(&bofi_low_mutex);
3441 return (retval);
3442 }
3443
3444
3445 /*
3446 * our ddi_dma_freehdl routine
3447 */
3448 static int
3449 bofi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
3450 {
3451 int retval;
3452 struct bofi_shadow *hp;
3453 struct bofi_shadow *hhashp;
3454
3455 /*
3456 * find shadow for this handle
3457 */
3458 mutex_enter(&bofi_low_mutex);
3459 mutex_enter(&bofi_mutex);
3460 hhashp = HDL_HHASH(handle);
3461 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3462 if (hp->hdl.dma_handle == handle)
3463 break;
3464 mutex_exit(&bofi_mutex);
3465 mutex_exit(&bofi_low_mutex);
3466 /*
3467 * call nexus to do the real work
3468 */
3469 retval = save_bus_ops.bus_dma_freehdl(dip, rdip, handle);
3470 if (retval != DDI_SUCCESS) {
3471 return (retval);
3472 }
3473 /*
3474 * did we really have a shadow for this handle
3475 */
3476 if (hp == hhashp)
3477 return (retval);
3478 /*
3479 * yes we have - see if it's still bound
3480 */
3481 mutex_enter(&bofi_low_mutex);
3482 mutex_enter(&bofi_mutex);
3483 if (hp->type != BOFI_NULL)
3484 panic("driver freeing bound dma_handle");
3485 /*
3486 * remove from dhash, hhash and inuse lists
3487 */
3488 hp->hnext->hprev = hp->hprev;
3489 hp->hprev->hnext = hp->hnext;
3490 hp->dnext->dprev = hp->dprev;
3491 hp->dprev->dnext = hp->dnext;
3492 hp->next->prev = hp->prev;
3493 hp->prev->next = hp->next;
3494 mutex_exit(&bofi_mutex);
3495 mutex_exit(&bofi_low_mutex);
3496
3497 kmem_free(hp, sizeof (struct bofi_shadow));
3498 return (retval);
3499 }
3500
3501
3502 /*
3503 * our ddi_dma_bindhdl routine
3504 */
3505 static int
3506 bofi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
3507 ddi_dma_handle_t handle, struct ddi_dma_req *dmareqp,
3508 ddi_dma_cookie_t *cookiep, uint_t *ccountp)
3509 {
3510 int retval = DDI_DMA_NORESOURCES;
3511 auto struct ddi_dma_req dmareq;
3512 struct bofi_shadow *hp;
3513 struct bofi_shadow *hhashp;
3514 ddi_dma_impl_t *mp;
3515 unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
3516
3517 /*
3518 * check we really have a shadow for this handle
3519 */
3520 mutex_enter(&bofi_low_mutex);
3521 mutex_enter(&bofi_mutex);
3522 hhashp = HDL_HHASH(handle);
3523 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3524 if (hp->hdl.dma_handle == handle)
3525 break;
3526 mutex_exit(&bofi_mutex);
3527 mutex_exit(&bofi_low_mutex);
3528 if (hp == hhashp) {
3529 /*
3530 * no we don't - just call nexus to do the real work
3531 */
3532 		return (save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
3533 		    cookiep, ccountp));
3534 }
3535 /*
3536 * yes we have - see if it's already bound
3537 */
3538 if (hp->type != BOFI_NULL)
3539 return (DDI_DMA_INUSE);
3540
3541 hp->flags = dmareqp->dmar_flags;
3542 if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
3543 hp->map_flags = B_PAGEIO;
3544 hp->map_pp = dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp;
3545 } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
3546 hp->map_flags = B_SHADOW;
3547 hp->map_pplist = dmareqp->dmar_object.dmao_obj.virt_obj.v_priv;
3548 } else {
3549 hp->map_flags = 0;
3550 }
3551 /*
3552 * get a kernel virtual mapping
3553 */
3554 hp->addr = ddi_dmareq_mapin(dmareqp, &hp->mapaddr, &hp->len);
3555 if (hp->addr == NULL)
3556 goto error;
3557 if (bofi_sync_check) {
3558 /*
3559 * Take a copy and pass pointers to this up to nexus instead.
3560 * Data will be copied from the original on explicit
3561 * and implicit ddi_dma_sync()
3562 *
3563 * - maintain page alignment because some devices assume it.
3564 */
3565 hp->origaddr = hp->addr;
3566 hp->allocaddr = ddi_umem_alloc(
3567 ((uintptr_t)hp->addr & pagemask) + hp->len,
3568 (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP,
3569 &hp->umem_cookie);
3570 if (hp->allocaddr == NULL)
3571 goto error;
3572 hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask);
3573 if (dmareqp->dmar_flags & DDI_DMA_WRITE)
3574 xbcopy(hp->origaddr, hp->addr, hp->len);
3575 dmareq = *dmareqp;
3576 dmareq.dmar_object.dmao_size = hp->len;
3577 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
3578 dmareq.dmar_object.dmao_obj.virt_obj.v_as = &kas;
3579 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = hp->addr;
3580 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
3581 dmareqp = &dmareq;
3582 }
3583 /*
3584 * call nexus to do the real work
3585 */
3586 retval = save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
3587 cookiep, ccountp);
3588 if (retval != DDI_SUCCESS)
3589 goto error2;
3590 /*
3591 * unset DMP_NOSYNC
3592 */
3593 mp = (ddi_dma_impl_t *)handle;
3594 mp->dmai_rflags &= ~DMP_NOSYNC;
3595 /*
3596 * chain on any pre-existing errdefs that apply to this
3597 * acc_handle and corrupt if required (as there is an implicit
3598 * ddi_dma_sync() in this call)
3599 */
3600 mutex_enter(&bofi_low_mutex);
3601 mutex_enter(&bofi_mutex);
3602 hp->type = BOFI_DMA_HDL;
3603 chain_on_errdefs(hp);
3604 mutex_exit(&bofi_mutex);
3605 mutex_exit(&bofi_low_mutex);
3606 return (retval);
3607
3608 error:
3609 if (dmareqp->dmar_fp != DDI_DMA_DONTWAIT) {
3610 /*
3611 * what to do here? Wait a bit and try again
3612 */
3613 (void) timeout((void (*)())dmareqp->dmar_fp,
3614 dmareqp->dmar_arg, 10);
3615 }
3616 error2:
3617 if (hp) {
3618 ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
3619 hp->map_pp, hp->map_pplist);
3620 if (bofi_sync_check && hp->allocaddr)
3621 ddi_umem_free(hp->umem_cookie);
3622 hp->mapaddr = NULL;
3623 hp->allocaddr = NULL;
3624 hp->origaddr = NULL;
3625 }
3626 return (retval);
3627 }
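/*
 * Layout sketch for the bofi_sync_check double-buffering above
 * (descriptive only): the device is bound to a private copy of the
 * driver's buffer, so data only crosses between the two at explicit or
 * implicit ddi_dma_sync() points - which is where bofi gets the chance
 * to corrupt it:
 *
 *	origaddr  ->  driver's original buffer (from ddi_dmareq_mapin)
 *	allocaddr ->  private page-aligned allocation
 *	addr      ->  allocaddr + (origaddr & pagemask), i.e. the copy,
 *	              offset so it preserves the original page alignment
 *
 * On a DDI_DMA_WRITE bind the original is copied into the private
 * buffer up front; for reads, data is copied back at sync and unbind
 * time.
 */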
3628
3629
3630 /*
3631 * our ddi_dma_unbindhdl routine
3632 */
3633 static int
3634 bofi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
3635 {
3636 struct bofi_link *lp, *next_lp;
3637 struct bofi_errent *ep;
3638 int retval;
3639 struct bofi_shadow *hp;
3640 struct bofi_shadow *hhashp;
3641
3642 /*
3643 * call nexus to do the real work
3644 */
3645 retval = save_bus_ops.bus_dma_unbindhdl(dip, rdip, handle);
3646 if (retval != DDI_SUCCESS)
3647 return (retval);
3648 /*
3649 * check we really have a shadow for this handle
3650 */
3651 mutex_enter(&bofi_low_mutex);
3652 mutex_enter(&bofi_mutex);
3653 hhashp = HDL_HHASH(handle);
3654 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3655 if (hp->hdl.dma_handle == handle)
3656 break;
3657 if (hp == hhashp) {
3658 mutex_exit(&bofi_mutex);
3659 mutex_exit(&bofi_low_mutex);
3660 return (retval);
3661 }
3662 /*
3663 * yes we have - see if it's already unbound
3664 */
3665 if (hp->type == BOFI_NULL)
3666 panic("driver unbinding unbound dma_handle");
3667 /*
3668 * free any errdef link structures tagged on to this
3669 * shadow handle
3670 */
3671 for (lp = hp->link; lp != NULL; ) {
3672 next_lp = lp->link;
3673 /*
3674 * there is an implicit sync_for_cpu on free -
3675 * may need to corrupt
3676 */
3677 ep = lp->errentp;
3678 if ((ep->errdef.access_type & BOFI_DMA_R) &&
3679 (hp->flags & DDI_DMA_READ) &&
3680 (ep->state & BOFI_DEV_ACTIVE)) {
3681 do_dma_corrupt(hp, ep, DDI_DMA_SYNC_FORCPU, 0, hp->len);
3682 }
3683 lp->link = bofi_link_freelist;
3684 bofi_link_freelist = lp;
3685 lp = next_lp;
3686 }
3687 hp->link = NULL;
3688 hp->type = BOFI_NULL;
3689 mutex_exit(&bofi_mutex);
3690 mutex_exit(&bofi_low_mutex);
3691
3692 if (bofi_sync_check && (hp->flags & DDI_DMA_READ))
3693 /*
3694 * implicit sync_for_cpu - copy data back
3695 */
3696 if (hp->allocaddr)
3697 xbcopy(hp->addr, hp->origaddr, hp->len);
3698 ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
3699 hp->map_pp, hp->map_pplist);
3700 if (bofi_sync_check && hp->allocaddr)
3701 ddi_umem_free(hp->umem_cookie);
3702 hp->mapaddr = NULL;
3703 hp->allocaddr = NULL;
3704 hp->origaddr = NULL;
3705 return (retval);
3706 }
3707
3708
3709 /*
3710 * our ddi_dma_sync routine
3711 */
3712 static int
3713 bofi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
3714 ddi_dma_handle_t handle, off_t off, size_t len, uint_t flags)
3715 {
3716 struct bofi_link *lp;
3717 struct bofi_errent *ep;
3718 struct bofi_shadow *hp;
3719 struct bofi_shadow *hhashp;
3720 int retval;
3721
3722 if (flags == DDI_DMA_SYNC_FORCPU || flags == DDI_DMA_SYNC_FORKERNEL) {
3723 /*
3724 * in this case get nexus driver to do sync first
3725 */
3726 retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
3727 len, flags);
3728 if (retval != DDI_SUCCESS)
3729 return (retval);
3730 }
3731 /*
3732 * check we really have a shadow for this handle
3733 */
3734 mutex_enter(&bofi_low_mutex);
3735 mutex_enter(&bofi_mutex);
3736 hhashp = HDL_HHASH(handle);
3737 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3738 if (hp->hdl.dma_handle == handle &&
3739 hp->type == BOFI_DMA_HDL)
3740 break;
3741 mutex_exit(&bofi_mutex);
3742 mutex_exit(&bofi_low_mutex);
3743 if (hp != hhashp) {
3744 /*
3745 * yes - do we need to copy data from original
3746 */
3747 if (bofi_sync_check && flags == DDI_DMA_SYNC_FORDEV)
3748 if (hp->allocaddr)
3749 xbcopy(hp->origaddr+off, hp->addr+off,
3750 len ? len : (hp->len - off));
3751 /*
3752 * yes - check if we need to corrupt the data
3753 */
3754 mutex_enter(&bofi_low_mutex);
3755 mutex_enter(&bofi_mutex);
3756 for (lp = hp->link; lp != NULL; lp = lp->link) {
3757 ep = lp->errentp;
3758 if ((((ep->errdef.access_type & BOFI_DMA_R) &&
3759 (flags == DDI_DMA_SYNC_FORCPU ||
3760 flags == DDI_DMA_SYNC_FORKERNEL)) ||
3761 ((ep->errdef.access_type & BOFI_DMA_W) &&
3762 (flags == DDI_DMA_SYNC_FORDEV))) &&
3763 (ep->state & BOFI_DEV_ACTIVE)) {
3764 do_dma_corrupt(hp, ep, flags, off,
3765 len ? len : (hp->len - off));
3766 }
3767 }
3768 mutex_exit(&bofi_mutex);
3769 mutex_exit(&bofi_low_mutex);
3770 /*
3771 * do we need to copy data to original
3772 */
3773 if (bofi_sync_check && (flags == DDI_DMA_SYNC_FORCPU ||
3774 flags == DDI_DMA_SYNC_FORKERNEL))
3775 if (hp->allocaddr)
3776 xbcopy(hp->addr+off, hp->origaddr+off,
3777 len ? len : (hp->len - off));
3778 }
3779 if (flags == DDI_DMA_SYNC_FORDEV)
3780 /*
3781 * in this case get nexus driver to do sync last
3782 */
3783 retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
3784 len, flags);
3785 return (retval);
3786 }
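
/*
 * Note: when bofi_sync_check is set, the driver's buffer (hp->origaddr)
 * is shadowed by a bofi-owned copy (hp->addr). Data is copied
 * origaddr -> addr on a sync FORDEV and addr -> origaddr on a sync
 * FORCPU/FORKERNEL, with corruption applied to the shadow in between.
 * The intent appears to be to catch drivers that touch DMA memory
 * without the ddi_dma_sync() calls the DDI requires, since such
 * accesses miss the shadow copy entirely.
 */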


/*
 * our dma_win routine
 */
static int
bofi_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;
	int retval;
	ddi_dma_impl_t *mp;

	/*
	 * call nexus to do the real work
	 */
	retval = save_bus_ops.bus_dma_win(dip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp);
	if (retval != DDI_SUCCESS)
		return (retval);
	/*
	 * check we really have a shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == handle)
			break;
	if (hp != hhashp) {
		/*
		 * yes - make sure DMP_NOSYNC is unset
		 */
		mp = (ddi_dma_impl_t *)handle;
		mp->dmai_rflags &= ~DMP_NOSYNC;
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	return (retval);
}
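
/*
 * Clearing DMP_NOSYNC above presumably matters because a driver that
 * sees DMP_NOSYNC set may legitimately skip ddi_dma_sync() calls;
 * forcing it off keeps the sync path - and with it bofi's corruption
 * and bofi_sync_check copying in bofi_dma_flush() - in play.
 */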


/*
 * our dma_ctl routine
 */
static int
bofi_dma_ctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;
	int retval;
	int i;
	struct bofi_shadow *dummyhp;

	/*
	 * get nexus to do real work
	 */
	retval = save_bus_ops.bus_dma_ctl(dip, rdip, handle, request, offp,
	    lenp, objp, flags);
	if (retval != DDI_SUCCESS)
		return (retval);
	/*
	 * if driver_list is set, only intercept those drivers
	 */
	if (!driver_under_test(rdip))
		return (DDI_SUCCESS);

#if defined(__sparc)
	/*
	 * check if this is a dvma_reserve - that one's like a
	 * dma_allochdl and needs to be handled separately
	 */
	if (request == DDI_DMA_RESERVE) {
		bofi_dvma_reserve(rdip, *(ddi_dma_handle_t *)objp);
		return (DDI_SUCCESS);
	}
#endif
	/*
	 * check we really have a shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == handle)
			break;
	if (hp == hhashp) {
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (retval);
	}
	/*
	 * yes we have - see what kind of command this is
	 */
	switch (request) {
	case DDI_DMA_RELEASE:
		/*
		 * dvma release - release dummy handle and all the index handles
		 */
		dummyhp = hp;
		dummyhp->hnext->hprev = dummyhp->hprev;
		dummyhp->hprev->hnext = dummyhp->hnext;
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		for (i = 0; i < dummyhp->len; i++) {
			hp = dummyhp->hparrayp[i];
			/*
			 * check none of the index handles were still loaded
			 */
			if (hp->type != BOFI_NULL)
				panic("driver releasing loaded dvma");
			/*
			 * remove from dhash and inuse lists
			 */
			mutex_enter(&bofi_low_mutex);
			mutex_enter(&bofi_mutex);
			hp->dnext->dprev = hp->dprev;
			hp->dprev->dnext = hp->dnext;
			hp->next->prev = hp->prev;
			hp->prev->next = hp->next;
			mutex_exit(&bofi_mutex);
			mutex_exit(&bofi_low_mutex);

			if (bofi_sync_check && hp->allocaddr)
				ddi_umem_free(hp->umem_cookie);
			kmem_free(hp, sizeof (struct bofi_shadow));
		}
		kmem_free(dummyhp->hparrayp, dummyhp->len *
		    sizeof (struct bofi_shadow *));
		kmem_free(dummyhp, sizeof (struct bofi_shadow));
		return (retval);
	default:
		break;
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	return (retval);
}
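
/*
 * Note: DDI_DMA_RESERVE and DDI_DMA_RELEASE bracket the sparc fast DVMA
 * interface. Reserve creates a dummy shadow handle plus one real shadow
 * per reserved dvma page (see bofi_dvma_reserve() below); release tears
 * all of them down again. Any other ctl request simply passes through
 * once the nexus has done the real work.
 */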

#if defined(__sparc)
/*
 * dvma reserve case from bofi_dma_ctl()
 */
static void
bofi_dvma_reserve(dev_info_t *rdip, ddi_dma_handle_t handle)
{
	struct bofi_shadow *hp;
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *dhashp;
	struct bofi_shadow *hhashp;
	ddi_dma_impl_t *mp;
	struct fast_dvma *nexus_private;
	int i, count;

	mp = (ddi_dma_impl_t *)handle;
	count = mp->dmai_ndvmapages;
	/*
	 * allocate dummy shadow handle structure
	 */
	dummyhp = kmem_zalloc(sizeof (*dummyhp), KM_SLEEP);
	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
		/*
		 * overlay our routines over the nexus's dvma routines
		 */
		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
		dummyhp->save.dvma_ops = *(nexus_private->ops);
		nexus_private->ops = &bofi_dvma_ops;
	}
	/*
	 * now fill in the dummy handle. This just gets put on hhash queue
	 * so our dvma routines can find and index off to the handle they
	 * really want.
	 */
	(void) strncpy(dummyhp->name, ddi_get_name(rdip), NAMESIZE);
	dummyhp->instance = ddi_get_instance(rdip);
	dummyhp->rnumber = -1;
	dummyhp->dip = rdip;
	dummyhp->len = count;
	dummyhp->hdl.dma_handle = handle;
	dummyhp->link = NULL;
	dummyhp->type = BOFI_NULL;
	/*
	 * allocate space for real handles
	 */
	dummyhp->hparrayp = kmem_alloc(count *
	    sizeof (struct bofi_shadow *), KM_SLEEP);
	for (i = 0; i < count; i++) {
		/*
		 * allocate shadow handle structures and fill them in
		 */
		hp = kmem_zalloc(sizeof (*hp), KM_SLEEP);
		(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
		hp->instance = ddi_get_instance(rdip);
		hp->rnumber = -1;
		hp->dip = rdip;
		hp->hdl.dma_handle = 0;
		hp->link = NULL;
		hp->type = BOFI_NULL;
		if (bofi_sync_check) {
			unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
			/*
			 * Take a copy and set this to be hp->addr
			 * Data will be copied to and from the original on
			 * explicit and implicit ddi_dma_sync()
			 *
			 * - maintain page alignment because some devices
			 * assume it.
			 */
			hp->allocaddr = ddi_umem_alloc(
			    ((int)(uintptr_t)hp->addr & pagemask)
			    + pagemask + 1,
			    KM_SLEEP, &hp->umem_cookie);
			hp->addr = hp->allocaddr +
			    ((int)(uintptr_t)hp->addr & pagemask);
		}
		/*
		 * add to dhash and inuse lists.
		 * these don't go on hhash queue.
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hp->next = shadow_list.next;
		shadow_list.next->prev = hp;
		hp->prev = &shadow_list;
		shadow_list.next = hp;
		dhashp = HDL_DHASH(hp->dip);
		hp->dnext = dhashp->dnext;
		dhashp->dnext->dprev = hp;
		hp->dprev = dhashp;
		dhashp->dnext = hp;
		dummyhp->hparrayp[i] = hp;
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
	}
	/*
	 * add dummy handle to hhash list only
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	dummyhp->hnext = hhashp->hnext;
	hhashp->hnext->hprev = dummyhp;
	dummyhp->hprev = hhashp;
	hhashp->hnext = dummyhp;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
}
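
/*
 * The resulting structure is one dummy shadow on the handle hash (keyed
 * by the reserved handle) whose hparrayp[] holds one real shadow per
 * reserved dvma page. The dvma routines below find the dummy by handle
 * and then use the caller's index to select the real shadow, roughly:
 *
 *	dummyhp = <hhash lookup of h>;
 *	hp = dummyhp->hparrayp[index];
 */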

/*
 * our dvma_kaddr_load()
 */
static void
bofi_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
    ddi_dma_cookie_t *cp)
{
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;
	struct bofi_errent *ep;
	struct bofi_link *lp;

	/*
	 * check we really have a dummy shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(h);
	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
	    dummyhp = dummyhp->hnext)
		if (dummyhp->hdl.dma_handle == h)
			break;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	if (dummyhp == hhashp) {
		/*
		 * no dummy shadow - panic
		 */
		panic("driver dvma_kaddr_load with no reserve");
	}

	/*
	 * find real hp
	 */
	hp = dummyhp->hparrayp[index];
	/*
	 * check it's not already loaded
	 */
	if (hp->type != BOFI_NULL)
		panic("driver loading loaded dvma");
	/*
	 * if we're doing copying, just need to change origaddr and get
	 * nexus to map hp->addr again
	 * if not, set hp->addr to new address.
	 * - note these are always kernel virtual addresses - no need to map
	 */
	if (bofi_sync_check && hp->allocaddr) {
		hp->origaddr = a;
		a = hp->addr;
	} else
		hp->addr = a;
	hp->len = len;
	/*
	 * get nexus to do the real work
	 */
	dummyhp->save.dvma_ops.dvma_kaddr_load(h, a, len, index, cp);
	/*
	 * chain on any pre-existing errdefs that apply to this dma_handle
	 * no need to corrupt - there's no implicit dma_sync on this one
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hp->type = BOFI_DMA_HDL;
	for (ep = errent_listp; ep != NULL; ep = ep->next) {
		if (ddi_name_to_major(hp->name) ==
		    ddi_name_to_major(ep->name) &&
		    hp->instance == ep->errdef.instance &&
		    (ep->errdef.rnumber == -1 ||
		    hp->rnumber == ep->errdef.rnumber) &&
		    ((ep->errdef.access_type & BOFI_DMA_RW) &&
		    (((uintptr_t)(hp->addr + ep->errdef.offset +
		    ep->errdef.len) & ~LLSZMASK) >
		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
		    LLSZMASK) & ~LLSZMASK)))) {
			lp = bofi_link_freelist;
			if (lp != NULL) {
				bofi_link_freelist = lp->link;
				lp->errentp = ep;
				lp->link = hp->link;
				hp->link = lp;
			}
		}
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
}
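
/*
 * The LLSZMASK test above appears to check that the errdef's
 * [offset, offset + len) range, when rounded inward to long-long
 * (LLSZMASK-sized) boundaries, still contains at least one complete
 * aligned word of the loaded region - i.e. that do_dma_corrupt() would
 * have something to corrupt - before a link structure is spent on the
 * errdef.
 */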

/*
 * our dvma_unload()
 */
static void
bofi_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
{
	struct bofi_link *lp, *next_lp;
	struct bofi_errent *ep;
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;

	/*
	 * check we really have a dummy shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(h);
	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
	    dummyhp = dummyhp->hnext)
		if (dummyhp->hdl.dma_handle == h)
			break;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	if (dummyhp == hhashp) {
		/*
		 * no dummy shadow - panic
		 */
		panic("driver dvma_unload with no reserve");
	}
	dummyhp->save.dvma_ops.dvma_unload(h, index, view);
	/*
	 * find real hp
	 */
	hp = dummyhp->hparrayp[index];
	/*
	 * check it's not already unloaded
	 */
	if (hp->type == BOFI_NULL)
		panic("driver unloading unloaded dvma");
	/*
	 * free any errdef link structures tagged on to this
	 * shadow handle - do corruption if necessary
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	for (lp = hp->link; lp != NULL; ) {
		next_lp = lp->link;
		ep = lp->errentp;
		if ((ep->errdef.access_type & BOFI_DMA_R) &&
		    (view == DDI_DMA_SYNC_FORCPU ||
		    view == DDI_DMA_SYNC_FORKERNEL) &&
		    (ep->state & BOFI_DEV_ACTIVE)) {
			do_dma_corrupt(hp, ep, view, 0, hp->len);
		}
		lp->link = bofi_link_freelist;
		bofi_link_freelist = lp;
		lp = next_lp;
	}
	hp->link = NULL;
	hp->type = BOFI_NULL;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	/*
	 * if there is an explicit sync_for_cpu, then do copy to original
	 */
	if (bofi_sync_check &&
	    (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL))
		if (hp->allocaddr)
			xbcopy(hp->addr, hp->origaddr, hp->len);
}

/*
 * our dvma_sync()
 */
static void
bofi_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
{
	struct bofi_link *lp;
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *hhashp;

	/*
	 * check we really have a dummy shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(h);
	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
	    dummyhp = dummyhp->hnext)
		if (dummyhp->hdl.dma_handle == h)
			break;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	if (dummyhp == hhashp) {
		/*
		 * no dummy shadow - panic
		 */
		panic("driver dvma_sync with no reserve");
	}
	/*
	 * find real hp
	 */
	hp = dummyhp->hparrayp[index];
	/*
	 * check it's actually loaded
	 */
	if (hp->type == BOFI_NULL)
		panic("driver syncing unloaded dvma");
	if (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)
		/*
		 * in this case do sync first
		 */
		dummyhp->save.dvma_ops.dvma_sync(h, index, view);
	/*
	 * if there is an explicit sync_for_dev, then do copy from original
	 */
	if (bofi_sync_check && view == DDI_DMA_SYNC_FORDEV) {
		if (hp->allocaddr)
			xbcopy(hp->origaddr, hp->addr, hp->len);
	}
	/*
	 * do corruption if necessary
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		ep = lp->errentp;
		if ((((ep->errdef.access_type & BOFI_DMA_R) &&
		    (view == DDI_DMA_SYNC_FORCPU ||
		    view == DDI_DMA_SYNC_FORKERNEL)) ||
		    ((ep->errdef.access_type & BOFI_DMA_W) &&
		    (view == DDI_DMA_SYNC_FORDEV))) &&
		    (ep->state & BOFI_DEV_ACTIVE)) {
			do_dma_corrupt(hp, ep, view, 0, hp->len);
		}
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	/*
	 * if there is an explicit sync_for_cpu, then do copy to original
	 */
	if (bofi_sync_check &&
	    (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)) {
		if (hp->allocaddr)
			xbcopy(hp->addr, hp->origaddr, hp->len);
	}
	if (view == DDI_DMA_SYNC_FORDEV)
		/*
		 * in this case do sync last
		 */
		dummyhp->save.dvma_ops.dvma_sync(h, index, view);
}
#endif

/*
 * bofi intercept routine - gets called instead of user's interrupt routine
 */
static uint_t
bofi_intercept_intr(caddr_t xp, caddr_t arg2)
{
	struct bofi_errent *ep;
	struct bofi_link *lp;
	struct bofi_shadow *hp;
	int intr_count = 1;
	int i;
	uint_t retval = DDI_INTR_UNCLAIMED;
	uint_t result;
	int unclaimed_counter = 0;
	int jabber_detected = 0;

	hp = (struct bofi_shadow *)xp;
	/*
	 * check if nothing to do
	 */
	if (hp->link == NULL)
		return (hp->save.intr.int_handler
		    (hp->save.intr.int_handler_arg1, arg2));
	mutex_enter(&bofi_mutex);
	/*
	 * look for any errdefs
	 */
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		ep = lp->errentp;
		if (ep->state & BOFI_DEV_ACTIVE) {
			/*
			 * got one
			 */
			if ((ep->errdef.access_count ||
			    ep->errdef.fail_count) &&
			    (ep->errdef.access_type & BOFI_LOG))
				log_acc_event(ep, BOFI_INTR, 0, 0, 1, 0);
			if (ep->errdef.access_count > 1) {
				ep->errdef.access_count--;
			} else if (ep->errdef.fail_count > 0) {
				ep->errdef.fail_count--;
				ep->errdef.access_count = 0;
				/*
				 * OK do "corruption"
				 */
				if (ep->errstate.fail_time == 0)
					ep->errstate.fail_time = bofi_gettime();
				switch (ep->errdef.optype) {
				case BOFI_DELAY_INTR:
					if (!hp->hilevel) {
						drv_usecwait
						    (ep->errdef.operand);
					}
					break;
				case BOFI_LOSE_INTR:
					intr_count = 0;
					break;
				case BOFI_EXTRA_INTR:
					intr_count += ep->errdef.operand;
					break;
				default:
					break;
				}
			}
		}
	}
	mutex_exit(&bofi_mutex);
	/*
	 * send extra or fewer interrupts as requested
	 */
	for (i = 0; i < intr_count; i++) {
		result = hp->save.intr.int_handler
		    (hp->save.intr.int_handler_arg1, arg2);
		if (result == DDI_INTR_CLAIMED)
			unclaimed_counter >>= 1;
		else if (++unclaimed_counter >= 20)
			jabber_detected = 1;
		if (i == 0)
			retval = result;
	}
	/*
	 * if more than 1000 spurious interrupts requested and
	 * jabber not detected - panic
	 */
	if (intr_count > 1000 && !jabber_detected)
		panic("undetected interrupt jabber: %s%d",
		    hp->name, hp->instance);
	/*
	 * return first response - or "unclaimed" if none
	 */
	return (retval);
}
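
/*
 * The jabber heuristic above is a leaky counter: every unclaimed return
 * increments unclaimed_counter while every claimed return halves it, so
 * the threshold of 20 is only reached on a sustained run of mostly
 * unclaimed interrupts. For example, 20 consecutive unclaimed returns
 * trip it, whereas strictly alternating claimed/unclaimed never gets
 * past 1.
 */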


/*
 * our ddi_check_acc_hdl
 */
/* ARGSUSED */
static int
bofi_check_acc_hdl(ddi_acc_impl_t *handle)
{
	struct bofi_shadow *hp;
	struct bofi_link *lp;
	uint_t result = 0;

	hp = handle->ahi_common.ah_bus_private;
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		return (0);
	}
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		/*
		 * OR in error state from all associated
		 * errdef structures
		 */
		if (lp->errentp->errdef.access_count == 0 &&
		    (lp->errentp->state & BOFI_DEV_ACTIVE)) {
			result = (lp->errentp->errdef.acc_chk & 1);
		}
	}
	mutex_exit(&bofi_mutex);
	return (result);
}

/*
 * our ddi_check_dma_hdl
 */
/* ARGSUSED */
static int
bofi_check_dma_hdl(ddi_dma_impl_t *handle)
{
	struct bofi_shadow *hp;
	struct bofi_link *lp;
	struct bofi_shadow *hhashp;
	uint_t result = 0;

	if (!mutex_tryenter(&bofi_mutex)) {
		return (0);
	}
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == (ddi_dma_handle_t)handle)
			break;
	if (hp == hhashp) {
		mutex_exit(&bofi_mutex);
		return (0);
	}
	if (!hp->link) {
		mutex_exit(&bofi_mutex);
		return (0);
	}
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		/*
		 * OR in error state from all associated
		 * errdef structures
		 */
		if (lp->errentp->errdef.access_count == 0 &&
		    (lp->errentp->state & BOFI_DEV_ACTIVE)) {
			result = ((lp->errentp->errdef.acc_chk & 2) ? 1 : 0);
		}
	}
	mutex_exit(&bofi_mutex);
	return (result);
}
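
/*
 * Judging by the two routines above, errdef.acc_chk is a bitmask: bit 0
 * makes the errdef report a fault for access handle checks, bit 1 for
 * DMA handle checks. mutex_tryenter() is used rather than mutex_enter(),
 * presumably because these checks can be called from contexts where
 * blocking on bofi_mutex is unsafe; on contention they just report
 * "no fault".
 */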


/* ARGSUSED */
static int
bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
    ddi_eventcookie_t eventhdl, void *impl_data)
{
	ddi_eventcookie_t ec;
	struct ddi_fault_event_data *arg;
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_shadow *dhashp;
	struct bofi_link *lp;

	ASSERT(eventhdl);
	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (ec != eventhdl)
		return (save_bus_ops.bus_post_event(dip, rdip, eventhdl,
		    impl_data));

	arg = (struct ddi_fault_event_data *)impl_data;
	mutex_enter(&bofi_mutex);
	/*
	 * find shadow handles with appropriate dev_infos
	 * and set error reported on all associated errdef structures
	 */
	dhashp = HDL_DHASH(arg->f_dip);
	for (hp = dhashp->dnext; hp != dhashp; hp = hp->dnext) {
		if (hp->dip == arg->f_dip) {
			for (lp = hp->link; lp != NULL; lp = lp->link) {
				ep = lp->errentp;
				ep->errstate.errmsg_count++;
				if ((ep->errstate.msg_time == NULL ||
				    ep->errstate.severity > arg->f_impact) &&
				    (ep->state & BOFI_DEV_ACTIVE)) {
					ep->errstate.msg_time = bofi_gettime();
					ep->errstate.severity = arg->f_impact;
					(void) strncpy(ep->errstate.buffer,
					    arg->f_message, ERRMSGSIZE);
					ddi_trigger_softintr(ep->softintr_id);
				}
			}
		}
	}
	mutex_exit(&bofi_mutex);
	return (save_bus_ops.bus_post_event(dip, rdip, eventhdl, impl_data));
}

/*ARGSUSED*/
static int
bofi_fm_ereport_callback(sysevent_t *ev, void *cookie)
{
	char *class = "";
	char *path = "";
	char *ptr;
	nvlist_t *nvlist;
	nvlist_t *detector;
	ddi_fault_impact_t impact;
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_link *lp;
	char service_class[FM_MAX_CLASS];
	char hppath[MAXPATHLEN];
	int service_ereport = 0;

	(void) sysevent_get_attr_list(ev, &nvlist);
	(void) nvlist_lookup_string(nvlist, FM_CLASS, &class);
	if (nvlist_lookup_nvlist(nvlist, FM_EREPORT_DETECTOR, &detector) == 0)
		(void) nvlist_lookup_string(detector, FM_FMRI_DEV_PATH, &path);

	(void) snprintf(service_class, FM_MAX_CLASS, "%s.%s.%s.",
	    FM_EREPORT_CLASS, DDI_IO_CLASS, DDI_FM_SERVICE_IMPACT);
	if (strncmp(class, service_class, strlen(service_class) - 1) == 0)
		service_ereport = 1;

	mutex_enter(&bofi_mutex);
	/*
	 * find shadow handles with appropriate dev_infos
	 * and set error reported on all associated errdef structures
	 */
	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
		(void) ddi_pathname(hp->dip, hppath);
		if (strcmp(path, hppath) != 0)
			continue;
		for (lp = hp->link; lp != NULL; lp = lp->link) {
			ep = lp->errentp;
			ep->errstate.errmsg_count++;
			if (!(ep->state & BOFI_DEV_ACTIVE))
				continue;
			if (ep->errstate.msg_time != NULL)
				continue;
			if (service_ereport) {
				ptr = class + strlen(service_class);
				if (strcmp(ptr, DDI_FM_SERVICE_LOST) == 0)
					impact = DDI_SERVICE_LOST;
				else if (strcmp(ptr,
				    DDI_FM_SERVICE_DEGRADED) == 0)
					impact = DDI_SERVICE_DEGRADED;
				else if (strcmp(ptr,
				    DDI_FM_SERVICE_RESTORED) == 0)
					impact = DDI_SERVICE_RESTORED;
				else
					impact = DDI_SERVICE_UNAFFECTED;
				if (ep->errstate.severity > impact)
					ep->errstate.severity = impact;
			} else if (ep->errstate.buffer[0] == '\0') {
				(void) strncpy(ep->errstate.buffer, class,
				    ERRMSGSIZE);
			}
			if (ep->errstate.buffer[0] != '\0' &&
			    ep->errstate.severity < DDI_SERVICE_RESTORED) {
				ep->errstate.msg_time = bofi_gettime();
				ddi_trigger_softintr(ep->softintr_id);
			}
		}
	}
	nvlist_free(nvlist);
	mutex_exit(&bofi_mutex);
	return (0);
}
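
/*
 * Note: service_class is built with a trailing '.' and the strncmp above
 * compares one byte short of its length, so any ereport class beginning
 * with the FM_EREPORT_CLASS.DDI_IO_CLASS.DDI_FM_SERVICE_IMPACT prefix is
 * treated as a service-impact ereport; ptr then points at the tail
 * (lost/degraded/restored/...) that selects the impact value.
 */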

/*
 * our intr_ops routine
 */
static int
bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	int retval;
	struct bofi_shadow *hp;
	struct bofi_shadow *dhashp;
	struct bofi_shadow *hhashp;
	struct bofi_errent *ep;
	struct bofi_link *lp, *next_lp;

	switch (intr_op) {
	case DDI_INTROP_ADDISR:
		/*
		 * if driver_list is set, only intercept those drivers
		 */
		if (!driver_under_test(rdip))
			return (save_bus_ops.bus_intr_op(dip, rdip,
			    intr_op, hdlp, result));
		/*
		 * allocate shadow handle structure and fill in
		 */
		hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
		(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
		hp->instance = ddi_get_instance(rdip);
		hp->save.intr.int_handler = hdlp->ih_cb_func;
		hp->save.intr.int_handler_arg1 = hdlp->ih_cb_arg1;
		hdlp->ih_cb_func = (ddi_intr_handler_t *)bofi_intercept_intr;
		hdlp->ih_cb_arg1 = (caddr_t)hp;
		hp->bofi_inum = hdlp->ih_inum;
		hp->dip = rdip;
		hp->link = NULL;
		hp->type = BOFI_INT_HDL;
		/*
		 * save whether hilevel or not
		 */

		if (hdlp->ih_pri >= ddi_intr_get_hilevel_pri())
			hp->hilevel = 1;
		else
			hp->hilevel = 0;

		/*
		 * call nexus to do real work, but specifying our handler, and
		 * our shadow handle as argument
		 */
		retval = save_bus_ops.bus_intr_op(dip, rdip,
		    intr_op, hdlp, result);
		if (retval != DDI_SUCCESS) {
			kmem_free(hp, sizeof (struct bofi_shadow));
			return (retval);
		}
		/*
		 * add to dhash, hhash and inuse lists
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hp->next = shadow_list.next;
		shadow_list.next->prev = hp;
		hp->prev = &shadow_list;
		shadow_list.next = hp;
		hhashp = HDL_HHASH(hdlp->ih_inum);
		hp->hnext = hhashp->hnext;
		hhashp->hnext->hprev = hp;
		hp->hprev = hhashp;
		hhashp->hnext = hp;
		dhashp = HDL_DHASH(hp->dip);
		hp->dnext = dhashp->dnext;
		dhashp->dnext->dprev = hp;
		hp->dprev = dhashp;
		dhashp->dnext = hp;
		/*
		 * chain on any pre-existing errdefs that apply to this
		 * interrupt handle
		 */
		for (ep = errent_listp; ep != NULL; ep = ep->next) {
			if (ddi_name_to_major(hp->name) ==
			    ddi_name_to_major(ep->name) &&
			    hp->instance == ep->errdef.instance &&
			    (ep->errdef.access_type & BOFI_INTR)) {
				lp = bofi_link_freelist;
				if (lp != NULL) {
					bofi_link_freelist = lp->link;
					lp->errentp = ep;
					lp->link = hp->link;
					hp->link = lp;
				}
			}
		}
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (retval);
	case DDI_INTROP_REMISR:
		/*
		 * call nexus routine first
		 */
		retval = save_bus_ops.bus_intr_op(dip, rdip,
		    intr_op, hdlp, result);
		/*
		 * find shadow handle
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hhashp = HDL_HHASH(hdlp->ih_inum);
		for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
			if (hp->dip == rdip &&
			    hp->type == BOFI_INT_HDL &&
			    hp->bofi_inum == hdlp->ih_inum) {
				break;
			}
		}
		if (hp == hhashp) {
			mutex_exit(&bofi_mutex);
			mutex_exit(&bofi_low_mutex);
			return (retval);
		}
		/*
		 * found one - remove from dhash, hhash and inuse lists
		 */
		hp->hnext->hprev = hp->hprev;
		hp->hprev->hnext = hp->hnext;
		hp->dnext->dprev = hp->dprev;
		hp->dprev->dnext = hp->dnext;
		hp->next->prev = hp->prev;
		hp->prev->next = hp->next;
		/*
		 * free any errdef link structures
		 * tagged on to this shadow handle
		 */
		for (lp = hp->link; lp != NULL; ) {
			next_lp = lp->link;
			lp->link = bofi_link_freelist;
			bofi_link_freelist = lp;
			lp = next_lp;
		}
		hp->link = NULL;
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		kmem_free(hp, sizeof (struct bofi_shadow));
		return (retval);
	default:
		return (save_bus_ops.bus_intr_op(dip, rdip,
		    intr_op, hdlp, result));
	}
}
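
/*
 * Note: for DDI_INTROP_ADDISR the interception works by swapping
 * hdlp->ih_cb_func/ih_cb_arg1 before passing the op to the saved nexus
 * bus_intr_op, so the system registers bofi_intercept_intr() with the
 * shadow handle as its first argument. The driver's real handler and
 * argument are kept in hp->save.intr and are invoked (zero or more
 * times, per the errdefs) from bofi_intercept_intr().
 */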