/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
 */


#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/debug.h>
#include <sys/bofi.h>
#include <sys/bofi_impl.h>

#ifdef __sparc
#include <sys/dvma.h>
#endif

/*
 * Testing the resilience of a hardened device driver requires a suitably wide
 * range of different types of "typical" hardware faults to be injected,
 * preferably in a controlled and repeatable fashion. This is not in general
 * possible via hardware, so the "fault injection test harness" is provided.
 * This works by intercepting calls from the driver to various DDI routines,
 * and then corrupting the result of those DDI routine calls as if the
 * hardware had caused the corruption.
 *
 * Conceptually, the bofi driver consists of two parts:
 *
 * A driver interface that supports a number of ioctls which allow error
 * definitions ("errdefs") to be defined and subsequently managed. The
 * driver is a clone driver, so each open will create a separate
 * invocation. Any errdefs created by using ioctls to that invocation
 * will automatically be deleted when that invocation is closed.
 *
 * Intercept routines: When the bofi driver is attached, it edits the
 * bus_ops structure of the bus nexus specified by the "bofi-nexus"
 * field in the "bofi.conf" file, thus allowing the
 * bofi driver to intercept various ddi functions. These intercept
 * routines primarily carry out fault injections based on the errdefs
 * created for that device.
 *
 * Faults can be injected into:
 *
 * DMA (corrupting data for DMA to/from memory areas defined by
 * ddi_dma_setup(), ddi_dma_bind_handle(), etc)
 *
 * Physical IO (corrupting data sent/received via ddi_get8(), ddi_put8(),
 * etc),
 *
 * Interrupts (generating spurious interrupts, losing interrupts,
 * delaying interrupts).
 *
 * By default, ddi routines called from all drivers will be intercepted
 * and faults potentially injected. However, the "bofi-to-test" field in
 * the "bofi.conf" file can be set to a space-separated list of drivers to
 * test (or, by preceding each driver name in the list with an "!", a list
 * of drivers not to test).
 *
 * In addition to fault injection, the bofi driver does a number of static
 * checks which are controlled by properties in the "bofi.conf" file.
 *
 * "bofi-ddi-check" - if set will validate that there are no PIO accesses
 * other than those made via the DDI routines (ddi_get8(), ddi_put8(), etc).
 *
 * "bofi-range-check" - if set to "warn" (1) or "panic" (2), will
 * validate that calls to ddi_get8(), ddi_put8(), etc are not made
 * specifying addresses outside the range of the access_handle.
 *
 * "bofi-sync-check" - if set will validate that calls to ddi_dma_sync()
 * are being made correctly.
 */
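
/*
 * For illustration only - a hypothetical bofi.conf fragment using the
 * properties parsed in bofi_attach() below ("pci" and "foo" are made-up
 * nexus and driver names):
 *
 *	name="bofi" parent="pseudo" instance=0;
 *	bofi-nexus="pci";
 *	bofi-to-test="foo";
 *	bofi-range-check="warn";
 *	bofi-ddi-check="on";
 *	bofi-sync-check="on";
 */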

extern void *bp_mapin_common(struct buf *, int);

static int bofi_ddi_check;
static int bofi_sync_check;
static int bofi_range_check;

static struct bofi_link bofi_link_array[BOFI_NLINKS], *bofi_link_freelist;

#define	LLSZMASK (sizeof (uint64_t)-1)

#define	HDL_HASH_TBL_SIZE 64
static struct bofi_shadow hhash_table[HDL_HASH_TBL_SIZE];
static struct bofi_shadow dhash_table[HDL_HASH_TBL_SIZE];
#define	HDL_DHASH(x) \
	(&dhash_table[((uintptr_t)(x) >> 3) & (HDL_HASH_TBL_SIZE-1)])
#define	HDL_HHASH(x) \
	(&hhash_table[((uintptr_t)(x) >> 5) & (HDL_HASH_TBL_SIZE-1)])
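
/*
 * Note: the hash key is a raw kernel pointer; the right-shifts presumably
 * discard low-order bits that are constant for kmem-allocated structures,
 * so that consecutive allocations spread across the 64 buckets.
 */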

static struct bofi_shadow shadow_list;
static struct bofi_errent *errent_listp;

static char driver_list[NAMESIZE];
static int driver_list_size;
static int driver_list_neg;
static char nexus_name[NAMESIZE];

static int initialized = 0;

#define	NCLONES 2560
static int clone_tab[NCLONES];

static dev_info_t *our_dip;

static kmutex_t bofi_mutex;
static kmutex_t clone_tab_mutex;
static kmutex_t bofi_low_mutex;
static ddi_iblock_cookie_t bofi_low_cookie;
static uint_t bofi_signal(caddr_t arg);
static int bofi_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int bofi_attach(dev_info_t *, ddi_attach_cmd_t);
static int bofi_detach(dev_info_t *, ddi_detach_cmd_t);
static int bofi_open(dev_t *, int, int, cred_t *);
static int bofi_close(dev_t, int, int, cred_t *);
static int bofi_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int bofi_errdef_alloc(struct bofi_errdef *, char *,
    struct bofi_errent *);
static int bofi_errdef_free(struct bofi_errent *);
static void bofi_start(struct bofi_errctl *, char *);
static void bofi_stop(struct bofi_errctl *, char *);
static void bofi_broadcast(struct bofi_errctl *, char *);
static void bofi_clear_acc_chk(struct bofi_errctl *, char *);
static void bofi_clear_errors(struct bofi_errctl *, char *);
static void bofi_clear_errdefs(struct bofi_errctl *, char *);
static int bofi_errdef_check(struct bofi_errstate *,
    struct acc_log_elem **);
static int bofi_errdef_check_w(struct bofi_errstate *,
    struct acc_log_elem **);
static int bofi_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
    off_t, off_t, caddr_t *);
static int bofi_dma_allochdl(dev_info_t *, dev_info_t *,
    ddi_dma_attr_t *, int (*)(caddr_t), caddr_t,
    ddi_dma_handle_t *);
static int bofi_dma_freehdl(dev_info_t *, dev_info_t *,
    ddi_dma_handle_t);
static int bofi_dma_bindhdl(dev_info_t *, dev_info_t *,
    ddi_dma_handle_t, struct ddi_dma_req *, ddi_dma_cookie_t *,
    uint_t *);
static int bofi_dma_unbindhdl(dev_info_t *, dev_info_t *,
    ddi_dma_handle_t);
static int bofi_dma_flush(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
    off_t, size_t, uint_t);
static int bofi_dma_ctl(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
    enum ddi_dma_ctlops, off_t *, size_t *, caddr_t *, uint_t);
static int bofi_dma_win(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
static int bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp,
    void *result);
static int bofi_fm_ereport_callback(sysevent_t *ev, void *cookie);

evchan_t *bofi_error_chan;

#define	FM_SIMULATED_DMA "simulated.dma"
#define	FM_SIMULATED_PIO "simulated.pio"

#if defined(__sparc)
static void bofi_dvma_kaddr_load(ddi_dma_handle_t, caddr_t, uint_t,
    uint_t, ddi_dma_cookie_t *);
static void bofi_dvma_unload(ddi_dma_handle_t, uint_t, uint_t);
static void bofi_dvma_sync(ddi_dma_handle_t, uint_t, uint_t);
static void bofi_dvma_reserve(dev_info_t *, ddi_dma_handle_t);
#endif
static int driver_under_test(dev_info_t *);
static int bofi_check_acc_hdl(ddi_acc_impl_t *);
static int bofi_check_dma_hdl(ddi_dma_impl_t *);
static int bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
    ddi_eventcookie_t eventhdl, void *impl_data);

static struct bus_ops bofi_bus_ops = {
	BUSO_REV,
	bofi_map,
	NULL,
	NULL,
	NULL,
	i_ddi_map_fault,
	NULL,
	bofi_dma_allochdl,
	bofi_dma_freehdl,
	bofi_dma_bindhdl,
	bofi_dma_unbindhdl,
	bofi_dma_flush,
	bofi_dma_win,
	bofi_dma_ctl,
	NULL,
	ddi_bus_prop_op,
	ndi_busop_get_eventcookie,
	ndi_busop_add_eventcall,
	ndi_busop_remove_eventcall,
	bofi_post_event,
	NULL,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	bofi_intr_ops
};
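
/*
 * As the initializer above shows, only the map, DMA, event and interrupt
 * entry points are routed through bofi_* intercepts; the remaining slots
 * are left NULL or point at the standard ddi/ndi helpers and are not
 * intercepted.
 */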

static struct cb_ops bofi_cb_ops = {
	bofi_open,		/* open */
	bofi_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	bofi_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,		/* prop_op */
	NULL,			/* for STREAMS drivers */
	D_MP,			/* driver compatibility flag */
	CB_REV,			/* cb_ops revision */
	nodev,			/* aread */
	nodev			/* awrite */
};

static struct dev_ops bofi_ops = {
	DEVO_REV,		/* driver build version */
	0,			/* device reference count */
	bofi_getinfo,
	nulldev,
	nulldev,		/* probe */
	bofi_attach,
	bofi_detach,
	nulldev,		/* reset */
	&bofi_cb_ops,
	(struct bus_ops *)NULL,
	nulldev,		/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

/* module configuration stuff */
static void *statep;

static struct modldrv modldrv = {
	&mod_driverops,
	"bofi driver",
	&bofi_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	0
};

static struct bus_ops save_bus_ops;

#if defined(__sparc)
static struct dvma_ops bofi_dvma_ops = {
	DVMAO_REV,
	bofi_dvma_kaddr_load,
	bofi_dvma_unload,
	bofi_dvma_sync
};
#endif

/*
 * support routine - map user page into kernel virtual
 */
static caddr_t
dmareq_mapin(offset_t len, caddr_t addr, struct as *as, int flag)
{
	struct buf buf;
	struct proc proc;

	/*
	 * mock up a buf structure so we can call bp_mapin_common()
	 */
	buf.b_flags = B_PHYS;
	buf.b_un.b_addr = (caddr_t)addr;
	buf.b_bcount = (size_t)len;
	proc.p_as = as;
	buf.b_proc = &proc;
	return (bp_mapin_common(&buf, flag));
}


/*
 * support routine - map page chain into kernel virtual
 */
static caddr_t
dmareq_pp_mapin(offset_t len, uint_t offset, page_t *pp, int flag)
{
	struct buf buf;

	/*
	 * mock up a buf structure so we can call bp_mapin_common()
	 */
	buf.b_flags = B_PAGEIO;
	buf.b_un.b_addr = (caddr_t)(uintptr_t)offset;
	buf.b_bcount = (size_t)len;
	buf.b_pages = pp;
	return (bp_mapin_common(&buf, flag));
}


/*
 * support routine - map page array into kernel virtual
 */
static caddr_t
dmareq_pplist_mapin(uint_t len, caddr_t addr, page_t **pplist, struct as *as,
    int flag)
{
	struct buf buf;
	struct proc proc;

	/*
	 * mock up a buf structure so we can call bp_mapin_common()
	 */
	buf.b_flags = B_PHYS|B_SHADOW;
	buf.b_un.b_addr = addr;
	buf.b_bcount = len;
	buf.b_shadow = pplist;
	proc.p_as = as;
	buf.b_proc = &proc;
	return (bp_mapin_common(&buf, flag));
}


/*
 * support routine - map dmareq into kernel virtual if not already mapped;
 * fills in *lenp with the length.
 * *mapaddrp will be the new kernel virtual address - or NULL if no
 * mapping was needed.
 */
static caddr_t
ddi_dmareq_mapin(struct ddi_dma_req *dmareqp, caddr_t *mapaddrp,
    offset_t *lenp)
{
	int sleep = (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? VM_SLEEP: VM_NOSLEEP;

	*lenp = dmareqp->dmar_object.dmao_size;
	if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
		*mapaddrp = dmareq_pp_mapin(dmareqp->dmar_object.dmao_size,
		    dmareqp->dmar_object.dmao_obj.pp_obj.pp_offset,
		    dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp, sleep);
		return (*mapaddrp);
	} else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
		*mapaddrp = dmareq_pplist_mapin(dmareqp->dmar_object.dmao_size,
		    dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
		    dmareqp->dmar_object.dmao_obj.virt_obj.v_priv,
		    dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
		return (*mapaddrp);
	} else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == &kas) {
		*mapaddrp = NULL;
		return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
	} else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == NULL) {
		*mapaddrp = NULL;
		return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
	} else {
		*mapaddrp = dmareq_mapin(dmareqp->dmar_object.dmao_size,
		    dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
		    dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
		return (*mapaddrp);
	}
}
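
/*
 * In summary, ddi_dmareq_mapin() dispatches on the dmareq object type:
 * page chains go via dmareq_pp_mapin(), shadow page arrays via
 * dmareq_pplist_mapin(), kernel (or unspecified) address spaces are
 * already mapped and returned directly, and anything else is a user
 * address mapped via dmareq_mapin().
 */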


/*
 * support routine - free the kernel virtual mapping as allocated by
 * ddi_dmareq_mapin()
 */
static void
ddi_dmareq_mapout(caddr_t addr, offset_t len, int map_flags, page_t *pp,
    page_t **pplist)
{
	struct buf buf;

	if (addr == NULL)
		return;
	/*
	 * mock up a buf structure
	 */
	buf.b_flags = B_REMAPPED | map_flags;
	buf.b_un.b_addr = addr;
	buf.b_bcount = (size_t)len;
	buf.b_pages = pp;
	buf.b_shadow = pplist;
	bp_mapout(&buf);
}

static time_t
bofi_gettime(void)
{
	timestruc_t ts;

	gethrestime(&ts);
	return (ts.tv_sec);
}

/*
 * reset the bus_ops structure of the specified nexus to point to
 * the original values in the save_bus_ops structure.
 *
 * Note that both this routine and modify_bus_ops() rely on the current
 * behavior of the framework in that nexus drivers are not unloadable
 *
 */

static int
reset_bus_ops(char *name, struct bus_ops *bop)
{
	struct modctl *modp;
	struct modldrv *mp;
	struct bus_ops *bp;
	struct dev_ops *ops;

	mutex_enter(&mod_lock);
	/*
	 * find specified module
	 */
	modp = &modules;
	do {
		if (strcmp(name, modp->mod_modname) == 0) {
			if (!modp->mod_linkage) {
				mutex_exit(&mod_lock);
				return (0);
			}
			mp = modp->mod_linkage->ml_linkage[0];
			if (!mp || !mp->drv_dev_ops) {
				mutex_exit(&mod_lock);
				return (0);
			}
			ops = mp->drv_dev_ops;
			bp = ops->devo_bus_ops;
			if (!bp) {
				mutex_exit(&mod_lock);
				return (0);
			}
			if (ops->devo_refcnt > 0) {
				/*
				 * As long as devices are active with modified
				 * bus ops bofi must not go away. There may be
				 * drivers with modified access or dma handles.
				 */
				mutex_exit(&mod_lock);
				return (0);
			}
			cmn_err(CE_NOTE, "bofi reset bus_ops for %s",
			    mp->drv_linkinfo);
			bp->bus_intr_op = bop->bus_intr_op;
			bp->bus_post_event = bop->bus_post_event;
			bp->bus_map = bop->bus_map;
			bp->bus_dma_map = bop->bus_dma_map;
			bp->bus_dma_allochdl = bop->bus_dma_allochdl;
			bp->bus_dma_freehdl = bop->bus_dma_freehdl;
			bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
			bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
			bp->bus_dma_flush = bop->bus_dma_flush;
			bp->bus_dma_win = bop->bus_dma_win;
			bp->bus_dma_ctl = bop->bus_dma_ctl;
			mutex_exit(&mod_lock);
			return (1);
		}
	} while ((modp = modp->mod_next) != &modules);
	mutex_exit(&mod_lock);
	return (0);
}

/*
 * modify the bus_ops structure of the specified nexus to point to bofi
 * routines, saving the original values in the save_bus_ops structure
 */

static int
modify_bus_ops(char *name, struct bus_ops *bop)
{
	struct modctl *modp;
	struct modldrv *mp;
	struct bus_ops *bp;
	struct dev_ops *ops;

	if (ddi_name_to_major(name) == -1)
		return (0);

	mutex_enter(&mod_lock);
	/*
	 * find specified module
	 */
	modp = &modules;
	do {
		if (strcmp(name, modp->mod_modname) == 0) {
			if (!modp->mod_linkage) {
				mutex_exit(&mod_lock);
				return (0);
			}
			mp = modp->mod_linkage->ml_linkage[0];
			if (!mp || !mp->drv_dev_ops) {
				mutex_exit(&mod_lock);
				return (0);
			}
			ops = mp->drv_dev_ops;
			bp = ops->devo_bus_ops;
			if (!bp) {
				mutex_exit(&mod_lock);
				return (0);
			}
			if (ops->devo_refcnt == 0) {
				/*
				 * If there is no device active for this
				 * module then there is nothing to do for bofi.
				 */
				mutex_exit(&mod_lock);
				return (0);
			}
			cmn_err(CE_NOTE, "bofi modify bus_ops for %s",
			    mp->drv_linkinfo);
			save_bus_ops = *bp;
			bp->bus_intr_op = bop->bus_intr_op;
			bp->bus_post_event = bop->bus_post_event;
			bp->bus_map = bop->bus_map;
			bp->bus_dma_map = bop->bus_dma_map;
			bp->bus_dma_allochdl = bop->bus_dma_allochdl;
			bp->bus_dma_freehdl = bop->bus_dma_freehdl;
			bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
			bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
			bp->bus_dma_flush = bop->bus_dma_flush;
			bp->bus_dma_win = bop->bus_dma_win;
			bp->bus_dma_ctl = bop->bus_dma_ctl;
			mutex_exit(&mod_lock);
			return (1);
		}
	} while ((modp = modp->mod_next) != &modules);
	mutex_exit(&mod_lock);
	return (0);
}
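
/*
 * modify_bus_ops() and reset_bus_ops() are used as a pair: bofi_attach()
 * patches the nexus named by "bofi-nexus" with the bofi_bus_ops entry
 * points, and bofi_detach() restores the entries saved in save_bus_ops.
 * Both walk the module list under mod_lock, and each refuses to patch
 * (or unpatch) unless the nexus devo_refcnt is in the appropriate state.
 */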


int
_init(void)
{
	int e;

	e = ddi_soft_state_init(&statep, sizeof (struct bofi_errent), 1);
	if (e != 0)
		return (e);
	if ((e = mod_install(&modlinkage)) != 0)
		ddi_soft_state_fini(&statep);
	return (e);
}


int
_fini(void)
{
	int e;

	if ((e = mod_remove(&modlinkage)) != 0)
		return (e);
	ddi_soft_state_fini(&statep);
	return (e);
}


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


static int
bofi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	char *name;
	char buf[80];
	int i;
	int s, ss;
	int size = NAMESIZE;
	int new_string;
	char *ptr;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);
	/*
	 * only one instance - but we clone using the open routine
	 */
	if (ddi_get_instance(dip) > 0)
		return (DDI_FAILURE);

	if (!initialized) {
		if ((name = ddi_get_name(dip)) == NULL)
			return (DDI_FAILURE);
		(void) snprintf(buf, sizeof (buf), "%s,ctl", name);
		if (ddi_create_minor_node(dip, buf, S_IFCHR, 0,
		    DDI_PSEUDO, 0) == DDI_FAILURE)
			return (DDI_FAILURE);

		if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_MED,
		    &bofi_low_cookie) != DDI_SUCCESS) {
			ddi_remove_minor_node(dip, buf);
			return (DDI_FAILURE); /* fail attach */
		}
		/*
		 * get nexus name (from conf file)
		 */
		if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
		    "bofi-nexus", nexus_name, &size) != DDI_PROP_SUCCESS) {
			ddi_remove_minor_node(dip, buf);
			return (DDI_FAILURE);
		}
		/*
		 * get whether and how to range check ddi_get/put accesses
		 * against the access handle (from conf file); note that
		 * ptr is only valid - and only freed - on a successful
		 * lookup
		 */
		bofi_range_check = 0;
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
		    "bofi-range-check", &ptr) == DDI_PROP_SUCCESS) {
			if (strcmp(ptr, "panic") == 0)
				bofi_range_check = 2;
			else if (strcmp(ptr, "warn") == 0)
				bofi_range_check = 1;
			ddi_prop_free(ptr);
		}

		/*
		 * get whether to prevent direct access to registers
		 */
		bofi_ddi_check = 0;
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
		    "bofi-ddi-check", &ptr) == DDI_PROP_SUCCESS) {
			if (strcmp(ptr, "on") == 0)
				bofi_ddi_check = 1;
			ddi_prop_free(ptr);
		}

		/*
		 * get whether to do copy on ddi_dma_sync
		 */
		bofi_sync_check = 0;
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
		    "bofi-sync-check", &ptr) == DDI_PROP_SUCCESS) {
			if (strcmp(ptr, "on") == 0)
				bofi_sync_check = 1;
			ddi_prop_free(ptr);
		}

		/*
		 * get driver-under-test names (from conf file)
		 */
		size = NAMESIZE;
		if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
		    "bofi-to-test", driver_list, &size) != DDI_PROP_SUCCESS)
			driver_list[0] = 0;
		/*
		 * and convert into a sequence of strings
		 */
		driver_list_neg = 1;
		new_string = 1;
		driver_list_size = strlen(driver_list);
		for (i = 0; i < driver_list_size; i++) {
			if (driver_list[i] == ' ') {
				driver_list[i] = '\0';
				new_string = 1;
			} else if (new_string) {
				if (driver_list[i] != '!')
					driver_list_neg = 0;
				new_string = 0;
			}
		}
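		/*
		 * For example (hypothetical driver names):
		 * bofi-to-test="foo bar" leaves driver_list holding the
		 * strings "foo" and "bar" with driver_list_neg == 0 (test
		 * only those drivers), while bofi-to-test="!foo !bar"
		 * leaves driver_list_neg == 1 (test everything except
		 * foo and bar).
		 */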
		/*
		 * initialize mutexes, lists
		 */
		mutex_init(&clone_tab_mutex, NULL, MUTEX_DRIVER,
		    NULL);
		/*
		 * fake up iblock cookie - need to protect ourselves
		 * against drivers that use hilevel interrupts
		 */
		ss = spl8();
		s = spl8();
		splx(ss);
		mutex_init(&bofi_mutex, NULL, MUTEX_SPIN, (void *)(uintptr_t)s);
		mutex_init(&bofi_low_mutex, NULL, MUTEX_DRIVER,
		    (void *)bofi_low_cookie);
		shadow_list.next = &shadow_list;
		shadow_list.prev = &shadow_list;
		for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
			hhash_table[i].hnext = &hhash_table[i];
			hhash_table[i].hprev = &hhash_table[i];
			dhash_table[i].dnext = &dhash_table[i];
			dhash_table[i].dprev = &dhash_table[i];
		}
		for (i = 1; i < BOFI_NLINKS; i++)
			bofi_link_array[i].link = &bofi_link_array[i-1];
		bofi_link_freelist = &bofi_link_array[BOFI_NLINKS - 1];
		/*
		 * overlay bus_ops structure
		 */
		if (modify_bus_ops(nexus_name, &bofi_bus_ops) == 0) {
			ddi_remove_minor_node(dip, buf);
			mutex_destroy(&clone_tab_mutex);
			mutex_destroy(&bofi_mutex);
			mutex_destroy(&bofi_low_mutex);
			return (DDI_FAILURE);
		}
		if (sysevent_evc_bind(FM_ERROR_CHAN, &bofi_error_chan, 0) == 0)
			(void) sysevent_evc_subscribe(bofi_error_chan, "bofi",
			    EC_FM, bofi_fm_ereport_callback, NULL, 0);

		/*
		 * save dip for getinfo
		 */
		our_dip = dip;
		ddi_report_dev(dip);
		initialized = 1;
	}
	return (DDI_SUCCESS);
}


static int
bofi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	char *name;
	char buf[80];

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);
	if (ddi_get_instance(dip) > 0)
		return (DDI_FAILURE);
	if ((name = ddi_get_name(dip)) == NULL)
		return (DDI_FAILURE);
	(void) snprintf(buf, sizeof (buf), "%s,ctl", name);
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	/*
	 * make sure test bofi is no longer in use
	 */
	if (shadow_list.next != &shadow_list || errent_listp != NULL) {
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (DDI_FAILURE);
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);

	/*
	 * restore bus_ops structure
	 */
	if (reset_bus_ops(nexus_name, &save_bus_ops) == 0)
		return (DDI_FAILURE);

	(void) sysevent_evc_unbind(bofi_error_chan);

	mutex_destroy(&clone_tab_mutex);
	mutex_destroy(&bofi_mutex);
	mutex_destroy(&bofi_low_mutex);
	ddi_remove_minor_node(dip, buf);
	our_dip = NULL;
	initialized = 0;
	return (DDI_SUCCESS);
}


/* ARGSUSED */
static int
bofi_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	dev_t dev = (dev_t)arg;
	int minor = (int)getminor(dev);
	int retval;

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (minor != 0 || our_dip == NULL) {
			*result = (void *)NULL;
			retval = DDI_FAILURE;
		} else {
			*result = (void *)our_dip;
			retval = DDI_SUCCESS;
		}
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		retval = DDI_SUCCESS;
		break;
	default:
		retval = DDI_FAILURE;
	}
	return (retval);
}


/* ARGSUSED */
static int
bofi_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	int minor = (int)getminor(*devp);
	struct bofi_errent *softc;

	/*
	 * only allow open on minor=0 - the clone device
	 */
	if (minor != 0)
		return (ENXIO);
	/*
	 * fail if not attached
	 */
	if (!initialized)
		return (ENXIO);
	/*
	 * find a free slot and grab it
	 */
	mutex_enter(&clone_tab_mutex);
	for (minor = 1; minor < NCLONES; minor++) {
		if (clone_tab[minor] == 0) {
			clone_tab[minor] = 1;
			break;
		}
	}
	mutex_exit(&clone_tab_mutex);
	if (minor == NCLONES)
		return (EAGAIN);
	/*
	 * soft state structure for this clone is used to maintain a list
	 * of allocated errdefs so they can be freed on close
	 */
	if (ddi_soft_state_zalloc(statep, minor) != DDI_SUCCESS) {
		mutex_enter(&clone_tab_mutex);
		clone_tab[minor] = 0;
		mutex_exit(&clone_tab_mutex);
		return (EAGAIN);
	}
	softc = ddi_get_soft_state(statep, minor);
	softc->cnext = softc;
	softc->cprev = softc;

	*devp = makedevice(getmajor(*devp), minor);
	return (0);
}


/* ARGSUSED */
static int
bofi_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int minor = (int)getminor(dev);
	struct bofi_errent *softc;
	struct bofi_errent *ep, *next_ep;

	softc = ddi_get_soft_state(statep, minor);
	if (softc == NULL)
		return (ENXIO);
	/*
	 * find list of errdefs and free them off
	 */
	for (ep = softc->cnext; ep != softc; ) {
		next_ep = ep->cnext;
		(void) bofi_errdef_free(ep);
		ep = next_ep;
	}
	/*
	 * free clone tab slot
	 */
	mutex_enter(&clone_tab_mutex);
	clone_tab[minor] = 0;
	mutex_exit(&clone_tab_mutex);

	ddi_soft_state_free(statep, minor);
	return (0);
}


/* ARGSUSED */
static int
bofi_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
	struct bofi_errent *softc;
	int minor = (int)getminor(dev);
	struct bofi_errdef errdef;
	struct bofi_errctl errctl;
	struct bofi_errstate errstate;
	void *ed_handle;
	struct bofi_get_handles get_handles;
	struct bofi_get_hdl_info hdl_info;
	struct handle_info *hdlip;
	struct handle_info *hib;

	char *buffer;
	char *bufptr;
	char *endbuf;
	int req_count, count, err;
	char *namep;
	struct bofi_shadow *hp;
	int retval;
	struct bofi_shadow *hhashp;
	int i;

	switch (cmd) {
	case BOFI_ADD_DEF:
		/*
		 * add a new error definition
		 */
#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_errdef32 errdef_32;

			if (ddi_copyin((void *)arg, &errdef_32,
			    sizeof (struct bofi_errdef32), mode)) {
				return (EFAULT);
			}
			errdef.namesize = errdef_32.namesize;
			(void) strncpy(errdef.name, errdef_32.name, NAMESIZE);
			errdef.instance = errdef_32.instance;
			errdef.rnumber = errdef_32.rnumber;
			errdef.offset = errdef_32.offset;
			errdef.len = errdef_32.len;
			errdef.access_type = errdef_32.access_type;
			errdef.access_count = errdef_32.access_count;
			errdef.fail_count = errdef_32.fail_count;
			errdef.acc_chk = errdef_32.acc_chk;
			errdef.optype = errdef_32.optype;
			errdef.operand = errdef_32.operand;
			errdef.log.logsize = errdef_32.log.logsize;
			errdef.log.entries = errdef_32.log.entries;
			errdef.log.flags = errdef_32.log.flags;
			errdef.log.wrapcnt = errdef_32.log.wrapcnt;
			errdef.log.start_time = errdef_32.log.start_time;
			errdef.log.stop_time = errdef_32.log.stop_time;
			errdef.log.logbase =
			    (caddr_t)(uintptr_t)errdef_32.log.logbase;
			errdef.errdef_handle = errdef_32.errdef_handle;
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyin((void *)arg, &errdef,
			    sizeof (struct bofi_errdef), mode))
				return (EFAULT);
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyin((void *)arg, &errdef,
		    sizeof (struct bofi_errdef), mode) != 0)
			return (EFAULT);
#endif /* _MULTI_DATAMODEL */
		/*
		 * do some validation
		 */
		if (errdef.fail_count == 0)
			errdef.optype = 0;
		if (errdef.optype != 0) {
			if (errdef.access_type & BOFI_INTR &&
			    errdef.optype != BOFI_DELAY_INTR &&
			    errdef.optype != BOFI_LOSE_INTR &&
			    errdef.optype != BOFI_EXTRA_INTR)
				return (EINVAL);
			if ((errdef.access_type & (BOFI_DMA_RW|BOFI_PIO_R)) &&
			    errdef.optype == BOFI_NO_TRANSFER)
				return (EINVAL);
			if ((errdef.access_type & (BOFI_PIO_RW)) &&
			    errdef.optype != BOFI_EQUAL &&
			    errdef.optype != BOFI_OR &&
			    errdef.optype != BOFI_XOR &&
			    errdef.optype != BOFI_AND &&
			    errdef.optype != BOFI_NO_TRANSFER)
				return (EINVAL);
		}
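		/*
		 * Roughly: interrupt errdefs may only use the delay/lose/
		 * extra-interrupt optypes, BOFI_NO_TRANSFER is rejected for
		 * DMA and PIO-read access types, and PIO errdefs are limited
		 * to the value-corruption optypes plus BOFI_NO_TRANSFER.
		 */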
		/*
		 * find softstate for this clone, so we can tag
		 * new errdef on to it
		 */
		softc = ddi_get_soft_state(statep, minor);
		if (softc == NULL)
			return (ENXIO);
		/*
		 * read in name
		 */
		if (errdef.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(errdef.namesize+1, KM_SLEEP);
		(void) strncpy(namep, errdef.name, errdef.namesize);

		if (bofi_errdef_alloc(&errdef, namep, softc) != DDI_SUCCESS) {
			(void) bofi_errdef_free((struct bofi_errent *)
			    (uintptr_t)errdef.errdef_handle);
			kmem_free(namep, errdef.namesize+1);
			return (EINVAL);
		}
		/*
		 * copy out errdef again, including filled in errdef_handle
		 */
#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_errdef32 errdef_32;

			errdef_32.namesize = errdef.namesize;
			(void) strncpy(errdef_32.name, errdef.name, NAMESIZE);
			errdef_32.instance = errdef.instance;
			errdef_32.rnumber = errdef.rnumber;
			errdef_32.offset = errdef.offset;
			errdef_32.len = errdef.len;
			errdef_32.access_type = errdef.access_type;
			errdef_32.access_count = errdef.access_count;
			errdef_32.fail_count = errdef.fail_count;
			errdef_32.acc_chk = errdef.acc_chk;
			errdef_32.optype = errdef.optype;
			errdef_32.operand = errdef.operand;
			errdef_32.log.logsize = errdef.log.logsize;
			errdef_32.log.entries = errdef.log.entries;
			errdef_32.log.flags = errdef.log.flags;
			errdef_32.log.wrapcnt = errdef.log.wrapcnt;
			errdef_32.log.start_time = errdef.log.start_time;
			errdef_32.log.stop_time = errdef.log.stop_time;
			errdef_32.log.logbase =
			    (caddr32_t)(uintptr_t)errdef.log.logbase;
			errdef_32.errdef_handle = errdef.errdef_handle;
			if (ddi_copyout(&errdef_32, (void *)arg,
			    sizeof (struct bofi_errdef32), mode) != 0) {
				(void) bofi_errdef_free((struct bofi_errent *)
				    (uintptr_t)errdef.errdef_handle);
				kmem_free(namep, errdef.namesize+1);
				return (EFAULT);
			}
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyout(&errdef, (void *)arg,
			    sizeof (struct bofi_errdef), mode) != 0) {
				(void) bofi_errdef_free((struct bofi_errent *)
				    (uintptr_t)errdef.errdef_handle);
				kmem_free(namep, errdef.namesize+1);
				return (EFAULT);
			}
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyout(&errdef, (void *)arg,
		    sizeof (struct bofi_errdef), mode) != 0) {
			(void) bofi_errdef_free((struct bofi_errent *)
			    (uintptr_t)errdef.errdef_handle);
			kmem_free(namep, errdef.namesize+1);
			return (EFAULT);
		}
#endif /* _MULTI_DATAMODEL */
		return (0);
	case BOFI_DEL_DEF:
		/*
		 * delete existing errdef
		 */
		if (ddi_copyin((void *)arg, &ed_handle,
		    sizeof (void *), mode) != 0)
			return (EFAULT);
		return (bofi_errdef_free((struct bofi_errent *)ed_handle));
	case BOFI_START:
		/*
		 * start all errdefs corresponding to
		 * this name and instance
		 */
		if (ddi_copyin((void *)arg, &errctl,
		    sizeof (struct bofi_errctl), mode) != 0)
			return (EFAULT);
		/*
		 * copy in name
		 */
		if (errctl.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
		(void) strncpy(namep, errctl.name, errctl.namesize);
		bofi_start(&errctl, namep);
		kmem_free(namep, errctl.namesize+1);
		return (0);
	case BOFI_STOP:
		/*
		 * stop all errdefs corresponding to
		 * this name and instance
		 */
		if (ddi_copyin((void *)arg, &errctl,
		    sizeof (struct bofi_errctl), mode) != 0)
			return (EFAULT);
		/*
		 * copy in name
		 */
		if (errctl.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
		(void) strncpy(namep, errctl.name, errctl.namesize);
		bofi_stop(&errctl, namep);
		kmem_free(namep, errctl.namesize+1);
		return (0);
	case BOFI_BROADCAST:
		/*
		 * wakeup all errdefs corresponding to
		 * this name and instance
		 */
		if (ddi_copyin((void *)arg, &errctl,
		    sizeof (struct bofi_errctl), mode) != 0)
			return (EFAULT);
		/*
		 * copy in name
		 */
		if (errctl.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
		(void) strncpy(namep, errctl.name, errctl.namesize);
		bofi_broadcast(&errctl, namep);
		kmem_free(namep, errctl.namesize+1);
		return (0);
	case BOFI_CLEAR_ACC_CHK:
		/*
		 * clear "acc_chk" for all errdefs corresponding to
		 * this name and instance
		 */
		if (ddi_copyin((void *)arg, &errctl,
		    sizeof (struct bofi_errctl), mode) != 0)
			return (EFAULT);
		/*
		 * copy in name
		 */
		if (errctl.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
		(void) strncpy(namep, errctl.name, errctl.namesize);
		bofi_clear_acc_chk(&errctl, namep);
		kmem_free(namep, errctl.namesize+1);
		return (0);
	case BOFI_CLEAR_ERRORS:
		/*
		 * set "fail_count" to 0 for all errdefs corresponding to
		 * this name and instance whose "access_count"
		 * has expired.
		 */
		if (ddi_copyin((void *)arg, &errctl,
		    sizeof (struct bofi_errctl), mode) != 0)
			return (EFAULT);
		/*
		 * copy in name
		 */
		if (errctl.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
		(void) strncpy(namep, errctl.name, errctl.namesize);
		bofi_clear_errors(&errctl, namep);
		kmem_free(namep, errctl.namesize+1);
		return (0);
	case BOFI_CLEAR_ERRDEFS:
		/*
		 * set "access_count" and "fail_count" to 0 for all errdefs
		 * corresponding to this name and instance
		 */
		if (ddi_copyin((void *)arg, &errctl,
		    sizeof (struct bofi_errctl), mode) != 0)
			return (EFAULT);
		/*
		 * copy in name
		 */
		if (errctl.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
		(void) strncpy(namep, errctl.name, errctl.namesize);
		bofi_clear_errdefs(&errctl, namep);
		kmem_free(namep, errctl.namesize+1);
		return (0);
	case BOFI_CHK_STATE:
	{
		struct acc_log_elem *klg;
		size_t uls;
		/*
		 * get state for this errdef - read in dummy errstate
		 * with just the errdef_handle filled in
		 */
#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_errstate32 errstate_32;

			if (ddi_copyin((void *)arg, &errstate_32,
			    sizeof (struct bofi_errstate32), mode) != 0) {
				return (EFAULT);
			}
			errstate.fail_time = errstate_32.fail_time;
			errstate.msg_time = errstate_32.msg_time;
			errstate.access_count = errstate_32.access_count;
			errstate.fail_count = errstate_32.fail_count;
			errstate.acc_chk = errstate_32.acc_chk;
			errstate.errmsg_count = errstate_32.errmsg_count;
			(void) strncpy(errstate.buffer, errstate_32.buffer,
			    ERRMSGSIZE);
			errstate.severity = errstate_32.severity;
			errstate.log.logsize = errstate_32.log.logsize;
			errstate.log.entries = errstate_32.log.entries;
			errstate.log.flags = errstate_32.log.flags;
			errstate.log.wrapcnt = errstate_32.log.wrapcnt;
			errstate.log.start_time = errstate_32.log.start_time;
			errstate.log.stop_time = errstate_32.log.stop_time;
			errstate.log.logbase =
			    (caddr_t)(uintptr_t)errstate_32.log.logbase;
			errstate.errdef_handle = errstate_32.errdef_handle;
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyin((void *)arg, &errstate,
			    sizeof (struct bofi_errstate), mode) != 0)
				return (EFAULT);
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyin((void *)arg, &errstate,
		    sizeof (struct bofi_errstate), mode) != 0)
			return (EFAULT);
#endif /* _MULTI_DATAMODEL */
		if ((retval = bofi_errdef_check(&errstate, &klg)) == EINVAL)
			return (EINVAL);
		/*
		 * copy out real errstate structure
		 */
		uls = errstate.log.logsize;
		if (errstate.log.entries > uls && uls)
			/* insufficient user memory */
			errstate.log.entries = uls;
		/* always pass back a time */
		if (errstate.log.stop_time == 0ul)
			(void) drv_getparm(TIME, &(errstate.log.stop_time));

#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_errstate32 errstate_32;

			errstate_32.fail_time = errstate.fail_time;
			errstate_32.msg_time = errstate.msg_time;
			errstate_32.access_count = errstate.access_count;
			errstate_32.fail_count = errstate.fail_count;
			errstate_32.acc_chk = errstate.acc_chk;
			errstate_32.errmsg_count = errstate.errmsg_count;
			(void) strncpy(errstate_32.buffer, errstate.buffer,
			    ERRMSGSIZE);
			errstate_32.severity = errstate.severity;
			errstate_32.log.logsize = errstate.log.logsize;
			errstate_32.log.entries = errstate.log.entries;
			errstate_32.log.flags = errstate.log.flags;
			errstate_32.log.wrapcnt = errstate.log.wrapcnt;
			errstate_32.log.start_time = errstate.log.start_time;
			errstate_32.log.stop_time = errstate.log.stop_time;
			errstate_32.log.logbase =
			    (caddr32_t)(uintptr_t)errstate.log.logbase;
			errstate_32.errdef_handle = errstate.errdef_handle;
			if (ddi_copyout(&errstate_32, (void *)arg,
			    sizeof (struct bofi_errstate32), mode) != 0)
				return (EFAULT);
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyout(&errstate, (void *)arg,
			    sizeof (struct bofi_errstate), mode) != 0)
				return (EFAULT);
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyout(&errstate, (void *)arg,
		    sizeof (struct bofi_errstate), mode) != 0)
			return (EFAULT);
#endif /* _MULTI_DATAMODEL */
		if (uls && errstate.log.entries &&
		    ddi_copyout(klg, errstate.log.logbase,
		    errstate.log.entries * sizeof (struct acc_log_elem),
		    mode) != 0) {
			return (EFAULT);
		}
		return (retval);
	}
	case BOFI_CHK_STATE_W:
	{
		struct acc_log_elem *klg;
		size_t uls;
		/*
		 * get state for this errdef - read in dummy errstate
		 * with just the errdef_handle filled in. Then wait for
		 * a ddi_report_fault message to come back
		 */
#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_errstate32 errstate_32;

			if (ddi_copyin((void *)arg, &errstate_32,
			    sizeof (struct bofi_errstate32), mode) != 0) {
				return (EFAULT);
			}
			errstate.fail_time = errstate_32.fail_time;
			errstate.msg_time = errstate_32.msg_time;
			errstate.access_count = errstate_32.access_count;
			errstate.fail_count = errstate_32.fail_count;
			errstate.acc_chk = errstate_32.acc_chk;
			errstate.errmsg_count = errstate_32.errmsg_count;
			(void) strncpy(errstate.buffer, errstate_32.buffer,
			    ERRMSGSIZE);
			errstate.severity = errstate_32.severity;
			errstate.log.logsize = errstate_32.log.logsize;
			errstate.log.entries = errstate_32.log.entries;
			errstate.log.flags = errstate_32.log.flags;
			errstate.log.wrapcnt = errstate_32.log.wrapcnt;
			errstate.log.start_time = errstate_32.log.start_time;
			errstate.log.stop_time = errstate_32.log.stop_time;
			errstate.log.logbase =
			    (caddr_t)(uintptr_t)errstate_32.log.logbase;
			errstate.errdef_handle = errstate_32.errdef_handle;
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyin((void *)arg, &errstate,
			    sizeof (struct bofi_errstate), mode) != 0)
				return (EFAULT);
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyin((void *)arg, &errstate,
		    sizeof (struct bofi_errstate), mode) != 0)
			return (EFAULT);
#endif /* _MULTI_DATAMODEL */
		if ((retval = bofi_errdef_check_w(&errstate, &klg)) == EINVAL)
			return (EINVAL);
		/*
		 * copy out real errstate structure
		 */
		uls = errstate.log.logsize;
		if (errstate.log.entries > uls && uls)
			/* insufficient user memory */
			errstate.log.entries = uls;
		/* always pass back a time */
		if (errstate.log.stop_time == 0ul)
			(void) drv_getparm(TIME, &(errstate.log.stop_time));

#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_errstate32 errstate_32;

			errstate_32.fail_time = errstate.fail_time;
			errstate_32.msg_time = errstate.msg_time;
			errstate_32.access_count = errstate.access_count;
			errstate_32.fail_count = errstate.fail_count;
			errstate_32.acc_chk = errstate.acc_chk;
			errstate_32.errmsg_count = errstate.errmsg_count;
			(void) strncpy(errstate_32.buffer, errstate.buffer,
			    ERRMSGSIZE);
			errstate_32.severity = errstate.severity;
			errstate_32.log.logsize = errstate.log.logsize;
			errstate_32.log.entries = errstate.log.entries;
			errstate_32.log.flags = errstate.log.flags;
			errstate_32.log.wrapcnt = errstate.log.wrapcnt;
			errstate_32.log.start_time = errstate.log.start_time;
			errstate_32.log.stop_time = errstate.log.stop_time;
			errstate_32.log.logbase =
			    (caddr32_t)(uintptr_t)errstate.log.logbase;
			errstate_32.errdef_handle = errstate.errdef_handle;
			if (ddi_copyout(&errstate_32, (void *)arg,
			    sizeof (struct bofi_errstate32), mode) != 0)
				return (EFAULT);
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyout(&errstate, (void *)arg,
			    sizeof (struct bofi_errstate), mode) != 0)
				return (EFAULT);
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyout(&errstate, (void *)arg,
		    sizeof (struct bofi_errstate), mode) != 0)
			return (EFAULT);
#endif /* _MULTI_DATAMODEL */

		if (uls && errstate.log.entries &&
		    ddi_copyout(klg, errstate.log.logbase,
		    errstate.log.entries * sizeof (struct acc_log_elem),
		    mode) != 0) {
			return (EFAULT);
		}
		return (retval);
	}
	case BOFI_GET_HANDLES:
		/*
		 * display existing handles
		 */
#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_get_handles32 get_handles_32;

			if (ddi_copyin((void *)arg, &get_handles_32,
			    sizeof (get_handles_32), mode) != 0) {
				return (EFAULT);
			}
			get_handles.namesize = get_handles_32.namesize;
			(void) strncpy(get_handles.name, get_handles_32.name,
			    NAMESIZE);
			get_handles.instance = get_handles_32.instance;
			get_handles.count = get_handles_32.count;
			get_handles.buffer =
			    (caddr_t)(uintptr_t)get_handles_32.buffer;
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyin((void *)arg, &get_handles,
			    sizeof (get_handles), mode) != 0)
				return (EFAULT);
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyin((void *)arg, &get_handles,
		    sizeof (get_handles), mode) != 0)
			return (EFAULT);
#endif /* _MULTI_DATAMODEL */
		/*
		 * read in name
		 */
		if (get_handles.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(get_handles.namesize+1, KM_SLEEP);
		(void) strncpy(namep, get_handles.name, get_handles.namesize);
		req_count = get_handles.count;
		bufptr = buffer = kmem_zalloc(req_count, KM_SLEEP);
		endbuf = bufptr + req_count;
		/*
		 * display existing handles
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
"DVMA" : "DMA*"); 1526 bufptr += strlen(bufptr); 1527 if (hp->type == BOFI_ACC_HDL) { 1528 if (hp->len == INT_MAX - hp->offset) 1529 (void) snprintf(bufptr, 1530 (size_t)(endbuf-bufptr), 1531 "reg set %d off 0x%llx\n", 1532 hp->rnumber, hp->offset); 1533 else 1534 (void) snprintf(bufptr, 1535 (size_t)(endbuf-bufptr), 1536 "reg set %d off 0x%llx" 1537 " len 0x%llx\n", 1538 hp->rnumber, hp->offset, 1539 hp->len); 1540 } else if (hp->type == BOFI_DMA_HDL) 1541 (void) snprintf(bufptr, 1542 (size_t)(endbuf-bufptr), 1543 "handle no %d len 0x%llx" 1544 " addr 0x%p\n", hp->rnumber, 1545 hp->len, (void *)hp->addr); 1546 else if (hp->type == BOFI_NULL && 1547 hp->hparrayp == NULL) 1548 (void) snprintf(bufptr, 1549 (size_t)(endbuf-bufptr), 1550 "handle no %d\n", hp->rnumber); 1551 else 1552 (void) snprintf(bufptr, 1553 (size_t)(endbuf-bufptr), "\n"); 1554 bufptr += strlen(bufptr); 1555 } 1556 } 1557 mutex_exit(&bofi_mutex); 1558 mutex_exit(&bofi_low_mutex); 1559 err = ddi_copyout(buffer, get_handles.buffer, req_count, mode); 1560 kmem_free(namep, get_handles.namesize+1); 1561 kmem_free(buffer, req_count); 1562 if (err != 0) 1563 return (EFAULT); 1564 else 1565 return (0); 1566 case BOFI_GET_HANDLE_INFO: 1567 /* 1568 * display existing handles 1569 */ 1570 #ifdef _MULTI_DATAMODEL 1571 switch (ddi_model_convert_from(mode & FMODELS)) { 1572 case DDI_MODEL_ILP32: 1573 { 1574 /* 1575 * For use when a 32 bit app makes a call into a 1576 * 64 bit ioctl 1577 */ 1578 struct bofi_get_hdl_info32 hdl_info_32; 1579 1580 if (ddi_copyin((void *)arg, &hdl_info_32, 1581 sizeof (hdl_info_32), mode)) { 1582 return (EFAULT); 1583 } 1584 hdl_info.namesize = hdl_info_32.namesize; 1585 (void) strncpy(hdl_info.name, hdl_info_32.name, 1586 NAMESIZE); 1587 hdl_info.count = hdl_info_32.count; 1588 hdl_info.hdli = (caddr_t)(uintptr_t)hdl_info_32.hdli; 1589 break; 1590 } 1591 case DDI_MODEL_NONE: 1592 if (ddi_copyin((void *)arg, &hdl_info, 1593 sizeof (hdl_info), mode)) 1594 return (EFAULT); 1595 break; 1596 } 1597 #else /* ! 
		if (hdl_info.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(hdl_info.namesize + 1, KM_SLEEP);
		(void) strncpy(namep, hdl_info.name, hdl_info.namesize);
		req_count = hdl_info.count;
		count = hdl_info.count = 0;	/* the actual no of handles */
		if (req_count > 0) {
			hib = hdlip =
			    kmem_zalloc(req_count * sizeof (struct handle_info),
			    KM_SLEEP);
		} else {
			hib = hdlip = 0;
			req_count = hdl_info.count = 0;
		}

		/*
		 * display existing handles
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
			hhashp = &hhash_table[i];
			for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
				if (!driver_under_test(hp->dip) ||
				    ddi_name_to_major(ddi_get_name(hp->dip)) !=
				    ddi_name_to_major(namep) ||
				    ++(hdl_info.count) > req_count ||
				    count == req_count)
					continue;

				hdlip->instance = hp->instance;
				hdlip->rnumber = hp->rnumber;
				switch (hp->type) {
				case BOFI_ACC_HDL:
					hdlip->access_type = BOFI_PIO_RW;
					hdlip->offset = hp->offset;
					hdlip->len = hp->len;
					break;
				case BOFI_DMA_HDL:
					hdlip->access_type = 0;
					if (hp->flags & DDI_DMA_WRITE)
						hdlip->access_type |=
						    BOFI_DMA_W;
					if (hp->flags & DDI_DMA_READ)
						hdlip->access_type |=
						    BOFI_DMA_R;
					hdlip->len = hp->len;
					hdlip->addr_cookie =
					    (uint64_t)(uintptr_t)hp->addr;
					break;
				case BOFI_INT_HDL:
					hdlip->access_type = BOFI_INTR;
					break;
				default:
					hdlip->access_type = 0;
					break;
				}
				hdlip++;
				count++;
			}
		}
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		err = 0;
#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_get_hdl_info32 hdl_info_32;

			hdl_info_32.namesize = hdl_info.namesize;
			(void) strncpy(hdl_info_32.name, hdl_info.name,
			    NAMESIZE);
			hdl_info_32.count = hdl_info.count;
			hdl_info_32.hdli = (caddr32_t)(uintptr_t)hdl_info.hdli;
			if (ddi_copyout(&hdl_info_32, (void *)arg,
			    sizeof (hdl_info_32), mode) != 0) {
				kmem_free(namep, hdl_info.namesize+1);
				if (req_count > 0)
					kmem_free(hib,
					    req_count * sizeof (*hib));
				return (EFAULT);
			}
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyout(&hdl_info, (void *)arg,
			    sizeof (hdl_info), mode) != 0) {
				kmem_free(namep, hdl_info.namesize+1);
				if (req_count > 0)
					kmem_free(hib,
					    req_count * sizeof (*hib));
				return (EFAULT);
			}
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyout(&hdl_info, (void *)arg,
		    sizeof (hdl_info), mode) != 0) {
			kmem_free(namep, hdl_info.namesize+1);
			if (req_count > 0)
				kmem_free(hib, req_count * sizeof (*hib));
			return (EFAULT);
		}
#endif /* ! _MULTI_DATAMODEL */
		if (count > 0) {
			if (ddi_copyout(hib, hdl_info.hdli,
			    count * sizeof (*hib), mode) != 0) {
				kmem_free(namep, hdl_info.namesize+1);
				if (req_count > 0)
					kmem_free(hib,
					    req_count * sizeof (*hib));
				return (EFAULT);
			}
		}
		kmem_free(namep, hdl_info.namesize+1);
		if (req_count > 0)
			kmem_free(hib, req_count * sizeof (*hib));
		return (err);
	default:
		return (ENOTTY);
	}
}
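

/*
 * Illustrative userland sketch (not part of the driver) of how the ioctls
 * above fit together. The device path and the "foo" driver name are
 * hypothetical; the structures and ioctl commands are the ones handled by
 * bofi_ioctl() above.
 *
 *	#include <sys/types.h>
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <stropts.h>
 *	#include <sys/bofi.h>
 *
 *	int
 *	corrupt_one_pio_read(void)
 *	{
 *		struct bofi_errdef ed;
 *		struct bofi_errctl ec;
 *		int fd;
 *
 *		// hypothetical path to the bofi clone device
 *		fd = open("/devices/pseudo/bofi@0:bofi,ctl", O_RDWR);
 *		if (fd < 0)
 *			return (-1);
 *		(void) memset(&ed, 0, sizeof (ed));
 *		(void) strcpy(ed.name, "foo");
 *		ed.namesize = strlen(ed.name);
 *		ed.instance = 0;
 *		ed.access_type = BOFI_PIO_R;	// corrupt PIO reads
 *		ed.optype = BOFI_XOR;		// by XORing with operand
 *		ed.operand = 0xff;
 *		ed.access_count = 1;
 *		ed.fail_count = 1;
 *		if (ioctl(fd, BOFI_ADD_DEF, &ed) < 0)
 *			return (-1);
 *		(void) memset(&ec, 0, sizeof (ec));
 *		(void) strcpy(ec.name, "foo");
 *		ec.namesize = strlen(ec.name);
 *		ec.instance = 0;
 *		(void) ioctl(fd, BOFI_START, &ec);
 *		// ... exercise the driver, poll with BOFI_CHK_STATE ...
 *		(void) close(fd);	// close frees this clone's errdefs
 *		return (0);
 *	}
 */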


/*
 * add a new error definition
 */
static int
bofi_errdef_alloc(struct bofi_errdef *errdefp, char *namep,
    struct bofi_errent *softc)
{
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_link *lp;

	/*
	 * allocate errdef structure and put on in-use list
	 */
	ep = kmem_zalloc(sizeof (struct bofi_errent), KM_SLEEP);
	ep->errdef = *errdefp;
	ep->name = namep;
	ep->errdef.errdef_handle = (uint64_t)(uintptr_t)ep;
	ep->errstate.severity = DDI_SERVICE_RESTORED;
	ep->errstate.errdef_handle = (uint64_t)(uintptr_t)ep;
	cv_init(&ep->cv, NULL, CV_DRIVER, NULL);
	/*
	 * allocate space for logging
	 */
	ep->errdef.log.entries = 0;
	ep->errdef.log.wrapcnt = 0;
	if (ep->errdef.access_type & BOFI_LOG)
		ep->logbase = kmem_alloc(sizeof (struct acc_log_elem) *
		    ep->errdef.log.logsize, KM_SLEEP);
	else
		ep->logbase = NULL;
	/*
	 * put on in-use list
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	ep->next = errent_listp;
	errent_listp = ep;
	/*
	 * and add it to the per-clone list
	 */
	ep->cnext = softc->cnext;
	softc->cnext->cprev = ep;
	ep->cprev = softc;
	softc->cnext = ep;

	/*
	 * look for corresponding shadow handle structures and if we find any
	 * tag this errdef structure on to their link lists.
	 */
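	/*
	 * Note on the DMA clause below: rounding the end of the errdef
	 * range down to a uint64_t boundary, the start up to one, and
	 * requiring end > start checks that the range covers at least one
	 * aligned 64-bit word of the handle - presumably the granularity
	 * at which bofi applies DMA corruption.
	 */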
	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
		if (ddi_name_to_major(hp->name) == ddi_name_to_major(namep) &&
		    hp->instance == errdefp->instance &&
		    (((errdefp->access_type & BOFI_DMA_RW) &&
		    (ep->errdef.rnumber == -1 ||
		    hp->rnumber == ep->errdef.rnumber) &&
		    hp->type == BOFI_DMA_HDL &&
		    (((uintptr_t)(hp->addr + ep->errdef.offset +
		    ep->errdef.len) & ~LLSZMASK) >
		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
		    LLSZMASK) & ~LLSZMASK))) ||
		    ((errdefp->access_type & BOFI_INTR) &&
		    hp->type == BOFI_INT_HDL) ||
		    ((errdefp->access_type & BOFI_PIO_RW) &&
		    hp->type == BOFI_ACC_HDL &&
		    (errdefp->rnumber == -1 ||
		    hp->rnumber == errdefp->rnumber) &&
		    (errdefp->len == 0 ||
		    hp->offset < errdefp->offset + errdefp->len) &&
		    hp->offset + hp->len > errdefp->offset))) {
			lp = bofi_link_freelist;
			if (lp != NULL) {
				bofi_link_freelist = lp->link;
				lp->errentp = ep;
				lp->link = hp->link;
				hp->link = lp;
			}
		}
	}
	errdefp->errdef_handle = (uint64_t)(uintptr_t)ep;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	ep->softintr_id = NULL;
	return (ddi_add_softintr(our_dip, DDI_SOFTINT_MED, &ep->softintr_id,
	    NULL, NULL, bofi_signal, (caddr_t)&ep->errdef));
}


/*
 * delete existing errdef
 */
static int
bofi_errdef_free(struct bofi_errent *ep)
{
	struct bofi_errent *hep, *prev_hep;
	struct bofi_link *lp, *prev_lp, *next_lp;
	struct bofi_shadow *hp;

	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	/*
	 * don't just assume it's a valid ep - check that it's on the
	 * in-use list
	 */
	prev_hep = NULL;
	for (hep = errent_listp; hep != NULL; ) {
		if (hep == ep)
			break;
		prev_hep = hep;
		hep = hep->next;
	}
	if (hep == NULL) {
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (EINVAL);
	}
	/*
	 * found it - delete from in-use list
	 */

	if (prev_hep)
		prev_hep->next = hep->next;
	else
		errent_listp = hep->next;
	/*
	 * and take it off the per-clone list
	 */
	hep->cnext->cprev = hep->cprev;
	hep->cprev->cnext = hep->cnext;
	/*
	 * see if we are on any shadow handle link lists - and if we
	 * are then take us off
	 */
	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
		prev_lp = NULL;
		for (lp = hp->link; lp != NULL; ) {
			if (lp->errentp == ep) {
				if (prev_lp)
					prev_lp->link = lp->link;
				else
					hp->link = lp->link;
				next_lp = lp->link;
				lp->link = bofi_link_freelist;
				bofi_link_freelist = lp;
				lp = next_lp;
			} else {
				prev_lp = lp;
				lp = lp->link;
			}
		}
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);

	cv_destroy(&ep->cv);
	kmem_free(ep->name, ep->errdef.namesize+1);
	if ((ep->errdef.access_type & BOFI_LOG) &&
	    ep->errdef.log.logsize && ep->logbase) /* double check */
		kmem_free(ep->logbase,
		    sizeof (struct acc_log_elem) * ep->errdef.log.logsize);

	if (ep->softintr_id)
		ddi_remove_softintr(ep->softintr_id);
	kmem_free(ep, sizeof (struct bofi_errent));
	return (0);
}


/*
 * start all errdefs corresponding to this name and instance
 */
static void
bofi_start(struct bofi_errctl *errctlp, char *namep)
{
	struct bofi_errent *ep;

	/*
	 * look for any errdefs with matching name and instance
	 */
	mutex_enter(&bofi_low_mutex);
	for (ep = errent_listp; ep != NULL; ep = ep->next)
		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
		    errctlp->instance == ep->errdef.instance) {
			ep->state |= BOFI_DEV_ACTIVE;
			(void) drv_getparm(TIME, &(ep->errdef.log.start_time));
			ep->errdef.log.stop_time = 0ul;
		}
	mutex_exit(&bofi_low_mutex);
}


/*
 * stop all errdefs corresponding to this name and instance
 */
static void
bofi_stop(struct bofi_errctl *errctlp, char *namep)
{
	struct bofi_errent *ep;

	/*
	 * look for any errdefs with matching name and instance
	 */
	mutex_enter(&bofi_low_mutex);
	for (ep = errent_listp; ep != NULL; ep = ep->next)
		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
		    errctlp->instance == ep->errdef.instance) {
			ep->state &= ~BOFI_DEV_ACTIVE;
			if (ep->errdef.log.stop_time == 0ul)
				(void) drv_getparm(TIME,
				    &(ep->errdef.log.stop_time));
		}
	mutex_exit(&bofi_low_mutex);
}


/*
 * wake up any threads waiting on this errdef
 */
static uint_t
bofi_signal(caddr_t arg)
{
	struct bofi_errdef *edp = (struct bofi_errdef *)arg;
	struct bofi_errent *hep;
	struct bofi_errent *ep =
	    (struct bofi_errent *)(uintptr_t)edp->errdef_handle;

	mutex_enter(&bofi_low_mutex);
	for (hep = errent_listp; hep != NULL; ) {
		if (hep == ep)
			break;
		hep = hep->next;
	}
	if (hep == NULL) {
		mutex_exit(&bofi_low_mutex);
		return (DDI_INTR_UNCLAIMED);
	}
	if ((ep->errdef.access_type & BOFI_LOG) &&
	    (edp->log.flags & BOFI_LOG_FULL)) {
		edp->log.stop_time = bofi_gettime();
		ep->state |= BOFI_NEW_MESSAGE;
		if (ep->state & BOFI_MESSAGE_WAIT)
			cv_broadcast(&ep->cv);
		ep->state &= ~BOFI_MESSAGE_WAIT;
	}
	if (ep->errstate.msg_time != 0) {
		ep->state |= BOFI_NEW_MESSAGE;
		if (ep->state & BOFI_MESSAGE_WAIT)
			cv_broadcast(&ep->cv);
		ep->state &= ~BOFI_MESSAGE_WAIT;
	}
	mutex_exit(&bofi_low_mutex);
	return (DDI_INTR_CLAIMED);
}


/*
 * wake up all errdefs corresponding to this name and instance
 */
static void
bofi_broadcast(struct bofi_errctl *errctlp, char *namep)
{
	struct bofi_errent *ep;

	/*
	 * look for any errdefs with matching name and instance
	 */
	mutex_enter(&bofi_low_mutex);
	for (ep = errent_listp; ep != NULL; ep = ep->next)
		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
		    errctlp->instance == ep->errdef.instance) {
			/*
			 * wake up sleepers
			 */
			ep->state |= BOFI_NEW_MESSAGE;
			if (ep->state & BOFI_MESSAGE_WAIT)
				cv_broadcast(&ep->cv);
			ep->state &= ~BOFI_MESSAGE_WAIT;
		}
	mutex_exit(&bofi_low_mutex);
}
2016 */ 2017 static void 2018 bofi_clear_acc_chk(struct bofi_errctl *errctlp, char *namep) 2019 { 2020 struct bofi_errent *ep; 2021 2022 /* 2023 * look for any errdefs with matching name and instance 2024 */ 2025 mutex_enter(&bofi_low_mutex); 2026 for (ep = errent_listp; ep != NULL; ep = ep->next) 2027 if (strncmp(namep, ep->name, NAMESIZE) == 0 && 2028 errctlp->instance == ep->errdef.instance) { 2029 mutex_enter(&bofi_mutex); 2030 if (ep->errdef.access_count == 0 && 2031 ep->errdef.fail_count == 0) 2032 ep->errdef.acc_chk = 0; 2033 mutex_exit(&bofi_mutex); 2034 /* 2035 * wake up sleepers 2036 */ 2037 ep->state |= BOFI_NEW_MESSAGE; 2038 if (ep->state & BOFI_MESSAGE_WAIT) 2039 cv_broadcast(&ep->cv); 2040 ep->state &= ~BOFI_MESSAGE_WAIT; 2041 } 2042 mutex_exit(&bofi_low_mutex); 2043 } 2044 2045 2046 /* 2047 * set "fail_count" to 0 for all errdefs corresponding to this name and instance 2048 * whose "access_count" has expired, set "acc_chk" to 0 and wake them up. 2049 */ 2050 static void 2051 bofi_clear_errors(struct bofi_errctl *errctlp, char *namep) 2052 { 2053 struct bofi_errent *ep; 2054 2055 /* 2056 * look for any errdefs with matching name and instance 2057 */ 2058 mutex_enter(&bofi_low_mutex); 2059 for (ep = errent_listp; ep != NULL; ep = ep->next) 2060 if (strncmp(namep, ep->name, NAMESIZE) == 0 && 2061 errctlp->instance == ep->errdef.instance) { 2062 mutex_enter(&bofi_mutex); 2063 if (ep->errdef.access_count == 0) { 2064 ep->errdef.acc_chk = 0; 2065 ep->errdef.fail_count = 0; 2066 mutex_exit(&bofi_mutex); 2067 if (ep->errdef.log.stop_time == 0ul) 2068 (void) drv_getparm(TIME, 2069 &(ep->errdef.log.stop_time)); 2070 } else 2071 mutex_exit(&bofi_mutex); 2072 /* 2073 * wake up sleepers 2074 */ 2075 ep->state |= BOFI_NEW_MESSAGE; 2076 if (ep->state & BOFI_MESSAGE_WAIT) 2077 cv_broadcast(&ep->cv); 2078 ep->state &= ~BOFI_MESSAGE_WAIT; 2079 } 2080 mutex_exit(&bofi_low_mutex); 2081 } 2082 2083 2084 /* 2085 * set "access_count" and "fail_count" to 0 for all errdefs corresponding to 2086 * this name and instance, set "acc_chk" to 0, and wake them up. 
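*
* (For reference, the counters work as in do_dma_corrupt() and
* do_pior_corrupt() below: while "access_count" is greater than 1 a
* matching access just decrements it; after that, each of the next
* "fail_count" matching accesses injects the fault. Zeroing all of
* them here therefore disarms the errdef completely.)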
2087 */
2088 static void
2089 bofi_clear_errdefs(struct bofi_errctl *errctlp, char *namep)
2090 {
2091 struct bofi_errent *ep;
2092
2093 /*
2094 * look for any errdefs with matching name and instance
2095 */
2096 mutex_enter(&bofi_low_mutex);
2097 for (ep = errent_listp; ep != NULL; ep = ep->next)
2098 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2099 errctlp->instance == ep->errdef.instance) {
2100 mutex_enter(&bofi_mutex);
2101 ep->errdef.acc_chk = 0;
2102 ep->errdef.access_count = 0;
2103 ep->errdef.fail_count = 0;
2104 mutex_exit(&bofi_mutex);
2105 if (ep->errdef.log.stop_time == 0ul)
2106 (void) drv_getparm(TIME,
2107 &(ep->errdef.log.stop_time));
2108 /*
2109 * wake up sleepers
2110 */
2111 ep->state |= BOFI_NEW_MESSAGE;
2112 if (ep->state & BOFI_MESSAGE_WAIT)
2113 cv_broadcast(&ep->cv);
2114 ep->state &= ~BOFI_MESSAGE_WAIT;
2115 }
2116 mutex_exit(&bofi_low_mutex);
2117 }
2118
2119
2120 /*
2121 * get state for this errdef
2122 */
2123 static int
2124 bofi_errdef_check(struct bofi_errstate *errstatep, struct acc_log_elem **logpp)
2125 {
2126 struct bofi_errent *hep;
2127 struct bofi_errent *ep;
2128
2129 ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2130 mutex_enter(&bofi_low_mutex);
2131 /*
2132 * don't just assume it's a valid ep - check that it's on the
2133 * in-use list
2134 */
2135 for (hep = errent_listp; hep != NULL; hep = hep->next)
2136 if (hep == ep)
2137 break;
2138 if (hep == NULL) {
2139 mutex_exit(&bofi_low_mutex);
2140 return (EINVAL);
2141 }
2142 mutex_enter(&bofi_mutex);
2143 ep->errstate.access_count = ep->errdef.access_count;
2144 ep->errstate.fail_count = ep->errdef.fail_count;
2145 ep->errstate.acc_chk = ep->errdef.acc_chk;
2146 ep->errstate.log = ep->errdef.log;
2147 *logpp = ep->logbase;
2148 *errstatep = ep->errstate;
2149 mutex_exit(&bofi_mutex);
2150 mutex_exit(&bofi_low_mutex);
2151 return (0);
2152 }
2153
2154
2155 /*
2156 * Wait for a ddi_report_fault message to come back for this errdef.
2157 * Then return state for this errdef.
2158 * fault report is intercepted by bofi_post_event, which triggers
2159 * bofi_signal via a softint, which will wake up this routine if
2160 * we are waiting
2161 */
2162 static int
2163 bofi_errdef_check_w(struct bofi_errstate *errstatep,
2164 struct acc_log_elem **logpp)
2165 {
2166 struct bofi_errent *hep;
2167 struct bofi_errent *ep;
2168 int rval = 0;
2169
2170 ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2171 mutex_enter(&bofi_low_mutex);
2172 retry:
2173 /*
2174 * don't just assume it's a valid ep - check that it's on the
2175 * in-use list
2176 */
2177 for (hep = errent_listp; hep != NULL; hep = hep->next)
2178 if (hep == ep)
2179 break;
2180 if (hep == NULL) {
2181 mutex_exit(&bofi_low_mutex);
2182 return (EINVAL);
2183 }
2184 /*
2185 * wait for ddi_report_fault for the devinfo corresponding
2186 * to this errdef
2187 */
2188 if (rval == 0 && !(ep->state & BOFI_NEW_MESSAGE)) {
2189 ep->state |= BOFI_MESSAGE_WAIT;
2190 if (cv_wait_sig(&ep->cv, &bofi_low_mutex) == 0) {
2191 if (!(ep->state & BOFI_NEW_MESSAGE))
2192 rval = EINTR;
2193 }
2194 goto retry;
2195 }
2196 ep->state &= ~BOFI_NEW_MESSAGE;
2197 /*
2198 * we either didn't need to sleep, we've been woken up, or we've been
2199 * signaled - either way return state now
2200 */
2201 mutex_enter(&bofi_mutex);
2202 ep->errstate.access_count = ep->errdef.access_count;
2203 ep->errstate.fail_count = ep->errdef.fail_count;
2204 ep->errstate.acc_chk = ep->errdef.acc_chk;
2205 ep->errstate.log = ep->errdef.log;
2206 *logpp = ep->logbase;
2207 *errstatep = ep->errstate;
2208 mutex_exit(&bofi_mutex);
2209 mutex_exit(&bofi_low_mutex);
2210 return (rval);
2211 }
2212
2213
2214 /*
2215 * support routine - check if requested driver is defined as under test in the
2216 * conf file.
2217 */
2218 static int
2219 driver_under_test(dev_info_t *rdip)
2220 {
2221 int i;
2222 char *rname;
2223 major_t rmaj;
2224
2225 rname = ddi_get_name(rdip);
2226 rmaj = ddi_name_to_major(rname);
2227
2228 /*
2229 * Require the user to specifically request the following drivers.
2230 */
2231 for (i = 0; i < driver_list_size; i += (1 + strlen(&driver_list[i]))) {
2232 if (driver_list_neg == 0) {
2233 if (rmaj == ddi_name_to_major(&driver_list[i]))
2234 return (1);
2235 } else {
2236 if (rmaj == ddi_name_to_major(&driver_list[i+1]))
2237 return (0);
2238 }
2239 }
2240 if (driver_list_neg == 0)
2241 return (0);
2242 else
2243 return (1);
2244
2245 }
2246
2247
2248 static void
2249 log_acc_event(struct bofi_errent *ep, uint_t at, offset_t offset, off_t len,
2250 size_t repcount, uint64_t *valuep)
2251 {
2252 struct bofi_errdef *edp = &(ep->errdef);
2253 struct acc_log *log = &edp->log;
2254
2255 ASSERT(log != NULL);
2256 ASSERT(MUTEX_HELD(&bofi_mutex));
2257
2258 if (log->flags & BOFI_LOG_REPIO)
2259 repcount = 1;
2260 else if (repcount == 0 && edp->access_count > 0 &&
2261 (log->flags & BOFI_LOG_FULL) == 0)
2262 edp->access_count += 1;
2263
2264 if (repcount && log->entries < log->logsize) {
2265 struct acc_log_elem *elem = ep->logbase + log->entries;
2266
2267 if (log->flags & BOFI_LOG_TIMESTAMP)
2268 elem->access_time = bofi_gettime();
2269 elem->access_type = at;
2270 elem->offset = offset;
2271 elem->value = valuep ?
*valuep : 0ll;
2272 elem->size = len;
2273 elem->repcount = repcount;
2274 ++log->entries;
2275 if (log->entries == log->logsize) {
2276 log->flags |= BOFI_LOG_FULL;
2277 ddi_trigger_softintr(((struct bofi_errent *)
2278 (uintptr_t)edp->errdef_handle)->softintr_id);
2279 }
2280 }
2281 if ((log->flags & BOFI_LOG_WRAP) && edp->access_count <= 1) {
2282 log->wrapcnt++;
2283 edp->access_count = log->logsize;
2284 log->entries = 0; /* wrap back to the start */
2285 }
2286 }
2287
2288
2289 /*
2290 * got a condition match on dma read/write - check counts and corrupt
2291 * data if necessary
2292 *
2293 * bofi_mutex always held when this is called.
2294 */
2295 static void
2296 do_dma_corrupt(struct bofi_shadow *hp, struct bofi_errent *ep,
2297 uint_t synctype, off_t off, off_t length)
2298 {
2299 uint64_t operand;
2300 int i;
2301 off_t len;
2302 caddr_t logaddr;
2303 uint64_t *addr;
2304 uint64_t *endaddr;
2305 ddi_dma_impl_t *hdlp;
2306 ndi_err_t *errp;
2307
2308 ASSERT(MUTEX_HELD(&bofi_mutex));
2309 if ((ep->errdef.access_count ||
2310 ep->errdef.fail_count) &&
2311 (ep->errdef.access_type & BOFI_LOG)) {
2312 uint_t atype;
2313
2314 if (synctype == DDI_DMA_SYNC_FORDEV)
2315 atype = BOFI_DMA_W;
2316 else if (synctype == DDI_DMA_SYNC_FORCPU ||
2317 synctype == DDI_DMA_SYNC_FORKERNEL)
2318 atype = BOFI_DMA_R;
2319 else
2320 atype = 0;
2321 if ((off <= ep->errdef.offset &&
2322 off + length > ep->errdef.offset) ||
2323 (off > ep->errdef.offset &&
2324 off < ep->errdef.offset + ep->errdef.len)) {
2325 logaddr = (caddr_t)((uintptr_t)(hp->addr +
2326 off + LLSZMASK) & ~LLSZMASK);
2327
2328 log_acc_event(ep, atype, logaddr - hp->addr,
2329 length, 1, 0);
2330 }
2331 }
2332 if (ep->errdef.access_count > 1) {
2333 ep->errdef.access_count--;
2334 } else if (ep->errdef.fail_count > 0) {
2335 ep->errdef.fail_count--;
2336 ep->errdef.access_count = 0;
2337 /*
2338 * OK do the corruption
2339 */
2340 if (ep->errstate.fail_time == 0)
2341 ep->errstate.fail_time = bofi_gettime();
2342 /*
2343 * work out how much to corrupt
2344 *
2345 * Make sure endaddr isn't greater than hp->addr + hp->len.
2346 * If endaddr becomes less than addr, len becomes negative
2347 * and the following loop isn't entered.
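*
* Worked example (assumed values, with LLSZMASK == 7): if hp->addr is
* 8-byte aligned, hp->len is 0x100, errdef.offset is 3 and errdef.len
* is 16, then addr rounds up to hp->addr + 8 and endaddr rounds down
* to hp->addr + 16, so exactly one uint64_t is corrupted. If offset
* and len fall entirely within one 8-byte word, endaddr <= addr,
* len <= 0 and nothing is touched.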
2348 */ 2349 addr = (uint64_t *)((uintptr_t)((hp->addr + 2350 ep->errdef.offset) + LLSZMASK) & ~LLSZMASK); 2351 endaddr = (uint64_t *)((uintptr_t)(hp->addr + min(hp->len, 2352 ep->errdef.offset + ep->errdef.len)) & ~LLSZMASK); 2353 len = endaddr - addr; 2354 operand = ep->errdef.operand; 2355 hdlp = (ddi_dma_impl_t *)(hp->hdl.dma_handle); 2356 errp = &hdlp->dmai_error; 2357 if (ep->errdef.acc_chk & 2) { 2358 uint64_t ena; 2359 char buf[FM_MAX_CLASS]; 2360 2361 errp->err_status = DDI_FM_NONFATAL; 2362 (void) snprintf(buf, FM_MAX_CLASS, FM_SIMULATED_DMA); 2363 ena = fm_ena_generate(0, FM_ENA_FMT1); 2364 ddi_fm_ereport_post(hp->dip, buf, ena, 2365 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 2366 FM_EREPORT_VERS0, NULL); 2367 } 2368 switch (ep->errdef.optype) { 2369 case BOFI_EQUAL : 2370 for (i = 0; i < len; i++) 2371 *(addr + i) = operand; 2372 break; 2373 case BOFI_AND : 2374 for (i = 0; i < len; i++) 2375 *(addr + i) &= operand; 2376 break; 2377 case BOFI_OR : 2378 for (i = 0; i < len; i++) 2379 *(addr + i) |= operand; 2380 break; 2381 case BOFI_XOR : 2382 for (i = 0; i < len; i++) 2383 *(addr + i) ^= operand; 2384 break; 2385 default: 2386 /* do nothing */ 2387 break; 2388 } 2389 } 2390 } 2391 2392 2393 static uint64_t do_bofi_rd8(struct bofi_shadow *, caddr_t); 2394 static uint64_t do_bofi_rd16(struct bofi_shadow *, caddr_t); 2395 static uint64_t do_bofi_rd32(struct bofi_shadow *, caddr_t); 2396 static uint64_t do_bofi_rd64(struct bofi_shadow *, caddr_t); 2397 2398 2399 /* 2400 * check all errdefs linked to this shadow handle. If we've got a condition 2401 * match check counts and corrupt data if necessary 2402 * 2403 * bofi_mutex always held when this is called. 2404 * 2405 * because of possibility of BOFI_NO_TRANSFER, we couldn't get data 2406 * from io-space before calling this, so we pass in the func to do the 2407 * transfer as a parameter. 
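*
* i.e. callers look like the access routines below, e.g. bofi_rd8():
*
*	retval = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr,
*	    do_bofi_rd8, 1, 1);
*
* where do_bofi_rd8() does the real ddi_get8() through the saved
* handle ops (hp->save.acc), and is only called if some errdef
* actually needs the data - a matching BOFI_NO_TRANSFER errdef
* returns its operand without touching io-space at all.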
2408 */ 2409 static uint64_t 2410 do_pior_corrupt(struct bofi_shadow *hp, caddr_t addr, 2411 uint64_t (*func)(), size_t repcount, size_t accsize) 2412 { 2413 struct bofi_errent *ep; 2414 struct bofi_link *lp; 2415 uint64_t operand; 2416 uintptr_t minlen; 2417 intptr_t base; 2418 int done_get = 0; 2419 uint64_t get_val, gv; 2420 ddi_acc_impl_t *hdlp; 2421 ndi_err_t *errp; 2422 2423 ASSERT(MUTEX_HELD(&bofi_mutex)); 2424 /* 2425 * check through all errdefs associated with this shadow handle 2426 */ 2427 for (lp = hp->link; lp != NULL; lp = lp->link) { 2428 ep = lp->errentp; 2429 if (ep->errdef.len == 0) 2430 minlen = hp->len; 2431 else 2432 minlen = min(hp->len, ep->errdef.len); 2433 base = addr - hp->addr - ep->errdef.offset + hp->offset; 2434 if ((ep->errdef.access_type & BOFI_PIO_R) && 2435 (ep->state & BOFI_DEV_ACTIVE) && 2436 base >= 0 && base < minlen) { 2437 /* 2438 * condition match for pio read 2439 */ 2440 if (ep->errdef.access_count > 1) { 2441 ep->errdef.access_count--; 2442 if (done_get == 0) { 2443 done_get = 1; 2444 gv = get_val = func(hp, addr); 2445 } 2446 if (ep->errdef.access_type & BOFI_LOG) { 2447 log_acc_event(ep, BOFI_PIO_R, 2448 addr - hp->addr, 2449 accsize, repcount, &gv); 2450 } 2451 } else if (ep->errdef.fail_count > 0) { 2452 ep->errdef.fail_count--; 2453 ep->errdef.access_count = 0; 2454 /* 2455 * OK do corruption 2456 */ 2457 if (ep->errstate.fail_time == 0) 2458 ep->errstate.fail_time = bofi_gettime(); 2459 operand = ep->errdef.operand; 2460 if (done_get == 0) { 2461 if (ep->errdef.optype == 2462 BOFI_NO_TRANSFER) 2463 /* 2464 * no transfer - bomb out 2465 */ 2466 return (operand); 2467 done_get = 1; 2468 gv = get_val = func(hp, addr); 2469 2470 } 2471 if (ep->errdef.access_type & BOFI_LOG) { 2472 log_acc_event(ep, BOFI_PIO_R, 2473 addr - hp->addr, 2474 accsize, repcount, &gv); 2475 } 2476 hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle); 2477 errp = hdlp->ahi_err; 2478 if (ep->errdef.acc_chk & 1) { 2479 uint64_t ena; 2480 char buf[FM_MAX_CLASS]; 2481 2482 errp->err_status = DDI_FM_NONFATAL; 2483 (void) snprintf(buf, FM_MAX_CLASS, 2484 FM_SIMULATED_PIO); 2485 ena = fm_ena_generate(0, FM_ENA_FMT1); 2486 ddi_fm_ereport_post(hp->dip, buf, ena, 2487 DDI_NOSLEEP, FM_VERSION, 2488 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 2489 NULL); 2490 } 2491 switch (ep->errdef.optype) { 2492 case BOFI_EQUAL : 2493 get_val = operand; 2494 break; 2495 case BOFI_AND : 2496 get_val &= operand; 2497 break; 2498 case BOFI_OR : 2499 get_val |= operand; 2500 break; 2501 case BOFI_XOR : 2502 get_val ^= operand; 2503 break; 2504 default: 2505 /* do nothing */ 2506 break; 2507 } 2508 } 2509 } 2510 } 2511 if (done_get == 0) 2512 return (func(hp, addr)); 2513 else 2514 return (get_val); 2515 } 2516 2517 2518 /* 2519 * check all errdefs linked to this shadow handle. If we've got a condition 2520 * match check counts and corrupt data if necessary 2521 * 2522 * bofi_mutex always held when this is called. 
2523 * 2524 * because of possibility of BOFI_NO_TRANSFER, we return 0 if no data 2525 * is to be written out to io-space, 1 otherwise 2526 */ 2527 static int 2528 do_piow_corrupt(struct bofi_shadow *hp, caddr_t addr, uint64_t *valuep, 2529 size_t size, size_t repcount) 2530 { 2531 struct bofi_errent *ep; 2532 struct bofi_link *lp; 2533 uintptr_t minlen; 2534 intptr_t base; 2535 uint64_t v = *valuep; 2536 ddi_acc_impl_t *hdlp; 2537 ndi_err_t *errp; 2538 2539 ASSERT(MUTEX_HELD(&bofi_mutex)); 2540 /* 2541 * check through all errdefs associated with this shadow handle 2542 */ 2543 for (lp = hp->link; lp != NULL; lp = lp->link) { 2544 ep = lp->errentp; 2545 if (ep->errdef.len == 0) 2546 minlen = hp->len; 2547 else 2548 minlen = min(hp->len, ep->errdef.len); 2549 base = (caddr_t)addr - hp->addr - ep->errdef.offset +hp->offset; 2550 if ((ep->errdef.access_type & BOFI_PIO_W) && 2551 (ep->state & BOFI_DEV_ACTIVE) && 2552 base >= 0 && base < minlen) { 2553 /* 2554 * condition match for pio write 2555 */ 2556 2557 if (ep->errdef.access_count > 1) { 2558 ep->errdef.access_count--; 2559 if (ep->errdef.access_type & BOFI_LOG) 2560 log_acc_event(ep, BOFI_PIO_W, 2561 addr - hp->addr, size, 2562 repcount, &v); 2563 } else if (ep->errdef.fail_count > 0) { 2564 ep->errdef.fail_count--; 2565 ep->errdef.access_count = 0; 2566 if (ep->errdef.access_type & BOFI_LOG) 2567 log_acc_event(ep, BOFI_PIO_W, 2568 addr - hp->addr, size, 2569 repcount, &v); 2570 /* 2571 * OK do corruption 2572 */ 2573 if (ep->errstate.fail_time == 0) 2574 ep->errstate.fail_time = bofi_gettime(); 2575 hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle); 2576 errp = hdlp->ahi_err; 2577 if (ep->errdef.acc_chk & 1) { 2578 uint64_t ena; 2579 char buf[FM_MAX_CLASS]; 2580 2581 errp->err_status = DDI_FM_NONFATAL; 2582 (void) snprintf(buf, FM_MAX_CLASS, 2583 FM_SIMULATED_PIO); 2584 ena = fm_ena_generate(0, FM_ENA_FMT1); 2585 ddi_fm_ereport_post(hp->dip, buf, ena, 2586 DDI_NOSLEEP, FM_VERSION, 2587 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 2588 NULL); 2589 } 2590 switch (ep->errdef.optype) { 2591 case BOFI_EQUAL : 2592 *valuep = ep->errdef.operand; 2593 break; 2594 case BOFI_AND : 2595 *valuep &= ep->errdef.operand; 2596 break; 2597 case BOFI_OR : 2598 *valuep |= ep->errdef.operand; 2599 break; 2600 case BOFI_XOR : 2601 *valuep ^= ep->errdef.operand; 2602 break; 2603 case BOFI_NO_TRANSFER : 2604 /* 2605 * no transfer - bomb out 2606 */ 2607 return (0); 2608 default: 2609 /* do nothing */ 2610 break; 2611 } 2612 } 2613 } 2614 } 2615 return (1); 2616 } 2617 2618 2619 static uint64_t 2620 do_bofi_rd8(struct bofi_shadow *hp, caddr_t addr) 2621 { 2622 return (hp->save.acc.ahi_get8(&hp->save.acc, (uint8_t *)addr)); 2623 } 2624 2625 #define BOFI_READ_CHECKS(type) \ 2626 if (bofi_ddi_check) \ 2627 addr = (type *)((uintptr_t)addr - 64 + hp->addr); \ 2628 if (bofi_range_check && ((caddr_t)addr < hp->addr || \ 2629 (caddr_t)addr - hp->addr >= hp->len)) { \ 2630 cmn_err((bofi_range_check == 2) ? 
CE_PANIC : CE_WARN, \ 2631 "ddi_get() out of range addr %p not in %p/%llx", \ 2632 (void *)addr, (void *)hp->addr, hp->len); \ 2633 return (0); \ 2634 } 2635 2636 /* 2637 * our getb() routine - use tryenter 2638 */ 2639 static uint8_t 2640 bofi_rd8(ddi_acc_impl_t *handle, uint8_t *addr) 2641 { 2642 struct bofi_shadow *hp; 2643 uint8_t retval; 2644 2645 hp = handle->ahi_common.ah_bus_private; 2646 BOFI_READ_CHECKS(uint8_t) 2647 if (!hp->link || !mutex_tryenter(&bofi_mutex)) 2648 return (hp->save.acc.ahi_get8(&hp->save.acc, addr)); 2649 retval = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd8, 1, 2650 1); 2651 mutex_exit(&bofi_mutex); 2652 return (retval); 2653 } 2654 2655 2656 static uint64_t 2657 do_bofi_rd16(struct bofi_shadow *hp, caddr_t addr) 2658 { 2659 return (hp->save.acc.ahi_get16(&hp->save.acc, (uint16_t *)addr)); 2660 } 2661 2662 2663 /* 2664 * our getw() routine - use tryenter 2665 */ 2666 static uint16_t 2667 bofi_rd16(ddi_acc_impl_t *handle, uint16_t *addr) 2668 { 2669 struct bofi_shadow *hp; 2670 uint16_t retval; 2671 2672 hp = handle->ahi_common.ah_bus_private; 2673 BOFI_READ_CHECKS(uint16_t) 2674 if (!hp->link || !mutex_tryenter(&bofi_mutex)) 2675 return (hp->save.acc.ahi_get16(&hp->save.acc, addr)); 2676 retval = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd16, 1, 2677 2); 2678 mutex_exit(&bofi_mutex); 2679 return (retval); 2680 } 2681 2682 2683 static uint64_t 2684 do_bofi_rd32(struct bofi_shadow *hp, caddr_t addr) 2685 { 2686 return (hp->save.acc.ahi_get32(&hp->save.acc, (uint32_t *)addr)); 2687 } 2688 2689 2690 /* 2691 * our getl() routine - use tryenter 2692 */ 2693 static uint32_t 2694 bofi_rd32(ddi_acc_impl_t *handle, uint32_t *addr) 2695 { 2696 struct bofi_shadow *hp; 2697 uint32_t retval; 2698 2699 hp = handle->ahi_common.ah_bus_private; 2700 BOFI_READ_CHECKS(uint32_t) 2701 if (!hp->link || !mutex_tryenter(&bofi_mutex)) 2702 return (hp->save.acc.ahi_get32(&hp->save.acc, addr)); 2703 retval = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd32, 1, 2704 4); 2705 mutex_exit(&bofi_mutex); 2706 return (retval); 2707 } 2708 2709 2710 static uint64_t 2711 do_bofi_rd64(struct bofi_shadow *hp, caddr_t addr) 2712 { 2713 return (hp->save.acc.ahi_get64(&hp->save.acc, (uint64_t *)addr)); 2714 } 2715 2716 2717 /* 2718 * our getll() routine - use tryenter 2719 */ 2720 static uint64_t 2721 bofi_rd64(ddi_acc_impl_t *handle, uint64_t *addr) 2722 { 2723 struct bofi_shadow *hp; 2724 uint64_t retval; 2725 2726 hp = handle->ahi_common.ah_bus_private; 2727 BOFI_READ_CHECKS(uint64_t) 2728 if (!hp->link || !mutex_tryenter(&bofi_mutex)) 2729 return (hp->save.acc.ahi_get64(&hp->save.acc, addr)); 2730 retval = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd64, 1, 2731 8); 2732 mutex_exit(&bofi_mutex); 2733 return (retval); 2734 } 2735 2736 #define BOFI_WRITE_TESTS(type) \ 2737 if (bofi_ddi_check) \ 2738 addr = (type *)((uintptr_t)addr - 64 + hp->addr); \ 2739 if (bofi_range_check && ((caddr_t)addr < hp->addr || \ 2740 (caddr_t)addr - hp->addr >= hp->len)) { \ 2741 cmn_err((bofi_range_check == 2) ? 
CE_PANIC : CE_WARN, \ 2742 "ddi_put() out of range addr %p not in %p/%llx\n", \ 2743 (void *)addr, (void *)hp->addr, hp->len); \ 2744 return; \ 2745 } 2746 2747 /* 2748 * our putb() routine - use tryenter 2749 */ 2750 static void 2751 bofi_wr8(ddi_acc_impl_t *handle, uint8_t *addr, uint8_t value) 2752 { 2753 struct bofi_shadow *hp; 2754 uint64_t llvalue = value; 2755 2756 hp = handle->ahi_common.ah_bus_private; 2757 BOFI_WRITE_TESTS(uint8_t) 2758 if (!hp->link || !mutex_tryenter(&bofi_mutex)) { 2759 hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue); 2760 return; 2761 } 2762 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, 1)) 2763 hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue); 2764 mutex_exit(&bofi_mutex); 2765 } 2766 2767 2768 /* 2769 * our putw() routine - use tryenter 2770 */ 2771 static void 2772 bofi_wr16(ddi_acc_impl_t *handle, uint16_t *addr, uint16_t value) 2773 { 2774 struct bofi_shadow *hp; 2775 uint64_t llvalue = value; 2776 2777 hp = handle->ahi_common.ah_bus_private; 2778 BOFI_WRITE_TESTS(uint16_t) 2779 if (!hp->link || !mutex_tryenter(&bofi_mutex)) { 2780 hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue); 2781 return; 2782 } 2783 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, 1)) 2784 hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue); 2785 mutex_exit(&bofi_mutex); 2786 } 2787 2788 2789 /* 2790 * our putl() routine - use tryenter 2791 */ 2792 static void 2793 bofi_wr32(ddi_acc_impl_t *handle, uint32_t *addr, uint32_t value) 2794 { 2795 struct bofi_shadow *hp; 2796 uint64_t llvalue = value; 2797 2798 hp = handle->ahi_common.ah_bus_private; 2799 BOFI_WRITE_TESTS(uint32_t) 2800 if (!hp->link || !mutex_tryenter(&bofi_mutex)) { 2801 hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue); 2802 return; 2803 } 2804 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, 1)) 2805 hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue); 2806 mutex_exit(&bofi_mutex); 2807 } 2808 2809 2810 /* 2811 * our putll() routine - use tryenter 2812 */ 2813 static void 2814 bofi_wr64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t value) 2815 { 2816 struct bofi_shadow *hp; 2817 uint64_t llvalue = value; 2818 2819 hp = handle->ahi_common.ah_bus_private; 2820 BOFI_WRITE_TESTS(uint64_t) 2821 if (!hp->link || !mutex_tryenter(&bofi_mutex)) { 2822 hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue); 2823 return; 2824 } 2825 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, 1)) 2826 hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue); 2827 mutex_exit(&bofi_mutex); 2828 } 2829 2830 #define BOFI_REP_READ_TESTS(type) \ 2831 if (bofi_ddi_check) \ 2832 dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \ 2833 if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \ 2834 (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \ 2835 cmn_err((bofi_range_check == 2) ? 
CE_PANIC : CE_WARN, \ 2836 "ddi_rep_get() out of range addr %p not in %p/%llx\n", \ 2837 (void *)dev_addr, (void *)hp->addr, hp->len); \ 2838 if ((caddr_t)dev_addr < hp->addr || \ 2839 (caddr_t)dev_addr - hp->addr >= hp->len) \ 2840 return; \ 2841 repcount = (type *)(hp->addr + hp->len) - dev_addr; \ 2842 } 2843 2844 /* 2845 * our rep_getb() routine - use tryenter 2846 */ 2847 static void 2848 bofi_rep_rd8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr, 2849 size_t repcount, uint_t flags) 2850 { 2851 struct bofi_shadow *hp; 2852 int i; 2853 uint8_t *addr; 2854 2855 hp = handle->ahi_common.ah_bus_private; 2856 BOFI_REP_READ_TESTS(uint8_t) 2857 if (!hp->link || !mutex_tryenter(&bofi_mutex)) { 2858 hp->save.acc.ahi_rep_get8(&hp->save.acc, host_addr, dev_addr, 2859 repcount, flags); 2860 return; 2861 } 2862 for (i = 0; i < repcount; i++) { 2863 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0); 2864 *(host_addr + i) = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr, 2865 do_bofi_rd8, i ? 0 : repcount, 1); 2866 } 2867 mutex_exit(&bofi_mutex); 2868 } 2869 2870 2871 /* 2872 * our rep_getw() routine - use tryenter 2873 */ 2874 static void 2875 bofi_rep_rd16(ddi_acc_impl_t *handle, uint16_t *host_addr, 2876 uint16_t *dev_addr, size_t repcount, uint_t flags) 2877 { 2878 struct bofi_shadow *hp; 2879 int i; 2880 uint16_t *addr; 2881 2882 hp = handle->ahi_common.ah_bus_private; 2883 BOFI_REP_READ_TESTS(uint16_t) 2884 if (!hp->link || !mutex_tryenter(&bofi_mutex)) { 2885 hp->save.acc.ahi_rep_get16(&hp->save.acc, host_addr, dev_addr, 2886 repcount, flags); 2887 return; 2888 } 2889 for (i = 0; i < repcount; i++) { 2890 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0); 2891 *(host_addr + i) = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr, 2892 do_bofi_rd16, i ? 0 : repcount, 2); 2893 } 2894 mutex_exit(&bofi_mutex); 2895 } 2896 2897 2898 /* 2899 * our rep_getl() routine - use tryenter 2900 */ 2901 static void 2902 bofi_rep_rd32(ddi_acc_impl_t *handle, uint32_t *host_addr, 2903 uint32_t *dev_addr, size_t repcount, uint_t flags) 2904 { 2905 struct bofi_shadow *hp; 2906 int i; 2907 uint32_t *addr; 2908 2909 hp = handle->ahi_common.ah_bus_private; 2910 BOFI_REP_READ_TESTS(uint32_t) 2911 if (!hp->link || !mutex_tryenter(&bofi_mutex)) { 2912 hp->save.acc.ahi_rep_get32(&hp->save.acc, host_addr, dev_addr, 2913 repcount, flags); 2914 return; 2915 } 2916 for (i = 0; i < repcount; i++) { 2917 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0); 2918 *(host_addr + i) = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr, 2919 do_bofi_rd32, i ? 0 : repcount, 4); 2920 } 2921 mutex_exit(&bofi_mutex); 2922 } 2923 2924 2925 /* 2926 * our rep_getll() routine - use tryenter 2927 */ 2928 static void 2929 bofi_rep_rd64(ddi_acc_impl_t *handle, uint64_t *host_addr, 2930 uint64_t *dev_addr, size_t repcount, uint_t flags) 2931 { 2932 struct bofi_shadow *hp; 2933 int i; 2934 uint64_t *addr; 2935 2936 hp = handle->ahi_common.ah_bus_private; 2937 BOFI_REP_READ_TESTS(uint64_t) 2938 if (!hp->link || !mutex_tryenter(&bofi_mutex)) { 2939 hp->save.acc.ahi_rep_get64(&hp->save.acc, host_addr, dev_addr, 2940 repcount, flags); 2941 return; 2942 } 2943 for (i = 0; i < repcount; i++) { 2944 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0); 2945 *(host_addr + i) = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr, 2946 do_bofi_rd64, i ? 
0 : repcount, 8); 2947 } 2948 mutex_exit(&bofi_mutex); 2949 } 2950 2951 #define BOFI_REP_WRITE_TESTS(type) \ 2952 if (bofi_ddi_check) \ 2953 dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \ 2954 if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \ 2955 (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \ 2956 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \ 2957 "ddi_rep_put() out of range addr %p not in %p/%llx\n", \ 2958 (void *)dev_addr, (void *)hp->addr, hp->len); \ 2959 if ((caddr_t)dev_addr < hp->addr || \ 2960 (caddr_t)dev_addr - hp->addr >= hp->len) \ 2961 return; \ 2962 repcount = (type *)(hp->addr + hp->len) - dev_addr; \ 2963 } 2964 2965 /* 2966 * our rep_putb() routine - use tryenter 2967 */ 2968 static void 2969 bofi_rep_wr8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr, 2970 size_t repcount, uint_t flags) 2971 { 2972 struct bofi_shadow *hp; 2973 int i; 2974 uint64_t llvalue; 2975 uint8_t *addr; 2976 2977 hp = handle->ahi_common.ah_bus_private; 2978 BOFI_REP_WRITE_TESTS(uint8_t) 2979 if (!hp->link || !mutex_tryenter(&bofi_mutex)) { 2980 hp->save.acc.ahi_rep_put8(&hp->save.acc, host_addr, dev_addr, 2981 repcount, flags); 2982 return; 2983 } 2984 for (i = 0; i < repcount; i++) { 2985 llvalue = *(host_addr + i); 2986 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0); 2987 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, i ? 0 : 2988 repcount)) 2989 hp->save.acc.ahi_put8(&hp->save.acc, addr, 2990 (uint8_t)llvalue); 2991 } 2992 mutex_exit(&bofi_mutex); 2993 } 2994 2995 2996 /* 2997 * our rep_putw() routine - use tryenter 2998 */ 2999 static void 3000 bofi_rep_wr16(ddi_acc_impl_t *handle, uint16_t *host_addr, 3001 uint16_t *dev_addr, size_t repcount, uint_t flags) 3002 { 3003 struct bofi_shadow *hp; 3004 int i; 3005 uint64_t llvalue; 3006 uint16_t *addr; 3007 3008 hp = handle->ahi_common.ah_bus_private; 3009 BOFI_REP_WRITE_TESTS(uint16_t) 3010 if (!hp->link || !mutex_tryenter(&bofi_mutex)) { 3011 hp->save.acc.ahi_rep_put16(&hp->save.acc, host_addr, dev_addr, 3012 repcount, flags); 3013 return; 3014 } 3015 for (i = 0; i < repcount; i++) { 3016 llvalue = *(host_addr + i); 3017 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0); 3018 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, i ? 0 : 3019 repcount)) 3020 hp->save.acc.ahi_put16(&hp->save.acc, addr, 3021 (uint16_t)llvalue); 3022 } 3023 mutex_exit(&bofi_mutex); 3024 } 3025 3026 3027 /* 3028 * our rep_putl() routine - use tryenter 3029 */ 3030 static void 3031 bofi_rep_wr32(ddi_acc_impl_t *handle, uint32_t *host_addr, 3032 uint32_t *dev_addr, size_t repcount, uint_t flags) 3033 { 3034 struct bofi_shadow *hp; 3035 int i; 3036 uint64_t llvalue; 3037 uint32_t *addr; 3038 3039 hp = handle->ahi_common.ah_bus_private; 3040 BOFI_REP_WRITE_TESTS(uint32_t) 3041 if (!hp->link || !mutex_tryenter(&bofi_mutex)) { 3042 hp->save.acc.ahi_rep_put32(&hp->save.acc, host_addr, dev_addr, 3043 repcount, flags); 3044 return; 3045 } 3046 for (i = 0; i < repcount; i++) { 3047 llvalue = *(host_addr + i); 3048 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0); 3049 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, i ? 
0 : 3050 repcount)) 3051 hp->save.acc.ahi_put32(&hp->save.acc, addr, 3052 (uint32_t)llvalue); 3053 } 3054 mutex_exit(&bofi_mutex); 3055 } 3056 3057 3058 /* 3059 * our rep_putll() routine - use tryenter 3060 */ 3061 static void 3062 bofi_rep_wr64(ddi_acc_impl_t *handle, uint64_t *host_addr, 3063 uint64_t *dev_addr, size_t repcount, uint_t flags) 3064 { 3065 struct bofi_shadow *hp; 3066 int i; 3067 uint64_t llvalue; 3068 uint64_t *addr; 3069 3070 hp = handle->ahi_common.ah_bus_private; 3071 BOFI_REP_WRITE_TESTS(uint64_t) 3072 if (!hp->link || !mutex_tryenter(&bofi_mutex)) { 3073 hp->save.acc.ahi_rep_put64(&hp->save.acc, host_addr, dev_addr, 3074 repcount, flags); 3075 return; 3076 } 3077 for (i = 0; i < repcount; i++) { 3078 llvalue = *(host_addr + i); 3079 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0); 3080 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, i ? 0 : 3081 repcount)) 3082 hp->save.acc.ahi_put64(&hp->save.acc, addr, 3083 (uint64_t)llvalue); 3084 } 3085 mutex_exit(&bofi_mutex); 3086 } 3087 3088 3089 /* 3090 * our ddi_map routine 3091 */ 3092 static int 3093 bofi_map(dev_info_t *dip, dev_info_t *rdip, 3094 ddi_map_req_t *reqp, off_t offset, off_t len, caddr_t *vaddrp) 3095 { 3096 ddi_acc_impl_t *ap; 3097 struct bofi_shadow *hp; 3098 struct bofi_errent *ep; 3099 struct bofi_link *lp, *next_lp; 3100 int retval; 3101 struct bofi_shadow *dhashp; 3102 struct bofi_shadow *hhashp; 3103 3104 switch (reqp->map_op) { 3105 case DDI_MO_MAP_LOCKED: 3106 /* 3107 * for this case get nexus to do real work first 3108 */ 3109 retval = save_bus_ops.bus_map(dip, rdip, reqp, offset, len, 3110 vaddrp); 3111 if (retval != DDI_SUCCESS) 3112 return (retval); 3113 3114 ap = (ddi_acc_impl_t *)reqp->map_handlep; 3115 if (ap == NULL) 3116 return (DDI_SUCCESS); 3117 /* 3118 * if driver_list is set, only intercept those drivers 3119 */ 3120 if (!driver_under_test(ap->ahi_common.ah_dip)) 3121 return (DDI_SUCCESS); 3122 3123 /* 3124 * support for ddi_regs_map_setup() 3125 * - allocate shadow handle structure and fill it in 3126 */ 3127 hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP); 3128 (void) strncpy(hp->name, ddi_get_name(ap->ahi_common.ah_dip), 3129 NAMESIZE); 3130 hp->instance = ddi_get_instance(ap->ahi_common.ah_dip); 3131 hp->dip = ap->ahi_common.ah_dip; 3132 hp->addr = *vaddrp; 3133 /* 3134 * return spurious value to catch direct access to registers 3135 */ 3136 if (bofi_ddi_check) 3137 *vaddrp = (caddr_t)64; 3138 hp->rnumber = ((ddi_acc_hdl_t *)ap)->ah_rnumber; 3139 hp->offset = offset; 3140 if (len == 0) 3141 hp->len = INT_MAX - offset; 3142 else 3143 hp->len = min(len, INT_MAX - offset); 3144 hp->hdl.acc_handle = (ddi_acc_handle_t)ap; 3145 hp->link = NULL; 3146 hp->type = BOFI_ACC_HDL; 3147 /* 3148 * save existing function pointers and plug in our own 3149 */ 3150 hp->save.acc = *ap; 3151 ap->ahi_get8 = bofi_rd8; 3152 ap->ahi_get16 = bofi_rd16; 3153 ap->ahi_get32 = bofi_rd32; 3154 ap->ahi_get64 = bofi_rd64; 3155 ap->ahi_put8 = bofi_wr8; 3156 ap->ahi_put16 = bofi_wr16; 3157 ap->ahi_put32 = bofi_wr32; 3158 ap->ahi_put64 = bofi_wr64; 3159 ap->ahi_rep_get8 = bofi_rep_rd8; 3160 ap->ahi_rep_get16 = bofi_rep_rd16; 3161 ap->ahi_rep_get32 = bofi_rep_rd32; 3162 ap->ahi_rep_get64 = bofi_rep_rd64; 3163 ap->ahi_rep_put8 = bofi_rep_wr8; 3164 ap->ahi_rep_put16 = bofi_rep_wr16; 3165 ap->ahi_rep_put32 = bofi_rep_wr32; 3166 ap->ahi_rep_put64 = bofi_rep_wr64; 3167 ap->ahi_fault_check = bofi_check_acc_hdl; 3168 #if defined(__sparc) 3169 #else 3170 ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT; 3171 #endif 
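/*
 * From here on, a driver call such as ddi_get8(handle, addr) is
 * dispatched through ap->ahi_get8 and so arrives at bofi_rd8()
 * above. A sketch of the resulting flow (illustrative only):
 *
 *	ddi_get8(handle, addr)
 *	  -> bofi_rd8()			via ap->ahi_get8
 *	       hp = ah_bus_private	set just below
 *	       -> do_pior_corrupt(hp, addr, do_bofi_rd8, 1, 1)
 *	            -> hp->save.acc.ahi_get8()	the real access
 *
 * so any injected value is computed before the driver sees it.
 */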
3172 /* 3173 * stick in a pointer to our shadow handle 3174 */ 3175 ap->ahi_common.ah_bus_private = hp; 3176 /* 3177 * add to dhash, hhash and inuse lists 3178 */ 3179 mutex_enter(&bofi_low_mutex); 3180 mutex_enter(&bofi_mutex); 3181 hp->next = shadow_list.next; 3182 shadow_list.next->prev = hp; 3183 hp->prev = &shadow_list; 3184 shadow_list.next = hp; 3185 hhashp = HDL_HHASH(ap); 3186 hp->hnext = hhashp->hnext; 3187 hhashp->hnext->hprev = hp; 3188 hp->hprev = hhashp; 3189 hhashp->hnext = hp; 3190 dhashp = HDL_DHASH(hp->dip); 3191 hp->dnext = dhashp->dnext; 3192 dhashp->dnext->dprev = hp; 3193 hp->dprev = dhashp; 3194 dhashp->dnext = hp; 3195 /* 3196 * chain on any pre-existing errdefs that apply to this 3197 * acc_handle 3198 */ 3199 for (ep = errent_listp; ep != NULL; ep = ep->next) { 3200 if (ddi_name_to_major(hp->name) == 3201 ddi_name_to_major(ep->name) && 3202 hp->instance == ep->errdef.instance && 3203 (ep->errdef.access_type & BOFI_PIO_RW) && 3204 (ep->errdef.rnumber == -1 || 3205 hp->rnumber == ep->errdef.rnumber) && 3206 (ep->errdef.len == 0 || 3207 offset < ep->errdef.offset + ep->errdef.len) && 3208 offset + hp->len > ep->errdef.offset) { 3209 lp = bofi_link_freelist; 3210 if (lp != NULL) { 3211 bofi_link_freelist = lp->link; 3212 lp->errentp = ep; 3213 lp->link = hp->link; 3214 hp->link = lp; 3215 } 3216 } 3217 } 3218 mutex_exit(&bofi_mutex); 3219 mutex_exit(&bofi_low_mutex); 3220 return (DDI_SUCCESS); 3221 case DDI_MO_UNMAP: 3222 3223 ap = (ddi_acc_impl_t *)reqp->map_handlep; 3224 if (ap == NULL) 3225 break; 3226 /* 3227 * support for ddi_regs_map_free() 3228 * - check we really have a shadow handle for this one 3229 */ 3230 mutex_enter(&bofi_low_mutex); 3231 mutex_enter(&bofi_mutex); 3232 hhashp = HDL_HHASH(ap); 3233 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) 3234 if (hp->hdl.acc_handle == (ddi_acc_handle_t)ap) 3235 break; 3236 if (hp == hhashp) { 3237 mutex_exit(&bofi_mutex); 3238 mutex_exit(&bofi_low_mutex); 3239 break; 3240 } 3241 /* 3242 * got a shadow handle - restore original pointers 3243 */ 3244 *ap = hp->save.acc; 3245 *vaddrp = hp->addr; 3246 /* 3247 * remove from dhash, hhash and inuse lists 3248 */ 3249 hp->hnext->hprev = hp->hprev; 3250 hp->hprev->hnext = hp->hnext; 3251 hp->dnext->dprev = hp->dprev; 3252 hp->dprev->dnext = hp->dnext; 3253 hp->next->prev = hp->prev; 3254 hp->prev->next = hp->next; 3255 /* 3256 * free any errdef link structures tagged onto the shadow handle 3257 */ 3258 for (lp = hp->link; lp != NULL; ) { 3259 next_lp = lp->link; 3260 lp->link = bofi_link_freelist; 3261 bofi_link_freelist = lp; 3262 lp = next_lp; 3263 } 3264 hp->link = NULL; 3265 mutex_exit(&bofi_mutex); 3266 mutex_exit(&bofi_low_mutex); 3267 /* 3268 * finally delete shadow handle 3269 */ 3270 kmem_free(hp, sizeof (struct bofi_shadow)); 3271 break; 3272 default: 3273 break; 3274 } 3275 return (save_bus_ops.bus_map(dip, rdip, reqp, offset, len, vaddrp)); 3276 } 3277 3278 3279 /* 3280 * chain any pre-existing errdefs on to newly created dma handle 3281 * if required call do_dma_corrupt() to corrupt data 3282 */ 3283 static void 3284 chain_on_errdefs(struct bofi_shadow *hp) 3285 { 3286 struct bofi_errent *ep; 3287 struct bofi_link *lp; 3288 3289 ASSERT(MUTEX_HELD(&bofi_mutex)); 3290 /* 3291 * chain on any pre-existing errdefs that apply to this dma_handle 3292 */ 3293 for (ep = errent_listp; ep != NULL; ep = ep->next) { 3294 if (ddi_name_to_major(hp->name) == 3295 ddi_name_to_major(ep->name) && 3296 hp->instance == ep->errdef.instance && 3297 (ep->errdef.rnumber == -1 || 
3298 hp->rnumber == ep->errdef.rnumber) &&
3299 ((ep->errdef.access_type & BOFI_DMA_RW) &&
3300 (((uintptr_t)(hp->addr + ep->errdef.offset +
3301 ep->errdef.len) & ~LLSZMASK) >
3302 ((uintptr_t)((hp->addr + ep->errdef.offset) +
3303 LLSZMASK) & ~LLSZMASK)))) {
3304 /*
3305 * got a match - link it on
3306 */
3307 lp = bofi_link_freelist;
3308 if (lp != NULL) {
3309 bofi_link_freelist = lp->link;
3310 lp->errentp = ep;
3311 lp->link = hp->link;
3312 hp->link = lp;
3313 if ((ep->errdef.access_type & BOFI_DMA_W) &&
3314 (hp->flags & DDI_DMA_WRITE) &&
3315 (ep->state & BOFI_DEV_ACTIVE)) {
3316 do_dma_corrupt(hp, ep,
3317 DDI_DMA_SYNC_FORDEV,
3318 0, hp->len);
3319 }
3320 }
3321 }
3322 }
3323 }
3324
3325
3326 /*
3327 * need to do copy byte-by-byte in case one of the pages is little-endian
3328 */
3329 static void
3330 xbcopy(void *from, void *to, u_longlong_t len)
3331 {
3332 uchar_t *f = from;
3333 uchar_t *t = to;
3334
3335 while (len--)
3336 *t++ = *f++;
3337 }
3338
3339
3340 /*
3341 * our ddi_dma_allochdl routine
3342 */
3343 static int
3344 bofi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
3345 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
3346 {
3347 int retval = DDI_DMA_NORESOURCES;
3348 struct bofi_shadow *hp, *xhp;
3349 int maxrnumber = 0;
3350 struct bofi_shadow *dhashp;
3351 struct bofi_shadow *hhashp;
3352 ddi_dma_impl_t *mp;
3353
3354 /*
3355 * if driver_list is set, only intercept those drivers
3356 */
3357 if (!driver_under_test(rdip))
3358 return (save_bus_ops.bus_dma_allochdl(dip, rdip, attrp,
3359 waitfp, arg, handlep));
3360
3361 /*
3362 * allocate shadow handle structure and fill it in
3363 */
3364 hp = kmem_zalloc(sizeof (struct bofi_shadow),
3365 ((waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP));
3366 if (hp == NULL) {
3367 /*
3368 * what to do here? Wait a bit and try again
3369 */
3370 if (waitfp != DDI_DMA_DONTWAIT)
3371 (void) timeout((void (*)())(uintptr_t)waitfp, arg, 10);
3372 return (retval);
3373 }
3374 (void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
3375 hp->instance = ddi_get_instance(rdip);
3376 hp->dip = rdip;
3377 hp->link = NULL;
3378 hp->type = BOFI_NULL;
3379 /*
3380 * call nexus to do the real work
3381 */
3382 retval = save_bus_ops.bus_dma_allochdl(dip, rdip, attrp, waitfp, arg,
3383 handlep);
3384 if (retval != DDI_SUCCESS) {
3385 kmem_free(hp, sizeof (struct bofi_shadow));
3386 return (retval);
3387 }
3388 /*
3389 * now set dma_handle to point to the real handle
3390 */
3391 hp->hdl.dma_handle = *handlep;
3392 mp = (ddi_dma_impl_t *)*handlep;
3393 mp->dmai_fault_check = bofi_check_dma_hdl;
3394 /*
3395 * bind and unbind are cached in devinfo - must overwrite them
3396 * - note that our bind and unbind are quite happy dealing with
3397 * any handles for this devinfo that were previously allocated
3398 */
3399 if (save_bus_ops.bus_dma_bindhdl == DEVI(rdip)->devi_bus_dma_bindfunc)
3400 DEVI(rdip)->devi_bus_dma_bindfunc = bofi_dma_bindhdl;
3401 if (save_bus_ops.bus_dma_unbindhdl ==
3402 DEVI(rdip)->devi_bus_dma_unbindfunc)
3403 DEVI(rdip)->devi_bus_dma_unbindfunc = bofi_dma_unbindhdl;
3404 mutex_enter(&bofi_low_mutex);
3405 mutex_enter(&bofi_mutex);
3406 /*
3407 * get an "rnumber" for this handle - really just seeking to
3408 * get a unique number - generally only care for early allocated
3409 * handles - so if we get as far as INT_MAX, just stay there
3410 */
3411 dhashp = HDL_DHASH(hp->dip);
3412 for (xhp = dhashp->dnext; xhp != dhashp; xhp = xhp->dnext)
3413 if (ddi_name_to_major(xhp->name) ==
3414 ddi_name_to_major(hp->name) &&
3415 xhp->instance == hp->instance &&
3416 (xhp->type == BOFI_DMA_HDL ||
3417 xhp->type == BOFI_NULL))
3418 if (xhp->rnumber >= maxrnumber) {
3419 if (xhp->rnumber == INT_MAX)
3420 maxrnumber = INT_MAX;
3421 else
3422 maxrnumber = xhp->rnumber + 1;
3423 }
3424 hp->rnumber = maxrnumber;
3425 /*
3426 * add to dhash, hhash and inuse lists
3427 */
3428 hp->next = shadow_list.next;
3429 shadow_list.next->prev = hp;
3430 hp->prev = &shadow_list;
3431 shadow_list.next = hp;
3432 hhashp = HDL_HHASH(*handlep);
3433 hp->hnext = hhashp->hnext;
3434 hhashp->hnext->hprev = hp;
3435 hp->hprev = hhashp;
3436 hhashp->hnext = hp;
3437 dhashp = HDL_DHASH(hp->dip);
3438 hp->dnext = dhashp->dnext;
3439 dhashp->dnext->dprev = hp;
3440 hp->dprev = dhashp;
3441 dhashp->dnext = hp;
3442 mutex_exit(&bofi_mutex);
3443 mutex_exit(&bofi_low_mutex);
3444 return (retval);
3445 }
3446
3447
3448 /*
3449 * our ddi_dma_freehdl routine
3450 */
3451 static int
3452 bofi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
3453 {
3454 int retval;
3455 struct bofi_shadow *hp;
3456 struct bofi_shadow *hhashp;
3457
3458 /*
3459 * find shadow for this handle
3460 */
3461 mutex_enter(&bofi_low_mutex);
3462 mutex_enter(&bofi_mutex);
3463 hhashp = HDL_HHASH(handle);
3464 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3465 if (hp->hdl.dma_handle == handle)
3466 break;
3467 mutex_exit(&bofi_mutex);
3468 mutex_exit(&bofi_low_mutex);
3469 /*
3470 * call nexus to do the real work
3471 */
3472 retval = save_bus_ops.bus_dma_freehdl(dip, rdip, handle);
3473 if (retval != DDI_SUCCESS) {
3474 return (retval);
3475 }
3476 /*
3477 * did we really have a shadow for this handle
3478 */
3479 if (hp == hhashp)
3480 return (retval);
3481 /*
3482 * yes we have - see if it's still bound
3483 */
3484
mutex_enter(&bofi_low_mutex); 3485 mutex_enter(&bofi_mutex); 3486 if (hp->type != BOFI_NULL) 3487 panic("driver freeing bound dma_handle"); 3488 /* 3489 * remove from dhash, hhash and inuse lists 3490 */ 3491 hp->hnext->hprev = hp->hprev; 3492 hp->hprev->hnext = hp->hnext; 3493 hp->dnext->dprev = hp->dprev; 3494 hp->dprev->dnext = hp->dnext; 3495 hp->next->prev = hp->prev; 3496 hp->prev->next = hp->next; 3497 mutex_exit(&bofi_mutex); 3498 mutex_exit(&bofi_low_mutex); 3499 3500 kmem_free(hp, sizeof (struct bofi_shadow)); 3501 return (retval); 3502 } 3503 3504 3505 /* 3506 * our ddi_dma_bindhdl routine 3507 */ 3508 static int 3509 bofi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 3510 ddi_dma_handle_t handle, struct ddi_dma_req *dmareqp, 3511 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 3512 { 3513 int retval = DDI_DMA_NORESOURCES; 3514 auto struct ddi_dma_req dmareq; 3515 struct bofi_shadow *hp; 3516 struct bofi_shadow *hhashp; 3517 ddi_dma_impl_t *mp; 3518 unsigned long pagemask = ddi_ptob(rdip, 1) - 1; 3519 3520 /* 3521 * check we really have a shadow for this handle 3522 */ 3523 mutex_enter(&bofi_low_mutex); 3524 mutex_enter(&bofi_mutex); 3525 hhashp = HDL_HHASH(handle); 3526 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) 3527 if (hp->hdl.dma_handle == handle) 3528 break; 3529 mutex_exit(&bofi_mutex); 3530 mutex_exit(&bofi_low_mutex); 3531 if (hp == hhashp) { 3532 /* 3533 * no we don't - just call nexus to do the real work 3534 */ 3535 return save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp, 3536 cookiep, ccountp); 3537 } 3538 /* 3539 * yes we have - see if it's already bound 3540 */ 3541 if (hp->type != BOFI_NULL) 3542 return (DDI_DMA_INUSE); 3543 3544 hp->flags = dmareqp->dmar_flags; 3545 if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) { 3546 hp->map_flags = B_PAGEIO; 3547 hp->map_pp = dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp; 3548 } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) { 3549 hp->map_flags = B_SHADOW; 3550 hp->map_pplist = dmareqp->dmar_object.dmao_obj.virt_obj.v_priv; 3551 } else { 3552 hp->map_flags = 0; 3553 } 3554 /* 3555 * get a kernel virtual mapping 3556 */ 3557 hp->addr = ddi_dmareq_mapin(dmareqp, &hp->mapaddr, &hp->len); 3558 if (hp->addr == NULL) 3559 goto error; 3560 if (bofi_sync_check) { 3561 /* 3562 * Take a copy and pass pointers to this up to nexus instead. 3563 * Data will be copied from the original on explicit 3564 * and implicit ddi_dma_sync() 3565 * 3566 * - maintain page alignment because some devices assume it. 3567 */ 3568 hp->origaddr = hp->addr; 3569 hp->allocaddr = ddi_umem_alloc( 3570 ((uintptr_t)hp->addr & pagemask) + hp->len, 3571 (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? 
KM_SLEEP : KM_NOSLEEP, 3572 &hp->umem_cookie); 3573 if (hp->allocaddr == NULL) 3574 goto error; 3575 hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask); 3576 if (dmareqp->dmar_flags & DDI_DMA_WRITE) 3577 xbcopy(hp->origaddr, hp->addr, hp->len); 3578 dmareq = *dmareqp; 3579 dmareq.dmar_object.dmao_size = hp->len; 3580 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR; 3581 dmareq.dmar_object.dmao_obj.virt_obj.v_as = &kas; 3582 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = hp->addr; 3583 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL; 3584 dmareqp = &dmareq; 3585 } 3586 /* 3587 * call nexus to do the real work 3588 */ 3589 retval = save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp, 3590 cookiep, ccountp); 3591 if (retval != DDI_SUCCESS) 3592 goto error2; 3593 /* 3594 * unset DMP_NOSYNC 3595 */ 3596 mp = (ddi_dma_impl_t *)handle; 3597 mp->dmai_rflags &= ~DMP_NOSYNC; 3598 /* 3599 * chain on any pre-existing errdefs that apply to this 3600 * acc_handle and corrupt if required (as there is an implicit 3601 * ddi_dma_sync() in this call) 3602 */ 3603 mutex_enter(&bofi_low_mutex); 3604 mutex_enter(&bofi_mutex); 3605 hp->type = BOFI_DMA_HDL; 3606 chain_on_errdefs(hp); 3607 mutex_exit(&bofi_mutex); 3608 mutex_exit(&bofi_low_mutex); 3609 return (retval); 3610 3611 error: 3612 if (dmareqp->dmar_fp != DDI_DMA_DONTWAIT) { 3613 /* 3614 * what to do here? Wait a bit and try again 3615 */ 3616 (void) timeout((void (*)())(uintptr_t)dmareqp->dmar_fp, 3617 dmareqp->dmar_arg, 10); 3618 } 3619 error2: 3620 if (hp) { 3621 ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags, 3622 hp->map_pp, hp->map_pplist); 3623 if (bofi_sync_check && hp->allocaddr) 3624 ddi_umem_free(hp->umem_cookie); 3625 hp->mapaddr = NULL; 3626 hp->allocaddr = NULL; 3627 hp->origaddr = NULL; 3628 } 3629 return (retval); 3630 } 3631 3632 3633 /* 3634 * our ddi_dma_unbindhdl routine 3635 */ 3636 static int 3637 bofi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 3638 { 3639 struct bofi_link *lp, *next_lp; 3640 struct bofi_errent *ep; 3641 int retval; 3642 struct bofi_shadow *hp; 3643 struct bofi_shadow *hhashp; 3644 3645 /* 3646 * call nexus to do the real work 3647 */ 3648 retval = save_bus_ops.bus_dma_unbindhdl(dip, rdip, handle); 3649 if (retval != DDI_SUCCESS) 3650 return (retval); 3651 /* 3652 * check we really have a shadow for this handle 3653 */ 3654 mutex_enter(&bofi_low_mutex); 3655 mutex_enter(&bofi_mutex); 3656 hhashp = HDL_HHASH(handle); 3657 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) 3658 if (hp->hdl.dma_handle == handle) 3659 break; 3660 if (hp == hhashp) { 3661 mutex_exit(&bofi_mutex); 3662 mutex_exit(&bofi_low_mutex); 3663 return (retval); 3664 } 3665 /* 3666 * yes we have - see if it's already unbound 3667 */ 3668 if (hp->type == BOFI_NULL) 3669 panic("driver unbinding unbound dma_handle"); 3670 /* 3671 * free any errdef link structures tagged on to this 3672 * shadow handle 3673 */ 3674 for (lp = hp->link; lp != NULL; ) { 3675 next_lp = lp->link; 3676 /* 3677 * there is an implicit sync_for_cpu on free - 3678 * may need to corrupt 3679 */ 3680 ep = lp->errentp; 3681 if ((ep->errdef.access_type & BOFI_DMA_R) && 3682 (hp->flags & DDI_DMA_READ) && 3683 (ep->state & BOFI_DEV_ACTIVE)) { 3684 do_dma_corrupt(hp, ep, DDI_DMA_SYNC_FORCPU, 0, hp->len); 3685 } 3686 lp->link = bofi_link_freelist; 3687 bofi_link_freelist = lp; 3688 lp = next_lp; 3689 } 3690 hp->link = NULL; 3691 hp->type = BOFI_NULL; 3692 mutex_exit(&bofi_mutex); 3693 mutex_exit(&bofi_low_mutex); 3694 3695 if 
(bofi_sync_check && (hp->flags & DDI_DMA_READ)) 3696 /* 3697 * implicit sync_for_cpu - copy data back 3698 */ 3699 if (hp->allocaddr) 3700 xbcopy(hp->addr, hp->origaddr, hp->len); 3701 ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags, 3702 hp->map_pp, hp->map_pplist); 3703 if (bofi_sync_check && hp->allocaddr) 3704 ddi_umem_free(hp->umem_cookie); 3705 hp->mapaddr = NULL; 3706 hp->allocaddr = NULL; 3707 hp->origaddr = NULL; 3708 return (retval); 3709 } 3710 3711 3712 /* 3713 * our ddi_dma_sync routine 3714 */ 3715 static int 3716 bofi_dma_flush(dev_info_t *dip, dev_info_t *rdip, 3717 ddi_dma_handle_t handle, off_t off, size_t len, uint_t flags) 3718 { 3719 struct bofi_link *lp; 3720 struct bofi_errent *ep; 3721 struct bofi_shadow *hp; 3722 struct bofi_shadow *hhashp; 3723 int retval; 3724 3725 if (flags == DDI_DMA_SYNC_FORCPU || flags == DDI_DMA_SYNC_FORKERNEL) { 3726 /* 3727 * in this case get nexus driver to do sync first 3728 */ 3729 retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off, 3730 len, flags); 3731 if (retval != DDI_SUCCESS) 3732 return (retval); 3733 } 3734 /* 3735 * check we really have a shadow for this handle 3736 */ 3737 mutex_enter(&bofi_low_mutex); 3738 mutex_enter(&bofi_mutex); 3739 hhashp = HDL_HHASH(handle); 3740 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) 3741 if (hp->hdl.dma_handle == handle && 3742 hp->type == BOFI_DMA_HDL) 3743 break; 3744 mutex_exit(&bofi_mutex); 3745 mutex_exit(&bofi_low_mutex); 3746 if (hp != hhashp) { 3747 /* 3748 * yes - do we need to copy data from original 3749 */ 3750 if (bofi_sync_check && flags == DDI_DMA_SYNC_FORDEV) 3751 if (hp->allocaddr) 3752 xbcopy(hp->origaddr+off, hp->addr+off, 3753 len ? len : (hp->len - off)); 3754 /* 3755 * yes - check if we need to corrupt the data 3756 */ 3757 mutex_enter(&bofi_low_mutex); 3758 mutex_enter(&bofi_mutex); 3759 for (lp = hp->link; lp != NULL; lp = lp->link) { 3760 ep = lp->errentp; 3761 if ((((ep->errdef.access_type & BOFI_DMA_R) && 3762 (flags == DDI_DMA_SYNC_FORCPU || 3763 flags == DDI_DMA_SYNC_FORKERNEL)) || 3764 ((ep->errdef.access_type & BOFI_DMA_W) && 3765 (flags == DDI_DMA_SYNC_FORDEV))) && 3766 (ep->state & BOFI_DEV_ACTIVE)) { 3767 do_dma_corrupt(hp, ep, flags, off, 3768 len ? len : (hp->len - off)); 3769 } 3770 } 3771 mutex_exit(&bofi_mutex); 3772 mutex_exit(&bofi_low_mutex); 3773 /* 3774 * do we need to copy data to original 3775 */ 3776 if (bofi_sync_check && (flags == DDI_DMA_SYNC_FORCPU || 3777 flags == DDI_DMA_SYNC_FORKERNEL)) 3778 if (hp->allocaddr) 3779 xbcopy(hp->addr+off, hp->origaddr+off, 3780 len ? 
len : (hp->len - off));
3781 }
3782 if (flags == DDI_DMA_SYNC_FORDEV)
3783 /*
3784 * in this case get nexus driver to do sync last
3785 */
3786 retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
3787 len, flags);
3788 return (retval);
3789 }
3790
3791
3792 /*
3793 * our dma_win routine
3794 */
3795 static int
3796 bofi_dma_win(dev_info_t *dip, dev_info_t *rdip,
3797 ddi_dma_handle_t handle, uint_t win, off_t *offp,
3798 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
3799 {
3800 struct bofi_shadow *hp;
3801 struct bofi_shadow *hhashp;
3802 int retval;
3803 ddi_dma_impl_t *mp;
3804
3805 /*
3806 * call nexus to do the real work
3807 */
3808 retval = save_bus_ops.bus_dma_win(dip, rdip, handle, win, offp, lenp,
3809 cookiep, ccountp);
3810 if (retval != DDI_SUCCESS)
3811 return (retval);
3812 /*
3813 * check we really have a shadow for this handle
3814 */
3815 mutex_enter(&bofi_low_mutex);
3816 mutex_enter(&bofi_mutex);
3817 hhashp = HDL_HHASH(handle);
3818 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3819 if (hp->hdl.dma_handle == handle)
3820 break;
3821 if (hp != hhashp) {
3822 /*
3823 * yes - make sure DMP_NOSYNC is unset
3824 */
3825 mp = (ddi_dma_impl_t *)handle;
3826 mp->dmai_rflags &= ~DMP_NOSYNC;
3827 }
3828 mutex_exit(&bofi_mutex);
3829 mutex_exit(&bofi_low_mutex);
3830 return (retval);
3831 }
3832
3833
3834 /*
3835 * our dma_ctl routine
3836 */
3837 static int
3838 bofi_dma_ctl(dev_info_t *dip, dev_info_t *rdip,
3839 ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
3840 off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
3841 {
3842 struct bofi_shadow *hp;
3843 struct bofi_shadow *hhashp;
3844 int retval;
3845 int i;
3846 struct bofi_shadow *dummyhp;
3847
3848 /*
3849 * get nexus to do real work
3850 */
3851 retval = save_bus_ops.bus_dma_ctl(dip, rdip, handle, request, offp,
3852 lenp, objp, flags);
3853 if (retval != DDI_SUCCESS)
3854 return (retval);
3855 /*
3856 * if driver_list is set, only intercept those drivers
3857 */
3858 if (!driver_under_test(rdip))
3859 return (DDI_SUCCESS);
3860
3861 #if defined(__sparc)
3862 /*
3863 * check if this is a dvma_reserve - that one's like a
3864 * dma_allochdl and needs to be handled separately
3865 */
3866 if (request == DDI_DMA_RESERVE) {
3867 bofi_dvma_reserve(rdip, *(ddi_dma_handle_t *)objp);
3868 return (DDI_SUCCESS);
3869 }
3870 #endif
3871 /*
3872 * check we really have a shadow for this handle
3873 */
3874 mutex_enter(&bofi_low_mutex);
3875 mutex_enter(&bofi_mutex);
3876 hhashp = HDL_HHASH(handle);
3877 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3878 if (hp->hdl.dma_handle == handle)
3879 break;
3880 if (hp == hhashp) {
3881 mutex_exit(&bofi_mutex);
3882 mutex_exit(&bofi_low_mutex);
3883 return (retval);
3884 }
3885 /*
3886 * yes we have - see what kind of command this is
3887 */
3888 switch (request) {
3889 case DDI_DMA_RELEASE:
3890 /*
3891 * dvma release - release dummy handle and all the index handles
3892 */
3893 dummyhp = hp;
3894 dummyhp->hnext->hprev = dummyhp->hprev;
3895 dummyhp->hprev->hnext = dummyhp->hnext;
3896 mutex_exit(&bofi_mutex);
3897 mutex_exit(&bofi_low_mutex);
3898 for (i = 0; i < dummyhp->len; i++) {
3899 hp = dummyhp->hparrayp[i];
3900 /*
3901 * check none of the index handles were still loaded
3902 */
3903 if (hp->type != BOFI_NULL)
3904 panic("driver releasing loaded dvma");
3905 /*
3906 * remove from dhash and inuse lists
3907 */
3908 mutex_enter(&bofi_low_mutex);
3909 mutex_enter(&bofi_mutex);
3910 hp->dnext->dprev = hp->dprev;
3911 hp->dprev->dnext =
hp->dnext; 3912 hp->next->prev = hp->prev; 3913 hp->prev->next = hp->next; 3914 mutex_exit(&bofi_mutex); 3915 mutex_exit(&bofi_low_mutex); 3916 3917 if (bofi_sync_check && hp->allocaddr) 3918 ddi_umem_free(hp->umem_cookie); 3919 kmem_free(hp, sizeof (struct bofi_shadow)); 3920 } 3921 kmem_free(dummyhp->hparrayp, dummyhp->len * 3922 sizeof (struct bofi_shadow *)); 3923 kmem_free(dummyhp, sizeof (struct bofi_shadow)); 3924 return (retval); 3925 default: 3926 break; 3927 } 3928 mutex_exit(&bofi_mutex); 3929 mutex_exit(&bofi_low_mutex); 3930 return (retval); 3931 } 3932 3933 #if defined(__sparc) 3934 /* 3935 * dvma reserve case from bofi_dma_ctl() 3936 */ 3937 static void 3938 bofi_dvma_reserve(dev_info_t *rdip, ddi_dma_handle_t handle) 3939 { 3940 struct bofi_shadow *hp; 3941 struct bofi_shadow *dummyhp; 3942 struct bofi_shadow *dhashp; 3943 struct bofi_shadow *hhashp; 3944 ddi_dma_impl_t *mp; 3945 struct fast_dvma *nexus_private; 3946 int i, count; 3947 3948 mp = (ddi_dma_impl_t *)handle; 3949 count = mp->dmai_ndvmapages; 3950 /* 3951 * allocate dummy shadow handle structure 3952 */ 3953 dummyhp = kmem_zalloc(sizeof (*dummyhp), KM_SLEEP); 3954 if (mp->dmai_rflags & DMP_BYPASSNEXUS) { 3955 /* 3956 * overlay our routines over the nexus's dvma routines 3957 */ 3958 nexus_private = (struct fast_dvma *)mp->dmai_nexus_private; 3959 dummyhp->save.dvma_ops = *(nexus_private->ops); 3960 nexus_private->ops = &bofi_dvma_ops; 3961 } 3962 /* 3963 * now fill in the dummy handle. This just gets put on hhash queue 3964 * so our dvma routines can find and index off to the handle they 3965 * really want. 3966 */ 3967 (void) strncpy(dummyhp->name, ddi_get_name(rdip), NAMESIZE); 3968 dummyhp->instance = ddi_get_instance(rdip); 3969 dummyhp->rnumber = -1; 3970 dummyhp->dip = rdip; 3971 dummyhp->len = count; 3972 dummyhp->hdl.dma_handle = handle; 3973 dummyhp->link = NULL; 3974 dummyhp->type = BOFI_NULL; 3975 /* 3976 * allocate space for real handles 3977 */ 3978 dummyhp->hparrayp = kmem_alloc(count * 3979 sizeof (struct bofi_shadow *), KM_SLEEP); 3980 for (i = 0; i < count; i++) { 3981 /* 3982 * allocate shadow handle structures and fill them in 3983 */ 3984 hp = kmem_zalloc(sizeof (*hp), KM_SLEEP); 3985 (void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE); 3986 hp->instance = ddi_get_instance(rdip); 3987 hp->rnumber = -1; 3988 hp->dip = rdip; 3989 hp->hdl.dma_handle = 0; 3990 hp->link = NULL; 3991 hp->type = BOFI_NULL; 3992 if (bofi_sync_check) { 3993 unsigned long pagemask = ddi_ptob(rdip, 1) - 1; 3994 /* 3995 * Take a copy and set this to be hp->addr 3996 * Data will be copied to and from the original on 3997 * explicit and implicit ddi_dma_sync() 3998 * 3999 * - maintain page alignment because some devices 4000 * assume it. 4001 */ 4002 hp->allocaddr = ddi_umem_alloc( 4003 ((int)(uintptr_t)hp->addr & pagemask) 4004 + pagemask + 1, 4005 KM_SLEEP, &hp->umem_cookie); 4006 hp->addr = hp->allocaddr + 4007 ((int)(uintptr_t)hp->addr & pagemask); 4008 } 4009 /* 4010 * add to dhash and inuse lists. 4011 * these don't go on hhash queue. 
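* (the per-page handles are instead reached as dummyhp->hparrayp[index]
* by the dvma routines - see bofi_dvma_kaddr_load() below)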
#if defined(__sparc)
/*
 * dvma reserve case from bofi_dma_ctl()
 */
static void
bofi_dvma_reserve(dev_info_t *rdip, ddi_dma_handle_t handle)
{
	struct bofi_shadow *hp;
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *dhashp;
	struct bofi_shadow *hhashp;
	ddi_dma_impl_t *mp;
	struct fast_dvma *nexus_private;
	int i, count;

	mp = (ddi_dma_impl_t *)handle;
	count = mp->dmai_ndvmapages;
	/*
	 * allocate dummy shadow handle structure
	 */
	dummyhp = kmem_zalloc(sizeof (*dummyhp), KM_SLEEP);
	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
		/*
		 * overlay our routines over the nexus's dvma routines
		 */
		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
		dummyhp->save.dvma_ops = *(nexus_private->ops);
		nexus_private->ops = &bofi_dvma_ops;
	}
	/*
	 * now fill in the dummy handle. This just gets put on hhash queue
	 * so our dvma routines can find and index off to the handle they
	 * really want.
	 */
	(void) strncpy(dummyhp->name, ddi_get_name(rdip), NAMESIZE);
	dummyhp->instance = ddi_get_instance(rdip);
	dummyhp->rnumber = -1;
	dummyhp->dip = rdip;
	dummyhp->len = count;
	dummyhp->hdl.dma_handle = handle;
	dummyhp->link = NULL;
	dummyhp->type = BOFI_NULL;
	/*
	 * allocate space for real handles
	 */
	dummyhp->hparrayp = kmem_alloc(count *
	    sizeof (struct bofi_shadow *), KM_SLEEP);
	for (i = 0; i < count; i++) {
		/*
		 * allocate shadow handle structures and fill them in
		 */
		hp = kmem_zalloc(sizeof (*hp), KM_SLEEP);
		(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
		hp->instance = ddi_get_instance(rdip);
		hp->rnumber = -1;
		hp->dip = rdip;
		hp->hdl.dma_handle = 0;
		hp->link = NULL;
		hp->type = BOFI_NULL;
		if (bofi_sync_check) {
			unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
			/*
			 * Take a copy and set this to be hp->addr
			 * Data will be copied to and from the original on
			 * explicit and implicit ddi_dma_sync()
			 *
			 * - maintain page alignment because some devices
			 * assume it.
			 */
			hp->allocaddr = ddi_umem_alloc(
			    ((int)(uintptr_t)hp->addr & pagemask)
			    + pagemask + 1,
			    KM_SLEEP, &hp->umem_cookie);
			hp->addr = hp->allocaddr +
			    ((int)(uintptr_t)hp->addr & pagemask);
		}
		/*
		 * add to dhash and inuse lists.
		 * these don't go on hhash queue.
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hp->next = shadow_list.next;
		shadow_list.next->prev = hp;
		hp->prev = &shadow_list;
		shadow_list.next = hp;
		dhashp = HDL_DHASH(hp->dip);
		hp->dnext = dhashp->dnext;
		dhashp->dnext->dprev = hp;
		hp->dprev = dhashp;
		dhashp->dnext = hp;
		dummyhp->hparrayp[i] = hp;
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
	}
	/*
	 * add dummy handle to hhash list only
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	dummyhp->hnext = hhashp->hnext;
	hhashp->hnext->hprev = dummyhp;
	dummyhp->hprev = hhashp;
	hhashp->hnext = dummyhp;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
}

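/*
 * Net effect of the reserve: a single "dummy" shadow, keyed by the
 * reserved dma_handle, sits on the hhash queue, and its hparrayp[] holds
 * one real shadow per reserved page. The per-index dvma routines below
 * find the dummy by handle and then use their index argument to select
 * the shadow they actually operate on.
 */
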
/*
 * our dvma_kaddr_load()
 */
static void
bofi_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
    ddi_dma_cookie_t *cp)
{
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;
	struct bofi_errent *ep;
	struct bofi_link *lp;

	/*
	 * check we really have a dummy shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(h);
	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
	    dummyhp = dummyhp->hnext)
		if (dummyhp->hdl.dma_handle == h)
			break;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	if (dummyhp == hhashp) {
		/*
		 * no dummy shadow - panic
		 */
		panic("driver dvma_kaddr_load with no reserve");
	}

	/*
	 * find real hp
	 */
	hp = dummyhp->hparrayp[index];
	/*
	 * check it's not already loaded
	 */
	if (hp->type != BOFI_NULL)
		panic("driver loading loaded dvma");
	/*
	 * if we're doing copying, we just need to change origaddr and get
	 * nexus to map hp->addr again;
	 * if not, set hp->addr to the new address.
	 * - note these are always kernel virtual addresses - no need to map
	 */
	if (bofi_sync_check && hp->allocaddr) {
		hp->origaddr = a;
		a = hp->addr;
	} else
		hp->addr = a;
	hp->len = len;
	/*
	 * get nexus to do the real work
	 */
	dummyhp->save.dvma_ops.dvma_kaddr_load(h, a, len, index, cp);
	/*
	 * chain on any pre-existing errdefs that apply to this dma_handle
	 * no need to corrupt - there's no implicit dma_sync on this one
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hp->type = BOFI_DMA_HDL;
	for (ep = errent_listp; ep != NULL; ep = ep->next) {
		if (ddi_name_to_major(hp->name) ==
		    ddi_name_to_major(ep->name) &&
		    hp->instance == ep->errdef.instance &&
		    (ep->errdef.rnumber == -1 ||
		    hp->rnumber == ep->errdef.rnumber) &&
		    ((ep->errdef.access_type & BOFI_DMA_RW) &&
		    (((uintptr_t)(hp->addr + ep->errdef.offset +
		    ep->errdef.len) & ~LLSZMASK) >
		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
		    LLSZMASK) & ~LLSZMASK)))) {
			lp = bofi_link_freelist;
			if (lp != NULL) {
				bofi_link_freelist = lp->link;
				lp->errentp = ep;
				lp->link = hp->link;
				hp->link = lp;
			}
		}
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
}

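/*
 * The LLSZMASK arithmetic in the errdef match above checks that rounding
 * (addr + offset) up and (addr + offset + len) down to uint64_t
 * boundaries still leaves a non-empty range - i.e. that the errdef
 * covers at least one complete aligned 64-bit word, presumably because
 * DMA corruption is applied in 64-bit units; a narrower errdef could
 * never take effect on this handle.
 */
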
/*
 * our dvma_unload()
 */
static void
bofi_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
{
	struct bofi_link *lp, *next_lp;
	struct bofi_errent *ep;
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;

	/*
	 * check we really have a dummy shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(h);
	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
	    dummyhp = dummyhp->hnext)
		if (dummyhp->hdl.dma_handle == h)
			break;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	if (dummyhp == hhashp) {
		/*
		 * no dummy shadow - panic
		 */
		panic("driver dvma_unload with no reserve");
	}
	dummyhp->save.dvma_ops.dvma_unload(h, index, view);
	/*
	 * find real hp
	 */
	hp = dummyhp->hparrayp[index];
	/*
	 * check it's not already unloaded
	 */
	if (hp->type == BOFI_NULL)
		panic("driver unloading unloaded dvma");
	/*
	 * free any errdef link structures tagged on to this
	 * shadow handle - do corruption if necessary
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	for (lp = hp->link; lp != NULL; ) {
		next_lp = lp->link;
		ep = lp->errentp;
		if ((ep->errdef.access_type & BOFI_DMA_R) &&
		    (view == DDI_DMA_SYNC_FORCPU ||
		    view == DDI_DMA_SYNC_FORKERNEL) &&
		    (ep->state & BOFI_DEV_ACTIVE)) {
			do_dma_corrupt(hp, ep, view, 0, hp->len);
		}
		lp->link = bofi_link_freelist;
		bofi_link_freelist = lp;
		lp = next_lp;
	}
	hp->link = NULL;
	hp->type = BOFI_NULL;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	/*
	 * if there is an explicit sync_for_cpu, then do copy to original
	 */
	if (bofi_sync_check &&
	    (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL))
		if (hp->allocaddr)
			xbcopy(hp->addr, hp->origaddr, hp->len);
}

/*
 * our dvma_sync()
 */
static void
bofi_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
{
	struct bofi_link *lp;
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *hhashp;

	/*
	 * check we really have a dummy shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(h);
	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
	    dummyhp = dummyhp->hnext)
		if (dummyhp->hdl.dma_handle == h)
			break;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	if (dummyhp == hhashp) {
		/*
		 * no dummy shadow - panic
		 */
		panic("driver dvma_sync with no reserve");
	}
	/*
	 * find real hp
	 */
	hp = dummyhp->hparrayp[index];
	/*
	 * check it's loaded
	 */
	if (hp->type == BOFI_NULL)
		panic("driver syncing unloaded dvma");
	if (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)
		/*
		 * in this case do sync first
		 */
		dummyhp->save.dvma_ops.dvma_sync(h, index, view);
	/*
	 * if there is an explicit sync_for_dev, then do copy from original
	 */
	if (bofi_sync_check && view == DDI_DMA_SYNC_FORDEV) {
		if (hp->allocaddr)
			xbcopy(hp->origaddr, hp->addr, hp->len);
	}
	/*
	 * do corruption if necessary
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		ep = lp->errentp;
		if ((((ep->errdef.access_type & BOFI_DMA_R) &&
		    (view == DDI_DMA_SYNC_FORCPU ||
		    view == DDI_DMA_SYNC_FORKERNEL)) ||
		    ((ep->errdef.access_type & BOFI_DMA_W) &&
		    (view == DDI_DMA_SYNC_FORDEV))) &&
		    (ep->state & BOFI_DEV_ACTIVE)) {
			do_dma_corrupt(hp, ep, view, 0, hp->len);
		}
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	/*
	 * if there is an explicit sync_for_cpu, then do copy to original
	 */
	if (bofi_sync_check &&
	    (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)) {
		if (hp->allocaddr)
			xbcopy(hp->addr, hp->origaddr, hp->len);
	}
	if (view == DDI_DMA_SYNC_FORDEV)
		/*
		 * in this case do sync last
		 */
		dummyhp->save.dvma_ops.dvma_sync(h, index, view);
}

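/*
 * Ordering summary for the sync paths above: for ..._FORCPU and
 * ..._FORKERNEL the real sync runs first, corruption is then applied,
 * and (under bofi-sync-check) the shadow buffer is copied back to the
 * driver's original so the driver sees the injected damage; for
 * ..._FORDEV the original is copied into the shadow first, corruption
 * applied, and the real sync runs last so the device sees it.
 */
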
#endif

/*
 * bofi intercept routine - gets called instead of user's interrupt routine
 */
static uint_t
bofi_intercept_intr(caddr_t xp, caddr_t arg2)
{
	struct bofi_errent *ep;
	struct bofi_link *lp;
	struct bofi_shadow *hp;
	int intr_count = 1;
	int i;
	uint_t retval = DDI_INTR_UNCLAIMED;
	uint_t result;
	int unclaimed_counter = 0;
	int jabber_detected = 0;

	hp = (struct bofi_shadow *)xp;
	/*
	 * check if nothing to do
	 */
	if (hp->link == NULL)
		return (hp->save.intr.int_handler
		    (hp->save.intr.int_handler_arg1, arg2));
	mutex_enter(&bofi_mutex);
	/*
	 * look for any errdefs
	 */
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		ep = lp->errentp;
		if (ep->state & BOFI_DEV_ACTIVE) {
			/*
			 * got one
			 */
			if ((ep->errdef.access_count ||
			    ep->errdef.fail_count) &&
			    (ep->errdef.access_type & BOFI_LOG))
				log_acc_event(ep, BOFI_INTR, 0, 0, 1, 0);
			if (ep->errdef.access_count > 1) {
				ep->errdef.access_count--;
			} else if (ep->errdef.fail_count > 0) {
				ep->errdef.fail_count--;
				ep->errdef.access_count = 0;
				/*
				 * OK do "corruption"
				 */
				if (ep->errstate.fail_time == 0)
					ep->errstate.fail_time = bofi_gettime();
				switch (ep->errdef.optype) {
				case BOFI_DELAY_INTR:
					if (!hp->hilevel) {
						drv_usecwait
						    (ep->errdef.operand);
					}
					break;
				case BOFI_LOSE_INTR:
					intr_count = 0;
					break;
				case BOFI_EXTRA_INTR:
					intr_count += ep->errdef.operand;
					break;
				default:
					break;
				}
			}
		}
	}
	mutex_exit(&bofi_mutex);
	/*
	 * send extra or fewer interrupts as requested
	 */
	for (i = 0; i < intr_count; i++) {
		result = hp->save.intr.int_handler
		    (hp->save.intr.int_handler_arg1, arg2);
		if (result == DDI_INTR_CLAIMED)
			unclaimed_counter >>= 1;
		else if (++unclaimed_counter >= 20)
			jabber_detected = 1;
		if (i == 0)
			retval = result;
	}
	/*
	 * if more than 1000 spurious interrupts requested and
	 * jabber not detected - panic
	 */
	if (intr_count > 1000 && !jabber_detected)
		panic("undetected interrupt jabber: %s%d",
		    hp->name, hp->instance);
	/*
	 * return first response - or "unclaimed" if none
	 */
	return (retval);
}

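/*
 * A note on the jabber heuristic above: every claimed interrupt halves
 * unclaimed_counter while every unclaimed one increments it, so only a
 * dense run of unclaimed responses (reaching 20) counts as detected
 * jabber. If an errdef injected more than 1000 extra interrupts and the
 * driver's jabber protection never tripped, the panic flags that the
 * driver under test failed to defend itself against an interrupt storm.
 */
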
/*
 * our ddi_check_acc_hdl
 */
/* ARGSUSED */
static int
bofi_check_acc_hdl(ddi_acc_impl_t *handle)
{
	struct bofi_shadow *hp;
	struct bofi_link *lp;
	uint_t result = 0;

	hp = handle->ahi_common.ah_bus_private;
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		return (0);
	}
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		/*
		 * OR in error state from all associated
		 * errdef structures
		 */
		if (lp->errentp->errdef.access_count == 0 &&
		    (lp->errentp->state & BOFI_DEV_ACTIVE)) {
			result = (lp->errentp->errdef.acc_chk & 1);
		}
	}
	mutex_exit(&bofi_mutex);
	return (result);
}

/*
 * our ddi_check_dma_hdl
 */
/* ARGSUSED */
static int
bofi_check_dma_hdl(ddi_dma_impl_t *handle)
{
	struct bofi_shadow *hp;
	struct bofi_link *lp;
	struct bofi_shadow *hhashp;
	uint_t result = 0;

	if (!mutex_tryenter(&bofi_mutex)) {
		return (0);
	}
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == (ddi_dma_handle_t)handle)
			break;
	if (hp == hhashp) {
		mutex_exit(&bofi_mutex);
		return (0);
	}
	if (!hp->link) {
		mutex_exit(&bofi_mutex);
		return (0);
	}
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		/*
		 * OR in error state from all associated
		 * errdef structures
		 */
		if (lp->errentp->errdef.access_count == 0 &&
		    (lp->errentp->state & BOFI_DEV_ACTIVE)) {
			result = ((lp->errentp->errdef.acc_chk & 2) ?
			    1 : 0);
		}
	}
	mutex_exit(&bofi_mutex);
	return (result);
}

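/*
 * Taken together, the two routines above treat an errdef's acc_chk field
 * as a small bit mask: bit 0 makes ddi_check_acc_handle() report a fault
 * on the access handle, bit 1 does the same for ddi_check_dma_handle(),
 * and neither fires until the errdef's access_count has drained to zero.
 */
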
/* ARGSUSED */
static int
bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
    ddi_eventcookie_t eventhdl, void *impl_data)
{
	ddi_eventcookie_t ec;
	struct ddi_fault_event_data *arg;
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_shadow *dhashp;
	struct bofi_link *lp;

	ASSERT(eventhdl);
	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (ec != eventhdl)
		return (save_bus_ops.bus_post_event(dip, rdip, eventhdl,
		    impl_data));

	arg = (struct ddi_fault_event_data *)impl_data;
	mutex_enter(&bofi_mutex);
	/*
	 * find shadow handles with appropriate dev_infos
	 * and set error reported on all associated errdef structures
	 */
	dhashp = HDL_DHASH(arg->f_dip);
	for (hp = dhashp->dnext; hp != dhashp; hp = hp->dnext) {
		if (hp->dip == arg->f_dip) {
			for (lp = hp->link; lp != NULL; lp = lp->link) {
				ep = lp->errentp;
				ep->errstate.errmsg_count++;
				if ((ep->errstate.msg_time == 0 ||
				    ep->errstate.severity > arg->f_impact) &&
				    (ep->state & BOFI_DEV_ACTIVE)) {
					ep->errstate.msg_time = bofi_gettime();
					ep->errstate.severity = arg->f_impact;
					(void) strncpy(ep->errstate.buffer,
					    arg->f_message, ERRMSGSIZE);
					ddi_trigger_softintr(ep->softintr_id);
				}
			}
		}
	}
	mutex_exit(&bofi_mutex);
	return (save_bus_ops.bus_post_event(dip, rdip, eventhdl, impl_data));
}

/*ARGSUSED*/
static int
bofi_fm_ereport_callback(sysevent_t *ev, void *cookie)
{
	char *class = "";
	char *path = "";
	char *ptr;
	nvlist_t *nvlist;
	nvlist_t *detector;
	ddi_fault_impact_t impact;
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_link *lp;
	char service_class[FM_MAX_CLASS];
	char hppath[MAXPATHLEN];
	int service_ereport = 0;

	(void) sysevent_get_attr_list(ev, &nvlist);
	(void) nvlist_lookup_string(nvlist, FM_CLASS, &class);
	if (nvlist_lookup_nvlist(nvlist, FM_EREPORT_DETECTOR, &detector) == 0)
		(void) nvlist_lookup_string(detector, FM_FMRI_DEV_PATH, &path);

	(void) snprintf(service_class, FM_MAX_CLASS, "%s.%s.%s.",
	    FM_EREPORT_CLASS, DDI_IO_CLASS, DDI_FM_SERVICE_IMPACT);
	if (strncmp(class, service_class, strlen(service_class) - 1) == 0)
		service_ereport = 1;

	mutex_enter(&bofi_mutex);
	/*
	 * find shadow handles with appropriate dev_infos
	 * and set error reported on all associated errdef structures
	 */
	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
		(void) ddi_pathname(hp->dip, hppath);
		if (strcmp(path, hppath) != 0)
			continue;
		for (lp = hp->link; lp != NULL; lp = lp->link) {
			ep = lp->errentp;
			ep->errstate.errmsg_count++;
			if (!(ep->state & BOFI_DEV_ACTIVE))
				continue;
			if (ep->errstate.msg_time != 0)
				continue;
			if (service_ereport) {
				ptr = class + strlen(service_class);
				if (strcmp(ptr, DDI_FM_SERVICE_LOST) == 0)
					impact = DDI_SERVICE_LOST;
				else if (strcmp(ptr,
				    DDI_FM_SERVICE_DEGRADED) == 0)
					impact = DDI_SERVICE_DEGRADED;
				else if (strcmp(ptr,
				    DDI_FM_SERVICE_RESTORED) == 0)
					impact = DDI_SERVICE_RESTORED;
				else
					impact = DDI_SERVICE_UNAFFECTED;
				if (ep->errstate.severity > impact)
					ep->errstate.severity = impact;
			} else if (ep->errstate.buffer[0] == '\0') {
				(void) strncpy(ep->errstate.buffer, class,
				    ERRMSGSIZE);
			}
			if (ep->errstate.buffer[0] != '\0' &&
			    ep->errstate.severity < DDI_SERVICE_RESTORED) {
				ep->errstate.msg_time = bofi_gettime();
				ddi_trigger_softintr(ep->softintr_id);
			}
		}
	}
	nvlist_free(nvlist);
	mutex_exit(&bofi_mutex);
	return (0);
}

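/*
 * Both bofi_post_event() and the ereport callback above feed the same
 * reporting state: they count the message, record its text and service
 * impact in the errdef's errstate, and trigger the errdef's soft
 * interrupt so that a harness waiting on errdef status (via the
 * bofi_errdef_check()/bofi_errdef_check_w() ioctl paths) can see that
 * the driver under test actually reported the injected fault.
 */
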
/*
 * our intr_ops routine
 */
static int
bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	int retval;
	struct bofi_shadow *hp;
	struct bofi_shadow *dhashp;
	struct bofi_shadow *hhashp;
	struct bofi_errent *ep;
	struct bofi_link *lp, *next_lp;

	switch (intr_op) {
	case DDI_INTROP_ADDISR:
		/*
		 * if driver_list is set, only intercept those drivers
		 */
		if (!driver_under_test(rdip))
			return (save_bus_ops.bus_intr_op(dip, rdip,
			    intr_op, hdlp, result));
		/*
		 * allocate shadow handle structure and fill in
		 */
		hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
		(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
		hp->instance = ddi_get_instance(rdip);
		hp->save.intr.int_handler = hdlp->ih_cb_func;
		hp->save.intr.int_handler_arg1 = hdlp->ih_cb_arg1;
		hdlp->ih_cb_func = (ddi_intr_handler_t *)bofi_intercept_intr;
		hdlp->ih_cb_arg1 = (caddr_t)hp;
		hp->bofi_inum = hdlp->ih_inum;
		hp->dip = rdip;
		hp->link = NULL;
		hp->type = BOFI_INT_HDL;
		/*
		 * save whether hilevel or not
		 */
		if (hdlp->ih_pri >= ddi_intr_get_hilevel_pri())
			hp->hilevel = 1;
		else
			hp->hilevel = 0;
		/*
		 * call nexus to do real work, but specifying our handler, and
		 * our shadow handle as argument
		 */
		retval = save_bus_ops.bus_intr_op(dip, rdip,
		    intr_op, hdlp, result);
		if (retval != DDI_SUCCESS) {
			kmem_free(hp, sizeof (struct bofi_shadow));
			return (retval);
		}
		/*
		 * add to dhash, hhash and inuse lists
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hp->next = shadow_list.next;
		shadow_list.next->prev = hp;
		hp->prev = &shadow_list;
		shadow_list.next = hp;
		hhashp = HDL_HHASH(hdlp->ih_inum);
		hp->hnext = hhashp->hnext;
		hhashp->hnext->hprev = hp;
		hp->hprev = hhashp;
		hhashp->hnext = hp;
		dhashp = HDL_DHASH(hp->dip);
		hp->dnext = dhashp->dnext;
		dhashp->dnext->dprev = hp;
		hp->dprev = dhashp;
		dhashp->dnext = hp;
		/*
		 * chain on any pre-existing errdefs that apply to this
		 * interrupt handle
		 */
		for (ep = errent_listp; ep != NULL; ep = ep->next) {
			if (ddi_name_to_major(hp->name) ==
			    ddi_name_to_major(ep->name) &&
			    hp->instance == ep->errdef.instance &&
			    (ep->errdef.access_type & BOFI_INTR)) {
				lp = bofi_link_freelist;
				if (lp != NULL) {
					bofi_link_freelist = lp->link;
					lp->errentp = ep;
					lp->link = hp->link;
					hp->link = lp;
				}
			}
		}
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (retval);
	case DDI_INTROP_REMISR:
		/*
		 * call nexus routine first
		 */
		retval = save_bus_ops.bus_intr_op(dip, rdip,
		    intr_op, hdlp, result);
		/*
		 * find shadow handle
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hhashp = HDL_HHASH(hdlp->ih_inum);
		for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
			if (hp->dip == rdip &&
			    hp->type == BOFI_INT_HDL &&
			    hp->bofi_inum == hdlp->ih_inum) {
				break;
			}
		}
		if (hp == hhashp) {
			mutex_exit(&bofi_mutex);
			mutex_exit(&bofi_low_mutex);
			return (retval);
		}
		/*
		 * found one - remove from dhash, hhash and inuse lists
		 */
		hp->hnext->hprev = hp->hprev;
		hp->hprev->hnext = hp->hnext;
		hp->dnext->dprev = hp->dprev;
		hp->dprev->dnext = hp->dnext;
		hp->next->prev = hp->prev;
		hp->prev->next = hp->next;
		/*
		 * free any errdef link structures
		 * tagged on to this shadow handle
		 */
		for (lp = hp->link; lp != NULL; ) {
			next_lp = lp->link;
			lp->link = bofi_link_freelist;
			bofi_link_freelist = lp;
			lp = next_lp;
		}
		hp->link = NULL;
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		kmem_free(hp, sizeof (struct bofi_shadow));
		return (retval);
	default:
		return (save_bus_ops.bus_intr_op(dip, rdip,
		    intr_op, hdlp, result));
	}
}
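
/*
 * Illustrative only: a minimal user-level sketch of exercising the
 * interrupt intercepts above through the errdef interface. The device
 * node, ioctl names and structure fields are those declared in
 * <sys/bofi.h>, but treat the details as assumptions rather than a
 * supported recipe - th_define(1M)/th_manage(1M) are the usual front
 * ends.
 *
 *	int fd = open("/dev/bofi", O_RDWR);
 *	struct bofi_errdef ed;
 *
 *	bzero(&ed, sizeof (ed));
 *	(void) strcpy(ed.name, "xyzdrv");	(hypothetical driver name)
 *	ed.namesize = strlen(ed.name);
 *	ed.instance = 0;
 *	ed.rnumber = -1;
 *	ed.access_type = BOFI_INTR;
 *	ed.optype = BOFI_LOSE_INTR;		(swallow one interrupt)
 *	ed.access_count = 1;
 *	ed.fail_count = 1;
 *	(void) ioctl(fd, BOFI_ADD_DEF, &ed);
 *
 * followed by BOFI_START on the matching bofi_errctl, and an errdef
 * status wait (bofi_errdef_check_w() path) to see how the driver coped.
 */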