/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

/*
 * IOSRAM leaf driver to SBBC nexus driver.  This driver is used
 * by Starcat Domain SW to read/write from/to the IO sram.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/obpdefs.h>
#include <sys/promif.h>
#include <sys/prom_plat.h>
#include <sys/cmn_err.h>
#include <sys/conf.h>       /* req. by dev_ops flags MTSAFE etc. */
#include <sys/modctl.h>     /* for modldrv */
#include <sys/stat.h>       /* ddi_create_minor_node S_IFCHR */
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/kstat.h>
#include <sys/debug.h>

#include <sys/axq.h>
#include <sys/iosramreg.h>
#include <sys/iosramio.h>
#include <sys/iosramvar.h>


#if defined(DEBUG)
int iosram_debug = 0;
static void iosram_dprintf(const char *fmt, ...);
#define DPRINTF(level, arg) \
    { if (iosram_debug >= level) iosram_dprintf arg; }
#else   /* !DEBUG */
#define DPRINTF(level, arg)
#endif  /* !DEBUG */


/*
 * IOSRAM module global state
 */
static void *iosramsoft_statep;         /* IOSRAM state pointer */
static kmutex_t iosram_mutex;           /* mutex lock */

static iosram_chunk_t *chunks = NULL;   /* array of TOC entries */
static int nchunks = 0;                 /* # of TOC entries */
static iosram_chunk_t *iosram_hashtab[IOSRAM_HASHSZ];   /* key hash table */

static kcondvar_t iosram_tswitch_wait;  /* tunnel switch wait cv */
static int iosram_tswitch_wakeup = 0;   /* flag indicating one or */
                                        /* more threads waiting on */
                                        /* iosram_tswitch_wait cv */
static int iosram_tswitch_active = 0;   /* tunnel switch active flag */
static int iosram_tswitch_aborted = 0;  /* tunnel switch abort flag */
static clock_t iosram_tswitch_tstamp = 0;   /* lbolt of last tswitch end */
static kcondvar_t iosram_rw_wait;       /* read/write wait cv */
static int iosram_rw_wakeup = 0;        /* flag indicating one or */
                                        /* more threads waiting on */
                                        /* iosram_rw_wait cv */
static int iosram_rw_active = 0;        /* # threads accessing IOSRAM */
#if defined(DEBUG)
static int iosram_rw_active_max = 0;
#endif

static struct iosramsoft *iosram_new_master = NULL; /* new tunnel target */
static struct iosramsoft *iosram_master = NULL;     /* master tunnel */
static struct iosramsoft *iosram_instances = NULL;  /* list of softstates */

static ddi_acc_handle_t iosram_handle = NULL;   /* master IOSRAM map handle */

static void (*iosram_hdrchange_handler)() = NULL;
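
/*
 * Note on the debug macros above (illustrative comment only, not compiled
 * code): DPRINTF() takes a verbosity level and a parenthesized printf-style
 * argument list, for example
 *
 *      DPRINTF(1, ("iosram(%d): attach dip:%p\n", instance, (void *)dip));
 *
 * so that the entire call, including argument evaluation, disappears when
 * the driver is built without DEBUG.
 */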

#if IOSRAM_STATS
static struct iosram_stat iosram_stats;     /* IOSRAM statistics */
static void iosram_print_stats();           /* forward declaration */
#endif  /* IOSRAM_STATS */


#if IOSRAM_LOG
kmutex_t iosram_log_mutex;
int iosram_log_level = 1;
int iosram_log_print = 0;                   /* print log when recorded */
uint32_t iosram_logseq;
iosram_log_t iosram_logbuf[IOSRAM_MAXLOG];
static void iosram_print_log(int cnt);      /* forward declaration */
#endif  /* IOSRAM_LOG */


/* driver entry point fn definitions */
static int iosram_open(dev_t *, int, int, cred_t *);
static int iosram_close(dev_t, int, int, cred_t *);
static int iosram_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/* configuration entry point fn definitions */
static int iosram_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int iosram_attach(dev_info_t *, ddi_attach_cmd_t);
static int iosram_detach(dev_info_t *, ddi_detach_cmd_t);


/* forward declarations */
static iosram_chunk_t *iosram_find_chunk(uint32_t key);
static void iosram_set_master(struct iosramsoft *softp);
static int iosram_is_chosen(struct iosramsoft *softp);
static int iosram_tunnel_capable(struct iosramsoft *softp);
static int iosram_read_toc(struct iosramsoft *softp);
static void iosram_init_hashtab(void);
static void iosram_update_addrs(struct iosramsoft *softp);

static int iosram_setup_map(struct iosramsoft *softp);
static void iosram_remove_map(struct iosramsoft *softp);
static int iosram_add_intr(iosramsoft_t *);
static int iosram_remove_intr(iosramsoft_t *);

static void iosram_add_instance(struct iosramsoft *softp);
static void iosram_remove_instance(int instance);
static int iosram_switch_tunnel(iosramsoft_t *softp);
static void iosram_abort_tswitch();

#if defined(DEBUG)
/* forward declarations for debugging */
static int iosram_get_keys(iosram_toc_entry_t *buf, uint32_t *len);
static void iosram_print_cback();
static void iosram_print_state(int);
static void iosram_print_flags();
#endif



/*
 * cb_ops
 */
static struct cb_ops iosram_cb_ops = {
    iosram_open,            /* cb_open */
    iosram_close,           /* cb_close */
    nodev,                  /* cb_strategy */
    nodev,                  /* cb_print */
    nodev,                  /* cb_dump */
    nodev,                  /* cb_read */
    nodev,                  /* cb_write */
    iosram_ioctl,           /* cb_ioctl */
    nodev,                  /* cb_devmap */
    nodev,                  /* cb_mmap */
    nodev,                  /* cb_segmap */
    nochpoll,               /* cb_chpoll */
    ddi_prop_op,            /* cb_prop_op */
    NULL,                   /* cb_stream */
    (int)(D_NEW | D_MP | D_HOTPLUG) /* cb_flag */
};

/*
 * Declare ops vectors for auto configuration.
 */
struct dev_ops iosram_ops = {
    DEVO_REV,               /* devo_rev */
    0,                      /* devo_refcnt */
    iosram_getinfo,         /* devo_getinfo */
    nulldev,                /* devo_identify */
    nulldev,                /* devo_probe */
    iosram_attach,          /* devo_attach */
    iosram_detach,          /* devo_detach */
    nodev,                  /* devo_reset */
    &iosram_cb_ops,         /* devo_cb_ops */
    (struct bus_ops *)NULL, /* devo_bus_ops */
    nulldev                 /* devo_power */
};

/*
 * Loadable module support.
 */
extern struct mod_ops mod_driverops;

static struct modldrv iosrammodldrv = {
    &mod_driverops,         /* type of module - driver */
    "IOSRAM Leaf driver v%I%",
    &iosram_ops,
};

static struct modlinkage iosrammodlinkage = {
    MODREV_1,
    &iosrammodldrv,
    NULL
};


int
_init(void)
{
    int error;
    int i;

    mutex_init(&iosram_mutex, NULL, MUTEX_DRIVER, (void *)NULL);
    cv_init(&iosram_tswitch_wait, NULL, CV_DRIVER, NULL);
    cv_init(&iosram_rw_wait, NULL, CV_DRIVER, NULL);
#if defined(IOSRAM_LOG)
    mutex_init(&iosram_log_mutex, NULL, MUTEX_DRIVER, (void *)NULL);
#endif

    DPRINTF(1, ("_init:IOSRAM\n"));

    for (i = 0; i < IOSRAM_HASHSZ; i++) {
        iosram_hashtab[i] = NULL;
    }

    if ((error = ddi_soft_state_init(&iosramsoft_statep,
        sizeof (struct iosramsoft), 1)) != 0) {
        goto failed;
    }
    if ((error = mod_install(&iosrammodlinkage)) != 0) {
        ddi_soft_state_fini(&iosramsoft_statep);
        goto failed;
    }

    IOSRAMLOG(0, "_init:IOSRAM ... error:%d statep:%p\n",
        error, iosramsoft_statep, NULL, NULL);

    return (error);

failed:
    cv_destroy(&iosram_tswitch_wait);
    cv_destroy(&iosram_rw_wait);
    mutex_destroy(&iosram_mutex);
#if defined(IOSRAM_LOG)
    mutex_destroy(&iosram_log_mutex);
#endif
    IOSRAMLOG(0, "_init:IOSRAM ... error:%d statep:%p\n",
        error, iosramsoft_statep, NULL, NULL);

    return (error);
}


int
_fini(void)
{
#ifndef DEBUG
    return (EBUSY);
#else   /* !DEBUG */
    int error;

    if ((error = mod_remove(&iosrammodlinkage)) == 0) {
        ddi_soft_state_fini(&iosramsoft_statep);

        cv_destroy(&iosram_tswitch_wait);
        cv_destroy(&iosram_rw_wait);
        mutex_destroy(&iosram_mutex);
#if defined(IOSRAM_LOG)
        mutex_destroy(&iosram_log_mutex);
#endif
    }
    DPRINTF(1, ("_fini:IOSRAM error:%d\n", error));

    return (error);
#endif  /* !DEBUG */
}


int
_info(struct modinfo *modinfop)
{
    return (mod_info(&iosrammodlinkage, modinfop));
}


static int
iosram_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    int instance;
    int propval;
    int length;
    char name[32];
    struct iosramsoft *softp;

    instance = ddi_get_instance(dip);

    DPRINTF(1, ("iosram(%d): attach dip:%p\n", instance));

    IOSRAMLOG(1, "ATTACH: dip:%p instance %d ... start\n",
        dip, instance, NULL, NULL);
    switch (cmd) {
    case DDI_ATTACH:
        break;
    case DDI_RESUME:
        if (!(softp = ddi_get_soft_state(iosramsoft_statep,
            instance))) {
            return (DDI_FAILURE);
        }
        mutex_enter(&iosram_mutex);
        mutex_enter(&softp->intr_mutex);
        if (!softp->suspended) {
            mutex_exit(&softp->intr_mutex);
            mutex_exit(&iosram_mutex);
            return (DDI_FAILURE);
        }
        softp->suspended = 0;

        /*
         * enable SBBC interrupts if SBBC is mapped in
         * restore the value saved during detach
         */
        if (softp->sbbc_region) {
            ddi_put32(softp->sbbc_handle,
                &(softp->sbbc_region->int_enable.reg),
                softp->int_enable_sav);
        }

        /*
         * Trigger soft interrupt handler to process any pending
         * interrupts.
         */
        if (softp->intr_pending && !softp->intr_busy &&
            (softp->softintr_id != NULL)) {
            ddi_trigger_softintr(softp->softintr_id);
        }

        mutex_exit(&softp->intr_mutex);
        mutex_exit(&iosram_mutex);

        return (DDI_SUCCESS);

    default:
        return (DDI_FAILURE);
    }

    if (ddi_soft_state_zalloc(iosramsoft_statep, instance) != 0) {
        return (DDI_FAILURE);
    }

    if ((softp = ddi_get_soft_state(iosramsoft_statep, instance)) == NULL) {
        return (DDI_FAILURE);
    }
    softp->dip = dip;
    softp->instance = instance;
    softp->sbbc_region = NULL;

    /*
     * If this instance is not tunnel capable, we don't attach it.
     */
    if (iosram_tunnel_capable(softp) == 0) {
        DPRINTF(1, ("iosram(%d): not tunnel_capable\n", instance));
        IOSRAMLOG(1, "ATTACH(%d): not tunnel_capable\n", instance, NULL,
            NULL, NULL);
        goto attach_fail;
    }

    /*
     * Need to create an "interrupt-priorities" property to define the PIL
     * to be used with the interrupt service routine.
     */
    if (ddi_getproplen(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
        "interrupt-priorities", &length) == DDI_PROP_NOT_FOUND) {
        DPRINTF(1, ("iosram(%d): creating interrupt priority property",
            instance));
        propval = IOSRAM_PIL;
        if (ddi_prop_create(DDI_DEV_T_NONE, dip, 0,
            "interrupt-priorities", (caddr_t)&propval, sizeof (propval))
            != DDI_PROP_SUCCESS) {
            cmn_err(CE_WARN,
                "iosram_attach: failed to create property");
            goto attach_fail;
        }
    }

    /*
     * Get interrupts cookies and initialize per-instance mutexes
     */
    if (ddi_get_iblock_cookie(softp->dip, 0, &softp->real_iblk)
        != DDI_SUCCESS) {
        IOSRAMLOG(1, "ATTACH(%d): cannot get soft intr cookie\n",
            instance, NULL, NULL, NULL);
        goto attach_fail;
    }
    mutex_init(&softp->intr_mutex, NULL, MUTEX_DRIVER,
        (void *)softp->real_iblk);

    /*
     * Add this instance to the iosram_instances list so that it can be used
     * for tunnel in future.
     */
    mutex_enter(&iosram_mutex);
    softp->state = IOSRAM_STATE_INIT;
    iosram_add_instance(softp);

    /*
     * If this is the chosen IOSRAM and there is no master IOSRAM yet, then
     * let's set this instance as the master.
     */
    if (iosram_master == NULL && iosram_is_chosen(softp)) {
        iosram_switch_tunnel(softp);

        /*
         * XXX Do we need to panic if unable to setup master IOSRAM?
         */
        if (iosram_master == NULL) {
            cmn_err(CE_WARN,
                "iosram(%d): can't setup master tunnel\n",
                instance);
            softp->state = 0;
            iosram_remove_instance(softp->instance);
            mutex_exit(&iosram_mutex);
            mutex_destroy(&softp->intr_mutex);
            goto attach_fail;
        }
    }

    mutex_exit(&iosram_mutex);

    /*
     * Create minor node
     */
    (void) sprintf(name, "iosram%d", instance);
    if (ddi_create_minor_node(dip, name, S_IFCHR, instance, NULL, NULL) ==
        DDI_FAILURE) {
        /*
         * Minor node seems to be needed only for debugging purposes.
         * Therefore, there is no need to fail this attach request.
         * Simply print a message out.
         */
        cmn_err(CE_NOTE, "!iosram(%d): can't create minor node\n",
            instance);
    }
    ddi_report_dev(dip);

    DPRINTF(1, ("iosram_attach(%d): success.\n", instance));
    IOSRAMLOG(1, "ATTACH: dip:%p instance:%d ... success softp:%p\n",
        dip, instance, softp, NULL);

    return (DDI_SUCCESS);

attach_fail:
    DPRINTF(1, ("iosram_attach(%d):failed.\n", instance));
    IOSRAMLOG(1, "ATTACH: dip:%p instance:%d ... failed.\n",
        dip, instance, NULL, NULL);

    ddi_soft_state_free(iosramsoft_statep, instance);
    return (DDI_FAILURE);
}


static int
iosram_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    int instance;
    struct iosramsoft *softp;

    instance = ddi_get_instance(dip);
    if (!(softp = ddi_get_soft_state(iosramsoft_statep, instance))) {
        return (DDI_FAILURE);
    }

    IOSRAMLOG(1, "DETACH: dip:%p instance %d softp:%p\n",
        dip, instance, softp, NULL);

    switch (cmd) {
    case DDI_DETACH:
        break;
    case DDI_SUSPEND:
        mutex_enter(&iosram_mutex);
        mutex_enter(&softp->intr_mutex);
        if (softp->suspended) {
            mutex_exit(&softp->intr_mutex);
            mutex_exit(&iosram_mutex);
            return (DDI_FAILURE);
        }
        softp->suspended = 1;
        /*
         * Disable SBBC interrupts if SBBC is mapped in
         */
        if (softp->sbbc_region) {
            /* save current interrupt enable register */
            softp->int_enable_sav = ddi_get32(softp->sbbc_handle,
                &(softp->sbbc_region->int_enable.reg));
            ddi_put32(softp->sbbc_handle,
                &(softp->sbbc_region->int_enable.reg), 0x0);
        }
        mutex_exit(&softp->intr_mutex);
        mutex_exit(&iosram_mutex);
        return (DDI_SUCCESS);

    default:
        return (DDI_FAILURE);
    }


    /*
     * Indicate that this instance is being detached so that this instance
     * does not become a target for tunnel switch in future.
     */
    mutex_enter(&iosram_mutex);
    softp->state |= IOSRAM_STATE_DETACH;

    /*
     * If this instance is currently the master or the target of the tunnel
     * switch, then we need to wait and switch tunnel, if necessary.
     */
    if (iosram_master == softp || (softp->state & IOSRAM_STATE_TSWITCH)) {
        mutex_exit(&iosram_mutex);
        iosram_switchfrom(instance);
        mutex_enter(&iosram_mutex);
    }

    /*
     * If the tunnel switch is in progress and we are the master or target
     * of tunnel relocation, then we can't detach this instance right now.
     */
    if (softp->state & IOSRAM_STATE_TSWITCH) {
        softp->state &= ~IOSRAM_STATE_DETACH;
        mutex_exit(&iosram_mutex);
        return (DDI_FAILURE);
    }

    /*
     * We can't allow master IOSRAM to be detached as we won't be able to
     * communicate otherwise.
     */
    if (iosram_master == softp) {
        softp->state &= ~IOSRAM_STATE_DETACH;
        mutex_exit(&iosram_mutex);
        return (DDI_FAILURE);
    }

    /*
     * Now remove our instance from the iosram_instances list.
     */
    iosram_remove_instance(instance);
    mutex_exit(&iosram_mutex);

    /*
     * Instances should only ever be mapped if they are the master and/or
     * participating in a tunnel switch.  Neither should be the case here.
     */
    ASSERT((softp->state & IOSRAM_STATE_MAPPED) == 0);

    /*
     * Destroy per-instance mutexes
     */
    mutex_destroy(&softp->intr_mutex);

    ddi_remove_minor_node(dip, NULL);

    /*
     * Finally remove our soft state structure
     */
    ddi_soft_state_free(iosramsoft_statep, instance);

    return (DDI_SUCCESS);
}


/* ARGSUSED0 */
static int
iosram_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result)
{
    dev_t dev = (dev_t)arg;
    struct iosramsoft *softp;
    int instance, ret;

    instance = getminor(dev);

    IOSRAMLOG(2, "GETINFO: dip:%x instance %d dev:%x infocmd:%x\n",
        dip, instance, dev, infocmd);

    switch (infocmd) {
    case DDI_INFO_DEVT2DEVINFO:
        softp = ddi_get_soft_state(iosramsoft_statep, instance);
        if (softp == NULL) {
            *result = NULL;
            ret = DDI_FAILURE;
        } else {
            *result = softp->dip;
            ret = DDI_SUCCESS;
        }
        break;
    case DDI_INFO_DEVT2INSTANCE:
        *result = (void *)(uintptr_t)instance;
        ret = DDI_SUCCESS;
        break;
    default:
        ret = DDI_FAILURE;
        break;
    }

    return (ret);
}


/*ARGSUSED1*/
static int
iosram_open(dev_t *dev, int flag, int otype, cred_t *credp)
{
    struct iosramsoft *softp;
    int instance;

    instance = getminor(*dev);
    softp = ddi_get_soft_state(iosramsoft_statep, instance);

    if (softp == NULL) {
        return (ENXIO);
    }

    IOSRAMLOG(1, "OPEN: dev:%p otype:%x ... instance:%d softp:%p\n",
        *dev, otype, softp->instance, softp);

    return (0);
}


/*ARGSUSED1*/
static int
iosram_close(dev_t dev, int flag, int otype, cred_t *credp)
{
    struct iosramsoft *softp;
    int instance;

    instance = getminor(dev);
    softp = ddi_get_soft_state(iosramsoft_statep, instance);
    if (softp == NULL) {
        return (ENXIO);
    }

    IOSRAMLOG(1, "CLOSE: dev:%p otype:%x ... instance:%d softp:%p\n",
        dev, otype, softp->instance, softp);

    return (0);
}
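
/*
 * The routines that follow implement the kernel interface declared in
 * <sys/iosramio.h>.  As an illustrative sketch only (not part of the
 * driver, error handling omitted), a kernel client such as the mailbox
 * protocol code might use them roughly as follows:
 *
 *      uint8_t buf[64];
 *
 *      if (iosram_rd(key, 0, sizeof (buf), (caddr_t)buf) == 0) {
 *              ... consume the data and build a reply in buf ...
 *              (void) iosram_wr(key, 0, sizeof (buf), (caddr_t)buf);
 *              (void) iosram_set_flag(key, IOSRAM_DATA_VALID,
 *                  IOSRAM_INT_TO_SSC);
 *              (void) iosram_send_intr();
 *      }
 */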

int
iosram_rd(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr)
{
    iosram_chunk_t *chunkp;
    uint32_t chunk_len;
    uint8_t *iosramp;
    ddi_acc_handle_t handle;
    int boff;
    union {
        uchar_t cbuf[UINT32SZ];
        uint32_t data;
    } word;

    int error = 0;
    uint8_t *buf = (uint8_t *)dptr;

    /*
     * We try to read from the IOSRAM using double word or word access
     * provided both "off" and "buf" are (or can be) double word or word
     * aligned.  Otherwise, we try to align the "off" to a word boundary and
     * then try to read data from the IOSRAM using word access, but store it
     * into buf buffer using byte access.
     *
     * If the leading/trailing portion of the IOSRAM data is not word
     * aligned, it will always be copied using byte access.
     */
    IOSRAMLOG(1, "RD: key: 0x%x off:%x len:%x buf:%p\n",
        key, off, len, buf);

    /*
     * Acquire lock and look for the requested chunk.  If it exists, make
     * sure the requested read is within the chunk's bounds and no tunnel
     * switch is active.
     */
    mutex_enter(&iosram_mutex);
    chunkp = iosram_find_chunk(key);
    chunk_len = (chunkp != NULL) ? chunkp->toc_data.len : 0;

    if (iosram_master == NULL) {
        error = EIO;
    } else if (chunkp == NULL) {
        error = EINVAL;
    } else if ((off >= chunk_len) || (len > chunk_len) ||
        ((off + len) > chunk_len)) {
        error = EMSGSIZE;
    } else if (iosram_tswitch_active) {
        error = EAGAIN;
    }

    if (error) {
        mutex_exit(&iosram_mutex);
        return (error);
    }

    /*
     * Bump reference count to indicate #thread accessing IOSRAM and release
     * the lock.
     */
    iosram_rw_active++;
#if defined(DEBUG)
    if (iosram_rw_active > iosram_rw_active_max) {
        iosram_rw_active_max = iosram_rw_active;
    }
#endif
    mutex_exit(&iosram_mutex);

    IOSRAM_STAT(read);
    IOSRAM_STAT_ADD(bread, len);

    /* Get starting address and map handle */
    iosramp = chunkp->basep + off;
    handle = iosram_handle;

    /*
     * Align the off to word boundary and then try reading/writing data
     * using double word or word access.
     */
    if ((boff = ((uintptr_t)iosramp & (UINT32SZ - 1))) != 0) {
        int cnt = UINT32SZ - boff;

        if (cnt > len) {
            cnt = len;
        }
        IOSRAMLOG(2,
            "RD: align rep_get8(buf:%p sramp:%p cnt:%x) len:%x\n",
            buf, iosramp, cnt, len);
        ddi_rep_get8(handle, buf, iosramp, cnt, DDI_DEV_AUTOINCR);
        buf += cnt;
        iosramp += cnt;
        len -= cnt;
    }

    if ((len >= UINT64SZ) &&
        ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT64SZ - 1)) == 0)) {
        /*
         * Both source and destination are double word aligned
         */
        int cnt = len/UINT64SZ;

        IOSRAMLOG(2,
            "RD: rep_get64(buf:%p sramp:%p cnt:%x) len:%x\n",
            buf, iosramp, cnt, len);
        ddi_rep_get64(handle, (uint64_t *)buf, (uint64_t *)iosramp,
            cnt, DDI_DEV_AUTOINCR);
        iosramp += cnt * UINT64SZ;
        buf += cnt * UINT64SZ;
        len -= cnt * UINT64SZ;

        /*
         * read remaining data using word and byte access
         */
        if (len >= UINT32SZ) {
            IOSRAMLOG(2,
                "RD: get32(buf:%p sramp:%p) len:%x\n",
                buf, iosramp, len, NULL);
            *(uint32_t *)buf = ddi_get32(handle,
                (uint32_t *)iosramp);
            iosramp += UINT32SZ;
            buf += UINT32SZ;
            len -= UINT32SZ;
        }

        if (len != 0) {
            ddi_rep_get8(handle, buf, iosramp, len, DDI_DEV_AUTOINCR);
        }
    } else if ((len >= UINT32SZ) &&
        ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT32SZ - 1)) == 0)) {
        /*
         * Both source and destination are word aligned
         */
        int cnt = len/UINT32SZ;

        IOSRAMLOG(2,
            "RD: rep_get32(buf:%p sramp:%p cnt:%x) len:%x\n",
            buf, iosramp, cnt, len);
        ddi_rep_get32(handle, (uint32_t *)buf, (uint32_t *)iosramp,
            cnt, DDI_DEV_AUTOINCR);
        iosramp += cnt * UINT32SZ;
        buf += cnt * UINT32SZ;
        len -= cnt * UINT32SZ;

        /*
         * copy the remainder using byte access
         */
        if (len != 0) {
            ddi_rep_get8(handle, buf, iosramp, len, DDI_DEV_AUTOINCR);
        }
    } else if (len != 0) {
        /*
         * We know that the "off" (i.e. iosramp) is at least word
         * aligned.  We need to read IOSRAM word at a time and copy it
         * byte at a time.
         */
        ASSERT(((uintptr_t)iosramp & (UINT32SZ - 1)) == 0);

        IOSRAMLOG(2,
            "RD: unaligned get32(buf:%p sramp:%p) len:%x\n",
            buf, iosramp, len, NULL);
        for (; len >= UINT32SZ; len -= UINT32SZ, iosramp += UINT32SZ) {
            word.data = ddi_get32(handle, (uint32_t *)iosramp);
            *buf++ = word.cbuf[0];
            *buf++ = word.cbuf[1];
            *buf++ = word.cbuf[2];
            *buf++ = word.cbuf[3];
        }

        /*
         * copy the remaining data using byte access
         */
        if (len != 0) {
            ddi_rep_get8(handle, buf, iosramp, len,
                DDI_DEV_AUTOINCR);
        }
    }

    /*
     * Reacquire mutex lock, decrement refcnt and if refcnt is 0 and any
     * threads are waiting for r/w activity to complete, wake them up.
     */
    mutex_enter(&iosram_mutex);
    ASSERT(iosram_rw_active > 0);

    if ((--iosram_rw_active == 0) && iosram_rw_wakeup) {
        iosram_rw_wakeup = 0;
        cv_broadcast(&iosram_rw_wait);
    }
    mutex_exit(&iosram_mutex);

    return (error);
}
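
/*
 * Worked example of the access-size selection above (illustrative only):
 * for a read with off = 0 and len = 20 where both the chunk address and
 * the caller's buffer happen to be doubleword aligned, no leading byte
 * fixup is needed, 16 bytes are copied with ddi_rep_get64() (cnt = 2),
 * the next 4 bytes are copied with ddi_get32(), and len reaches 0, so no
 * trailing ddi_rep_get8() is required.  _iosram_write() below applies the
 * same logic with the corresponding ddi_put routines.
 */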

/*
 * _iosram_write(key, off, len, dptr, force)
 *      Internal common routine to write to the IOSRAM.
 */
static int
_iosram_write(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr, int force)
{
    iosram_chunk_t *chunkp;
    uint32_t chunk_len;
    uint8_t *iosramp;
    ddi_acc_handle_t handle;
    int boff;
    union {
        uint8_t cbuf[UINT32SZ];
        uint32_t data;
    } word;

    int error = 0;
    uint8_t *buf = (uint8_t *)dptr;

    /*
     * We try to write to the IOSRAM using double word or word access
     * provided both "off" and "buf" are (or can be) double word or word
     * aligned.  Otherwise, we try to align the "off" to a word boundary and
     * then try to write data to the IOSRAM using word access, but read data
     * from the buf buffer using byte access.
     *
     * If the leading/trailing portion of the IOSRAM data is not word
     * aligned, it will always be written using byte access.
     */
    IOSRAMLOG(1, "WR: key: 0x%x off:%x len:%x buf:%p\n",
        key, off, len, buf);

    /*
     * Acquire lock and look for the requested chunk.  If it exists, make
     * sure the requested write is within the chunk's bounds and no tunnel
     * switch is active.
     */
    mutex_enter(&iosram_mutex);
    chunkp = iosram_find_chunk(key);
    chunk_len = (chunkp != NULL) ? chunkp->toc_data.len : 0;

    if (iosram_master == NULL) {
        error = EIO;
    } else if (chunkp == NULL) {
        error = EINVAL;
    } else if ((off >= chunk_len) || (len > chunk_len) ||
        ((off+len) > chunk_len)) {
        error = EMSGSIZE;
    } else if (iosram_tswitch_active && !force) {
        error = EAGAIN;
    }

    if (error) {
        mutex_exit(&iosram_mutex);
        return (error);
    }

    /*
     * If this is a forced write and there's a tunnel switch in progress,
     * abort the switch.
     */
    if (iosram_tswitch_active && force) {
        cmn_err(CE_NOTE, "!iosram: Aborting tswitch on force_write");
        iosram_abort_tswitch();
    }

    /*
     * Bump reference count to indicate #thread accessing IOSRAM
     * and release the lock.
     */
    iosram_rw_active++;
#if defined(DEBUG)
    if (iosram_rw_active > iosram_rw_active_max) {
        iosram_rw_active_max = iosram_rw_active;
    }
#endif
    mutex_exit(&iosram_mutex);


    IOSRAM_STAT(write);
    IOSRAM_STAT_ADD(bwrite, len);

    /* Get starting address and map handle */
    iosramp = chunkp->basep + off;
    handle = iosram_handle;

    /*
     * Align the off to word boundary and then try reading/writing
     * data using double word or word access.
     */
    if ((boff = ((uintptr_t)iosramp & (UINT32SZ - 1))) != 0) {
        int cnt = UINT32SZ - boff;

        if (cnt > len) {
            cnt = len;
        }
        IOSRAMLOG(2,
            "WR: align rep_put8(buf:%p sramp:%p cnt:%x) len:%x\n",
            buf, iosramp, cnt, len);
        ddi_rep_put8(handle, buf, iosramp, cnt, DDI_DEV_AUTOINCR);
        buf += cnt;
        iosramp += cnt;
        len -= cnt;
    }

    if ((len >= UINT64SZ) &&
        ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT64SZ - 1)) == 0)) {
        /*
         * Both source and destination are double word aligned
         */
        int cnt = len/UINT64SZ;

        IOSRAMLOG(2,
            "WR: rep_put64(buf:%p sramp:%p cnt:%x) len:%x\n",
            buf, iosramp, cnt, len);
        ddi_rep_put64(handle, (uint64_t *)buf, (uint64_t *)iosramp,
            cnt, DDI_DEV_AUTOINCR);
        iosramp += cnt * UINT64SZ;
        buf += cnt * UINT64SZ;
        len -= cnt * UINT64SZ;

        /*
         * Copy the remaining data using word & byte access
         */
        if (len >= UINT32SZ) {
            IOSRAMLOG(2,
                "WR: put32(buf:%p sramp:%p) len:%x\n", buf, iosramp,
                len, NULL);
            ddi_put32(handle, (uint32_t *)iosramp,
                *(uint32_t *)buf);
            iosramp += UINT32SZ;
            buf += UINT32SZ;
            len -= UINT32SZ;
        }

        if (len != 0) {
            ddi_rep_put8(handle, buf, iosramp, len,
                DDI_DEV_AUTOINCR);
        }
    } else if ((len >= UINT32SZ) &&
        ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT32SZ - 1)) == 0)) {
        /*
         * Both source and destination are word aligned
         */
        int cnt = len/UINT32SZ;

        IOSRAMLOG(2,
            "WR: rep_put32(buf:%p sramp:%p cnt:%x) len:%x\n",
            buf, iosramp, cnt, len);
        ddi_rep_put32(handle, (uint32_t *)buf, (uint32_t *)iosramp,
            cnt, DDI_DEV_AUTOINCR);
        iosramp += cnt * UINT32SZ;
        buf += cnt * UINT32SZ;
        len -= cnt * UINT32SZ;

        /*
         * copy the remainder using byte access
         */
        if (len != 0) {
            ddi_rep_put8(handle, buf, iosramp, len,
                DDI_DEV_AUTOINCR);
        }
    } else if (len != 0) {
        /*
         * We know that the "off" is at least word aligned.  We
         * need to read data from buf buffer byte at a time, and
         * write it to the IOSRAM word at a time.
         */

        ASSERT(((uintptr_t)iosramp & (UINT32SZ - 1)) == 0);

        IOSRAMLOG(2,
            "WR: unaligned put32(buf:%p sramp:%p) len:%x\n",
            buf, iosramp, len, NULL);
        for (; len >= UINT32SZ; len -= UINT32SZ, iosramp += UINT32SZ) {
            word.cbuf[0] = *buf++;
            word.cbuf[1] = *buf++;
            word.cbuf[2] = *buf++;
            word.cbuf[3] = *buf++;
            ddi_put32(handle, (uint32_t *)iosramp, word.data);
        }

        /*
         * copy the remaining data using byte access
         */
        if (len != 0) {
            ddi_rep_put8(handle, buf, iosramp,
                len, DDI_DEV_AUTOINCR);
        }
    }

    /*
     * Reacquire mutex lock, decrement refcnt and if refcnt is 0 and
     * any threads are waiting for r/w activity to complete, wake them up.
     */
    mutex_enter(&iosram_mutex);
    ASSERT(iosram_rw_active > 0);

    if ((--iosram_rw_active == 0) && iosram_rw_wakeup) {
        iosram_rw_wakeup = 0;
        cv_broadcast(&iosram_rw_wait);
    }
    mutex_exit(&iosram_mutex);

    return (error);
}


int
iosram_force_write(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr)
{
    return (_iosram_write(key, off, len, dptr, 1 /* force */));
}


int
iosram_wr(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr)
{
    return (_iosram_write(key, off, len, dptr, 0));
}


/*
 * iosram_register(key, handler, arg)
 *      Register a handler and an arg for the specified chunk.  This handler
 *      will be invoked when an interrupt is received from the other side and
 *      the int_pending flag for the corresponding key is marked
 *      IOSRAM_INT_TO_DOM.
 */
/* ARGSUSED */
int
iosram_register(uint32_t key, void (*handler)(), void *arg)
{
    struct iosram_chunk *chunkp;
    int error = 0;

    /*
     * Acquire lock and look for the requested chunk.  If it exists, and no
     * other callback is registered, proceed with the registration.
     */
    mutex_enter(&iosram_mutex);
    chunkp = iosram_find_chunk(key);

    if (iosram_master == NULL) {
        error = EIO;
    } else if (chunkp == NULL) {
        error = EINVAL;
    } else if (chunkp->cback.handler != NULL) {
        error = EBUSY;
    } else {
        chunkp->cback.busy = 0;
        chunkp->cback.unregister = 0;
        chunkp->cback.handler = handler;
        chunkp->cback.arg = arg;
    }
    mutex_exit(&iosram_mutex);

    IOSRAMLOG(1, "REG: key: 0x%x hdlr:%p arg:%p error:%d\n",
        key, handler, arg, error);

    return (error);
}


/*
 * iosram_unregister()
 *      Unregister handler associated with the specified chunk.
 */
int
iosram_unregister(uint32_t key)
{
    struct iosram_chunk *chunkp;
    int error = 0;

    /*
     * Acquire lock and look for the requested chunk.  If it exists and has
     * a callback registered, unregister it.
     */
    mutex_enter(&iosram_mutex);
    chunkp = iosram_find_chunk(key);

    if (iosram_master == NULL) {
        error = EIO;
    } else if (chunkp == NULL) {
        error = EINVAL;
    } else if (chunkp->cback.busy) {
        /*
         * If the handler is already busy (being invoked), then we flag
         * it so it will be unregistered after the invocation completes.
         */
        DPRINTF(1, ("IOSRAM(%d): unregister: delaying unreg k:0x%08x\n",
            iosram_master->instance, key));
        chunkp->cback.unregister = 1;
    } else if (chunkp->cback.handler != NULL) {
        chunkp->cback.handler = NULL;
        chunkp->cback.arg = NULL;
    }
    mutex_exit(&iosram_mutex);

    IOSRAMLOG(1, "UNREG: key:%x error:%d\n", key, error, NULL, NULL);
    return (error);
}
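
/*
 * Illustrative sketch only (not part of the driver): a client that wants to
 * be notified when the SC flags its chunk would typically pair the two
 * routines above with a small callback, e.g.
 *
 *      static void
 *      my_chunk_cback(void *arg)
 *      {
 *              uint32_t key = (uint32_t)(uintptr_t)arg;
 *
 *              (void) iosram_rd(key, 0, MY_CHUNK_LEN, (caddr_t)my_buf);
 *      }
 *
 *      (void) iosram_register(key, my_chunk_cback, (void *)(uintptr_t)key);
 *      ...
 *      (void) iosram_unregister(key);
 *
 * where my_chunk_cback, MY_CHUNK_LEN, and my_buf are hypothetical client
 * names.  The callback runs from iosram_softintr() after the chunk's
 * int_pending flag has been seen set to IOSRAM_INT_TO_DOM.
 */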

/*
 * iosram_get_flag():
 *      Get data_valid and/or int_pending flags associated with the
 *      specified key.
 */
int
iosram_get_flag(uint32_t key, uint8_t *data_valid, uint8_t *int_pending)
{
    iosram_chunk_t *chunkp;
    iosram_flags_t flags;
    int error = 0;

    /*
     * Acquire lock and look for the requested chunk.  If it exists, and no
     * tunnel switch is in progress, read the chunk's flags.
     */
    mutex_enter(&iosram_mutex);
    chunkp = iosram_find_chunk(key);

    if (iosram_master == NULL) {
        error = EIO;
    } else if (chunkp == NULL) {
        error = EINVAL;
    } else if (iosram_tswitch_active) {
        error = EAGAIN;
    } else {
        IOSRAM_STAT(getflag);

        /*
         * Read the flags
         */
        ddi_rep_get8(iosram_handle, (uint8_t *)&flags,
            (uint8_t *)(chunkp->flagsp), sizeof (iosram_flags_t),
            DDI_DEV_AUTOINCR);

        /*
         * Get each flag value that the caller is interested in.
         */
        if (data_valid != NULL) {
            *data_valid = flags.data_valid;
        }

        if (int_pending != NULL) {
            *int_pending = flags.int_pending;
        }
    }
    mutex_exit(&iosram_mutex);

    IOSRAMLOG(1, "GetFlag key:%x data_valid:%x int_pending:%x error:%d\n",
        key, flags.data_valid, flags.int_pending, error);
    return (error);
}


/*
 * iosram_set_flag():
 *      Set data_valid and int_pending flags associated with the specified key.
 */
int
iosram_set_flag(uint32_t key, uint8_t data_valid, uint8_t int_pending)
{
    iosram_chunk_t *chunkp;
    iosram_flags_t flags;
    int error = 0;

    /*
     * Acquire lock and look for the requested chunk.  If it exists, and no
     * tunnel switch is in progress, write the chunk's flags.
     */
    mutex_enter(&iosram_mutex);
    chunkp = iosram_find_chunk(key);

    if (iosram_master == NULL) {
        error = EIO;
    } else if ((chunkp == NULL) ||
        ((data_valid != IOSRAM_DATA_INVALID) &&
        (data_valid != IOSRAM_DATA_VALID)) ||
        ((int_pending != IOSRAM_INT_NONE) &&
        (int_pending != IOSRAM_INT_TO_SSC) &&
        (int_pending != IOSRAM_INT_TO_DOM))) {
        error = EINVAL;
    } else if (iosram_tswitch_active) {
        error = EAGAIN;
    } else {
        IOSRAM_STAT(setflag);
        flags.data_valid = data_valid;
        flags.int_pending = int_pending;
        ddi_rep_put8(iosram_handle, (uint8_t *)&flags,
            (uint8_t *)(chunkp->flagsp), sizeof (iosram_flags_t),
            DDI_DEV_AUTOINCR);
    }
    mutex_exit(&iosram_mutex);

    IOSRAMLOG(1, "SetFlag key:%x data_valid:%x int_pending:%x error:%d\n",
        key, flags.data_valid, flags.int_pending, error);
    return (error);
}
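
/*
 * Illustrative sketch only: a caller that needs the size of a chunk before
 * reading it could use the control interface defined below, e.g.
 *
 *      uint32_t len;
 *
 *      if (iosram_ctrl(key, IOSRAM_CMD_CHUNKLEN, &len) == 0) {
 *              ... len now holds the chunk's toc_data.len for that key ...
 *      }
 */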

/*
 * iosram_ctrl()
 *      This function provides access to a variety of services not available
 *      through the basic API.
 */
int
iosram_ctrl(uint32_t key, uint32_t cmd, void *arg)
{
    struct iosram_chunk *chunkp;
    int error = 0;

    /*
     * Acquire lock and do some argument sanity checking.
     */
    mutex_enter(&iosram_mutex);
    chunkp = iosram_find_chunk(key);

    if (iosram_master == NULL) {
        error = EIO;
    } else if (chunkp == NULL) {
        error = EINVAL;
    }

    if (error != 0) {
        mutex_exit(&iosram_mutex);
        return (error);
    }

    /*
     * Arguments seem okay so far, so process the command.
     */
    switch (cmd) {
    case IOSRAM_CMD_CHUNKLEN:
        /*
         * Return the length of the chunk indicated by the key.
         */
        if (arg == NULL) {
            error = EINVAL;
            break;
        }

        *(uint32_t *)arg = chunkp->toc_data.len;
        break;

    default:
        error = ENOTSUP;
        break;
    }

    mutex_exit(&iosram_mutex);
    return (error);
}


/*
 * iosram_hdr_ctrl()
 *      This function provides an interface for the Mailbox Protocol
 *      implementation to use when interacting with the IOSRAM header.
 */
int
iosram_hdr_ctrl(uint32_t cmd, void *arg)
{
    int error = 0;

    /*
     * Acquire lock and do some argument sanity checking.
     */
    mutex_enter(&iosram_mutex);

    if (iosram_master == NULL) {
        error = EIO;
    }

    if (error != 0) {
        mutex_exit(&iosram_mutex);
        return (error);
    }

    switch (cmd) {
    case IOSRAM_HDRCMD_GET_SMS_MBOX_VER:
        /*
         * Return the value of the sms_mbox_version field.
         */
        if (arg == NULL) {
            error = EINVAL;
            break;
        }

        *(uint32_t *)arg = IOSRAM_GET_HDRFIELD32(iosram_master,
            sms_mbox_version);
        break;

    case IOSRAM_HDRCMD_SET_OS_MBOX_VER:
        /*
         * Set the value of the os_mbox_version field.
         */
        IOSRAM_SET_HDRFIELD32(iosram_master, os_mbox_version,
            (uint32_t)(uintptr_t)arg);
        IOSRAM_SET_HDRFIELD32(iosram_master, os_change_mask,
            IOSRAM_HDRFIELD_OS_MBOX_VER);
        iosram_send_intr();
        break;

    case IOSRAM_HDRCMD_REG_CALLBACK:
        iosram_hdrchange_handler = (void (*)())arg;
        break;

    default:
        error = ENOTSUP;
        break;
    }

    mutex_exit(&iosram_mutex);
    return (error);
}
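
/*
 * Inbound interrupt handling, summarized for reference (descriptive comment
 * only): the SC sets a chunk's int_pending flag to IOSRAM_INT_TO_DOM and
 * raises an SBBC interrupt.  iosram_intr() fields the hard interrupt,
 * acknowledges the SBBC int_status bits, and triggers the soft interrupt.
 * iosram_softintr() then processes header changes, walks the TOC, and for
 * every chunk whose int_pending flag reads IOSRAM_INT_TO_DOM resets the flag
 * to IOSRAM_INT_NONE and invokes the handler registered through
 * iosram_register().
 */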

/*
 * iosram_softintr()
 *      IOSRAM soft interrupt handler
 */
static uint_t
iosram_softintr(caddr_t arg)
{
    uint32_t hdr_changes;
    iosramsoft_t *softp = (iosramsoft_t *)arg;
    iosram_chunk_t *chunkp;
    void (*handler)();
    int i;
    uint8_t flag;

    DPRINTF(1, ("iosram(%d): in iosram_softintr\n", softp->instance));

    IOSRAMLOG(2, "SINTR arg/softp:%p pending:%d busy:%d\n",
        arg, softp->intr_pending, softp->intr_busy, NULL);

    mutex_enter(&iosram_mutex);
    mutex_enter(&softp->intr_mutex);

    /*
     * Do not process interrupt if interrupt handler is already running or
     * no interrupts are pending.
     */
    if (softp->intr_busy || !softp->intr_pending) {
        mutex_exit(&softp->intr_mutex);
        mutex_exit(&iosram_mutex);
        DPRINTF(1, ("IOSRAM(%d): softintr: busy=%d pending=%d\n",
            softp->instance, softp->intr_busy, softp->intr_pending));
        return (softp->intr_pending ? DDI_INTR_CLAIMED :
            DDI_INTR_UNCLAIMED);
    }

    /*
     * It's possible for the SC to send an interrupt on the new master
     * before we are able to set our internal state.  If so, we'll retrigger
     * soft interrupt right after tunnel switch completion.
     */
    if (softp->state & IOSRAM_STATE_TSWITCH) {
        mutex_exit(&softp->intr_mutex);
        mutex_exit(&iosram_mutex);
        DPRINTF(1, ("IOSRAM(%d): softintr: doing switch "
            "state=0x%x\n", softp->instance, softp->state));
        return (DDI_INTR_CLAIMED);
    }

    /*
     * Do not process interrupt if we are not the master.
     */
    if (!(softp->state & IOSRAM_STATE_MASTER)) {
        mutex_exit(&softp->intr_mutex);
        mutex_exit(&iosram_mutex);
        DPRINTF(1, ("IOSRAM(%d): softintr: no master state=0x%x\n ",
            softp->instance, softp->state));
        return (DDI_INTR_CLAIMED);
    }

    IOSRAM_STAT(sintr_recv);

    /*
     * If the driver is suspended, then we should not process any
     * interrupts.  Instead, we trigger a soft interrupt when the driver
     * resumes.
     */
    if (softp->suspended) {
        mutex_exit(&softp->intr_mutex);
        mutex_exit(&iosram_mutex);
        DPRINTF(1, ("IOSRAM(%d): softintr: suspended\n",
            softp->instance));
        return (DDI_INTR_CLAIMED);
    }

    /*
     * Indicate that the IOSRAM interrupt handler is busy.  Note that this
     * includes incrementing the reader/writer count, since we don't want
     * any tunnel switches to start up while we're processing callbacks.
     */
    softp->intr_busy = 1;
    iosram_rw_active++;
#if defined(DEBUG)
    if (iosram_rw_active > iosram_rw_active_max) {
        iosram_rw_active_max = iosram_rw_active;
    }
#endif

    do {
        DPRINTF(1, ("IOSRAM(%d): softintr: processing interrupt\n",
            softp->instance));

        softp->intr_pending = 0;

        mutex_exit(&softp->intr_mutex);

        /*
         * Process changes to the IOSRAM header.
         */
        hdr_changes = IOSRAM_GET_HDRFIELD32(iosram_master,
            sms_change_mask);
        if (hdr_changes != 0) {
            int error;

            IOSRAM_SET_HDRFIELD32(iosram_master, sms_change_mask,
                0);
            if (hdr_changes & IOSRAM_HDRFIELD_TOC_INDEX) {
                /*
                 * XXX is it safe to temporarily release the
                 * iosram_mutex here?
                 */
                mutex_exit(&iosram_mutex);
                error = iosram_read_toc(iosram_master);
                mutex_enter(&iosram_mutex);
                if (error) {
                    cmn_err(CE_WARN, "iosram_read_toc: new"
                        " TOC invalid; using old TOC.");
                }
                iosram_update_addrs(iosram_master);
            }

            if (iosram_hdrchange_handler != NULL) {
                mutex_exit(&iosram_mutex);
                iosram_hdrchange_handler();
                mutex_enter(&iosram_mutex);
            }
        }

        /*
         * Get data_valid/int_pending flags and generate a callback if
         * applicable.  For now, we read only those flags for which a
         * callback has been registered.  We can optimize reading of
         * flags by reading them all at once and then process them
         * later.
         */
        for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) {
#if DEBUG
            flag = ddi_get8(iosram_handle,
                &(chunkp->flagsp->int_pending));
            DPRINTF(1, ("IOSRAM(%d): softintr chunk #%d "
                "flag=0x%x handler=%p\n",
                softp->instance, i, (int)flag,
                chunkp->cback.handler));
#endif
            if ((handler = chunkp->cback.handler) == NULL) {
                continue;
            }
            flag = ddi_get8(iosram_handle,
                &(chunkp->flagsp->int_pending));
            if (flag == IOSRAM_INT_TO_DOM) {
                DPRINTF(1,
                    ("IOSRAM(%d): softintr: invoking handler\n",
                    softp->instance));
                IOSRAMLOG(1,
                    "SINTR invoking hdlr:%p arg:%p index:%d\n",
                    handler, chunkp->cback.arg, i, NULL);
                IOSRAM_STAT(callbacks);

                ddi_put8(iosram_handle,
                    &(chunkp->flagsp->int_pending),
                    IOSRAM_INT_NONE);
                chunkp->cback.busy = 1;
                mutex_exit(&iosram_mutex);
                (*handler)(chunkp->cback.arg);
                mutex_enter(&iosram_mutex);
                chunkp->cback.busy = 0;

                /*
                 * If iosram_unregister was called while the
                 * callback was being invoked, complete the
                 * unregistration here.
                 */
                if (chunkp->cback.unregister) {
                    DPRINTF(1, ("IOSRAM(%d): softintr: "
                        "delayed unreg k:0x%08x\n",
                        softp->instance,
                        chunkp->toc_data.key));
                    chunkp->cback.handler = NULL;
                    chunkp->cback.arg = NULL;
                    chunkp->cback.unregister = 0;
                }
            }

            /*
             * If there's a tunnel switch waiting to run, give it
             * higher priority than these callbacks by bailing out.
             * They'll still be invoked on the new master iosram
             * when the tunnel switch is done.
             */
            if (iosram_tswitch_active) {
                break;
            }
        }

        mutex_enter(&softp->intr_mutex);

    } while (softp->intr_pending && !softp->suspended &&
        !iosram_tswitch_active);

    /*
     * Indicate IOSRAM interrupt handler is not BUSY any more
     */
    softp->intr_busy = 0;

    ASSERT(iosram_rw_active > 0);
    if ((--iosram_rw_active == 0) && iosram_rw_wakeup) {
        iosram_rw_wakeup = 0;
        cv_broadcast(&iosram_rw_wait);
    }

    mutex_exit(&softp->intr_mutex);
    mutex_exit(&iosram_mutex);

    DPRINTF(1, ("iosram(%d): softintr exit\n", softp->instance));

    return (DDI_INTR_CLAIMED);
}


/*
 * iosram_intr()
 *      IOSRAM real interrupt handler
 */
static uint_t
iosram_intr(caddr_t arg)
{
    iosramsoft_t *softp = (iosramsoft_t *)arg;
    int result = DDI_INTR_UNCLAIMED;
    uint32_t int_status;

    DPRINTF(2, ("iosram(%d): in iosram_intr\n", softp->instance));

    mutex_enter(&softp->intr_mutex);

    if (softp->sbbc_handle == NULL) {
        /*
         * The SBBC registers region is not mapped in.
         * Set the interrupt pending flag here, and process the
         * interrupt after the tunnel switch.
         */
        DPRINTF(1, ("IOSRAM(%d): iosram_intr: SBBC not mapped\n",
            softp->instance));
        softp->intr_pending = 1;
        mutex_exit(&softp->intr_mutex);
        return (DDI_INTR_UNCLAIMED);
    }

    int_status = ddi_get32(softp->sbbc_handle,
        &(softp->sbbc_region->int_status.reg));
    DPRINTF(1, ("iosram_intr: int_status = 0x%08x\n", int_status));

    if (int_status & IOSRAM_SBBC_INT0) {
        result = DDI_INTR_CLAIMED;
        DPRINTF(1, ("iosram_intr: int0 detected!\n"));
    }

    if (int_status & IOSRAM_SBBC_INT1) {
        result = DDI_INTR_CLAIMED;
        DPRINTF(1, ("iosram_intr: int1 detected!\n"));
    }

    if (result == DDI_INTR_CLAIMED) {
        ddi_put32(softp->sbbc_handle,
            &(softp->sbbc_region->int_status.reg), int_status);
        int_status = ddi_get32(softp->sbbc_handle,
            &(softp->sbbc_region->int_status.reg));
        DPRINTF(1, ("iosram_intr: int_status = 0x%08x\n",
            int_status));

        softp->intr_pending = 1;
        /*
         * Trigger soft interrupt if not executing and
         * not suspended.
         */
        if (!softp->intr_busy && !softp->suspended &&
            (softp->softintr_id != NULL)) {
            DPRINTF(1, ("iosram(%d): trigger softint\n",
                softp->instance));
            ddi_trigger_softintr(softp->softintr_id);
        }
    }

    IOSRAM_STAT(intr_recv);

    mutex_exit(&softp->intr_mutex);

    IOSRAMLOG(2, "INTR arg/softp:%p pending:%d busy:%d\n",
        arg, softp->intr_pending, softp->intr_busy, NULL);
    DPRINTF(1, ("iosram(%d): iosram_intr exit\n", softp->instance));

    return (result);
}


/*
 * iosram_send_intr()
 *      Send an interrupt to the SSP side via AXQ driver
 */
int
iosram_send_intr()
{
    IOSRAMLOG(1, "SendIntr called\n", NULL, NULL, NULL, NULL);
    IOSRAM_STAT(intr_send);
    DPRINTF(1, ("iosram iosram_send_intr invoked\n"));

    return (axq_cpu2ssc_intr(0));
}


#if defined(DEBUG)
static void
iosram_dummy_cback(void *arg)
{
    DPRINTF(1, ("iosram_dummy_cback invoked arg:%p\n", arg));
}
#endif /* DEBUG */


/*ARGSUSED1*/
static int
iosram_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
    struct iosramsoft *softp;
    int error = DDI_SUCCESS;

    softp = ddi_get_soft_state(iosramsoft_statep, getminor(dev));
    if (softp == NULL) {
        return (ENXIO);
    }
    IOSRAMLOG(1, "IOCTL: dev:%p cmd:%x arg:%p ... instance %d\n",
        dev, cmd, arg, softp->instance);

    switch (cmd) {
#if defined(DEBUG)
    case IOSRAM_GET_FLAG:
    {
        iosram_io_t req;
        uint8_t data_valid, int_pending;

        if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
            return (EFAULT);
        }

        DPRINTF(2, ("IOSRAM_GET_FLAG(key:%x\n", req.key));

        req.retval = iosram_get_flag(req.key, &data_valid,
            &int_pending);
        req.data_valid = (uint32_t)data_valid;
        req.int_pending = (uint32_t)int_pending;

        if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
            DPRINTF(1,
                ("IOSRAM_GET_FLAG: can't copyout req.retval (%x)",
                req.retval));
            error = EFAULT;
        }

        return (error);
    }

    case IOSRAM_SET_FLAG:
    {
        iosram_io_t req;

        if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
            return (EFAULT);
        }

        DPRINTF(2, ("IOSRAM_SET_FLAG(key:%x data_valid:%x "
            "int_pending:%x\n", req.key, req.data_valid,
            req.int_pending));

        req.retval = iosram_set_flag(req.key, req.data_valid,
            req.int_pending);

        if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
            DPRINTF(1, ("IOSRAM_SET_FLAG: can't copyout req.retval"
                " (%x)\n", req.retval));
            error = EFAULT;
        }

        return (error);
    }

    case IOSRAM_RD:
    {
        caddr_t bufp;
        int len;
        iosram_io_t req;

        if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
            return (EFAULT);
        }

        DPRINTF(2, ("IOSRAM_RD(k:%x o:%x len:%x bufp:%p\n", req.key,
            req.off, req.len, (void *)(uintptr_t)req.bufp));

        len = req.len;
        bufp = kmem_alloc(len, KM_SLEEP);

        req.retval = iosram_rd(req.key, req.off, req.len, bufp);

        if (ddi_copyout(bufp, (void *)(uintptr_t)req.bufp, len, mode)) {
            DPRINTF(1, ("IOSRAM_RD: copyout(%p, %p,%x,%x) failed\n",
                bufp, (void *)(uintptr_t)req.bufp, len, mode));
            error = EFAULT;
        } else if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
            DPRINTF(1, ("IOSRAM_RD: can't copyout retval (%x)\n",
                req.retval));
            error = EFAULT;
        }

        kmem_free(bufp, len);
        return (error);
    }

    case IOSRAM_WR:
    {
        caddr_t bufp;
        iosram_io_t req;
        int len;

        if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
            return (EFAULT);
        }

        DPRINTF(2, ("IOSRAM_WR(k:%x o:%x len:%x bufp:%p\n",
            req.key, req.off, req.len, req.bufp));
        len = req.len;
        bufp = kmem_alloc(len, KM_SLEEP);
        if (ddi_copyin((void *)(uintptr_t)req.bufp, bufp, len, mode)) {
            error = EFAULT;
        } else {
            req.retval = iosram_wr(req.key, req.off, req.len,
                bufp);

            if (ddi_copyout(&req, (void *)arg, sizeof (req),
                mode)) {
                error = EFAULT;
            }
        }
        kmem_free(bufp, len);
        return (error);
    }

    case IOSRAM_TOC:
    {
        caddr_t bufp;
        int len;
        iosram_io_t req;

        if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
            return (EFAULT);
        }

        DPRINTF(2, ("IOSRAM_TOC (req.bufp:%x req.len:%x) \n",
            req.bufp, req.len));

        len = req.len;
        bufp = kmem_alloc(len, KM_SLEEP);

        req.retval = iosram_get_keys((iosram_toc_entry_t *)bufp,
            &req.len);

        if (ddi_copyout(bufp, (void *)(uintptr_t)req.bufp, req.len,
            mode)) {
            DPRINTF(1,
                ("IOSRAM_TOC: copyout(%p, %p,%x,%x) failed\n",
                bufp, (void *)(uintptr_t)req.bufp, req.len, mode));
            error = EFAULT;
        } else if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
            DPRINTF(1, ("IOSRAM_TOC: can't copyout retval (%x)\n",
                req.retval));
            error = EFAULT;
        }
        kmem_free(bufp, len);
        return (error);
    }

    case IOSRAM_SEND_INTR:
    {
        DPRINTF(2, ("IOSRAM_SEND_INTR\n"));

        switch ((int)arg) {
        case 0x11:
        case 0x22:
        case 0x44:
        case 0x88:
            ddi_put32(softp->sbbc_handle,
                &(softp->sbbc_region->int_enable.reg), (int)arg);
            DPRINTF(1, ("Wrote 0x%x to int_enable.reg\n",
                (int)arg));
            break;
        case 0xBB:
            ddi_put32(softp->sbbc_handle,
                &(softp->sbbc_region->p0_int_gen.reg), 1);
            DPRINTF(1, ("Wrote 1 to p0_int_gen.reg\n"));
            break;
        default:
            error = iosram_send_intr();
        }

        return (error);
    }

    case IOSRAM_PRINT_CBACK:
        iosram_print_cback();
        break;

    case IOSRAM_PRINT_STATE:
        iosram_print_state((int)arg);
        break;

#if IOSRAM_STATS
    case IOSRAM_PRINT_STATS:
        iosram_print_stats();
        break;
#endif

#if IOSRAM_LOG
    case IOSRAM_PRINT_LOG:
        iosram_print_log((int)arg);
        break;
#endif

    case IOSRAM_TUNNEL_SWITCH:
        error = iosram_switchfrom((int)arg);
        break;

    case IOSRAM_PRINT_FLAGS:
        iosram_print_flags();
        break;

    case IOSRAM_REG_CBACK:
    {
        iosram_io_t req;

        if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
            return (EFAULT);
        }

        DPRINTF(2, ("IOSRAM_REG_CBACK(k:%x)\n", req.key));

        req.retval = iosram_register(req.key, iosram_dummy_cback,
            (void *)(uintptr_t)req.key);
        if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
            error = EFAULT;
        }

        return (error);
    }

    case IOSRAM_UNREG_CBACK:
    {
        iosram_io_t req;

        if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
            return (EFAULT);
        }

        DPRINTF(2, ("IOSRAM_REG_CBACK(k:%x)\n", req.key));

        req.retval = iosram_unregister(req.key);
        if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
            error = EFAULT;
        }

        return (error);
    }

    case IOSRAM_SEMA_ACQUIRE:
    {
        DPRINTF(1, ("IOSRAM_SEMA_ACQUIRE\n"));
        error = iosram_sema_acquire(NULL);
        return (error);
    }

    case IOSRAM_SEMA_RELEASE:
    {
        DPRINTF(1, ("IOSRAM_SEMA_RELEASE\n"));
        error = iosram_sema_release();
        return (error);
    }

#endif /* DEBUG */

    default:
        DPRINTF(1, ("iosram_ioctl: Illegal command %x\n", cmd));
        error = ENOTTY;
    }

    return (error);
}


/*
 * iosram_switch_tunnel(softp)
 *      Switch master tunnel to the specified instance
 *      Must be called while holding iosram_mutex
 */
/*ARGSUSED*/
static int
iosram_switch_tunnel(iosramsoft_t *softp)
{
#ifdef DEBUG
    int instance = softp->instance;
#endif
    int error = 0;
    iosramsoft_t *prev_master;

    ASSERT(mutex_owned(&iosram_mutex));

    DPRINTF(1, ("tunnel switch new master:%p (%d) current master:%p (%d)\n",
        softp, instance, iosram_master,
        ((iosram_master) ? iosram_master->instance : -1)));
    IOSRAMLOG(1, "TSWTCH: new_master:%p (%p) iosram_master:%p (%d)\n",
        softp, instance, iosram_master,
        ((iosram_master) ? iosram_master->instance : -1));

    if (softp == NULL || (softp->state & IOSRAM_STATE_DETACH)) {
        return (ENXIO);
    }
    if (iosram_master == softp) {
        return (0);
    }


    /*
     * We protect against the softp structure being deallocated by setting
     * the IOSRAM_STATE_TSWITCH state flag.  The detach routine will check
     * for this flag and if set, it will wait for this flag to be reset or
     * refuse the detach operation.
     */
    iosram_new_master = softp;
    softp->state |= IOSRAM_STATE_TSWITCH;
    prev_master = iosram_master;
    if (prev_master) {
        prev_master->state |= IOSRAM_STATE_TSWITCH;
    }
    mutex_exit(&iosram_mutex);

    /*
     * Map the target IOSRAM, read the TOC, and register interrupts if not
     * already done.
     */
    DPRINTF(1, ("iosram(%d): mapping IOSRAM and SBBC\n",
        softp->instance));
    IOSRAMLOG(1, "TSWTCH: mapping instance:%d softp:%p\n",
        instance, softp, NULL, NULL);

    if (iosram_setup_map(softp) != DDI_SUCCESS) {
        error = ENXIO;
    } else if ((chunks == NULL) && (iosram_read_toc(softp) != 0)) {
        iosram_remove_map(softp);
        error = EINVAL;
    } else if (iosram_add_intr(softp) != DDI_SUCCESS) {
        /*
         * If there was no previous master, purge the TOC data that
         * iosram_read_toc() created.
         */
        if ((prev_master == NULL) && (chunks != NULL)) {
            kmem_free(chunks, nchunks * sizeof (iosram_chunk_t));
            chunks = NULL;
            nchunks = 0;
            iosram_init_hashtab();
        }
        iosram_remove_map(softp);
        error = ENXIO;
    }

    /*
     * If we are asked to abort tunnel switch, do so now, before invoking
     * the OBP callback.
     */
    if (iosram_tswitch_aborted) {

        /*
         * Once the tunnel switch is aborted, this thread should not
         * resume.  If it does, we simply log a message.  We can't unmap
         * the new master IOSRAM as it may be accessed in
         * iosram_abort_tswitch().  It will be unmapped when it is
         * detached.
         */
        IOSRAMLOG(1,
            "TSWTCH: aborted (pre OBP cback). Thread resumed.\n",
            NULL, NULL, NULL, NULL);
        error = EIO;
    }

    if (error) {
        IOSRAMLOG(1,
            "TSWTCH: map failed instance:%d softp:%p error:%x\n",
            instance, softp, error, NULL);
        goto done;
    }

    if (prev_master != NULL) {
        int result;

        /*
         * Now invoke the OBP interface to do the tunnel switch.
         */
        result = prom_starcat_switch_tunnel(softp->portid,
            OBP_TSWITCH_REQREPLY);
        if (result != 0) {
            error = EIO;
        }
        IOSRAMLOG(1,
            "TSWTCH: OBP tswitch portid:%x result:%x error:%x\n",
            softp->portid, result, error, NULL);
        IOSRAM_STAT(tswitch);
        iosram_tswitch_tstamp = ddi_get_lbolt();
    }

    mutex_enter(&iosram_mutex);
    if (iosram_tswitch_aborted) {
        /*
         * Tunnel switch aborted.  This thread should not resume.
         * For now, we simply log a message, but don't unmap any
         * IOSRAM at this stage as it may be accessed within the
         * iosram_abort_tswitch().  The IOSRAM will be unmapped
         * when that instance is detached.
         */
        if (iosram_tswitch_aborted) {
            IOSRAMLOG(1,
                "TSWTCH: aborted (post OBP cback). Thread"
                " resumed.\n", NULL, NULL, NULL, NULL);
            error = EIO;
            mutex_exit(&iosram_mutex);
        }
    } else if (error) {
        /*
         * Tunnel switch failed.  Continue using previous tunnel.
         * However, unmap new (target) IOSRAM.
2104          */
2105         iosram_new_master = NULL;
2106         mutex_exit(&iosram_mutex);
2107         iosram_remove_intr(softp);
2108         iosram_remove_map(softp);
2109     } else {
2110         /*
2111          * Tunnel switch was successful. Set the new master.
2112          * Also unmap old master IOSRAM and remove any interrupts
2113          * associated with that.
2114          *
2115          * Note that a call to iosram_force_write() allows access
2116          * to the IOSRAM while tunnel switch is in progress. That
2117          * means we need to set the new master before unmapping
2118          * the old master.
2119          */
2120         iosram_set_master(softp);
2121         iosram_new_master = NULL;
2122         mutex_exit(&iosram_mutex);
2123 
2124         if (prev_master) {
2125             IOSRAMLOG(1, "TSWTCH: unmapping prev_master:%p (%d)\n",
2126                 prev_master, prev_master->instance, NULL, NULL);
2127             iosram_remove_intr(prev_master);
2128             iosram_remove_map(prev_master);
2129         }
2130     }
2131 
2132 done:
2133     mutex_enter(&iosram_mutex);
2134 
2135     /*
2136      * Clear the tunnel switch flag on the source and destination
2137      * instances.
2138      */
2139     if (prev_master) {
2140         prev_master->state &= ~IOSRAM_STATE_TSWITCH;
2141     }
2142     softp->state &= ~IOSRAM_STATE_TSWITCH;
2143 
2144     /*
2145      * Since incoming interrupts could get lost during a tunnel switch,
2146      * trigger a soft interrupt just in case. No harm other than a bit
2147      * of wasted effort will be caused if no interrupts were dropped.
2148      */
2149     mutex_enter(&softp->intr_mutex);
2150     iosram_master->intr_pending = 1;
2151     if ((iosram_master->softintr_id != NULL) &&
2152         (iosram_master->intr_busy == 0)) {
2153         ddi_trigger_softintr(iosram_master->softintr_id);
2154     }
2155     mutex_exit(&softp->intr_mutex);
2156 
2157     IOSRAMLOG(1, "TSWTCH: done error:%d iosram_master:%p instance:%d\n",
2158         error, iosram_master,
2159         (iosram_master) ? iosram_master->instance : -1, NULL);
2160 
2161     return (error);
2162 }
2163 
2164 
2165 /*
2166  * iosram_abort_tswitch()
2167  * Must be called while holding iosram_mutex.
2168  */
2169 static void
2170 iosram_abort_tswitch()
2171 {
2172     uint32_t master_valid, new_master_valid;
2173 
2174     ASSERT(mutex_owned(&iosram_mutex));
2175 
2176     if ((!iosram_tswitch_active) || iosram_tswitch_aborted) {
2177         return;
2178     }
2179 
2180     ASSERT(iosram_master != NULL);
2181 
2182     IOSRAMLOG(1, "ABORT: iosram_master:%p (%d) iosram_new_master:%p (%d)\n",
2183         iosram_master, iosram_master->instance, iosram_new_master,
2184         (iosram_new_master == NULL) ? -1 : iosram_new_master->instance);
2185 
2186     /*
2187      * The first call to iosram_force_write() in the middle of tunnel switch
2188      * will get here. We lookup IOSRAM VALID location and setup appropriate
2189      * master, if one is still valid. We also set iosram_tswitch_aborted to
2190      * prevent reentering this code and to catch if the OBP callback thread
2191      * somehow resumes.
2192      */
2193     iosram_tswitch_aborted = 1;
2194 
2195     if ((iosram_new_master == NULL) ||
2196         (iosram_new_master == iosram_master)) {
2197         /*
2198          * New master hasn't been selected yet, or OBP callback
2199          * succeeded and we already selected new IOSRAM as master, but
2200          * system crashed in the middle of unmapping previous master or
2201          * cleaning up state. Use the existing master.
2202 */ 2203 ASSERT(iosram_master->iosramp != NULL); 2204 ASSERT(IOSRAM_GET_HDRFIELD32(iosram_master, status) == 2205 IOSRAM_VALID); 2206 IOSRAMLOG(1, "ABORT: master (%d) already determined.\n", 2207 iosram_master->instance, NULL, NULL, NULL); 2208 2209 return; 2210 } 2211 2212 /* 2213 * System crashed in the middle of tunnel switch and we know that the 2214 * new target has not been marked master yet. That means, the old 2215 * master should still be mapped. We need to abort the tunnel switch 2216 * and setup a valid master, if possible, so that we can write to the 2217 * IOSRAM. 2218 * 2219 * We select a new master based upon the IOSRAM header status fields in 2220 * the previous master IOSRAM and the target IOSRAM as follows: 2221 * 2222 * iosram_master iosram-tswitch 2223 * (Prev Master) (New Target) Decision 2224 * --------------- --------------- ----------- 2225 * VALID don't care prev master 2226 * INTRANSIT INVALID prev master 2227 * INTRANSIT INTRANSIT prev master 2228 * INTRANSIT VALID new target 2229 * INVALID INVALID shouldn't ever happen 2230 * INVALID INTRANSIT shouldn't ever happen 2231 * INVALID VALID new target 2232 */ 2233 2234 master_valid = (iosram_master->iosramp != NULL) ? 2235 IOSRAM_GET_HDRFIELD32(iosram_master, status) : IOSRAM_INVALID; 2236 new_master_valid = (iosram_new_master->iosramp != NULL) ? 2237 IOSRAM_GET_HDRFIELD32(iosram_new_master, status) : IOSRAM_INVALID; 2238 2239 if (master_valid == IOSRAM_VALID) { 2240 /* EMPTY */ 2241 /* 2242 * OBP hasn't been called yet or, if it has, it hasn't started 2243 * copying yet. Use the existing master. Note that the new 2244 * master may not be mapped yet. 2245 */ 2246 IOSRAMLOG(1, "ABORT: prev master(%d) is VALID\n", 2247 iosram_master->instance, NULL, NULL, NULL); 2248 } else if (master_valid == IOSRAM_INTRANSIT) { 2249 /* 2250 * The system crashed after OBP started processing the tunnel 2251 * switch but before the iosram driver determined that it was 2252 * complete. Use the new master if it has been marked valid, 2253 * meaning that OBP finished copying data to it, or the old 2254 * master otherwise. 2255 */ 2256 IOSRAMLOG(1, "ABORT: prev master(%d) is INTRANSIT\n", 2257 iosram_master->instance, NULL, NULL, NULL); 2258 2259 if (new_master_valid == IOSRAM_VALID) { 2260 iosram_set_master(iosram_new_master); 2261 IOSRAMLOG(1, "ABORT: new master(%d) is VALID\n", 2262 iosram_new_master->instance, NULL, NULL, 2263 NULL); 2264 } else { 2265 prom_starcat_switch_tunnel(iosram_master->portid, 2266 OBP_TSWITCH_NOREPLY); 2267 2268 IOSRAMLOG(1, "ABORT: new master(%d) is INVALID\n", 2269 iosram_new_master->instance, NULL, NULL, 2270 NULL); 2271 } 2272 } else { 2273 /* 2274 * The system crashed after OBP marked the old master INVALID, 2275 * which means the new master is the way to go. 
2276 */ 2277 IOSRAMLOG(1, "ABORT: prev master(%d) is INVALID\n", 2278 iosram_master->instance, NULL, NULL, NULL); 2279 2280 ASSERT(new_master_valid == IOSRAM_VALID); 2281 2282 iosram_set_master(iosram_new_master); 2283 } 2284 2285 IOSRAMLOG(1, "ABORT: Instance %d selected as master\n", 2286 iosram_master->instance, NULL, NULL, NULL); 2287 } 2288 2289 2290 /* 2291 * iosram_switchfrom(instance) 2292 * Switch master tunnel away from the specified instance 2293 */ 2294 /*ARGSUSED*/ 2295 int 2296 iosram_switchfrom(int instance) 2297 { 2298 struct iosramsoft *softp; 2299 int error = 0; 2300 int count; 2301 clock_t current_tstamp; 2302 clock_t tstamp_interval; 2303 struct iosramsoft *last_master = NULL; 2304 static int last_master_instance = -1; 2305 2306 IOSRAMLOG(1, "SwtchFrom: instance:%d iosram_master:%p (%d)\n", 2307 instance, iosram_master, 2308 ((iosram_master) ? iosram_master->instance : -1), NULL); 2309 2310 mutex_enter(&iosram_mutex); 2311 2312 /* 2313 * Wait if another tunnel switch is in progress 2314 */ 2315 for (count = 0; iosram_tswitch_active && count < IOSRAM_TSWITCH_RETRY; 2316 count++) { 2317 iosram_tswitch_wakeup = 1; 2318 cv_wait(&iosram_tswitch_wait, &iosram_mutex); 2319 } 2320 2321 if (iosram_tswitch_active) { 2322 mutex_exit(&iosram_mutex); 2323 return (EAGAIN); 2324 } 2325 2326 /* 2327 * Check if the specified instance holds the tunnel. If not, 2328 * then we are done. 2329 */ 2330 if ((iosram_master == NULL) || (iosram_master->instance != instance)) { 2331 mutex_exit(&iosram_mutex); 2332 return (0); 2333 } 2334 2335 /* 2336 * Before beginning the tunnel switch process, wait for any outstanding 2337 * read/write activity to complete. 2338 */ 2339 iosram_tswitch_active = 1; 2340 while (iosram_rw_active) { 2341 iosram_rw_wakeup = 1; 2342 cv_wait(&iosram_rw_wait, &iosram_mutex); 2343 } 2344 2345 /* 2346 * If a previous tunnel switch just completed, we have to make sure 2347 * HWAD has enough time to find the new tunnel before we switch 2348 * away from it. Otherwise, OBP's mailbox message to OSD will never 2349 * get through. Just to be paranoid about synchronization of lbolt 2350 * across different CPUs, make sure the current attempt isn't noted 2351 * as starting _before_ the last tunnel switch completed. 2352 */ 2353 current_tstamp = ddi_get_lbolt(); 2354 if (current_tstamp > iosram_tswitch_tstamp) { 2355 tstamp_interval = current_tstamp - iosram_tswitch_tstamp; 2356 } else { 2357 tstamp_interval = 0; 2358 } 2359 if (drv_hztousec(tstamp_interval) < IOSRAM_TSWITCH_DELAY_US) { 2360 mutex_exit(&iosram_mutex); 2361 delay(drv_usectohz(IOSRAM_TSWITCH_DELAY_US) - tstamp_interval); 2362 mutex_enter(&iosram_mutex); 2363 } 2364 2365 /* 2366 * The specified instance holds the tunnel. We need to move it to some 2367 * other IOSRAM. Try out all possible IOSRAMs listed in 2368 * iosram_instances. For now, we always search from the first entry. 2369 * In future, it may be desirable to start where we left off. 2370 */ 2371 for (softp = iosram_instances; softp != NULL; softp = softp->next) { 2372 if (iosram_tswitch_aborted) { 2373 break; 2374 } 2375 2376 /* we can't switch _to_ the instance we're switching _from_ */ 2377 if (softp->instance == instance) { 2378 continue; 2379 } 2380 2381 /* skip over instances being detached */ 2382 if (softp->state & IOSRAM_STATE_DETACH) { 2383 continue; 2384 } 2385 2386 /* 2387 * Try to avoid reverting to the last instance we switched away 2388 * from, as we expect that one to be detached eventually. 
Keep 2389 * track of it, though, so we can go ahead and try switching to 2390 * it if no other viable candidates are found. 2391 */ 2392 if (softp->instance == last_master_instance) { 2393 last_master = softp; 2394 continue; 2395 } 2396 2397 /* 2398 * Do the tunnel switch. If successful, record the instance of 2399 * the master we just left behind so we can try to avoid 2400 * reverting to it next time. 2401 */ 2402 if (iosram_switch_tunnel(softp) == 0) { 2403 last_master_instance = instance; 2404 break; 2405 } 2406 } 2407 2408 /* 2409 * If we failed to switch the tunnel, but we skipped over an instance 2410 * that had previously been switched out of because we expected it to be 2411 * detached, go ahead and try it anyway (unless the tswitch was aborted 2412 * or the instance we skipped is finally being detached). 2413 */ 2414 if ((softp == NULL) && (last_master != NULL) && 2415 !iosram_tswitch_aborted && 2416 !(last_master->state & IOSRAM_STATE_DETACH)) { 2417 if (iosram_switch_tunnel(last_master) == 0) { 2418 softp = last_master; 2419 last_master_instance = instance; 2420 } 2421 } 2422 2423 if ((softp == NULL) || (iosram_tswitch_aborted)) { 2424 error = EIO; 2425 } 2426 2427 /* 2428 * If there are additional tunnel switches queued up waiting for this 2429 * one to complete, wake them up. 2430 */ 2431 if (iosram_tswitch_wakeup) { 2432 iosram_tswitch_wakeup = 0; 2433 cv_broadcast(&iosram_tswitch_wait); 2434 } 2435 iosram_tswitch_active = 0; 2436 mutex_exit(&iosram_mutex); 2437 return (error); 2438 } 2439 2440 2441 /* 2442 * iosram_tunnel_capable(softp) 2443 * Check if this IOSRAM instance is tunnel-capable by looing at 2444 * "tunnel-capable" property. 2445 */ 2446 static int 2447 iosram_tunnel_capable(struct iosramsoft *softp) 2448 { 2449 int proplen; 2450 int tunnel_capable; 2451 2452 /* 2453 * Look up IOSRAM_TUNNELOK_PROP property, if any. 2454 */ 2455 proplen = sizeof (tunnel_capable); 2456 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, softp->dip, 2457 DDI_PROP_DONTPASS, IOSRAM_TUNNELOK_PROP, (caddr_t)&tunnel_capable, 2458 &proplen) != DDI_PROP_SUCCESS) { 2459 tunnel_capable = 0; 2460 } 2461 return (tunnel_capable); 2462 } 2463 2464 2465 static int 2466 iosram_sbbc_setup_map(struct iosramsoft *softp) 2467 { 2468 int rv; 2469 struct ddi_device_acc_attr attr; 2470 dev_info_t *dip = softp->dip; 2471 uint32_t sema_val; 2472 2473 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 2474 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 2475 attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC; 2476 2477 mutex_enter(&iosram_mutex); 2478 mutex_enter(&softp->intr_mutex); 2479 2480 /* 2481 * Map SBBC region in 2482 */ 2483 if ((rv = ddi_regs_map_setup(dip, IOSRAM_SBBC_MAP_INDEX, 2484 (caddr_t *)&softp->sbbc_region, 2485 IOSRAM_SBBC_MAP_OFFSET, sizeof (iosram_sbbc_region_t), 2486 &attr, &softp->sbbc_handle)) != DDI_SUCCESS) { 2487 DPRINTF(1, ("Failed to map SBBC region.\n")); 2488 mutex_exit(&softp->intr_mutex); 2489 mutex_exit(&iosram_mutex); 2490 return (rv); 2491 } 2492 2493 /* 2494 * Disable SBBC interrupts. SBBC interrupts are enabled 2495 * once the interrupt handler is registered. 2496 */ 2497 ddi_put32(softp->sbbc_handle, 2498 &(softp->sbbc_region->int_enable.reg), 0x0); 2499 2500 /* 2501 * Clear hardware semaphore value if appropriate. 2502 * When the first SBBC is mapped in by the IOSRAM driver, 2503 * the value of the semaphore should be initialized only 2504 * if it is not held by SMS. For subsequent SBBC's, the 2505 * semaphore will be always initialized. 
2506 */ 2507 sema_val = IOSRAM_SEMA_RD(softp); 2508 2509 if (!iosram_master) { 2510 /* the first SBBC is being mapped in */ 2511 if (!(IOSRAM_SEMA_IS_HELD(sema_val) && 2512 IOSRAM_SEMA_GET_IDX(sema_val) == IOSRAM_SEMA_SMS_IDX)) { 2513 /* not held by SMS, we clear the semaphore */ 2514 IOSRAM_SEMA_WR(softp, 0); 2515 } 2516 } else { 2517 /* not the first SBBC, we clear the semaphore */ 2518 IOSRAM_SEMA_WR(softp, 0); 2519 } 2520 2521 mutex_exit(&softp->intr_mutex); 2522 mutex_exit(&iosram_mutex); 2523 return (0); 2524 } 2525 2526 2527 static int 2528 iosram_setup_map(struct iosramsoft *softp) 2529 { 2530 int instance = softp->instance; 2531 dev_info_t *dip = softp->dip; 2532 int portid; 2533 int proplen; 2534 caddr_t propvalue; 2535 struct ddi_device_acc_attr attr; 2536 2537 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 2538 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 2539 attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC; 2540 2541 /* 2542 * Lookup IOSRAM_REG_PROP property to find out our IOSRAM length 2543 */ 2544 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 2545 DDI_PROP_DONTPASS, IOSRAM_REG_PROP, (caddr_t)&propvalue, 2546 &proplen) != DDI_PROP_SUCCESS) { 2547 cmn_err(CE_WARN, "iosram(%d): can't find register property.\n", 2548 instance); 2549 return (DDI_FAILURE); 2550 } else { 2551 iosram_reg_t *regprop = (iosram_reg_t *)propvalue; 2552 2553 DPRINTF(1, ("SetupMap(%d): Got reg prop: %x %x %x\n", 2554 instance, regprop->addr_hi, 2555 regprop->addr_lo, regprop->size)); 2556 2557 softp->iosramlen = regprop->size; 2558 2559 kmem_free(propvalue, proplen); 2560 } 2561 DPRINTF(1, ("SetupMap(%d): IOSRAM length: 0x%x\n", instance, 2562 softp->iosramlen)); 2563 softp->handle = NULL; 2564 2565 /* 2566 * To minimize boot time, we map the entire IOSRAM as opposed to 2567 * mapping individual chunk via ddi_regs_map_setup() call. 2568 */ 2569 if (ddi_regs_map_setup(dip, 0, (caddr_t *)&softp->iosramp, 2570 0x0, softp->iosramlen, &attr, &softp->handle) != DDI_SUCCESS) { 2571 cmn_err(CE_WARN, "iosram(%d): failed to map IOSRAM len:%x\n", 2572 instance, softp->iosramlen); 2573 iosram_remove_map(softp); 2574 return (DDI_FAILURE); 2575 } 2576 2577 /* 2578 * Lookup PORTID property on my parent hierarchy 2579 */ 2580 proplen = sizeof (portid); 2581 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 2582 0, IOSRAM_PORTID_PROP, (caddr_t)&portid, 2583 &proplen) != DDI_PROP_SUCCESS) { 2584 cmn_err(CE_WARN, "iosram(%d): can't find portid property.\n", 2585 instance); 2586 iosram_remove_map(softp); 2587 return (DDI_FAILURE); 2588 } 2589 softp->portid = portid; 2590 2591 if (iosram_sbbc_setup_map(softp) != DDI_SUCCESS) { 2592 cmn_err(CE_WARN, "iosram(%d): can't map SBBC region.\n", 2593 instance); 2594 iosram_remove_map(softp); 2595 return (DDI_FAILURE); 2596 } 2597 2598 mutex_enter(&iosram_mutex); 2599 softp->state |= IOSRAM_STATE_MAPPED; 2600 mutex_exit(&iosram_mutex); 2601 2602 return (DDI_SUCCESS); 2603 } 2604 2605 2606 static void 2607 iosram_remove_map(struct iosramsoft *softp) 2608 { 2609 mutex_enter(&iosram_mutex); 2610 2611 ASSERT((softp->state & IOSRAM_STATE_MASTER) == 0); 2612 2613 if (softp->handle) { 2614 ddi_regs_map_free(&softp->handle); 2615 softp->handle = NULL; 2616 } 2617 softp->iosramp = NULL; 2618 2619 /* 2620 * Umap SBBC registers region. Shared with handler for SBBC 2621 * interrupts, take intr_mutex. 
2622 */ 2623 mutex_enter(&softp->intr_mutex); 2624 if (softp->sbbc_region) { 2625 ddi_regs_map_free(&softp->sbbc_handle); 2626 softp->sbbc_region = NULL; 2627 } 2628 mutex_exit(&softp->intr_mutex); 2629 2630 softp->state &= ~IOSRAM_STATE_MAPPED; 2631 2632 mutex_exit(&iosram_mutex); 2633 } 2634 2635 2636 /* 2637 * iosram_is_chosen(struct iosramsoft *softp) 2638 * 2639 * Looks up "chosen" node property to 2640 * determine if it is the chosen IOSRAM. 2641 */ 2642 static int 2643 iosram_is_chosen(struct iosramsoft *softp) 2644 { 2645 char chosen_iosram[MAXNAMELEN]; 2646 char pn[MAXNAMELEN]; 2647 int nodeid; 2648 int chosen; 2649 pnode_t dnode; 2650 2651 /* 2652 * Get /chosen node info. prom interface will handle errors. 2653 */ 2654 dnode = prom_chosennode(); 2655 2656 /* 2657 * Look for the "iosram" property on the chosen node with a prom 2658 * interface as ddi_find_devinfo() couldn't be used (calls 2659 * ddi_walk_devs() that creates one extra lock on the device tree). 2660 */ 2661 if (prom_getprop(dnode, IOSRAM_CHOSEN_PROP, (caddr_t)&nodeid) <= 0) { 2662 /* 2663 * Can't find IOSRAM_CHOSEN_PROP property under chosen node 2664 */ 2665 cmn_err(CE_WARN, 2666 "iosram(%d): can't find chosen iosram property\n", 2667 softp->instance); 2668 return (0); 2669 } 2670 2671 DPRINTF(1, ("iosram(%d): Got '%x' for chosen '%s' property\n", 2672 softp->instance, nodeid, IOSRAM_CHOSEN_PROP)); 2673 2674 /* 2675 * get the full OBP pathname of this node 2676 */ 2677 if (prom_phandle_to_path((phandle_t)nodeid, chosen_iosram, 2678 sizeof (chosen_iosram)) < 0) { 2679 cmn_err(CE_NOTE, "prom_phandle_to_path(%x) failed\n", nodeid); 2680 return (0); 2681 } 2682 DPRINTF(1, ("iosram(%d): prom_phandle_to_path(%x) is '%s'\n", 2683 softp->instance, nodeid, chosen_iosram)); 2684 2685 (void) ddi_pathname(softp->dip, pn); 2686 DPRINTF(1, ("iosram(%d): ddi_pathname(%p) is '%s'\n", 2687 softp->instance, softp->dip, pn)); 2688 2689 chosen = (strcmp(chosen_iosram, pn) == 0) ? 1 : 0; 2690 DPRINTF(1, ("iosram(%d): ... %s\n", softp->instance, 2691 chosen ? "MASTER" : "SLAVE")); 2692 IOSRAMLOG(1, "iosram(%d): ... %s\n", softp->instance, 2693 (chosen ? "MASTER" : "SLAVE"), NULL, NULL); 2694 2695 return (chosen); 2696 } 2697 2698 2699 /* 2700 * iosram_set_master(struct iosramsoft *softp) 2701 * 2702 * Set master tunnel to the specified IOSRAM 2703 * Must be called while holding iosram_mutex. 2704 */ 2705 static void 2706 iosram_set_master(struct iosramsoft *softp) 2707 { 2708 ASSERT(mutex_owned(&iosram_mutex)); 2709 ASSERT(softp != NULL); 2710 ASSERT(softp->state & IOSRAM_STATE_MAPPED); 2711 ASSERT(IOSRAM_GET_HDRFIELD32(softp, status) == IOSRAM_VALID); 2712 2713 /* 2714 * Clear MASTER flag on any previous IOSRAM master, if any 2715 */ 2716 if (iosram_master && (iosram_master != softp)) { 2717 iosram_master->state &= ~IOSRAM_STATE_MASTER; 2718 } 2719 2720 /* 2721 * Setup new IOSRAM master 2722 */ 2723 iosram_update_addrs(softp); 2724 iosram_handle = softp->handle; 2725 softp->state |= IOSRAM_STATE_MASTER; 2726 softp->tswitch_ok++; 2727 iosram_master = softp; 2728 2729 IOSRAMLOG(1, "SETMASTER: softp:%p instance:%d\n", softp, 2730 softp->instance, NULL, NULL); 2731 } 2732 2733 2734 /* 2735 * iosram_read_toc() 2736 * 2737 * Read the TOC from an IOSRAM instance that has been mapped in. 2738 * If the TOC is flawed or the IOSRAM isn't valid, return an error. 
2739      */
2740 static int
2741 iosram_read_toc(struct iosramsoft *softp)
2742 {
2743     int i;
2744     int instance = softp->instance;
2745     uint8_t *toc_entryp;
2746     iosram_flags_t *flagsp = NULL;
2747     int new_nchunks;
2748     iosram_chunk_t *new_chunks;
2749     iosram_chunk_t *chunkp;
2750     iosram_chunk_t *old_chunkp;
2751     iosram_toc_entry_t index;
2752 
2753     /*
2754      * Never try to read the TOC out of an unmapped IOSRAM.
2755      */
2756     ASSERT(softp->state & IOSRAM_STATE_MAPPED);
2757 
2758     mutex_enter(&iosram_mutex);
2759 
2760     /*
2761      * Check to make sure this IOSRAM is marked valid. Return
2762      * an error if it isn't.
2763      */
2764     if (IOSRAM_GET_HDRFIELD32(softp, status) != IOSRAM_VALID) {
2765         DPRINTF(1, ("iosram_read_toc(%d): IOSRAM not flagged valid\n",
2766             instance));
2767         mutex_exit(&iosram_mutex);
2768         return (EINVAL);
2769     }
2770 
2771     /*
2772      * Get the location of the TOC.
2773      */
2774     toc_entryp = softp->iosramp + IOSRAM_GET_HDRFIELD32(softp, toc_offset);
2775 
2776     /*
2777      * Read the index entry from the TOC and make sure it looks correct.
2778      */
2779     ddi_rep_get8(softp->handle, (uint8_t *)&index, toc_entryp,
2780         sizeof (iosram_toc_entry_t), DDI_DEV_AUTOINCR);
2781     if ((index.key != IOSRAM_INDEX_KEY) ||
2782         (index.off != IOSRAM_INDEX_OFF)) {
2783         cmn_err(CE_WARN, "iosram(%d): invalid TOC index.\n", instance);
2784         mutex_exit(&iosram_mutex);
2785         return (EINVAL);
2786     }
2787 
2788     /*
2789      * Allocate storage for the new chunks array and initialize it with data
2790      * from the TOC and callback data from the corresponding old chunk, if
2791      * it exists.
2792      */
2793     new_nchunks = index.len - 1;
2794     new_chunks = (iosram_chunk_t *)kmem_zalloc(new_nchunks *
2795         sizeof (iosram_chunk_t), KM_SLEEP);
2796     for (i = 0, chunkp = new_chunks; i < new_nchunks; i++, chunkp++) {
2797         toc_entryp += sizeof (iosram_toc_entry_t);
2798         ddi_rep_get8(softp->handle, (uint8_t *)&(chunkp->toc_data),
2799             toc_entryp, sizeof (iosram_toc_entry_t), DDI_DEV_AUTOINCR);
2800         chunkp->hash = NULL;
2801         if ((chunkp->toc_data.off < softp->iosramlen) &&
2802             (chunkp->toc_data.len <= softp->iosramlen) &&
2803             ((chunkp->toc_data.off + chunkp->toc_data.len) <=
2804             softp->iosramlen)) {
2805             chunkp->basep = softp->iosramp + chunkp->toc_data.off;
2806             DPRINTF(1,
2807                 ("iosram_read_toc(%d): k:%x o:%x l:%x p:%x\n",
2808                 instance, chunkp->toc_data.key,
2809                 chunkp->toc_data.off, chunkp->toc_data.len,
2810                 chunkp->basep));
2811         } else {
2812             cmn_err(CE_WARN, "iosram(%d): TOC entry %d "
2813                 "out of range... off:%x len:%x\n",
2814                 instance, i + 1, chunkp->toc_data.off,
2815                 chunkp->toc_data.len);
2816             kmem_free(new_chunks, new_nchunks *
2817                 sizeof (iosram_chunk_t));
2818             mutex_exit(&iosram_mutex);
2819             return (EINVAL);
2820         }
2821 
2822         /*
2823          * Note the existence of the flags chunk, which is required in
2824          * a correct TOC.
2825          */
2826         if (chunkp->toc_data.key == IOSRAM_FLAGS_KEY) {
2827             flagsp = (iosram_flags_t *)chunkp->basep;
2828         }
2829 
2830         /*
2831          * If there was an entry for this chunk in the old list, copy
2832          * the callback data from old to new storage.
2833          */
2834         if ((nchunks > 0) &&
2835             ((old_chunkp = iosram_find_chunk(chunkp->toc_data.key)) !=
2836             NULL)) {
2837             bcopy(&(old_chunkp->cback), &(chunkp->cback),
2838                 sizeof (iosram_cback_t));
2839         }
2840     }
2841     /*
2842      * The TOC is malformed if there is no entry for the flags chunk.
2843 */ 2844 if (flagsp == NULL) { 2845 kmem_free(new_chunks, new_nchunks * sizeof (iosram_chunk_t)); 2846 mutex_exit(&iosram_mutex); 2847 return (EINVAL); 2848 } 2849 2850 /* 2851 * Free any memory that is no longer needed and install the new data 2852 * as current data. 2853 */ 2854 if (chunks != NULL) { 2855 kmem_free(chunks, nchunks * sizeof (iosram_chunk_t)); 2856 } 2857 chunks = new_chunks; 2858 nchunks = new_nchunks; 2859 iosram_init_hashtab(); 2860 2861 mutex_exit(&iosram_mutex); 2862 return (0); 2863 } 2864 2865 2866 /* 2867 * iosram_init_hashtab() 2868 * 2869 * Initialize the hash table and populate it with the IOSRAM 2870 * chunks previously read from the TOC. The caller must hold the 2871 * ioram_mutex lock. 2872 */ 2873 static void 2874 iosram_init_hashtab(void) 2875 { 2876 int i, bucket; 2877 iosram_chunk_t *chunkp; 2878 2879 ASSERT(mutex_owned(&iosram_mutex)); 2880 2881 for (i = 0; i < IOSRAM_HASHSZ; i++) { 2882 iosram_hashtab[i] = NULL; 2883 } 2884 2885 if (chunks) { 2886 for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) { 2887 /* 2888 * Hide the flags chunk by leaving it out of the hash 2889 * table. 2890 */ 2891 if (chunkp->toc_data.key == IOSRAM_FLAGS_KEY) { 2892 continue; 2893 } 2894 2895 /* 2896 * Add the current chunk to the hash table. 2897 */ 2898 bucket = IOSRAM_HASH(chunkp->toc_data.key); 2899 chunkp->hash = iosram_hashtab[bucket]; 2900 iosram_hashtab[bucket] = chunkp; 2901 } 2902 } 2903 } 2904 2905 2906 /* 2907 * iosram_update_addrs() 2908 * 2909 * Process the chunk list, updating each chunk's basep, which is a pointer 2910 * to the beginning of the chunk's memory in kvaddr space. Record the 2911 * basep value of the flags chunk to speed up flag access. The caller 2912 * must hold the iosram_mutex lock. 2913 */ 2914 static void 2915 iosram_update_addrs(struct iosramsoft *softp) 2916 { 2917 int i; 2918 iosram_flags_t *flagsp; 2919 iosram_chunk_t *chunkp; 2920 2921 ASSERT(mutex_owned(&iosram_mutex)); 2922 2923 /* 2924 * First go through all of the chunks updating their base pointers and 2925 * looking for the flags chunk. 2926 */ 2927 for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) { 2928 chunkp->basep = softp->iosramp + chunkp->toc_data.off; 2929 if (chunkp->toc_data.key == IOSRAM_FLAGS_KEY) { 2930 flagsp = (iosram_flags_t *)(chunkp->basep); 2931 DPRINTF(1, 2932 ("iosram_update_addrs flags: o:0x%08x p:%p", 2933 chunkp->toc_data.off, flagsp)); 2934 } 2935 } 2936 2937 /* 2938 * Now, go through and update each chunk's flags pointer. This can't be 2939 * done in the first loop because we don't have the address of the flags 2940 * chunk yet. 2941 */ 2942 for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) { 2943 chunkp->flagsp = flagsp++; 2944 DPRINTF(1, ("iosram_update_addrs: k:0x%x f:%p\n", 2945 chunkp->toc_data.key, chunkp->flagsp)); 2946 } 2947 } 2948 2949 /* 2950 * iosram_find_chunk(key) 2951 * 2952 * Return a pointer to iosram_chunk structure corresponding to the 2953 * "key" IOSRAM chunk. The caller must hold the iosram_mutex lock. 
2954 */ 2955 static iosram_chunk_t * 2956 iosram_find_chunk(uint32_t key) 2957 { 2958 iosram_chunk_t *chunkp; 2959 int index = IOSRAM_HASH(key); 2960 2961 ASSERT(mutex_owned(&iosram_mutex)); 2962 2963 for (chunkp = iosram_hashtab[index]; chunkp; chunkp = chunkp->hash) { 2964 if (chunkp->toc_data.key == key) { 2965 break; 2966 } 2967 } 2968 2969 return (chunkp); 2970 } 2971 2972 2973 /* 2974 * iosram_add_intr(iosramsoft_t *) 2975 */ 2976 static int 2977 iosram_add_intr(iosramsoft_t *softp) 2978 { 2979 IOSRAMLOG(2, "ADDINTR: softp:%p instance:%d\n", 2980 softp, softp->instance, NULL, NULL); 2981 2982 if (ddi_add_softintr(softp->dip, DDI_SOFTINT_MED, 2983 &softp->softintr_id, &softp->soft_iblk, NULL, 2984 iosram_softintr, (caddr_t)softp) != DDI_SUCCESS) { 2985 cmn_err(CE_WARN, 2986 "iosram(%d): Can't register softintr.\n", 2987 softp->instance); 2988 return (DDI_FAILURE); 2989 } 2990 2991 if (ddi_add_intr(softp->dip, 0, &softp->real_iblk, NULL, 2992 iosram_intr, (caddr_t)softp) != DDI_SUCCESS) { 2993 cmn_err(CE_WARN, 2994 "iosram(%d): Can't register intr" 2995 " handler.\n", softp->instance); 2996 ddi_remove_softintr(softp->softintr_id); 2997 return (DDI_FAILURE); 2998 } 2999 3000 /* 3001 * Enable SBBC interrupts 3002 */ 3003 ddi_put32(softp->sbbc_handle, &(softp->sbbc_region->int_enable.reg), 3004 IOSRAM_SBBC_INT0|IOSRAM_SBBC_INT1); 3005 3006 return (DDI_SUCCESS); 3007 } 3008 3009 3010 /* 3011 * iosram_remove_intr(iosramsoft_t *) 3012 */ 3013 static int 3014 iosram_remove_intr(iosramsoft_t *softp) 3015 { 3016 IOSRAMLOG(2, "REMINTR: softp:%p instance:%d\n", 3017 softp, softp->instance, NULL, NULL); 3018 3019 /* 3020 * Disable SBBC interrupts if SBBC is mapped in 3021 */ 3022 if (softp->sbbc_region) { 3023 ddi_put32(softp->sbbc_handle, 3024 &(softp->sbbc_region->int_enable.reg), 0); 3025 } 3026 3027 /* 3028 * Remove SBBC interrupt handler 3029 */ 3030 ddi_remove_intr(softp->dip, 0, softp->real_iblk); 3031 3032 /* 3033 * Remove soft interrupt handler 3034 */ 3035 mutex_enter(&iosram_mutex); 3036 if (softp->softintr_id != NULL) { 3037 ddi_remove_softintr(softp->softintr_id); 3038 softp->softintr_id = NULL; 3039 } 3040 mutex_exit(&iosram_mutex); 3041 3042 return (0); 3043 } 3044 3045 3046 /* 3047 * iosram_add_instance(iosramsoft_t *) 3048 * Must be called while holding iosram_mutex 3049 */ 3050 static void 3051 iosram_add_instance(iosramsoft_t *new_softp) 3052 { 3053 #ifdef DEBUG 3054 int instance = new_softp->instance; 3055 iosramsoft_t *softp; 3056 #endif 3057 3058 ASSERT(mutex_owned(&iosram_mutex)); 3059 3060 #if defined(DEBUG) 3061 /* Verify that this instance is not in the list */ 3062 for (softp = iosram_instances; softp != NULL; softp = softp->next) { 3063 ASSERT(softp->instance != instance); 3064 } 3065 #endif 3066 3067 /* 3068 * Add this instance to the list 3069 */ 3070 if (iosram_instances != NULL) { 3071 iosram_instances->prev = new_softp; 3072 } 3073 new_softp->next = iosram_instances; 3074 new_softp->prev = NULL; 3075 iosram_instances = new_softp; 3076 } 3077 3078 3079 /* 3080 * iosram_remove_instance(int instance) 3081 * Must be called while holding iosram_mutex 3082 */ 3083 static void 3084 iosram_remove_instance(int instance) 3085 { 3086 iosramsoft_t *softp; 3087 3088 /* 3089 * Remove specified instance from the iosram_instances list so that 3090 * it can't be chosen for tunnel in future. 
3091      */
3092     ASSERT(mutex_owned(&iosram_mutex));
3093 
3094     for (softp = iosram_instances; softp != NULL; softp = softp->next) {
3095         if (softp->instance == instance) {
3096             if (softp->next != NULL) {
3097                 softp->next->prev = softp->prev;
3098             }
3099             if (softp->prev != NULL) {
3100                 softp->prev->next = softp->next;
3101             }
3102             if (iosram_instances == softp) {
3103                 iosram_instances = softp->next;
3104             }
3105 
3106             return;
3107         }
3108     }
3109 }
3110 
3111 
3112 /*
3113  * iosram_sema_acquire: Acquire hardware semaphore.
3114  * Return 0 if the semaphore could be acquired, or one of the following
3115  * possible values:
3116  * EAGAIN: there is a tunnel switch in progress
3117  * EBUSY: the semaphore was already "held"
3118  * ENXIO: an IO error occurred (e.g. SBBC not mapped)
3119  * If old_value is not NULL, the location it points to will be updated
3120  * with the semaphore value read when attempting to acquire it.
3121  */
3122 int
3123 iosram_sema_acquire(uint32_t *old_value)
3124 {
3125     struct iosramsoft *softp;
3126     int rv;
3127     uint32_t sema_val;
3128 
3129     DPRINTF(2, ("IOSRAM: in iosram_sema_acquire\n"));
3130 
3131     mutex_enter(&iosram_mutex);
3132 
3133     /*
3134      * Disallow access if there is a tunnel switch in progress.
3135      */
3136     if (iosram_tswitch_active) {
3137         mutex_exit(&iosram_mutex);
3138         return (EAGAIN);
3139     }
3140 
3141     /*
3142      * Use current master IOSRAM for operation, fail if none is
3143      * currently active.
3144      */
3145     if ((softp = iosram_master) == NULL) {
3146         mutex_exit(&iosram_mutex);
3147         DPRINTF(1, ("IOSRAM: iosram_sema_acquire: no master\n"));
3148         return (ENXIO);
3149     }
3150 
3151     mutex_enter(&softp->intr_mutex);
3152 
3153     /*
3154      * Fail if SBBC region has not been mapped. This shouldn't
3155      * happen if we have a master IOSRAM, but we double-check.
3156      */
3157     if (softp->sbbc_region == NULL) {
3158         mutex_exit(&softp->intr_mutex);
3159         mutex_exit(&iosram_mutex);
3160         DPRINTF(1, ("IOSRAM(%d): iosram_sema_acquire: "
3161             "SBBC not mapped\n", softp->instance));
3162         return (ENXIO);
3163     }
3164 
3165     /* read semaphore value */
3166     sema_val = IOSRAM_SEMA_RD(softp);
3167     if (old_value != NULL)
3168         *old_value = sema_val;
3169 
3170     if (IOSRAM_SEMA_IS_HELD(sema_val)) {
3171         /* semaphore was held by someone else */
3172         rv = EBUSY;
3173     } else {
3174         /* semaphore was not held, we just acquired it */
3175         rv = 0;
3176     }
3177 
3178     mutex_exit(&softp->intr_mutex);
3179     mutex_exit(&iosram_mutex);
3180 
3181     DPRINTF(1, ("IOSRAM(%d): iosram_sema_acquire: "
3182         "old value=0x%x rv=%d\n", softp->instance, sema_val, rv));
3183 
3184     return (rv);
3185 }
3186 
3187 
3188 /*
3189  * iosram_sema_release: Release hardware semaphore.
3190  * This function will "release" the hardware semaphore, and return 0 on
3191  * success. If an error occurred, one of the following values will be
3192  * returned:
3193  * EAGAIN: there is a tunnel switch in progress
3194  * ENXIO: an IO error occurred (e.g. SBBC not mapped)
3195  */
3196 int
3197 iosram_sema_release(void)
3198 {
3199     struct iosramsoft *softp;
3200 
3201     DPRINTF(2, ("IOSRAM: in iosram_sema_release\n"));
3202 
3203     mutex_enter(&iosram_mutex);
3204 
3205     /*
3206      * Disallow access if there is a tunnel switch in progress.
3207      */
3208     if (iosram_tswitch_active) {
3209         mutex_exit(&iosram_mutex);
3210         return (EAGAIN);
3211     }
3212 
3213     /*
3214      * Use current master IOSRAM for operation, fail if none is
3215      * currently active.
3216 */ 3217 if ((softp = iosram_master) == NULL) { 3218 mutex_exit(&iosram_mutex); 3219 DPRINTF(1, ("IOSRAM: iosram_sema_release: no master\n")); 3220 return (ENXIO); 3221 } 3222 3223 mutex_enter(&softp->intr_mutex); 3224 3225 /* 3226 * Fail if SBBC region has not been mapped in. This shouldn't 3227 * happen if we have a master IOSRAM, but we double-check. 3228 */ 3229 if (softp->sbbc_region == NULL) { 3230 mutex_exit(&softp->intr_mutex); 3231 mutex_exit(&iosram_mutex); 3232 DPRINTF(1, ("IOSRAM(%d): iosram_sema_release: " 3233 "SBBC not mapped\n", softp->instance)); 3234 return (ENXIO); 3235 } 3236 3237 /* Release semaphore by clearing our semaphore register */ 3238 IOSRAM_SEMA_WR(softp, 0); 3239 3240 mutex_exit(&softp->intr_mutex); 3241 mutex_exit(&iosram_mutex); 3242 3243 DPRINTF(1, ("IOSRAM(%d): iosram_sema_release: success\n", 3244 softp->instance)); 3245 3246 return (0); 3247 } 3248 3249 3250 #if defined(IOSRAM_LOG) 3251 void 3252 iosram_log(caddr_t fmt, intptr_t a1, intptr_t a2, intptr_t a3, intptr_t a4) 3253 { 3254 uint32_t seq; 3255 iosram_log_t *logp; 3256 3257 mutex_enter(&iosram_log_mutex); 3258 3259 seq = iosram_logseq++; 3260 logp = &iosram_logbuf[seq % IOSRAM_MAXLOG]; 3261 logp->seq = seq; 3262 logp->tstamp = lbolt; 3263 logp->fmt = fmt; 3264 logp->arg1 = a1; 3265 logp->arg2 = a2; 3266 logp->arg3 = a3; 3267 logp->arg4 = a4; 3268 3269 mutex_exit(&iosram_log_mutex); 3270 3271 if (iosram_log_print) { 3272 cmn_err(CE_CONT, "#%x @%lx ", logp->seq, logp->tstamp); 3273 if (logp->fmt) { 3274 cmn_err(CE_CONT, logp->fmt, logp->arg1, logp->arg2, 3275 logp->arg3, logp->arg4); 3276 if (logp->fmt[strlen(logp->fmt)-1] != '\n') { 3277 cmn_err(CE_CONT, "\n"); 3278 } 3279 } else { 3280 cmn_err(CE_CONT, "fmt:%p args: %lx %lx %lx %lx\n", 3281 logp->fmt, logp->arg1, logp->arg2, logp->arg3, 3282 logp->arg4); 3283 } 3284 } 3285 } 3286 #endif /* IOSRAM_LOG */ 3287 3288 3289 #if defined(DEBUG) 3290 /* 3291 * iosram_get_keys(buf, len) 3292 * Return IOSRAM TOC in the specified buffer 3293 */ 3294 static int 3295 iosram_get_keys(iosram_toc_entry_t *bufp, uint32_t *len) 3296 { 3297 struct iosram_chunk *chunkp; 3298 int error = 0; 3299 int i; 3300 int cnt = (*len) / sizeof (iosram_toc_entry_t); 3301 3302 IOSRAMLOG(2, "iosram_get_keys(bufp:%p *len:%x)\n", bufp, *len, NULL, 3303 NULL); 3304 3305 /* 3306 * Copy data while holding the lock to prevent any data 3307 * corruption or invalid pointer dereferencing. 3308 */ 3309 mutex_enter(&iosram_mutex); 3310 3311 if (iosram_master == NULL) { 3312 error = EIO; 3313 } else { 3314 for (i = 0, chunkp = chunks; i < nchunks && i < cnt; 3315 i++, chunkp++) { 3316 bufp[i].key = chunkp->toc_data.key; 3317 bufp[i].off = chunkp->toc_data.off; 3318 bufp[i].len = chunkp->toc_data.len; 3319 bufp[i].unused = chunkp->toc_data.unused; 3320 } 3321 *len = i * sizeof (iosram_toc_entry_t); 3322 } 3323 3324 mutex_exit(&iosram_mutex); 3325 return (error); 3326 } 3327 3328 3329 /* 3330 * iosram_print_state(instance) 3331 */ 3332 static void 3333 iosram_print_state(int instance) 3334 { 3335 struct iosramsoft *softp; 3336 char pn[MAXNAMELEN]; 3337 3338 if (instance < 0) { 3339 softp = iosram_master; 3340 } else { 3341 softp = ddi_get_soft_state(iosramsoft_statep, instance); 3342 } 3343 3344 if (softp == NULL) { 3345 cmn_err(CE_CONT, "iosram_print_state: Can't find instance %d\n", 3346 instance); 3347 return; 3348 } 3349 instance = softp->instance; 3350 3351 mutex_enter(&iosram_mutex); 3352 mutex_enter(&softp->intr_mutex); 3353 3354 cmn_err(CE_CONT, "iosram_print_state(%d): ... 
%s\n", instance, 3355 ((softp == iosram_master) ? "MASTER" : "SLAVE")); 3356 3357 (void) ddi_pathname(softp->dip, pn); 3358 cmn_err(CE_CONT, " pathname:%s\n", pn); 3359 cmn_err(CE_CONT, " instance:%d portid:%d iosramlen:0x%x\n", 3360 softp->instance, softp->portid, softp->iosramlen); 3361 cmn_err(CE_CONT, " softp:%p handle:%p iosramp:%p\n", softp, 3362 softp->handle, softp->iosramp); 3363 cmn_err(CE_CONT, " state:0x%x tswitch_ok:%x tswitch_fail:%x\n", 3364 softp->state, softp->tswitch_ok, softp->tswitch_fail); 3365 cmn_err(CE_CONT, " softintr_id:%p intr_busy:%x intr_pending:%x\n", 3366 softp->softintr_id, softp->intr_busy, softp->intr_pending); 3367 3368 mutex_exit(&softp->intr_mutex); 3369 mutex_exit(&iosram_mutex); 3370 } 3371 3372 3373 /* 3374 * iosram_print_stats() 3375 */ 3376 static void 3377 iosram_print_stats() 3378 { 3379 uint32_t calls; 3380 3381 cmn_err(CE_CONT, "iosram_stats:\n"); 3382 calls = iosram_stats.read; 3383 cmn_err(CE_CONT, " read ... calls:%x bytes:%lx avg_sz:%x\n", 3384 calls, iosram_stats.bread, 3385 (uint32_t)((calls != 0) ? (iosram_stats.bread/calls) : 0)); 3386 3387 calls = iosram_stats.write; 3388 cmn_err(CE_CONT, " write ... calls:%x bytes:%lx avg_sz:%x\n", 3389 calls, iosram_stats.bwrite, 3390 (uint32_t)((calls != 0) ? (iosram_stats.bwrite/calls) : 0)); 3391 3392 cmn_err(CE_CONT, " intr recv (real:%x soft:%x) sent:%x cback:%x\n", 3393 iosram_stats.intr_recv, iosram_stats.sintr_recv, 3394 iosram_stats.intr_send, iosram_stats.callbacks); 3395 3396 cmn_err(CE_CONT, " tswitch: %x getflag:%x setflag:%x\n", 3397 iosram_stats.tswitch, iosram_stats.getflag, 3398 iosram_stats.setflag); 3399 3400 cmn_err(CE_CONT, " iosram_rw_active_max: %x\n", iosram_rw_active_max); 3401 } 3402 3403 3404 static void 3405 iosram_print_cback() 3406 { 3407 iosram_chunk_t *chunkp; 3408 int i; 3409 3410 /* 3411 * Print callback handlers 3412 */ 3413 mutex_enter(&iosram_mutex); 3414 3415 cmn_err(CE_CONT, "IOSRAM callbacks:\n"); 3416 for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) { 3417 if (chunkp->cback.handler) { 3418 cmn_err(CE_CONT, " %2d: key:0x%x hdlr:%p arg:%p " 3419 "busy:%d unreg:%d\n", i, chunkp->toc_data.key, 3420 chunkp->cback.handler, chunkp->cback.arg, 3421 chunkp->cback.busy, chunkp->cback.unregister); 3422 } 3423 } 3424 mutex_exit(&iosram_mutex); 3425 } 3426 3427 3428 static void 3429 iosram_print_flags() 3430 { 3431 int i; 3432 uint32_t *keys; 3433 iosram_flags_t *flags; 3434 3435 mutex_enter(&iosram_mutex); 3436 3437 if (iosram_master == NULL) { 3438 mutex_exit(&iosram_mutex); 3439 cmn_err(CE_CONT, "IOSRAM Flags: not accessible\n"); 3440 return; 3441 } 3442 3443 keys = kmem_alloc(nchunks * sizeof (uint32_t), KM_SLEEP); 3444 flags = kmem_alloc(nchunks * sizeof (iosram_flags_t), KM_SLEEP); 3445 3446 for (i = 0; i < nchunks; i++) { 3447 keys[i] = chunks[i].toc_data.key; 3448 ddi_rep_get8(iosram_handle, (uint8_t *)&(flags[i]), 3449 (uint8_t *)(chunks[i].flagsp), sizeof (iosram_flags_t), 3450 DDI_DEV_AUTOINCR); 3451 } 3452 3453 mutex_exit(&iosram_mutex); 3454 3455 cmn_err(CE_CONT, "IOSRAM Flags:\n"); 3456 for (i = 0; i < nchunks; i++) { 3457 cmn_err(CE_CONT, 3458 " %2d: key: 0x%x data_valid:%x int_pending:%x\n", 3459 i, keys[i], flags[i].data_valid, flags[i].int_pending); 3460 } 3461 3462 kmem_free(keys, nchunks * sizeof (uint32_t)); 3463 kmem_free(flags, nchunks * sizeof (iosram_flags_t)); 3464 } 3465 3466 3467 /*PRINTFLIKE1*/ 3468 static void 3469 iosram_dprintf(const char *fmt, ...) 
3470 { 3471 char msg_buf[256]; 3472 va_list adx; 3473 3474 va_start(adx, fmt); 3475 vsprintf(msg_buf, fmt, adx); 3476 va_end(adx); 3477 3478 cmn_err(CE_CONT, "%s", msg_buf); 3479 } 3480 #endif /* DEBUG */ 3481 3482 3483 #if IOSRAM_LOG 3484 /* 3485 * iosram_print_log(int cnt) 3486 * Print last few entries of the IOSRAM log in reverse order 3487 */ 3488 static void 3489 iosram_print_log(int cnt) 3490 { 3491 int i; 3492 3493 if (cnt <= 0) { 3494 cnt = 20; 3495 } else if (cnt > IOSRAM_MAXLOG) { 3496 cnt = IOSRAM_MAXLOG; 3497 } 3498 3499 3500 cmn_err(CE_CONT, 3501 "\niosram_logseq: 0x%x lbolt: %lx iosram_log_level:%x\n", 3502 iosram_logseq, lbolt, iosram_log_level); 3503 cmn_err(CE_CONT, "iosram_logbuf: %p max entries:0x%x\n", 3504 iosram_logbuf, IOSRAM_MAXLOG); 3505 for (i = iosram_logseq; --i >= 0 && --cnt >= 0; ) { 3506 iosram_log_t *logp; 3507 3508 mutex_enter(&iosram_log_mutex); 3509 3510 logp = &iosram_logbuf[i %IOSRAM_MAXLOG]; 3511 cmn_err(CE_CONT, "#%x @%lx ", logp->seq, logp->tstamp); 3512 3513 if (logp->fmt) { 3514 cmn_err(CE_CONT, logp->fmt, logp->arg1, logp->arg2, 3515 logp->arg3, logp->arg4); 3516 if (logp->fmt[strlen(logp->fmt)-1] != '\n') { 3517 cmn_err(CE_CONT, "\n"); 3518 } 3519 } else { 3520 cmn_err(CE_CONT, "fmt:%p args: %lx %lx %lx %lx\n", 3521 logp->fmt, logp->arg1, logp->arg2, 3522 logp->arg3, logp->arg4); 3523 } 3524 3525 mutex_exit(&iosram_log_mutex); 3526 } 3527 } 3528 #endif /* IOSRAM_LOG */ 3529
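
/*
 * Illustrative sketch (compiled out by default): one plausible way a kernel
 * client could bracket an IOSRAM update with the hardware semaphore routines
 * defined above. The IOSRAM_SEMA_EXAMPLE guard and the
 * iosram_example_locked_update() name are hypothetical and for illustration
 * only; iosram_sema_acquire() and iosram_sema_release() are the interfaces
 * this file actually provides.
 */
#ifdef IOSRAM_SEMA_EXAMPLE
static int
iosram_example_locked_update(void)
{
    int rv;

    /*
     * Try to take the hardware semaphore. A non-zero return is EBUSY
     * (already held), EAGAIN (tunnel switch in progress), or ENXIO
     * (e.g. SBBC not mapped), per the comments above. Pass a uint32_t
     * pointer instead of NULL to observe the raw value that was read.
     */
    rv = iosram_sema_acquire(NULL);
    if (rv != 0) {
        return (rv);
    }

    /* ... perform the desired IOSRAM accesses here ... */

    /* Drop the semaphore; returns 0 on success, EAGAIN or ENXIO on failure. */
    return (iosram_sema_release());
}
#endif  /* IOSRAM_SEMA_EXAMPLE */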