1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * OPL CMU-CH PCI nexus driver. 
30 * 31 */ 32 33 #include <sys/types.h> 34 #include <sys/sysmacros.h> 35 #include <sys/systm.h> 36 #include <sys/intreg.h> 37 #include <sys/intr.h> 38 #include <sys/machsystm.h> 39 #include <sys/conf.h> 40 #include <sys/stat.h> 41 #include <sys/kmem.h> 42 #include <sys/async.h> 43 #include <sys/ivintr.h> 44 #include <sys/sunddi.h> 45 #include <sys/sunndi.h> 46 #include <sys/ndifm.h> 47 #include <sys/ontrap.h> 48 #include <sys/ddi_impldefs.h> 49 #include <sys/ddi_subrdefs.h> 50 #include <sys/epm.h> 51 #include <sys/spl.h> 52 #include <sys/fm/util.h> 53 #include <sys/fm/util.h> 54 #include <sys/fm/protocol.h> 55 #include <sys/fm/io/pci.h> 56 #include <sys/fm/io/sun4upci.h> 57 #include <sys/pcicmu/pcicmu.h> 58 59 #include <sys/cmn_err.h> 60 #include <sys/time.h> 61 #include <sys/pci.h> 62 #include <sys/modctl.h> 63 #include <sys/open.h> 64 #include <sys/errno.h> 65 #include <sys/file.h> 66 67 68 uint32_t pcmu_spurintr_duration = 60000000; /* One minute */ 69 70 /* 71 * The variable controls the default setting of the command register 72 * for pci devices. See pcmu_init_child() for details. 73 * 74 * This flags also controls the setting of bits in the bridge control 75 * register pci to pci bridges. See pcmu_init_child() for details. 76 */ 77 ushort_t pcmu_command_default = PCI_COMM_SERR_ENABLE | 78 PCI_COMM_WAIT_CYC_ENAB | 79 PCI_COMM_PARITY_DETECT | 80 PCI_COMM_ME | 81 PCI_COMM_MAE | 82 PCI_COMM_IO; 83 /* 84 * The following driver parameters are defined as variables to allow 85 * patching for debugging and tuning. Flags that can be set on a per 86 * PBM basis are bit fields where the PBM device instance number maps 87 * to the bit position. 88 */ 89 #ifdef DEBUG 90 uint64_t pcmu_debug_flags = 0; 91 #endif 92 uint_t ecc_error_intr_enable = 1; 93 94 uint_t pcmu_ecc_afsr_retries = 100; /* XXX - what's a good value? 
*/ 95 96 uint_t pcmu_intr_retry_intv = 5; /* for interrupt retry reg */ 97 uint_t pcmu_panic_on_fatal_errors = 1; /* should be 1 at beta */ 98 99 hrtime_t pcmu_intrpend_timeout = 5ll * NANOSEC; /* 5 seconds in nanoseconds */ 100 101 uint64_t pcmu_errtrig_pa = 0x0; 102 103 104 /* 105 * The following value is the number of consecutive unclaimed interrupts that 106 * will be tolerated for a particular ino_p before the interrupt is deemed to 107 * be jabbering and is blocked. 108 */ 109 uint_t pcmu_unclaimed_intr_max = 20; 110 111 /* 112 * function prototypes for dev ops routines: 113 */ 114 static int pcmu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 115 static int pcmu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 116 static int pcmu_info(dev_info_t *dip, ddi_info_cmd_t infocmd, 117 void *arg, void **result); 118 static int pcmu_open(dev_t *devp, int flags, int otyp, cred_t *credp); 119 static int pcmu_close(dev_t dev, int flags, int otyp, cred_t *credp); 120 static int pcmu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, 121 cred_t *credp, int *rvalp); 122 static int pcmu_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 123 int flags, char *name, caddr_t valuep, int *lengthp); 124 static int pcmu_ctlops_poke(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args); 125 static int pcmu_ctlops_peek(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args, 126 void *result); 127 128 static int map_pcmu_registers(pcmu_t *, dev_info_t *); 129 static void unmap_pcmu_registers(pcmu_t *); 130 static void pcmu_pbm_clear_error(pcmu_pbm_t *); 131 132 static int pcmu_ctlops(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, 133 void *, void *); 134 static int pcmu_map(dev_info_t *, dev_info_t *, ddi_map_req_t *, 135 off_t, off_t, caddr_t *); 136 static int pcmu_intr_ops(dev_info_t *, dev_info_t *, ddi_intr_op_t, 137 ddi_intr_handle_impl_t *, void *); 138 139 static uint32_t pcmu_identity_init(pcmu_t *pcmu_p); 140 static int pcmu_intr_setup(pcmu_t *pcmu_p); 141 static void 
pcmu_pbm_errstate_get(pcmu_t *pcmu_p, 142 pcmu_pbm_errstate_t *pbm_err_p); 143 static int pcmu_obj_setup(pcmu_t *pcmu_p); 144 static void pcmu_obj_destroy(pcmu_t *pcmu_p); 145 static void pcmu_obj_resume(pcmu_t *pcmu_p); 146 static void pcmu_obj_suspend(pcmu_t *pcmu_p); 147 148 static void u2u_ittrans_init(pcmu_t *, u2u_ittrans_data_t **); 149 static void u2u_ittrans_resume(u2u_ittrans_data_t **); 150 static void u2u_ittrans_uninit(u2u_ittrans_data_t *); 151 152 static pcmu_ksinfo_t *pcmu_name_kstat; 153 154 /* 155 * bus ops and dev ops structures: 156 */ 157 static struct bus_ops pcmu_bus_ops = { 158 BUSO_REV, 159 pcmu_map, 160 0, 161 0, 162 0, 163 i_ddi_map_fault, 164 0, 165 0, 166 0, 167 0, 168 0, 169 0, 170 0, 171 0, 172 pcmu_ctlops, 173 ddi_bus_prop_op, 174 ndi_busop_get_eventcookie, /* (*bus_get_eventcookie)(); */ 175 ndi_busop_add_eventcall, /* (*bus_add_eventcall)(); */ 176 ndi_busop_remove_eventcall, /* (*bus_remove_eventcall)(); */ 177 ndi_post_event, /* (*bus_post_event)(); */ 178 NULL, /* (*bus_intr_ctl)(); */ 179 NULL, /* (*bus_config)(); */ 180 NULL, /* (*bus_unconfig)(); */ 181 NULL, /* (*bus_fm_init)(); */ 182 NULL, /* (*bus_fm_fini)(); */ 183 NULL, /* (*bus_fm_access_enter)(); */ 184 NULL, /* (*bus_fm_access_fini)(); */ 185 NULL, /* (*bus_power)(); */ 186 pcmu_intr_ops /* (*bus_intr_op)(); */ 187 }; 188 189 struct cb_ops pcmu_cb_ops = { 190 pcmu_open, /* open */ 191 pcmu_close, /* close */ 192 nodev, /* strategy */ 193 nodev, /* print */ 194 nodev, /* dump */ 195 nodev, /* read */ 196 nodev, /* write */ 197 pcmu_ioctl, /* ioctl */ 198 nodev, /* devmap */ 199 nodev, /* mmap */ 200 nodev, /* segmap */ 201 nochpoll, /* poll */ 202 pcmu_prop_op, /* cb_prop_op */ 203 NULL, /* streamtab */ 204 D_NEW | D_MP | D_HOTPLUG, /* Driver compatibility flag */ 205 CB_REV, /* rev */ 206 nodev, /* int (*cb_aread)() */ 207 nodev /* int (*cb_awrite)() */ 208 }; 209 210 static struct dev_ops pcmu_ops = { 211 DEVO_REV, 212 0, 213 pcmu_info, 214 nulldev, 215 0, 216 
pcmu_attach, 217 pcmu_detach, 218 nodev, 219 &pcmu_cb_ops, 220 &pcmu_bus_ops, 221 0 222 }; 223 224 /* 225 * module definitions: 226 */ 227 extern struct mod_ops mod_driverops; 228 229 static struct modldrv modldrv = { 230 &mod_driverops, /* Type of module - driver */ 231 "OPL CMU-CH PCI Nexus driver %I%", /* Name of module. */ 232 &pcmu_ops, /* driver ops */ 233 }; 234 235 static struct modlinkage modlinkage = { 236 MODREV_1, (void *)&modldrv, NULL 237 }; 238 239 /* 240 * driver global data: 241 */ 242 void *per_pcmu_state; /* per-pbm soft state pointer */ 243 kmutex_t pcmu_global_mutex; /* attach/detach common struct lock */ 244 errorq_t *pcmu_ecc_queue = NULL; /* per-system ecc handling queue */ 245 246 extern void pcmu_child_cfg_save(dev_info_t *dip); 247 extern void pcmu_child_cfg_restore(dev_info_t *dip); 248 249 int 250 _init(void) 251 { 252 int e; 253 254 /* 255 * Initialize per-pci bus soft state pointer. 256 */ 257 e = ddi_soft_state_init(&per_pcmu_state, sizeof (pcmu_t), 1); 258 if (e != 0) 259 return (e); 260 261 /* 262 * Initialize global mutexes. 263 */ 264 mutex_init(&pcmu_global_mutex, NULL, MUTEX_DRIVER, NULL); 265 266 /* 267 * Create the performance kstats. 268 */ 269 pcmu_kstat_init(); 270 271 /* 272 * Install the module. 273 */ 274 e = mod_install(&modlinkage); 275 if (e != 0) { 276 ddi_soft_state_fini(&per_pcmu_state); 277 mutex_destroy(&pcmu_global_mutex); 278 } 279 return (e); 280 } 281 282 int 283 _fini(void) 284 { 285 int e; 286 287 /* 288 * Remove the module. 289 */ 290 e = mod_remove(&modlinkage); 291 if (e != 0) { 292 return (e); 293 } 294 295 /* 296 * Destroy pcmu_ecc_queue, and set it to NULL. 297 */ 298 if (pcmu_ecc_queue) { 299 errorq_destroy(pcmu_ecc_queue); 300 pcmu_ecc_queue = NULL; 301 } 302 303 /* 304 * Destroy the performance kstats. 305 */ 306 pcmu_kstat_fini(); 307 308 /* 309 * Free the per-pci and per-CMU-CH soft state info and destroy 310 * mutex for per-CMU-CH soft state. 
	 */
	ddi_soft_state_fini(&per_pcmu_state);
	mutex_destroy(&pcmu_global_mutex);
	return (e);
}

/* report module information to modinfo(8) */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * getinfo(9E) entry point: map a dev_t or instance to driver state.
 * The instance number lives in the upper bits of the minor number
 * (see the instance<<8 minor-node encoding in pcmu_attach()).
 */
/*ARGSUSED*/
static int
pcmu_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int instance = getminor((dev_t)arg) >> 8;
	pcmu_t *pcmu_p = get_pcmu_soft_state(instance);

	switch (infocmd) {
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2DEVINFO:
		if (pcmu_p == NULL)
			return (DDI_FAILURE);
		*result = (void *)pcmu_p->pcmu_dip;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}


/* device driver entry points */
/*
 * attach entry point:
 * DDI_ATTACH allocates soft state, reads properties, maps registers and
 * creates the "devctl" minor node; the error labels unwind those steps
 * in reverse order of acquisition.  DDI_RESUME re-configures a previously
 * suspended instance.
 */
static int
pcmu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	pcmu_t *pcmu_p;
	int instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		PCMU_DBG0(PCMU_DBG_ATTACH, dip, "DDI_ATTACH\n");

		/*
		 * Allocate and get the per-pci soft state structure.
		 */
		if (alloc_pcmu_soft_state(instance) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't allocate pci state",
			    ddi_driver_name(dip), instance);
			goto err_bad_pcmu_softstate;
		}
		pcmu_p = get_pcmu_soft_state(instance);
		pcmu_p->pcmu_dip = dip;
		mutex_init(&pcmu_p->pcmu_mutex, NULL, MUTEX_DRIVER, NULL);
		pcmu_p->pcmu_soft_state = PCMU_SOFT_STATE_CLOSED;
		pcmu_p->pcmu_open_count = 0;

		/*
		 * Get key properties of the pci bridge node.
		 */
		if (get_pcmu_properties(pcmu_p, dip) == DDI_FAILURE) {
			goto err_bad_pcmu_prop;
		}

		/*
		 * Map in the registers.
		 */
		if (map_pcmu_registers(pcmu_p, dip) == DDI_FAILURE) {
			goto err_bad_reg_prop;
		}
		if (pcmu_obj_setup(pcmu_p) != DDI_SUCCESS) {
			goto err_bad_objs;
		}

		/* minor encodes the instance in the upper bits (see getinfo) */
		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
		    (uint_t)instance<<8 | 0xff,
		    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
			goto err_bad_devctl_node;
		}

		/*
		 * Due to unresolved hardware issues, disable PCIPM until
		 * the problem is fully understood.
		 *
		 * pcmu_pwr_setup(pcmu_p, dip);
		 */

		ddi_report_dev(dip);

		pcmu_p->pcmu_state = PCMU_ATTACHED;
		PCMU_DBG0(PCMU_DBG_ATTACH, dip, "attach success\n");
		break;

err_bad_objs:
		ddi_remove_minor_node(dip, "devctl");
err_bad_devctl_node:
		unmap_pcmu_registers(pcmu_p);
err_bad_reg_prop:
		free_pcmu_properties(pcmu_p);
err_bad_pcmu_prop:
		mutex_destroy(&pcmu_p->pcmu_mutex);
		free_pcmu_soft_state(instance);
err_bad_pcmu_softstate:
		return (DDI_FAILURE);

	case DDI_RESUME:
		PCMU_DBG0(PCMU_DBG_ATTACH, dip, "DDI_RESUME\n");

		/*
		 * Make sure the CMU-CH control registers
		 * are configured properly.
		 */
		pcmu_p = get_pcmu_soft_state(instance);
		mutex_enter(&pcmu_p->pcmu_mutex);

		/*
		 * Make sure this instance has been suspended.
		 */
		if (pcmu_p->pcmu_state != PCMU_SUSPENDED) {
			PCMU_DBG0(PCMU_DBG_ATTACH, dip,
			    "instance NOT suspended\n");
			mutex_exit(&pcmu_p->pcmu_mutex);
			return (DDI_FAILURE);
		}
		pcmu_obj_resume(pcmu_p);
		pcmu_p->pcmu_state = PCMU_ATTACHED;

		pcmu_child_cfg_restore(dip);

		mutex_exit(&pcmu_p->pcmu_mutex);
		break;

	default:
		PCMU_DBG0(PCMU_DBG_ATTACH, dip, "unsupported attach op\n");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * detach entry point:
 * DDI_DETACH tears down what DDI_ATTACH built; DDI_SUSPEND saves child
 * config space and quiesces the objects so DDI_RESUME can restore them.
 */
static int
pcmu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	pcmu_t *pcmu_p = get_pcmu_soft_state(instance);
	int len;

	/*
	 * Make sure we are currently attached
	 */
	if (pcmu_p->pcmu_state != PCMU_ATTACHED) {
		PCMU_DBG0(PCMU_DBG_ATTACH, dip,
		    "failed - instance not attached\n");
		return (DDI_FAILURE);
	}

	mutex_enter(&pcmu_p->pcmu_mutex);

	switch (cmd) {
	case DDI_DETACH:
		PCMU_DBG0(PCMU_DBG_DETACH, dip, "DDI_DETACH\n");
		pcmu_obj_destroy(pcmu_p);

		/*
		 * Free the pci soft state structure and the rest of the
		 * resources it's using.
		 */
		free_pcmu_properties(pcmu_p);
		unmap_pcmu_registers(pcmu_p);
		mutex_exit(&pcmu_p->pcmu_mutex);
		mutex_destroy(&pcmu_p->pcmu_mutex);
		free_pcmu_soft_state(instance);

		/* Free the interrupt-priorities prop if we created it. */
		if (ddi_getproplen(DDI_DEV_T_ANY, dip,
		    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
		    "interrupt-priorities", &len) == DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
			    "interrupt-priorities");
		}
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		pcmu_child_cfg_save(dip);
		pcmu_obj_suspend(pcmu_p);
		pcmu_p->pcmu_state = PCMU_SUSPENDED;

		mutex_exit(&pcmu_p->pcmu_mutex);
		return (DDI_SUCCESS);

	default:
		PCMU_DBG0(PCMU_DBG_DETACH, dip, "unsupported detach op\n");
		mutex_exit(&pcmu_p->pcmu_mutex);
		return (DDI_FAILURE);
	}
}

/*
 * open(9E) entry point for the devctl node: enforces exclusive-vs-shared
 * open semantics per instance.
 */
/* ARGSUSED3 */
static int
pcmu_open(dev_t *devp, int flags, int otyp, cred_t *credp)
{
	pcmu_t *pcmu_p;

	if (otyp != OTYP_CHR) {
		return (EINVAL);
	}

	/*
	 * Get the soft state structure for the device.
	 */
	pcmu_p = DEV_TO_SOFTSTATE(*devp);
	if (pcmu_p == NULL) {
		return (ENXIO);
	}

	/*
	 * Handle the open by tracking the device state.
	 */
	PCMU_DBG2(PCMU_DBG_OPEN, pcmu_p->pcmu_dip,
	    "devp=%x: flags=%x\n", devp, flags);
	mutex_enter(&pcmu_p->pcmu_mutex);
	if (flags & FEXCL) {
		/* exclusive open requires the device to be fully closed */
		if (pcmu_p->pcmu_soft_state != PCMU_SOFT_STATE_CLOSED) {
			mutex_exit(&pcmu_p->pcmu_mutex);
			PCMU_DBG0(PCMU_DBG_OPEN, pcmu_p->pcmu_dip, "busy\n");
			return (EBUSY);
		}
		pcmu_p->pcmu_soft_state = PCMU_SOFT_STATE_OPEN_EXCL;
	} else {
		/* shared open is refused while an exclusive open is active */
		if (pcmu_p->pcmu_soft_state == PCMU_SOFT_STATE_OPEN_EXCL) {
			mutex_exit(&pcmu_p->pcmu_mutex);
			PCMU_DBG0(PCMU_DBG_OPEN, pcmu_p->pcmu_dip, "busy\n");
			return (EBUSY);
		}
		pcmu_p->pcmu_soft_state = PCMU_SOFT_STATE_OPEN;
	}
	pcmu_p->pcmu_open_count++;
	mutex_exit(&pcmu_p->pcmu_mutex);
	return (0);
}


/*
 * close(9E) entry point: any close resets the instance to the CLOSED
 * state and zeroes the open count.
 */
/* ARGSUSED */
static int
pcmu_close(dev_t dev, int flags, int otyp, cred_t *credp)
{
	pcmu_t *pcmu_p;

	if (otyp != OTYP_CHR) {
		return (EINVAL);
	}

	pcmu_p = DEV_TO_SOFTSTATE(dev);
	if (pcmu_p == NULL) {
		return (ENXIO);
	}

	PCMU_DBG2(PCMU_DBG_CLOSE, pcmu_p->pcmu_dip,
	    "dev=%x: flags=%x\n", dev, flags);
	mutex_enter(&pcmu_p->pcmu_mutex);
	pcmu_p->pcmu_soft_state = PCMU_SOFT_STATE_CLOSED;
	pcmu_p->pcmu_open_count = 0;
	mutex_exit(&pcmu_p->pcmu_mutex);
	return (0);
}

/*
 * ioctl(9E) entry point: devctl(4) bus/device management operations.
 * Device/bus reset operations are not supported by this nexus.
 */
/* ARGSUSED */
static int
pcmu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
	cred_t *credp, int *rvalp)
{
	pcmu_t *pcmu_p;
	dev_info_t *dip;
	struct devctl_iocdata *dcp;
	uint_t bus_state;
	int rv = 0;

	pcmu_p = DEV_TO_SOFTSTATE(dev);
	if (pcmu_p == NULL) {
		return (ENXIO);
	}

	dip = pcmu_p->pcmu_dip;
	PCMU_DBG2(PCMU_DBG_IOCTL, dip, "dev=%x: cmd=%x\n", dev, cmd);

	/*
	 * We can use the generic implementation for these ioctls
	 */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_BUS_GETSTATE:
		return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
	}

	/*
	 * read devctl ioctl data
	 */
	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
		return (EFAULT);

	switch (cmd) {

	case DEVCTL_DEVICE_RESET:
		PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_DEVICE_RESET\n");
		rv = ENOTSUP;
		break;


	case DEVCTL_BUS_QUIESCE:
		PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_QUIESCE\n");
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS) {
			if (bus_state == BUS_QUIESCED) {
				break;
			}
		}
		(void) ndi_set_bus_state(dip, BUS_QUIESCED);
		break;

	case DEVCTL_BUS_UNQUIESCE:
		PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_UNQUIESCE\n");
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS) {
			if (bus_state == BUS_ACTIVE) {
				break;
			}
		}
		(void) ndi_set_bus_state(dip, BUS_ACTIVE);
		break;

	case DEVCTL_BUS_RESET:
		PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_RESET\n");
		rv = ENOTSUP;
		break;

	case DEVCTL_BUS_RESETALL:
		PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_RESETALL\n");
		rv = ENOTSUP;
		break;

	default:
		rv = ENOTTY;
	}

	ndi_dc_freehdl(dcp);
	return (rv);
}

/* prop_op(9E) entry point: defer to the generic DDI property handler */
static int pcmu_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
	int flags, char *name, caddr_t valuep, int *lengthp)
{
	return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
}
/* bus driver entry points */

/*
 * bus map entry point:
 *
 * 	if map request is for an rnumber
 *		get the corresponding regspec from device node
 * 	build a new regspec in our parent's format
 *	build a new map_req with the new regspec
 *	call up the tree to complete the mapping
 */
static int
pcmu_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t off, off_t len, caddr_t *addrp)
{
	pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	int reglen, rval, r_no;
	pci_regspec_t reloc_reg, *rp = &reloc_reg;

	PCMU_DBG2(PCMU_DBG_MAP, dip, "rdip=%s%d:",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (mp->map_flags & DDI_MF_USER_MAPPING) {
		return (DDI_ME_UNIMPLEMENTED);
	}

	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp;	/* dup whole */
		break;

	case DDI_MT_RNUMBER:
		r_no = mp->map_obj.rnumber;
		PCMU_DBG1(PCMU_DBG_MAP | PCMU_DBG_CONT, dip, " r#=%x", r_no);

		/* "reg" is kmem_alloc'd by ddi_getlongprop; freed at done: */
		if (ddi_getlongprop(DDI_DEV_T_NONE, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS) {
			return (DDI_ME_RNUMBER_RANGE);
		}

		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
			kmem_free(rp, reglen);
			return (DDI_ME_RNUMBER_RANGE);
		}
		rp += r_no;
		break;

	default:
		return (DDI_ME_INVAL);
	}
	PCMU_DBG0(PCMU_DBG_MAP | PCMU_DBG_CONT, dip, "\n");

	/* use "assigned-addresses" to relocate regspec within pci space */
	if (rval = pcmu_reloc_reg(dip, rdip, pcmu_p, rp)) {
		goto done;
	}

	/* adjust regspec according to mapping request */
	if (len) {
		rp->pci_size_low = (uint_t)len;
	}
	rp->pci_phys_low += off;

	/* use "ranges" to translate relocated pci regspec into parent space */
	if (rval = pcmu_xlate_reg(pcmu_p, rp, &p_regspec)) {
		goto done;
	}

	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;
	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);

done:
	if (mp->map_type == DDI_MT_RNUMBER) {
		/* rp was advanced by r_no; free the original base pointer */
		kmem_free(rp - r_no, reglen);
	}
	return (rval);
}

#ifdef DEBUG
int	pcmu_peekfault_cnt = 0;
int	pcmu_pokefault_cnt = 0;
#endif /* DEBUG */

/*
 * Perform a poke (write) to a child's register under on_trap(9F)
 * protection so a bus error is caught rather than panicking.
 */
static int
pcmu_do_poke(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args)
{
	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	mutex_enter(&pcbm_p->pcbm_pokeflt_mutex);
	pcbm_p->pcbm_ontrap_data = &otd;

	/* Set up protected environment. */
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&poke_fault;
		err = do_poke(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else {
		err = DDI_FAILURE;
	}

	/*
	 * Read the async fault register for the PBM to see if it saw
	 * a master-abort.
	 */
	pcmu_pbm_clear_error(pcbm_p);

	if (otd.ot_trap & OT_DATA_ACCESS) {
		err = DDI_FAILURE;
	}

	/* Take down protected environment. */
	no_trap();

	pcbm_p->pcbm_ontrap_data = NULL;
	mutex_exit(&pcbm_p->pcbm_pokeflt_mutex);

#ifdef DEBUG
	if (err == DDI_FAILURE)
		pcmu_pokefault_cnt++;
#endif
	return (err);
}


static int
pcmu_ctlops_poke(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args)
{
	return (pcmu_do_poke(pcmu_p, in_args));
}

/*
 * Perform a peek (read) from a child's register under on_trap(9F)
 * protection; the value lands at in_args->host_addr.
 */
/* ARGSUSED */
static int
pcmu_do_peek(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args)
{
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&peek_fault;
		err = do_peek(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	no_trap();

#ifdef DEBUG
	if (err == DDI_FAILURE)
		pcmu_peekfault_cnt++;
#endif
	return (err);
}


static int
pcmu_ctlops_peek(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args, void *result)
{
	/*
	 * NOTE(review): this assigns only the local copy of 'result';
	 * the caller's result pointer is not updated.  The peeked value
	 * is delivered via in_args->host_addr by do_peek() — confirm
	 * this no-op assignment is intentional.
	 */
	result = (void *)in_args->host_addr;
	return (pcmu_do_peek(pcmu_p, in_args));
}

/*
 * control ops entry point:
 *
 * Requests handled completely:
 *	DDI_CTLOPS_INITCHILD	see pcmu_init_child() for details
 *	DDI_CTLOPS_UNINITCHILD
 *
 *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
 *	DDI_CTLOPS_XLATE_INTRS	nothing to do
 *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
 *	DDI_CTLOPS_REGSIZE
 *	DDI_CTLOPS_NREGS
 *	DDI_CTLOPS_NINTRS
 *	DDI_CTLOPS_DVMAPAGESIZE
 *	DDI_CTLOPS_POKE
 *	DDI_CTLOPS_PEEK
 *	DDI_CTLOPS_QUIESCE
 *	DDI_CTLOPS_UNQUIESCE
 *
 * All others passed to parent.
 */
static int
pcmu_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t op, void *arg, void *result)
{
	pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		return (pcmu_init_child(pcmu_p, (dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (pcmu_uninit_child(pcmu_p, (dev_info_t *)arg));

	case DDI_CTLOPS_REPORTDEV:
		return (pcmu_report_dev(rdip));

	case DDI_CTLOPS_IOMIN:
		/*
		 * If we are using the streaming cache, align at
		 * least on a cache line boundary. Otherwise use
		 * whatever alignment is passed in.
		 */
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		*((off_t *)result) = pcmu_get_reg_set_size(rdip, *((int *)arg));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_NREGS:
		*((uint_t *)result) = pcmu_get_nreg_set(rdip);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_DVMAPAGESIZE:
		/* no DVMA support on this nexus */
		*((ulong_t *)result) = 0;
		return (DDI_SUCCESS);

	case DDI_CTLOPS_POKE:
		return (pcmu_ctlops_poke(pcmu_p, (peekpoke_ctlops_t *)arg));

	case DDI_CTLOPS_PEEK:
		return (pcmu_ctlops_peek(pcmu_p, (peekpoke_ctlops_t *)arg,
		    result));

	case DDI_CTLOPS_AFFINITY:
		break;

	case DDI_CTLOPS_QUIESCE:
		return (DDI_FAILURE);

	case DDI_CTLOPS_UNQUIESCE:
		return (DDI_FAILURE);

	default:
		break;
	}

	/*
	 * Now pass the request up to our parent.
	 */
	PCMU_DBG2(PCMU_DBG_CTLOPS, dip,
	    "passing request to parent: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	return (ddi_ctlops(dip, rdip, op, arg, result));
}


/*
 * bus_intr_op entry point: dispatch the DDI interrupt framework
 * operations for this nexus (fixed interrupts only).
 */
/* ARGSUSED */
static int
pcmu_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
	ddi_intr_handle_impl_t *hdlp, void *result)
{
	pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));
	int ret = DDI_SUCCESS;

	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		/* GetCap will always fail for all non PCI devices */
		(void) pci_intx_get_cap(rdip, (int *)result);
		break;
	case DDI_INTROP_SETCAP:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		*(int *)result = hdlp->ih_scratch1;
		break;
	case DDI_INTROP_FREE:
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = hdlp->ih_pri ? hdlp->ih_pri : 0;
		break;
	case DDI_INTROP_SETPRI:
		break;
	case DDI_INTROP_ADDISR:
		ret = pcmu_add_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_REMISR:
		ret = pcmu_remove_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_ENABLE:
		ret = pcmu_ib_update_intr_state(pcmu_p, rdip, hdlp,
		    PCMU_INTR_STATE_ENABLE);
		break;
	case DDI_INTROP_DISABLE:
		ret = pcmu_ib_update_intr_state(pcmu_p, rdip, hdlp,
		    PCMU_INTR_STATE_DISABLE);
		break;
	case DDI_INTROP_SETMASK:
		ret = pci_intx_set_mask(rdip);
		break;
	case DDI_INTROP_CLRMASK:
		ret = pci_intx_clr_mask(rdip);
		break;
	case DDI_INTROP_GETPENDING:
		ret = pci_intx_get_pending(rdip, (int *)result);
		break;
	case DDI_INTROP_NINTRS:
	case DDI_INTROP_NAVAIL:
		*(int *)result = i_ddi_get_intx_nintrs(rdip);
		break;
	case DDI_INTROP_SUPPORTED_TYPES:
		/* PCI nexus driver supports only fixed interrupts */
		*(int *)result = i_ddi_get_intx_nintrs(rdip) ?
		    DDI_INTR_TYPE_FIXED : 0;
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}

/*
 * CMU-CH specifics implementation:
 *	interrupt mapping register
 *	PBM configuration
 *	ECC and PBM error handling
 */

/* called by pcmu_attach() DDI_ATTACH to initialize pci objects */
static int
pcmu_obj_setup(pcmu_t *pcmu_p)
{
	int ret;

	mutex_enter(&pcmu_global_mutex);
	pcmu_p->pcmu_rev = ddi_prop_get_int(DDI_DEV_T_ANY, pcmu_p->pcmu_dip,
	    DDI_PROP_DONTPASS, "module-revision#", 0);

	/* create the sub-objects before wiring up interrupts */
	pcmu_ib_create(pcmu_p);
	pcmu_cb_create(pcmu_p);
	pcmu_ecc_create(pcmu_p);
	pcmu_pbm_create(pcmu_p);
	pcmu_err_create(pcmu_p);
	if ((ret = pcmu_intr_setup(pcmu_p)) != DDI_SUCCESS)
		goto done;

	pcmu_kstat_create(pcmu_p);
done:
	mutex_exit(&pcmu_global_mutex);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "Interrupt register failure, returning 0x%x\n",
		    ret);
	}
	return (ret);
}

/* called by pcmu_detach() DDI_DETACH to destroy pci objects */
static void
pcmu_obj_destroy(pcmu_t *pcmu_p)
{
	mutex_enter(&pcmu_global_mutex);

	pcmu_kstat_destroy(pcmu_p);
	pcmu_pbm_destroy(pcmu_p);
	pcmu_err_destroy(pcmu_p);
	pcmu_ecc_destroy(pcmu_p);
	pcmu_cb_destroy(pcmu_p);
	pcmu_ib_destroy(pcmu_p);
	pcmu_intr_teardown(pcmu_p);

	mutex_exit(&pcmu_global_mutex);
}

/* called by pcmu_attach() DDI_RESUME to (re)initialize pci objects */
static void
pcmu_obj_resume(pcmu_t *pcmu_p)
{
	mutex_enter(&pcmu_global_mutex);

	pcmu_ib_configure(pcmu_p->pcmu_ib_p);
	pcmu_ecc_configure(pcmu_p);
	pcmu_ib_resume(pcmu_p->pcmu_ib_p);
	u2u_ittrans_resume((u2u_ittrans_data_t **)
	    &(pcmu_p->pcmu_cb_p->pcb_ittrans_cookie));

	pcmu_pbm_configure(pcmu_p->pcmu_pcbm_p);

	pcmu_cb_resume(pcmu_p->pcmu_cb_p);

	pcmu_pbm_resume(pcmu_p->pcmu_pcbm_p);

	mutex_exit(&pcmu_global_mutex);
}

/* called by pcmu_detach() DDI_SUSPEND to suspend pci objects */
static void
pcmu_obj_suspend(pcmu_t *pcmu_p)
{
	mutex_enter(&pcmu_global_mutex);

	pcmu_pbm_suspend(pcmu_p->pcmu_pcbm_p);
	pcmu_ib_suspend(pcmu_p->pcmu_ib_p);
	pcmu_cb_suspend(pcmu_p->pcmu_cb_p);

	mutex_exit(&pcmu_global_mutex);
}

/*
 * Register and enable the ECC and PBM interrupts and add them to the
 * interrupt-distribution framework.  Returns DDI_SUCCESS, or tears down
 * any partial setup and returns the failing call's error.
 */
static int
pcmu_intr_setup(pcmu_t *pcmu_p)
{
	dev_info_t *dip = pcmu_p->pcmu_dip;
	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
	pcmu_cb_t *pcb_p = pcmu_p->pcmu_cb_p;
	int i, no_of_intrs;

	/*
	 * Get the interrupts property.
	 */
	if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
	    "interrupts", (caddr_t)&pcmu_p->pcmu_inos,
	    &pcmu_p->pcmu_inos_len) != DDI_SUCCESS) {
		cmn_err(CE_PANIC, "%s%d: no interrupts property\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}

	/*
	 * figure out number of interrupts in the "interrupts" property
	 * and convert them all into ino.
1106 */ 1107 i = ddi_getprop(DDI_DEV_T_ANY, dip, 0, "#interrupt-cells", 1); 1108 i = CELLS_1275_TO_BYTES(i); 1109 no_of_intrs = pcmu_p->pcmu_inos_len / i; 1110 for (i = 0; i < no_of_intrs; i++) { 1111 pcmu_p->pcmu_inos[i] = 1112 PCMU_IB_MONDO_TO_INO(pcmu_p->pcmu_inos[i]); 1113 } 1114 1115 pcb_p->pcb_no_of_inos = no_of_intrs; 1116 if (i = pcmu_ecc_register_intr(pcmu_p)) { 1117 goto teardown; 1118 } 1119 1120 intr_dist_add(pcmu_cb_intr_dist, pcb_p); 1121 pcmu_ecc_enable_intr(pcmu_p); 1122 1123 if (i = pcmu_pbm_register_intr(pcbm_p)) { 1124 intr_dist_rem(pcmu_cb_intr_dist, pcb_p); 1125 goto teardown; 1126 } 1127 intr_dist_add(pcmu_pbm_intr_dist, pcbm_p); 1128 pcmu_ib_intr_enable(pcmu_p, pcmu_p->pcmu_inos[CBNINTR_PBM]); 1129 1130 intr_dist_add_weighted(pcmu_ib_intr_dist_all, pcmu_p->pcmu_ib_p); 1131 return (DDI_SUCCESS); 1132 teardown: 1133 pcmu_intr_teardown(pcmu_p); 1134 return (i); 1135 } 1136 1137 /* 1138 * pcmu_fix_ranges - fixes the config space entry of the "ranges" 1139 * property on CMU-CH platforms 1140 */ 1141 void 1142 pcmu_fix_ranges(pcmu_ranges_t *rng_p, int rng_entries) 1143 { 1144 int i; 1145 for (i = 0; i < rng_entries; i++, rng_p++) { 1146 if ((rng_p->child_high & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) 1147 rng_p->parent_low |= rng_p->child_high; 1148 } 1149 } 1150 1151 /* 1152 * map_pcmu_registers 1153 * 1154 * This function is called from the attach routine to map the registers 1155 * accessed by this driver. 
1156 * 1157 * used by: pcmu_attach() 1158 * 1159 * return value: DDI_FAILURE on failure 1160 */ 1161 static int 1162 map_pcmu_registers(pcmu_t *pcmu_p, dev_info_t *dip) 1163 { 1164 ddi_device_acc_attr_t attr; 1165 1166 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 1167 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 1168 1169 attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC; 1170 if (ddi_regs_map_setup(dip, 0, &pcmu_p->pcmu_address[0], 0, 0, 1171 &attr, &pcmu_p->pcmu_ac[0]) != DDI_SUCCESS) { 1172 cmn_err(CE_WARN, "%s%d: unable to map reg entry 0\n", 1173 ddi_driver_name(dip), ddi_get_instance(dip)); 1174 return (DDI_FAILURE); 1175 } 1176 1177 /* 1178 * We still use pcmu_address[2] 1179 */ 1180 if (ddi_regs_map_setup(dip, 2, &pcmu_p->pcmu_address[2], 0, 0, 1181 &attr, &pcmu_p->pcmu_ac[2]) != DDI_SUCCESS) { 1182 cmn_err(CE_WARN, "%s%d: unable to map reg entry 2\n", 1183 ddi_driver_name(dip), ddi_get_instance(dip)); 1184 ddi_regs_map_free(&pcmu_p->pcmu_ac[0]); 1185 return (DDI_FAILURE); 1186 } 1187 1188 /* 1189 * The second register set contains the bridge's configuration 1190 * header. This header is at the very beginning of the bridge's 1191 * configuration space. This space has litte-endian byte order. 1192 */ 1193 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 1194 if (ddi_regs_map_setup(dip, 1, &pcmu_p->pcmu_address[1], 0, 1195 PCI_CONF_HDR_SIZE, &attr, &pcmu_p->pcmu_ac[1]) != DDI_SUCCESS) { 1196 1197 cmn_err(CE_WARN, "%s%d: unable to map reg entry 1\n", 1198 ddi_driver_name(dip), ddi_get_instance(dip)); 1199 ddi_regs_map_free(&pcmu_p->pcmu_ac[0]); 1200 return (DDI_FAILURE); 1201 } 1202 PCMU_DBG2(PCMU_DBG_ATTACH, dip, "address (%p,%p)\n", 1203 pcmu_p->pcmu_address[0], pcmu_p->pcmu_address[1]); 1204 return (DDI_SUCCESS); 1205 } 1206 1207 /* 1208 * unmap_pcmu_registers: 1209 * 1210 * This routine unmap the registers mapped by map_pcmu_registers. 
1211 * 1212 * used by: pcmu_detach() 1213 * 1214 * return value: none 1215 */ 1216 static void 1217 unmap_pcmu_registers(pcmu_t *pcmu_p) 1218 { 1219 ddi_regs_map_free(&pcmu_p->pcmu_ac[0]); 1220 ddi_regs_map_free(&pcmu_p->pcmu_ac[1]); 1221 ddi_regs_map_free(&pcmu_p->pcmu_ac[2]); 1222 } 1223 1224 /* 1225 * These convenience wrappers relies on map_pcmu_registers() to setup 1226 * pcmu_address[0-2] correctly at first. 1227 */ 1228 static uintptr_t 1229 get_reg_base(pcmu_t *pcmu_p) 1230 { 1231 return ((uintptr_t)pcmu_p->pcmu_address[2]); 1232 } 1233 1234 /* The CMU-CH config reg base is always the 2nd reg entry */ 1235 static uintptr_t 1236 get_config_reg_base(pcmu_t *pcmu_p) 1237 { 1238 return ((uintptr_t)(pcmu_p->pcmu_address[1])); 1239 } 1240 1241 uint64_t 1242 ib_get_map_reg(pcmu_ib_mondo_t mondo, uint32_t cpu_id) 1243 { 1244 return ((mondo) | (cpu_id << PCMU_INTR_MAP_REG_TID_SHIFT) | 1245 PCMU_INTR_MAP_REG_VALID); 1246 1247 } 1248 1249 uint32_t 1250 ib_map_reg_get_cpu(volatile uint64_t reg) 1251 { 1252 return ((reg & PCMU_INTR_MAP_REG_TID) >> 1253 PCMU_INTR_MAP_REG_TID_SHIFT); 1254 } 1255 1256 uint64_t * 1257 ib_intr_map_reg_addr(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino) 1258 { 1259 uint64_t *addr; 1260 1261 ASSERT(ino & 0x20); 1262 addr = (uint64_t *)(pib_p->pib_obio_intr_map_regs + 1263 (((uint_t)ino & 0x1f) << 3)); 1264 return (addr); 1265 } 1266 1267 uint64_t * 1268 ib_clear_intr_reg_addr(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino) 1269 { 1270 uint64_t *addr; 1271 1272 ASSERT(ino & 0x20); 1273 addr = (uint64_t *)(pib_p->pib_obio_clear_intr_regs + 1274 (((uint_t)ino & 0x1f) << 3)); 1275 return (addr); 1276 } 1277 1278 uintptr_t 1279 pcmu_ib_setup(pcmu_ib_t *pib_p) 1280 { 1281 pcmu_t *pcmu_p = pib_p->pib_pcmu_p; 1282 uintptr_t a = get_reg_base(pcmu_p); 1283 1284 pib_p->pib_ign = PCMU_ID_TO_IGN(pcmu_p->pcmu_id); 1285 pib_p->pib_max_ino = PCMU_MAX_INO; 1286 pib_p->pib_obio_intr_map_regs = a + PCMU_IB_OBIO_INTR_MAP_REG_OFFSET; 1287 pib_p->pib_obio_clear_intr_regs = 1288 a + 
PCMU_IB_OBIO_CLEAR_INTR_REG_OFFSET; 1289 return (a); 1290 } 1291 1292 /* 1293 * Return the cpuid to to be used for an ino. 1294 * 1295 * On multi-function pci devices, functions have separate devinfo nodes and 1296 * interrupts. 1297 * 1298 * This function determines if there is already an established slot-oriented 1299 * interrupt-to-cpu binding established, if there is then it returns that 1300 * cpu. Otherwise a new cpu is selected by intr_dist_cpuid(). 1301 * 1302 * The devinfo node we are trying to associate a cpu with is 1303 * ino_p->pino_ih_head->ih_dip. 1304 */ 1305 uint32_t 1306 pcmu_intr_dist_cpuid(pcmu_ib_t *pib_p, pcmu_ib_ino_info_t *ino_p) 1307 { 1308 dev_info_t *rdip = ino_p->pino_ih_head->ih_dip; 1309 dev_info_t *prdip = ddi_get_parent(rdip); 1310 pcmu_ib_ino_info_t *sino_p; 1311 dev_info_t *sdip; 1312 dev_info_t *psdip; 1313 char *buf1 = NULL, *buf2 = NULL; 1314 char *s1, *s2, *s3; 1315 int l2; 1316 int cpu_id; 1317 1318 /* must be CMU-CH driver parent (not ebus) */ 1319 if (strcmp(ddi_driver_name(prdip), "pcicmu") != 0) 1320 goto newcpu; 1321 1322 /* 1323 * From PCI 1275 binding: 2.2.1.3 Unit Address representation: 1324 * Since the "unit-number" is the address that appears in on Open 1325 * Firmware 'device path', it follows that only the DD and DD,FF 1326 * forms of the text representation can appear in a 'device path'. 1327 * 1328 * The rdip unit address is of the form "DD[,FF]". Define two 1329 * unit address strings that represent same-slot use: "DD" and "DD,". 1330 * The first compare uses strcmp, the second uses strncmp. 
1331 */ 1332 s1 = ddi_get_name_addr(rdip); 1333 if (s1 == NULL) { 1334 goto newcpu; 1335 } 1336 1337 buf1 = kmem_alloc(MAXNAMELEN, KM_SLEEP); /* strcmp */ 1338 buf2 = kmem_alloc(MAXNAMELEN, KM_SLEEP); /* strncmp */ 1339 s1 = strcpy(buf1, s1); 1340 s2 = strcpy(buf2, s1); 1341 1342 s1 = strrchr(s1, ','); 1343 if (s1) { 1344 *s1 = '\0'; /* have "DD,FF" */ 1345 s1 = buf1; /* search via strcmp "DD" */ 1346 1347 s2 = strrchr(s2, ','); 1348 *(s2 + 1) = '\0'; 1349 s2 = buf2; 1350 l2 = strlen(s2); /* search via strncmp "DD," */ 1351 } else { 1352 (void) strcat(s2, ","); /* have "DD" */ 1353 l2 = strlen(s2); /* search via strncmp "DD," */ 1354 } 1355 1356 /* 1357 * Search the established ino list for devinfo nodes bound 1358 * to an ino that matches one of the slot use strings. 1359 */ 1360 ASSERT(MUTEX_HELD(&pib_p->pib_ino_lst_mutex)); 1361 for (sino_p = pib_p->pib_ino_lst; sino_p; sino_p = sino_p->pino_next) { 1362 /* skip self and non-established */ 1363 if ((sino_p == ino_p) || (sino_p->pino_established == 0)) 1364 continue; 1365 1366 /* skip non-siblings */ 1367 sdip = sino_p->pino_ih_head->ih_dip; 1368 psdip = ddi_get_parent(sdip); 1369 if (psdip != prdip) 1370 continue; 1371 1372 /* must be CMU-CH driver parent (not ebus) */ 1373 if (strcmp(ddi_driver_name(psdip), "pcicmu") != 0) 1374 continue; 1375 1376 s3 = ddi_get_name_addr(sdip); 1377 if ((s1 && (strcmp(s1, s3) == 0)) || 1378 (strncmp(s2, s3, l2) == 0)) { 1379 extern int intr_dist_debug; 1380 1381 if (intr_dist_debug) { 1382 cmn_err(CE_CONT, "intr_dist: " 1383 "pcicmu`pcmu_intr_dist_cpuid " 1384 "%s#%d %s: cpu %d established " 1385 "by %s#%d %s\n", ddi_driver_name(rdip), 1386 ddi_get_instance(rdip), 1387 ddi_deviname(rdip, buf1), 1388 sino_p->pino_cpuid, 1389 ddi_driver_name(sdip), 1390 ddi_get_instance(sdip), 1391 ddi_deviname(sdip, buf2)); 1392 } 1393 break; 1394 } 1395 } 1396 1397 /* If a slot use match is found then use established cpu */ 1398 if (sino_p) { 1399 cpu_id = sino_p->pino_cpuid; /* target 
established cpu */ 1400 goto out; 1401 } 1402 1403 newcpu: cpu_id = intr_dist_cpuid(); /* target new cpu */ 1404 1405 out: if (buf1) 1406 kmem_free(buf1, MAXNAMELEN); 1407 if (buf2) 1408 kmem_free(buf2, MAXNAMELEN); 1409 return (cpu_id); 1410 } 1411 1412 void 1413 pcmu_cb_teardown(pcmu_t *pcmu_p) 1414 { 1415 pcmu_cb_t *pcb_p = pcmu_p->pcmu_cb_p; 1416 1417 u2u_ittrans_uninit((u2u_ittrans_data_t *)pcb_p->pcb_ittrans_cookie); 1418 } 1419 1420 int 1421 pcmu_ecc_add_intr(pcmu_t *pcmu_p, int inum, pcmu_ecc_intr_info_t *eii_p) 1422 { 1423 uint32_t mondo; 1424 1425 mondo = ((pcmu_p->pcmu_cb_p->pcb_ign << PCMU_INO_BITS) | 1426 pcmu_p->pcmu_inos[inum]); 1427 1428 VERIFY(add_ivintr(mondo, pcmu_pil[inum], (intrfunc)pcmu_ecc_intr, 1429 (caddr_t)eii_p, NULL, NULL) == 0); 1430 1431 return (PCMU_ATTACH_RETCODE(PCMU_ECC_OBJ, 1432 PCMU_OBJ_INTR_ADD, DDI_SUCCESS)); 1433 } 1434 1435 /* ARGSUSED */ 1436 void 1437 pcmu_ecc_rem_intr(pcmu_t *pcmu_p, int inum, pcmu_ecc_intr_info_t *eii_p) 1438 { 1439 uint32_t mondo; 1440 1441 mondo = ((pcmu_p->pcmu_cb_p->pcb_ign << PCMU_INO_BITS) | 1442 pcmu_p->pcmu_inos[inum]); 1443 1444 VERIFY(rem_ivintr(mondo, pcmu_pil[inum]) == 0); 1445 } 1446 1447 void 1448 pcmu_pbm_configure(pcmu_pbm_t *pcbm_p) 1449 { 1450 pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p; 1451 dev_info_t *dip = pcmu_p->pcmu_dip; 1452 1453 #define pbm_err ((PCMU_PCI_AFSR_E_MASK << PCMU_PCI_AFSR_PE_SHIFT) | \ 1454 (PCMU_PCI_AFSR_E_MASK << PCMU_PCI_AFSR_SE_SHIFT)) 1455 #define csr_err (PCI_STAT_PERROR | PCI_STAT_S_PERROR | \ 1456 PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB | \ 1457 PCI_STAT_S_TARG_AB | PCI_STAT_S_PERROR) 1458 1459 /* 1460 * Clear any PBM errors. 1461 */ 1462 *pcbm_p->pcbm_async_flt_status_reg = pbm_err; 1463 1464 /* 1465 * Clear error bits in configuration status register. 
1466 */ 1467 PCMU_DBG1(PCMU_DBG_ATTACH, dip, 1468 "pcmu_pbm_configure: conf status reg=%x\n", csr_err); 1469 1470 pcbm_p->pcbm_config_header->ch_status_reg = csr_err; 1471 1472 PCMU_DBG1(PCMU_DBG_ATTACH, dip, 1473 "pcmu_pbm_configure: conf status reg==%x\n", 1474 pcbm_p->pcbm_config_header->ch_status_reg); 1475 1476 (void) ndi_prop_update_int(DDI_DEV_T_ANY, dip, "latency-timer", 1477 (int)pcbm_p->pcbm_config_header->ch_latency_timer_reg); 1478 #undef pbm_err 1479 #undef csr_err 1480 } 1481 1482 uint_t 1483 pcmu_pbm_disable_errors(pcmu_pbm_t *pcbm_p) 1484 { 1485 pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p; 1486 pcmu_ib_t *pib_p = pcmu_p->pcmu_ib_p; 1487 1488 /* 1489 * Disable error and streaming byte hole interrupts via the 1490 * PBM control register. 1491 */ 1492 *pcbm_p->pcbm_ctrl_reg &= ~PCMU_PCI_CTRL_ERR_INT_EN; 1493 1494 /* 1495 * Disable error interrupts via the interrupt mapping register. 1496 */ 1497 pcmu_ib_intr_disable(pib_p, 1498 pcmu_p->pcmu_inos[CBNINTR_PBM], PCMU_IB_INTR_NOWAIT); 1499 return (BF_NONE); 1500 } 1501 1502 void 1503 pcmu_cb_setup(pcmu_t *pcmu_p) 1504 { 1505 uint64_t csr, csr_pa, pa; 1506 pcmu_cb_t *pcb_p = pcmu_p->pcmu_cb_p; 1507 1508 pcb_p->pcb_ign = PCMU_ID_TO_IGN(pcmu_p->pcmu_id); 1509 pa = (uint64_t)hat_getpfnum(kas.a_hat, pcmu_p->pcmu_address[0]); 1510 pcb_p->pcb_base_pa = pa = pa >> (32 - MMU_PAGESHIFT) << 32; 1511 pcb_p->pcb_map_pa = pa + PCMU_IB_OBIO_INTR_MAP_REG_OFFSET; 1512 pcb_p->pcb_clr_pa = pa + PCMU_IB_OBIO_CLEAR_INTR_REG_OFFSET; 1513 pcb_p->pcb_obsta_pa = pa + PCMU_IB_OBIO_INTR_STATE_DIAG_REG; 1514 1515 csr_pa = pa + PCMU_CB_CONTROL_STATUS_REG_OFFSET; 1516 csr = lddphysio(csr_pa); 1517 1518 /* 1519 * Clear any pending address parity errors. 
1520 */ 1521 if (csr & PCMU_CB_CONTROL_STATUS_APERR) { 1522 csr |= PCMU_CB_CONTROL_STATUS_APERR; 1523 cmn_err(CE_WARN, "clearing UPA address parity error\n"); 1524 } 1525 csr |= PCMU_CB_CONTROL_STATUS_APCKEN; 1526 csr &= ~PCMU_CB_CONTROL_STATUS_IAP; 1527 stdphysio(csr_pa, csr); 1528 1529 u2u_ittrans_init(pcmu_p, 1530 (u2u_ittrans_data_t **)&pcb_p->pcb_ittrans_cookie); 1531 } 1532 1533 void 1534 pcmu_ecc_setup(pcmu_ecc_t *pecc_p) 1535 { 1536 pecc_p->pecc_ue.pecc_errpndg_mask = 0; 1537 pecc_p->pecc_ue.pecc_offset_mask = PCMU_ECC_UE_AFSR_DW_OFFSET; 1538 pecc_p->pecc_ue.pecc_offset_shift = PCMU_ECC_UE_AFSR_DW_OFFSET_SHIFT; 1539 pecc_p->pecc_ue.pecc_size_log2 = 3; 1540 } 1541 1542 static uintptr_t 1543 get_pbm_reg_base(pcmu_t *pcmu_p) 1544 { 1545 return ((uintptr_t)(pcmu_p->pcmu_address[0])); 1546 } 1547 1548 void 1549 pcmu_pbm_setup(pcmu_pbm_t *pcbm_p) 1550 { 1551 pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p; 1552 1553 /* 1554 * Get the base virtual address for the PBM control block. 1555 */ 1556 uintptr_t a = get_pbm_reg_base(pcmu_p); 1557 1558 /* 1559 * Get the virtual address of the PCI configuration header. 1560 * This should be mapped little-endian. 1561 */ 1562 pcbm_p->pcbm_config_header = 1563 (config_header_t *)get_config_reg_base(pcmu_p); 1564 1565 /* 1566 * Get the virtual addresses for control, error and diag 1567 * registers. 
1568 */ 1569 pcbm_p->pcbm_ctrl_reg = (uint64_t *)(a + PCMU_PCI_CTRL_REG_OFFSET); 1570 pcbm_p->pcbm_diag_reg = (uint64_t *)(a + PCMU_PCI_DIAG_REG_OFFSET); 1571 pcbm_p->pcbm_async_flt_status_reg = 1572 (uint64_t *)(a + PCMU_PCI_ASYNC_FLT_STATUS_REG_OFFSET); 1573 pcbm_p->pcbm_async_flt_addr_reg = 1574 (uint64_t *)(a + PCMU_PCI_ASYNC_FLT_ADDR_REG_OFFSET); 1575 } 1576 1577 /*ARGSUSED*/ 1578 void 1579 pcmu_pbm_teardown(pcmu_pbm_t *pcbm_p) 1580 { 1581 } 1582 1583 int 1584 pcmu_get_numproxy(dev_info_t *dip) 1585 { 1586 return (ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1587 "#upa-interrupt-proxies", 1)); 1588 } 1589 1590 int 1591 pcmu_get_portid(dev_info_t *dip) 1592 { 1593 return (ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1594 "portid", -1)); 1595 } 1596 1597 /* 1598 * CMU-CH Performance Events. 1599 */ 1600 static pcmu_kev_mask_t 1601 pcicmu_pcmu_events[] = { 1602 {"pio_cycles_b", 0xf}, {"interrupts", 0x11}, 1603 {"upa_inter_nack", 0x12}, {"pio_reads", 0x13}, 1604 {"pio_writes", 0x14}, 1605 {"clear_pic", 0x1f} 1606 }; 1607 1608 /* 1609 * Create the picN kstat's. 1610 */ 1611 void 1612 pcmu_kstat_init() 1613 { 1614 pcmu_name_kstat = (pcmu_ksinfo_t *)kmem_alloc(sizeof (pcmu_ksinfo_t), 1615 KM_NOSLEEP); 1616 1617 if (pcmu_name_kstat == NULL) { 1618 cmn_err(CE_WARN, "pcicmu : no space for kstat\n"); 1619 } else { 1620 pcmu_name_kstat->pic_no_evs = 1621 sizeof (pcicmu_pcmu_events) / sizeof (pcmu_kev_mask_t); 1622 pcmu_name_kstat->pic_shift[0] = PCMU_SHIFT_PIC0; 1623 pcmu_name_kstat->pic_shift[1] = PCMU_SHIFT_PIC1; 1624 pcmu_create_name_kstat("pcmup", 1625 pcmu_name_kstat, pcicmu_pcmu_events); 1626 } 1627 } 1628 1629 /* 1630 * Called from _fini() 1631 */ 1632 void 1633 pcmu_kstat_fini() 1634 { 1635 if (pcmu_name_kstat != NULL) { 1636 pcmu_delete_name_kstat(pcmu_name_kstat); 1637 kmem_free(pcmu_name_kstat, sizeof (pcmu_ksinfo_t)); 1638 pcmu_name_kstat = NULL; 1639 } 1640 } 1641 1642 /* 1643 * Create the performance 'counters' kstat. 
1644 */ 1645 void 1646 pcmu_add_upstream_kstat(pcmu_t *pcmu_p) 1647 { 1648 pcmu_cntr_pa_t *cntr_pa_p = &pcmu_p->pcmu_uks_pa; 1649 uint64_t regbase = va_to_pa((void *)get_reg_base(pcmu_p)); 1650 1651 cntr_pa_p->pcr_pa = regbase + PCMU_PERF_PCR_OFFSET; 1652 cntr_pa_p->pic_pa = regbase + PCMU_PERF_PIC_OFFSET; 1653 pcmu_p->pcmu_uksp = pcmu_create_cntr_kstat(pcmu_p, "pcmup", 1654 NUM_OF_PICS, pcmu_cntr_kstat_pa_update, cntr_pa_p); 1655 } 1656 1657 /* 1658 * u2u_ittrans_init() is caled from in pci.c's pcmu_cb_setup() per CMU. 1659 * Second argument "ittrans_cookie" is address of pcb_ittrans_cookie in 1660 * pcb_p member. allocated interrupt block is returned in it. 1661 */ 1662 static void 1663 u2u_ittrans_init(pcmu_t *pcmu_p, u2u_ittrans_data_t **ittrans_cookie) 1664 { 1665 1666 u2u_ittrans_data_t *u2u_trans_p; 1667 ddi_device_acc_attr_t attr; 1668 int ret; 1669 int board; 1670 1671 /* 1672 * Allocate the data structure to support U2U's 1673 * interrupt target translations. 1674 */ 1675 u2u_trans_p = (u2u_ittrans_data_t *) 1676 kmem_zalloc(sizeof (u2u_ittrans_data_t), KM_SLEEP); 1677 1678 /* 1679 * Get other properties, "board#" 1680 */ 1681 board = ddi_getprop(DDI_DEV_T_ANY, pcmu_p->pcmu_dip, 1682 DDI_PROP_DONTPASS, "board#", -1); 1683 1684 u2u_trans_p->u2u_board = board; 1685 1686 if (board == -1) { 1687 /* this cannot happen on production systems */ 1688 cmn_err(CE_PANIC, "u2u:Invalid property;board = %d", board); 1689 } 1690 1691 /* 1692 * Initialize interrupt target translations mutex. 
1693 */ 1694 mutex_init(&(u2u_trans_p->u2u_ittrans_lock), "u2u_ittrans_lock", 1695 MUTEX_DEFAULT, NULL); 1696 1697 /* 1698 * Get U2U's registers space by ddi_regs_map_setup(9F) 1699 */ 1700 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 1701 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 1702 attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC; 1703 1704 ret = ddi_regs_map_setup(pcmu_p->pcmu_dip, 1705 REGS_INDEX_OF_U2U, (caddr_t *)(&(u2u_trans_p->u2u_regs_base)), 1706 0, 0, &attr, &(u2u_trans_p->u2u_acc)); 1707 1708 /* 1709 * check result of ddi_regs_map_setup(). 1710 */ 1711 if (ret != DDI_SUCCESS) { 1712 cmn_err(CE_PANIC, "u2u%d: registers map setup failed", board); 1713 } 1714 1715 /* 1716 * Read Port-id(1 byte) in u2u 1717 */ 1718 u2u_trans_p->u2u_port_id = *(volatile int32_t *) 1719 (u2u_trans_p->u2u_regs_base + U2U_PID_REGISTER_OFFSET); 1720 1721 if (pcmu_p->pcmu_id != u2u_trans_p->u2u_port_id) { 1722 cmn_err(CE_PANIC, "u2u%d: Invalid Port-ID", board); 1723 } 1724 1725 *ittrans_cookie = u2u_trans_p; 1726 } 1727 1728 /* 1729 * u2u_ittras_resume() is called from pcmu_obj_resume() at DDI_RESUME entry. 1730 */ 1731 static void 1732 u2u_ittrans_resume(u2u_ittrans_data_t **ittrans_cookie) 1733 { 1734 1735 u2u_ittrans_data_t *u2u_trans_p; 1736 u2u_ittrans_id_t *ittrans_id_p; 1737 uintptr_t data_reg_addr; 1738 int ix; 1739 1740 u2u_trans_p = *ittrans_cookie; 1741 1742 /* 1743 * Set U2U Data Register 1744 */ 1745 for (ix = 0; ix < U2U_DATA_NUM; ix++) { 1746 ittrans_id_p = &(u2u_trans_p->u2u_ittrans_id[ix]); 1747 data_reg_addr = u2u_trans_p->u2u_regs_base + 1748 U2U_DATA_REGISTER_OFFSET + (ix * sizeof (uint64_t)); 1749 if (ittrans_id_p->u2u_ino_map_reg == NULL) { 1750 /* This index was not set */ 1751 continue; 1752 } 1753 *(volatile uint32_t *) (data_reg_addr) = 1754 (uint32_t)ittrans_id_p->u2u_tgt_cpu_id; 1755 1756 } 1757 } 1758 1759 /* 1760 * u2u_ittras_uninit() is called from ib_destroy() at detach, 1761 * or occuring error in attach. 
1762 */ 1763 static void 1764 u2u_ittrans_uninit(u2u_ittrans_data_t *ittrans_cookie) 1765 { 1766 1767 if (ittrans_cookie == NULL) { 1768 return; /* not support */ 1769 } 1770 1771 if (ittrans_cookie == (u2u_ittrans_data_t *)(-1)) { 1772 return; /* illeagal case */ 1773 } 1774 1775 ddi_regs_map_free(&(ittrans_cookie->u2u_acc)); 1776 mutex_destroy(&(ittrans_cookie->u2u_ittrans_lock)); 1777 kmem_free((void *)ittrans_cookie, sizeof (u2u_ittrans_data_t)); 1778 } 1779 1780 /* 1781 * This routine,u2u_translate_tgtid(, , cpu_id, pino_map_reg), 1782 * searches index having same value of pino_map_reg, or empty. 1783 * Then, stores cpu_id in a U2U Data Register as this index, 1784 * and return this index. 1785 */ 1786 int 1787 u2u_translate_tgtid(pcmu_t *pcmu_p, uint_t cpu_id, 1788 volatile uint64_t *pino_map_reg) 1789 { 1790 1791 int index = -1; 1792 int ix; 1793 int err_level; /* severity level for cmn_err */ 1794 u2u_ittrans_id_t *ittrans_id_p; 1795 uintptr_t data_reg_addr; 1796 u2u_ittrans_data_t *ittrans_cookie; 1797 1798 ittrans_cookie = 1799 (u2u_ittrans_data_t *)(pcmu_p->pcmu_cb_p->pcb_ittrans_cookie); 1800 1801 if (ittrans_cookie == NULL) { 1802 return (cpu_id); 1803 } 1804 1805 if (ittrans_cookie == (u2u_ittrans_data_t *)(-1)) { 1806 return (-1); /* illeagal case */ 1807 } 1808 1809 mutex_enter(&(ittrans_cookie->u2u_ittrans_lock)); 1810 1811 /* 1812 * Decide index No. of U2U Data registers in either 1813 * already used by same pino_map_reg, or empty. 
1814 */ 1815 for (ix = 0; ix < U2U_DATA_NUM; ix++) { 1816 ittrans_id_p = &(ittrans_cookie->u2u_ittrans_id[ix]); 1817 if (ittrans_id_p->u2u_ino_map_reg == pino_map_reg) { 1818 /* already used this pino_map_reg */ 1819 index = ix; 1820 break; 1821 } 1822 if (index == -1 && 1823 ittrans_id_p->u2u_ino_map_reg == NULL) { 1824 index = ix; 1825 } 1826 } 1827 1828 if (index == -1) { 1829 if (panicstr) { 1830 err_level = CE_WARN; 1831 } else { 1832 err_level = CE_PANIC; 1833 } 1834 cmn_err(err_level, "u2u%d:No more U2U-Data regs!!", 1835 ittrans_cookie->u2u_board); 1836 return (cpu_id); 1837 } 1838 1839 /* 1840 * For U2U 1841 * set cpu_id into u2u_data_reg by index. 1842 * ((uint64_t)(u2u_regs_base 1843 * + U2U_DATA_REGISTER_OFFSET))[index] = cpu_id; 1844 */ 1845 1846 data_reg_addr = ittrans_cookie->u2u_regs_base 1847 + U2U_DATA_REGISTER_OFFSET 1848 + (index * sizeof (uint64_t)); 1849 1850 /* 1851 * Set cpu_id into U2U Data register[index] 1852 */ 1853 *(volatile uint32_t *) (data_reg_addr) = (uint32_t)cpu_id; 1854 1855 /* 1856 * Setup for software, excepting at panicing. 1857 * and rebooting, etc...? 1858 */ 1859 if (!panicstr) { 1860 ittrans_id_p = &(ittrans_cookie->u2u_ittrans_id[index]); 1861 ittrans_id_p->u2u_tgt_cpu_id = cpu_id; 1862 ittrans_id_p->u2u_ino_map_reg = pino_map_reg; 1863 } 1864 1865 mutex_exit(&(ittrans_cookie->u2u_ittrans_lock)); 1866 1867 return (index); 1868 } 1869 1870 /* 1871 * u2u_ittrans_cleanup() is called from common_pcmu_ib_intr_disable() 1872 * after called intr_rem_cpu(mondo). 
1873 */ 1874 void 1875 u2u_ittrans_cleanup(u2u_ittrans_data_t *ittrans_cookie, 1876 volatile uint64_t *pino_map_reg) 1877 { 1878 1879 int ix; 1880 u2u_ittrans_id_t *ittrans_id_p; 1881 1882 if (ittrans_cookie == NULL) { 1883 return; 1884 } 1885 1886 if (ittrans_cookie == (u2u_ittrans_data_t *)(-1)) { 1887 return; /* illeagal case */ 1888 } 1889 1890 mutex_enter(&(ittrans_cookie->u2u_ittrans_lock)); 1891 1892 for (ix = 0; ix < U2U_DATA_NUM; ix++) { 1893 ittrans_id_p = &(ittrans_cookie->u2u_ittrans_id[ix]); 1894 if (ittrans_id_p->u2u_ino_map_reg == pino_map_reg) { 1895 ittrans_id_p->u2u_ino_map_reg = NULL; 1896 break; 1897 } 1898 } 1899 1900 mutex_exit(&(ittrans_cookie->u2u_ittrans_lock)); 1901 } 1902 1903 /* 1904 * pcmu_ecc_classify, called by ecc_handler to classify ecc errors 1905 * and determine if we should panic or not. 1906 */ 1907 void 1908 pcmu_ecc_classify(uint64_t err, pcmu_ecc_errstate_t *ecc_err_p) 1909 { 1910 struct async_flt *ecc = &ecc_err_p->ecc_aflt; 1911 /* LINTED */ 1912 pcmu_t *pcmu_p = ecc_err_p->ecc_ii_p.pecc_p->pecc_pcmu_p; 1913 1914 ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex)); 1915 1916 ecc_err_p->ecc_bridge_type = PCI_OPLCMU; /* RAGS */ 1917 /* 1918 * Get the parent bus id that caused the error. 1919 */ 1920 ecc_err_p->ecc_dev_id = (ecc_err_p->ecc_afsr & PCMU_ECC_UE_AFSR_ID) 1921 >> PCMU_ECC_UE_AFSR_ID_SHIFT; 1922 /* 1923 * Determine the doubleword offset of the error. 1924 */ 1925 ecc_err_p->ecc_dw_offset = (ecc_err_p->ecc_afsr & 1926 PCMU_ECC_UE_AFSR_DW_OFFSET) >> PCMU_ECC_UE_AFSR_DW_OFFSET_SHIFT; 1927 /* 1928 * Determine the primary error type. 1929 */ 1930 switch (err) { 1931 case PCMU_ECC_UE_AFSR_E_PIO: 1932 if (ecc_err_p->pecc_pri) { 1933 ecc->flt_erpt_class = PCI_ECC_PIO_UE; 1934 } else { 1935 ecc->flt_erpt_class = PCI_ECC_SEC_PIO_UE; 1936 } 1937 /* For CMU-CH, a UE is always fatal. 
		 */
		ecc->flt_panic = 1;
		break;

	default:
		/* unrecognized primary error: leave state unclassified */
		return;
	}
}

/*
 * pcmu_pbm_classify, called by pcmu_pbm_afsr_report to classify piow afsr.
 * Returns the number of errors recognized (0 or 1: only MA is decoded).
 */
int
pcmu_pbm_classify(pcmu_pbm_errstate_t *pbm_err_p)
{
	uint32_t e;
	int nerr = 0;
	char **tmp_class;

	if (pbm_err_p->pcbm_pri) {
		tmp_class = &pbm_err_p->pcbm_pci.pcmu_err_class;
		e = PBM_AFSR_TO_PRIERR(pbm_err_p->pbm_afsr);
		pbm_err_p->pbm_log = FM_LOG_PCI;
	} else {
		tmp_class = &pbm_err_p->pbm_err_class;
		e = PBM_AFSR_TO_SECERR(pbm_err_p->pbm_afsr);
		pbm_err_p->pbm_log = FM_LOG_PBM;
	}

	if (e & PCMU_PCI_AFSR_E_MA) {
		*tmp_class = pbm_err_p->pcbm_pri ? PCI_MA : PCI_SEC_MA;
		nerr++;
	}
	return (nerr);
}

/*
 * Function used to clear PBM/PCI/IOMMU error state after error handling
 * is complete. Only clearing error bits which have been logged. Called by
 * pcmu_pbm_err_handler and pcmu_bus_exit.
 */
static void
pcmu_clear_error(pcmu_t *pcmu_p, pcmu_pbm_errstate_t *pbm_err_p)
{
	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;

	ASSERT(MUTEX_HELD(&pcbm_p->pcbm_pcmu_p->pcmu_err_mutex));

	/* writing back the captured values clears the logged bits */
	*pcbm_p->pcbm_ctrl_reg = pbm_err_p->pbm_ctl_stat;
	*pcbm_p->pcbm_async_flt_status_reg = pbm_err_p->pbm_afsr;
	pcbm_p->pcbm_config_header->ch_status_reg =
	    pbm_err_p->pcbm_pci.pcmu_cfg_stat;
}

/*ARGSUSED*/
/*
 * PBM error handler: snapshots error state, classifies primary and
 * secondary AFSR errors plus config-space status, clears the logged
 * bits, and returns the worst severity seen (FATAL > NONFATAL >
 * UNKNOWN > OK).  Caller must hold pcmu_err_mutex.
 */
int
pcmu_pbm_err_handler(dev_info_t *dip, ddi_fm_error_t *derr,
    const void *impl_data, int caller)
{
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	uint32_t prierr, secerr;
	pcmu_pbm_errstate_t pbm_err;
	pcmu_t *pcmu_p = (pcmu_t *)impl_data;
	int ret = 0;

	ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));
	pcmu_pbm_errstate_get(pcmu_p, &pbm_err);

	derr->fme_ena = derr->fme_ena ? derr->fme_ena :
	    fm_ena_generate(0, FM_ENA_FMT1);

	prierr = PBM_AFSR_TO_PRIERR(pbm_err.pbm_afsr);
	secerr = PBM_AFSR_TO_SECERR(pbm_err.pbm_afsr);

	if (derr->fme_flag == DDI_FM_ERR_PEEK) {
		/*
		 * For ddi_peek treat all events as nonfatal. We only
		 * really call this function so that pcmu_clear_error()
		 * and ndi_fm_handler_dispatch() will get called.
		 */
		nonfatal++;
		goto done;
	} else if (derr->fme_flag == DDI_FM_ERR_POKE) {
		/*
		 * For ddi_poke we can treat as nonfatal if the
		 * following conditions are met :
		 * 1. Make sure only primary error is MA/TA
		 * 2. Make sure no secondary error
		 * 3. check pci config header stat reg to see MA/TA is
		 *    logged. We cannot verify only MA/TA is recorded
		 *    since it gets much more complicated when a
		 *    PCI-to-PCI bridge is present.
		 */
		if ((prierr == PCMU_PCI_AFSR_E_MA) && !secerr &&
		    (pbm_err.pcbm_pci.pcmu_cfg_stat & PCI_STAT_R_MAST_AB)) {
			nonfatal++;
			goto done;
		}
	}

	if (prierr || secerr) {
		ret = pcmu_pbm_afsr_report(dip, derr->fme_ena, &pbm_err);
		if (ret == DDI_FM_FATAL) {
			fatal++;
		} else {
			nonfatal++;
		}
	}

	ret = pcmu_cfg_report(dip, derr, &pbm_err.pcbm_pci, caller, prierr);
	if (ret == DDI_FM_FATAL) {
		fatal++;
	} else if (ret == DDI_FM_NONFATAL) {
		nonfatal++;
	}

done:
	/*
	 * NOTE(review): ret from pcmu_cfg_report() is re-examined here,
	 * so its severity is counted twice on the fall-through path.
	 * Harmless for the ternary below (counters only need to be
	 * non-zero), but confirm this matches the intended FM flow.
	 */
	if (ret == DDI_FM_FATAL) {
		fatal++;
	} else if (ret == DDI_FM_NONFATAL) {
		nonfatal++;
	} else if (ret == DDI_FM_UNKNOWN) {
		unknown++;
	}

	/* Cleanup and reset error bits */
	pcmu_clear_error(pcmu_p, &pbm_err);

	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}

/*
 * Returns 1 when the config status register or the AFSR shows a
 * primary error, 0 otherwise.  Caller must hold pcmu_err_mutex.
 */
int
pcmu_check_error(pcmu_t *pcmu_p)
{
	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
	uint16_t pcmu_cfg_stat;
	uint64_t pbm_afsr;

	ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));

	pcmu_cfg_stat = pcbm_p->pcbm_config_header->ch_status_reg;
	pbm_afsr = *pcbm_p->pcbm_async_flt_status_reg;

	if ((pcmu_cfg_stat & (PCI_STAT_S_PERROR | PCI_STAT_S_TARG_AB |
	    PCI_STAT_R_TARG_AB | PCI_STAT_R_MAST_AB |
	    PCI_STAT_S_SYSERR | PCI_STAT_PERROR)) ||
	    (PBM_AFSR_TO_PRIERR(pbm_afsr))) {
		return (1);
	}
	return (0);

}

/*
 * Function used to gather PBM/PCI error state for the
 * pcmu_pbm_err_handler. This function must be called while pcmu_err_mutex
 * is held.
 */
static void
pcmu_pbm_errstate_get(pcmu_t *pcmu_p, pcmu_pbm_errstate_t *pbm_err_p)
{
	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;

	ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));
	bzero(pbm_err_p, sizeof (pcmu_pbm_errstate_t));

	/*
	 * Capture all pbm error state for later logging
	 */
	pbm_err_p->pbm_bridge_type = PCI_OPLCMU;	/* RAGS */
	pbm_err_p->pcbm_pci.pcmu_cfg_stat =
	    pcbm_p->pcbm_config_header->ch_status_reg;
	pbm_err_p->pbm_ctl_stat = *pcbm_p->pcbm_ctrl_reg;
	pbm_err_p->pcbm_pci.pcmu_cfg_comm =
	    pcbm_p->pcbm_config_header->ch_command_reg;
	pbm_err_p->pbm_afsr = *pcbm_p->pcbm_async_flt_status_reg;
	pbm_err_p->pbm_afar = *pcbm_p->pcbm_async_flt_addr_reg;
	pbm_err_p->pcbm_pci.pcmu_pa = *pcbm_p->pcbm_async_flt_addr_reg;
}

static void
pcmu_pbm_clear_error(pcmu_pbm_t *pcbm_p)
{
	uint64_t pbm_afsr;

	/*
	 * for poke() support - called from POKE_FLUSH. Spin waiting
	 * for MA, TA or SERR to be cleared by a pcmu_pbm_error_intr().
	 * We have to wait for SERR too in case the device is beyond
	 * a pci-pci bridge.
2130 */ 2131 pbm_afsr = *pcbm_p->pcbm_async_flt_status_reg; 2132 while (((pbm_afsr >> PCMU_PCI_AFSR_PE_SHIFT) & 2133 (PCMU_PCI_AFSR_E_MA | PCMU_PCI_AFSR_E_TA))) { 2134 pbm_afsr = *pcbm_p->pcbm_async_flt_status_reg; 2135 } 2136 } 2137 2138 void 2139 pcmu_err_create(pcmu_t *pcmu_p) 2140 { 2141 /* 2142 * PCI detected ECC errorq, to schedule async handling 2143 * of ECC errors and logging. 2144 * The errorq is created here but destroyed when _fini is called 2145 * for the pci module. 2146 */ 2147 if (pcmu_ecc_queue == NULL) { 2148 pcmu_ecc_queue = errorq_create("pcmu_ecc_queue", 2149 (errorq_func_t)pcmu_ecc_err_drain, 2150 (void *)NULL, 2151 ECC_MAX_ERRS, sizeof (pcmu_ecc_errstate_t), 2152 PIL_2, ERRORQ_VITAL); 2153 if (pcmu_ecc_queue == NULL) 2154 panic("failed to create required system error queue"); 2155 } 2156 2157 /* 2158 * Initialize error handling mutex. 2159 */ 2160 mutex_init(&pcmu_p->pcmu_err_mutex, NULL, MUTEX_DRIVER, 2161 (void *)pcmu_p->pcmu_fm_ibc); 2162 } 2163 2164 void 2165 pcmu_err_destroy(pcmu_t *pcmu_p) 2166 { 2167 mutex_destroy(&pcmu_p->pcmu_err_mutex); 2168 } 2169 2170 /* 2171 * Function used to post PCI block module specific ereports. 2172 */ 2173 void 2174 pcmu_pbm_ereport_post(dev_info_t *dip, uint64_t ena, 2175 pcmu_pbm_errstate_t *pbm_err) 2176 { 2177 char *aux_msg; 2178 uint32_t prierr, secerr; 2179 pcmu_t *pcmu_p; 2180 int instance = ddi_get_instance(dip); 2181 2182 ena = ena ? 
ena : fm_ena_generate(0, FM_ENA_FMT1); 2183 2184 pcmu_p = get_pcmu_soft_state(instance); 2185 prierr = PBM_AFSR_TO_PRIERR(pbm_err->pbm_afsr); 2186 secerr = PBM_AFSR_TO_SECERR(pbm_err->pbm_afsr); 2187 if (prierr) 2188 aux_msg = "PCI primary error: Master Abort"; 2189 else if (secerr) 2190 aux_msg = "PCI secondary error: Master Abort"; 2191 else 2192 aux_msg = ""; 2193 cmn_err(CE_WARN, "%s %s: %s %s=0x%lx, %s=0x%lx, %s=0x%lx %s=0x%x", 2194 (pcmu_p->pcmu_pcbm_p)->pcbm_nameinst_str, 2195 (pcmu_p->pcmu_pcbm_p)->pcbm_nameaddr_str, 2196 aux_msg, 2197 PCI_PBM_AFAR, pbm_err->pbm_afar, 2198 PCI_PBM_AFSR, pbm_err->pbm_afsr, 2199 PCI_PBM_CSR, pbm_err->pbm_ctl_stat, 2200 "portid", pcmu_p->pcmu_id); 2201 } 2202