/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/autoconf.h>
#include <sys/hwconf.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/machsystm.h>
#include <sys/fcode.h>
#include <sys/promif.h>
#include <sys/promimpl.h>
#include <sys/opl_cfg.h>
#include <sys/scfd/scfostoescf.h>

static unsigned int	opl_cfg_inited;
static opl_board_cfg_t	opl_boards[HWD_SBS_PER_DOMAIN];

/*
 * Module control operations
 */

extern struct mod_ops mod_miscops;

static struct modlmisc modlmisc = {
	&mod_miscops,			/* Type of module */
	"OPL opl_cfg %I%"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};

static int	opl_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
static int	opl_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
static int	opl_register_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
static int	opl_register_store(dev_info_t *, fco_handle_t, fc_ci_t *);

static int	opl_claim_memory(dev_info_t *, fco_handle_t, fc_ci_t *);
static int	opl_release_memory(dev_info_t *, fco_handle_t, fc_ci_t *);
static int	opl_vtop(dev_info_t *, fco_handle_t, fc_ci_t *);

static int	opl_config_child(dev_info_t *, fco_handle_t, fc_ci_t *);

static int	opl_get_fcode_size(dev_info_t *, fco_handle_t, fc_ci_t *);
static int	opl_get_fcode(dev_info_t *, fco_handle_t, fc_ci_t *);

static int	opl_map_phys(dev_info_t *, struct regspec *, caddr_t *,
    ddi_device_acc_attr_t *, ddi_acc_handle_t *);
static void	opl_unmap_phys(ddi_acc_handle_t *);
static int	opl_get_hwd_va(dev_info_t *, fco_handle_t, fc_ci_t *);

extern int	prom_get_fcode_size(char *);
extern int	prom_get_fcode(char *, char *);

#define	PROBE_STR_SIZE	64
#define	UNIT_ADDR_SIZE	64

opl_fc_ops_t	opl_fc_ops[] = {

	{	FC_MAP_IN,		opl_map_in},
	{	FC_MAP_OUT,		opl_map_out},
	{	"rx@",			opl_register_fetch},
	{	FC_RL_FETCH,		opl_register_fetch},
	{	FC_RW_FETCH,		opl_register_fetch},
	{	FC_RB_FETCH,		opl_register_fetch},
	{	"rx!",			opl_register_store},
	{	FC_RL_STORE,		opl_register_store},
	{	FC_RW_STORE,		opl_register_store},
	{	FC_RB_STORE,		opl_register_store},
	{	"claim-memory",		opl_claim_memory},
	{	"release-memory",	opl_release_memory},
	{	"vtop",			opl_vtop},
	{	FC_CONFIG_CHILD,	opl_config_child},
	{	FC_GET_FCODE_SIZE,	opl_get_fcode_size},
	{	FC_GET_FCODE,		opl_get_fcode},
	{	"get-hwd-va",		opl_get_hwd_va},
	{	NULL,			NULL}

};
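
/*
 * The service names above are matched against Fcode client-interface
 * requests in opl_fc_do_op() below; generic requests are handled first
 * by the common fc_ops(), and only the OPL-specific services fall
 * through to this table.
 */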
extern caddr_t	efcode_vaddr;
extern int	efcode_size;

#ifdef DEBUG
#define	HWDDUMP_OFFSETS		1
#define	HWDDUMP_ALL_STATUS	2
#define	HWDDUMP_CHUNKS		3
#define	HWDDUMP_SBP		4

int hwddump_flags = HWDDUMP_SBP | HWDDUMP_CHUNKS;
#endif

int
_init()
{
	int	err = 0;

	/*
	 * Create a resource map for the contiguous memory allocated
	 * at start-of-day in startup.c
	 */
	err = ndi_ra_map_setup(ddi_root_node(), "opl-fcodemem");
	if (err == NDI_FAILURE) {
		cmn_err(CE_WARN, "Cannot setup resource map opl-fcodemem\n");
		return (1);
	}

	/*
	 * Put the allocated memory into the pool.
	 */
	(void) ndi_ra_free(ddi_root_node(), (uint64_t)efcode_vaddr,
	    (uint64_t)efcode_size, "opl-fcodemem", 0);

	if ((err = mod_install(&modlinkage)) != 0) {
		cmn_err(CE_WARN, "opl_cfg failed to load, error=%d", err);
		(void) ndi_ra_map_destroy(ddi_root_node(), "opl-fcodemem");
	}

	return (err);
}

int
_fini(void)
{
	int	ret;

	ret = (mod_remove(&modlinkage));
	if (ret != 0)
		return (ret);

	(void) ndi_ra_map_destroy(ddi_root_node(), "opl-fcodemem");

	return (ret);
}

int
_info(modinfop)
struct modinfo *modinfop;
{
	return (mod_info(&modlinkage, modinfop));
}

#ifdef DEBUG
static void
opl_dump_hwd(opl_probe_t *probe)
{
	hwd_header_t		*hdrp;
	hwd_sb_status_t		*statp;
	hwd_domain_info_t	*dinfop;
	hwd_sb_t		*sbp;
	hwd_cpu_chip_t		*chips;
	hwd_pci_ch_t		*channels;
	int			board, i, status;

	board = probe->pr_board;

	hdrp = probe->pr_hdr;
	statp = probe->pr_sb_status;
	dinfop = probe->pr_dinfo;
	sbp = probe->pr_sb;

	printf("HWD: board %d\n", board);
	printf("HWD:magic = 0x%x\n", hdrp->hdr_magic);
	printf("HWD:version = 0x%x.%x\n", hdrp->hdr_version.major,
	    hdrp->hdr_version.minor);

	if (hwddump_flags & HWDDUMP_OFFSETS) {
		printf("HWD:status offset = 0x%x\n",
		    hdrp->hdr_sb_status_offset);
		printf("HWD:domain offset = 0x%x\n",
		    hdrp->hdr_domain_info_offset);
		printf("HWD:board offset = 0x%x\n", hdrp->hdr_sb_info_offset);
	}

	if (hwddump_flags & HWDDUMP_SBP)
		printf("HWD:sb_t ptr = 0x%p\n", (void *)probe->pr_sb);

	if (hwddump_flags & HWDDUMP_ALL_STATUS) {
		int bd;
		printf("HWD:board status =");
		for (bd = 0; bd < HWD_SBS_PER_DOMAIN; bd++)
			printf("%x ", statp->sb_status[bd]);
		printf("\n");
	} else {
		printf("HWD:board status = %d\n", statp->sb_status[board]);
	}

	printf("HWD:banner name = %s\n", dinfop->dinf_banner_name);
	printf("HWD:platform = %s\n", dinfop->dinf_platform_token);

	printf("HWD:chip status:\n");
	chips = &sbp->sb_cmu.cmu_cpu_chips[0];
	for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {

		status = chips[i].chip_status;
		printf("chip[%d] = ", i);
		if (HWD_STATUS_NONE(status))
			printf("none");
		else if (HWD_STATUS_FAILED(status))
			printf("fail");
		else if (HWD_STATUS_OK(status))
			printf("ok");
		printf("\n");
	}

	if (hwddump_flags & HWDDUMP_CHUNKS) {
		int chunk;
		hwd_memory_t *mem = &sbp->sb_cmu.cmu_memory;
		printf("HWD:chunks:\n");
		for (chunk = 0; chunk < HWD_MAX_MEM_CHUNKS; chunk++)
			printf("\t%d 0x%lx 0x%lx\n", chunk,
			    mem->mem_chunks[chunk].chnk_start_address,
			    mem->mem_chunks[chunk].chnk_size);
	}

	printf("HWD:channel status:\n");
	channels = &sbp->sb_pci_ch[0];
	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {

		status = channels[i].pci_status;
		printf("channels[%d] = ", i);
		if (HWD_STATUS_NONE(status))
			printf("none");
		else if (HWD_STATUS_FAILED(status))
			printf("fail");
		else if (HWD_STATUS_OK(status))
			printf("ok");
		printf("\n");
	}
	printf("channels[%d] = ", i);
	status = sbp->sb_cmu.cmu_ch.chan_status;
	if (HWD_STATUS_NONE(status))
		printf("none");
	else if (HWD_STATUS_FAILED(status))
		printf("fail");
	else if (HWD_STATUS_OK(status))
		printf("ok");
	printf("\n");
}
#endif /* DEBUG */

#ifdef UCTEST
/*
 * For SesamI debugging, just map the SRAM directly to a kernel
 * VA and read it out from there
 */

#include <sys/vmem.h>
#include <vm/seg_kmem.h>

/*
 * 0x4081F1323000LL is the HWD base address for LSB 0. But we need to map
 * at page boundaries. So, we use a base address of 0x4081F1322000LL.
 * Note that this has to match the HWD base pa set in .sesami-common-defs.
 *
 * The size specified for the HWD in the SCF spec is 36K. But since
 * we adjusted the base address by 4K, we need to use 40K for the
 * mapping size to cover the HWD. And 40K is also a multiple of the
 * base page size.
 */
#define	OPL_HWD_BASE(lsb)	\
	(0x4081F1322000LL | (((uint64_t)(lsb)) << 40))

void	*opl_hwd_vaddr;
#endif /* UCTEST */

/*
 * Get the hardware descriptor from SCF.
 */

/*ARGSUSED*/
int
opl_read_hwd(int board, hwd_header_t **hdrp, hwd_sb_status_t **statp,
    hwd_domain_info_t **dinfop, hwd_sb_t **sbp)
{
	static int (*getinfop)(uint32_t, uint8_t, uint32_t, uint32_t *,
	    void *) = NULL;
	void *hwdp;

	uint32_t key = KEY_ESCF;	/* required value */
	uint8_t  type = 0x40;		/* SUB_OS_RECEIVE_HWD */
	uint32_t transid = board;
	uint32_t datasize = HWD_DATA_SIZE;

	hwd_header_t		*hd;
	hwd_sb_status_t		*st;
	hwd_domain_info_t	*di;
	hwd_sb_t		*sb;

	int	ret;

	if (opl_boards[board].cfg_hwd == NULL) {
#ifdef UCTEST
		/*
		 * Just map the HWD in SRAM to a kernel VA
		 */

		size_t	size;
		pfn_t	pfn;

		size = 0xA000;

		opl_hwd_vaddr = vmem_alloc(heap_arena, size, VM_SLEEP);
		if (opl_hwd_vaddr == NULL) {
			cmn_err(CE_NOTE, "No space for HWD");
			return (-1);
		}

		pfn = btop(OPL_HWD_BASE(board));
		hat_devload(kas.a_hat, opl_hwd_vaddr, size, pfn, PROT_READ,
		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);

		hwdp = (void *)((char *)opl_hwd_vaddr + 0x1000);
		opl_boards[board].cfg_hwd = hwdp;
		ret = 0;
#else

		/* find the scf_service_getinfo() function */
		if (getinfop == NULL)
			getinfop = (int (*)(uint32_t, uint8_t, uint32_t,
			    uint32_t *,
			    void *))modgetsymvalue("scf_service_getinfo", 0);

		if (getinfop == NULL)
			return (-1);

		/* allocate memory to receive the data */
		hwdp = kmem_alloc(HWD_DATA_SIZE, KM_SLEEP);

		/* get the HWD */
		ret = (*getinfop)(key, type, transid, &datasize, hwdp);
		if (ret == 0)
			opl_boards[board].cfg_hwd = hwdp;
		else
			kmem_free(hwdp, HWD_DATA_SIZE);
#endif
	} else {
		hwdp = opl_boards[board].cfg_hwd;
		ret = 0;
	}

	/* copy the data to the destination */
	if (ret == 0) {
		hd = (hwd_header_t *)hwdp;
		st = (hwd_sb_status_t *)
		    ((char *)hwdp + hd->hdr_sb_status_offset);
		di = (hwd_domain_info_t *)
		    ((char *)hwdp + hd->hdr_domain_info_offset);
		sb = (hwd_sb_t *)
		    ((char *)hwdp + hd->hdr_sb_info_offset);
		if (hdrp != NULL)
			*hdrp = hd;
		if (statp != NULL)
			*statp = st;
		if (dinfop != NULL)
			*dinfop = di;
		if (sbp != NULL)
			*sbp = sb;
	}

	return (ret);
}

/*
 * The opl_probe_t probe structure is used to pass all sorts of parameters
 * to callback functions during probing. It also contains a snapshot of
 * the hardware descriptor that is taken at the beginning of a probe.
 */
static int
opl_probe_init(opl_probe_t *probe)
{
	hwd_header_t		**hdrp;
	hwd_sb_status_t		**statp;
	hwd_domain_info_t	**dinfop;
	hwd_sb_t		**sbp;
	int			board, ret;

	board = probe->pr_board;

	hdrp = &probe->pr_hdr;
	statp = &probe->pr_sb_status;
	dinfop = &probe->pr_dinfo;
	sbp = &probe->pr_sb;

	/*
	 * Read the hardware descriptor.
	 */
	ret = opl_read_hwd(board, hdrp, statp, dinfop, sbp);
	if (ret != 0) {

		cmn_err(CE_WARN, "IKP: failed to read HWD header");
		return (-1);
	}

#ifdef DEBUG
	opl_dump_hwd(probe);
#endif
	return (0);
}

/*
 * This function is used to obtain pointers to relevant device nodes
 * which are created by Solaris at boot time.
 *
 * This function walks the child nodes of a given node, extracts
 * the "name" property, if it exists, and passes the node to a
 * callback init function. The callback determines if this node is
 * interesting or not. If it is, then a pointer to the node is
 * stored away by the callback for use during unprobe.
 *
 * The DDI get property function allocates storage for the name
 * property. That needs to be freed within this function.
 */
static int
opl_init_nodes(dev_info_t *parent, opl_init_func_t init)
{
	dev_info_t	*node;
	char		*name;
	int		circ, ret;
	int		len;

	ASSERT(parent != NULL);

	/*
	 * Hold parent node busy to walk its child list
	 */
	ndi_devi_enter(parent, &circ);
	node = ddi_get_child(parent);

	while (node != NULL) {

		ret = OPL_GET_PROP(string, node, "name", &name, &len);
		if (ret != DDI_PROP_SUCCESS) {
			/*
			 * The property does not exist for this node.
			 */
			node = ddi_get_next_sibling(node);
			continue;
		}

		ret = init(node, name, len);
		kmem_free(name, len);
		if (ret != 0) {

			ndi_devi_exit(parent, circ);
			return (-1);
		}

		node = ddi_get_next_sibling(node);
	}

	ndi_devi_exit(parent, circ);

	return (0);
}

/*
 * This init function finds all the interesting nodes under the
 * root node and stores pointers to them. The following nodes
 * are considered interesting by this implementation:
 *
 * "cmp"
 * These are nodes that represent processor chips.
 *
 * "pci"
 * These are nodes that represent PCI leaves.
 *
 * "pseudo-mc"
 * These are nodes that contain memory information.
 */
static int
opl_init_root_nodes(dev_info_t *node, char *name, int len)
{
	int	portid, board, chip, channel, leaf;
	int	ret;

	if (strncmp(name, OPL_CPU_CHIP_NODE, len) == 0) {

		ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
		if (ret != DDI_PROP_SUCCESS)
			return (-1);

		ret = OPL_GET_PROP(int, node, "board#", &board, -1);
		if (ret != DDI_PROP_SUCCESS)
			return (-1);

		chip = OPL_CPU_CHIP(portid);
		opl_boards[board].cfg_cpu_chips[chip] = node;

	} else if (strncmp(name, OPL_PCI_LEAF_NODE, len) == 0) {

		ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
		if (ret != DDI_PROP_SUCCESS)
			return (-1);

		board = OPL_IO_PORTID_TO_LSB(portid);
		channel = OPL_PORTID_TO_CHANNEL(portid);

		if (channel == OPL_CMU_CHANNEL) {

			opl_boards[board].cfg_cmuch_leaf = node;

		} else {

			leaf = OPL_PORTID_TO_LEAF(portid);
			opl_boards[board].cfg_pcich_leaf[channel][leaf] = node;
		}
	} else if (strncmp(name, OPL_PSEUDO_MC_NODE, len) == 0) {

		ret = OPL_GET_PROP(int, node, "board#", &board, -1);
		if (ret != DDI_PROP_SUCCESS)
			return (-1);

		ASSERT((board >= 0) && (board < HWD_SBS_PER_DOMAIN));

		opl_boards[board].cfg_pseudo_mc = node;
	}

	return (0);
}

/*
 * This function initializes the OPL IKP feature. Currently, all it does
 * is find the interesting nodes that Solaris has created at boot time
 * for boards present at boot time and store pointers to them. This
 * is useful if those boards are unprobed by DR.
 */
int
opl_init_cfg()
{
	dev_info_t	*root;

	if (opl_cfg_inited == 0) {

		root = ddi_root_node();
		if ((opl_init_nodes(root, opl_init_root_nodes) != 0)) {
			cmn_err(CE_WARN, "IKP: init failed");
			return (1);
		}

		opl_cfg_inited = 1;
	}

	return (0);
}

/*
 * When DR is initialized, we walk the device tree and acquire a hold on
 * all the nodes that are interesting to IKP. This is so that the
 * corresponding branches cannot be deleted.
 *
 * The following function informs the walk about which nodes are interesting
 * so that it can hold the corresponding branches.
 */
static int
opl_hold_node(char *name)
{
	/*
	 * We only need to hold/release the following nodes which
	 * represent separate branches that must be managed.
	 */
	return ((strcmp(name, OPL_CPU_CHIP_NODE) == 0) ||
	    (strcmp(name, OPL_PSEUDO_MC_NODE) == 0) ||
	    (strcmp(name, OPL_PCI_LEAF_NODE) == 0));
}

static int
opl_hold_rele_devtree(dev_info_t *rdip, void *arg)
{

	int *holdp = (int *)arg;
	char *name = ddi_node_name(rdip);

	/*
	 * We only need to hold/release the following nodes which
	 * represent separate branches that must be managed.
	 */
	if (opl_hold_node(name) == 0) {
		/* Not of interest to us */
		return (DDI_WALK_PRUNECHILD);
	}
	if (*holdp) {
		ASSERT(!e_ddi_branch_held(rdip));
		e_ddi_branch_hold(rdip);
	} else {
		ASSERT(e_ddi_branch_held(rdip));
		e_ddi_branch_rele(rdip);
	}

	return (DDI_WALK_PRUNECHILD);
}

void
opl_hold_devtree()
{
	dev_info_t *dip;
	int circ;
	int hold = 1;

	dip = ddi_root_node();
	ndi_devi_enter(dip, &circ);
	ddi_walk_devs(ddi_get_child(dip), opl_hold_rele_devtree, &hold);
	ndi_devi_exit(dip, circ);
}

void
opl_release_devtree()
{
	dev_info_t *dip;
	int circ;
	int hold = 0;

	dip = ddi_root_node();
	ndi_devi_enter(dip, &circ);
	ddi_walk_devs(ddi_get_child(dip), opl_hold_rele_devtree, &hold);
	ndi_devi_exit(dip, circ);
}

/*
 * This is a helper function that allows opl_create_node() to return a
 * pointer to a newly created node to its caller.
 */
/*ARGSUSED*/
static void
opl_set_node(dev_info_t *node, void *arg, uint_t flags)
{
	opl_probe_t	*probe;

	probe = arg;
	probe->pr_node = node;
}

/*
 * Function to create a node in the device tree under a specified parent.
 *
 * e_ddi_branch_create() allows the creation of a whole branch with a
 * single call of the function. However, we only use it to create one node
 * at a time in the case of non-I/O device nodes. In other words, we
 * create branches by repeatedly using this function. This makes the
 * code more readable.
 *
 * The branch descriptor passed to e_ddi_branch_create() takes two
 * callbacks. The create() callback is used to set the properties of a
 * newly created node. The other callback is used to return a pointer
 * to the newly created node. The create() callback is passed by the
 * caller of this function based on the kind of node he wishes to
 * create.
 *
 * e_ddi_branch_create() returns with the newly created node held. We
 * only need to hold the top nodes of the branches we create. We release
 * the hold for the others. E.g., the "cmp" node needs to be held. Since
 * we hold the "cmp" node, there is no need to hold the "core" and "cpu"
 * nodes below it.
 */
static dev_info_t *
opl_create_node(opl_probe_t *probe)
{
	devi_branch_t	branch;

	probe->pr_node = NULL;

	branch.arg = probe;
	branch.type = DEVI_BRANCH_SID;
	branch.create.sid_branch_create = probe->pr_create;
	branch.devi_branch_callback = opl_set_node;

	if (e_ddi_branch_create(probe->pr_parent, &branch, NULL, 0) != 0)
		return (NULL);

	ASSERT(probe->pr_node != NULL);

	if (probe->pr_hold == 0)
		e_ddi_branch_rele(probe->pr_node);

	return (probe->pr_node);
}

/*
 * Function to tear down a whole branch rooted at the specified node.
 *
 * Although we create each node of a branch individually, we destroy
 * a whole branch in one call. This is more efficient.
 */
static int
opl_destroy_node(dev_info_t *node)
{
	if (e_ddi_branch_destroy(node, NULL, 0) != 0)
		return (-1);

	return (0);
}

/*
 * Set the properties for a "cpu" node.
 */
/*ARGSUSED*/
static int
opl_create_cpu(dev_info_t *node, void *arg, uint_t flags)
{
	opl_probe_t	*probe;
	hwd_cpu_chip_t	*chip;
	hwd_core_t	*core;
	hwd_cpu_t	*cpu;
	int		ret;

	probe = arg;
	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
	core = &chip->chip_cores[probe->pr_core];
	cpu = &core->core_cpus[probe->pr_cpu];
	OPL_UPDATE_PROP(string, node, "name", OPL_CPU_NODE);
	OPL_UPDATE_PROP(string, node, "device_type", OPL_CPU_NODE);

	OPL_UPDATE_PROP(int, node, "cpuid", cpu->cpu_cpuid);
	OPL_UPDATE_PROP(int, node, "reg", probe->pr_cpu);

	OPL_UPDATE_PROP(string, node, "status", "okay");

	return (DDI_WALK_TERMINATE);
}

/*
 * Create "cpu" nodes as child nodes of a given "core" node.
 */
static int
opl_probe_cpus(opl_probe_t *probe)
{
	int		i;
	hwd_cpu_chip_t	*chip;
	hwd_core_t	*core;
	hwd_cpu_t	*cpus;

	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
	core = &chip->chip_cores[probe->pr_core];
	cpus = &core->core_cpus[0];

	for (i = 0; i < HWD_CPUS_PER_CORE; i++) {

		/*
		 * Olympus-C has 2 cpus per core.
		 * Jupiter has 4 cpus per core.
		 * For the Olympus-C based platform, we expect the cpu_status
		 * of the non-existent cpus to be set to missing.
		 */
		if (!HWD_STATUS_OK(cpus[i].cpu_status))
			continue;

		probe->pr_create = opl_create_cpu;
		probe->pr_cpu = i;
		if (opl_create_node(probe) == NULL) {

			cmn_err(CE_WARN, "IKP: create cpu (%d-%d-%d-%d) failed",
			    probe->pr_board, probe->pr_cpu_chip,
			    probe->pr_core, probe->pr_cpu);
			return (-1);
		}
	}

	return (0);
}

/*
 * Set the properties for a "core" node.
 */
/*ARGSUSED*/
static int
opl_create_core(dev_info_t *node, void *arg, uint_t flags)
{
	opl_probe_t	*probe;
	hwd_cpu_chip_t	*chip;
	hwd_core_t	*core;
	int		sharing[2];
	int		ret;

	probe = arg;
	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
	core = &chip->chip_cores[probe->pr_core];

	OPL_UPDATE_PROP(string, node, "name", OPL_CORE_NODE);
	OPL_UPDATE_PROP(string, node, "device_type", OPL_CORE_NODE);
	OPL_UPDATE_PROP(string, node, "compatible", chip->chip_compatible);

	OPL_UPDATE_PROP(int, node, "reg", probe->pr_core);
	OPL_UPDATE_PROP(int, node, "manufacturer#", core->core_manufacturer);
	OPL_UPDATE_PROP(int, node, "implementation#",
	    core->core_implementation);
	OPL_UPDATE_PROP(int, node, "mask#", core->core_mask);

	OPL_UPDATE_PROP(int, node, "sparc-version", core->core_version);
	OPL_UPDATE_PROP(int, node, "clock-frequency", core->core_frequency);

	OPL_UPDATE_PROP(int, node, "l1-icache-size", core->core_l1_icache_size);
	OPL_UPDATE_PROP(int, node, "l1-icache-line-size",
	    core->core_l1_icache_line_size);
	OPL_UPDATE_PROP(int, node, "l1-icache-associativity",
	    core->core_l1_icache_associativity);
	OPL_UPDATE_PROP(int, node, "#itlb-entries",
	    core->core_num_itlb_entries);

	OPL_UPDATE_PROP(int, node, "l1-dcache-size", core->core_l1_dcache_size);
	OPL_UPDATE_PROP(int, node, "l1-dcache-line-size",
	    core->core_l1_dcache_line_size);
	OPL_UPDATE_PROP(int, node, "l1-dcache-associativity",
	    core->core_l1_dcache_associativity);
	OPL_UPDATE_PROP(int, node, "#dtlb-entries",
	    core->core_num_dtlb_entries);

	OPL_UPDATE_PROP(int, node, "l2-cache-size", core->core_l2_cache_size);
	OPL_UPDATE_PROP(int, node, "l2-cache-line-size",
	    core->core_l2_cache_line_size);
	OPL_UPDATE_PROP(int, node, "l2-cache-associativity",
	    core->core_l2_cache_associativity);
	sharing[0] = 0;
	sharing[1] = core->core_l2_cache_sharing;
	OPL_UPDATE_PROP_ARRAY(int, node, "l2-cache-sharing", sharing, 2);

	OPL_UPDATE_PROP(string, node, "status", "okay");

	return (DDI_WALK_TERMINATE);
}

/*
 * Create "core" nodes as child nodes of a given "cmp" node.
 *
 * Create the branch below each "core" node.
 */
static int
opl_probe_cores(opl_probe_t *probe)
{
	int		i;
	hwd_cpu_chip_t	*chip;
	hwd_core_t	*cores;
	dev_info_t	*parent, *node;

	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
	cores = &chip->chip_cores[0];
	parent = probe->pr_parent;

	for (i = 0; i < HWD_CORES_PER_CPU_CHIP; i++) {

		if (!HWD_STATUS_OK(cores[i].core_status))
			continue;

		probe->pr_parent = parent;
		probe->pr_create = opl_create_core;
		probe->pr_core = i;
		node = opl_create_node(probe);
		if (node == NULL) {

			cmn_err(CE_WARN, "IKP: create core (%d-%d-%d) failed",
			    probe->pr_board, probe->pr_cpu_chip,
			    probe->pr_core);
			return (-1);
		}

		/*
		 * Create "cpu" nodes below "core".
		 */
		probe->pr_parent = node;
		if (opl_probe_cpus(probe) != 0)
			return (-1);
	}

	return (0);
}

/*
 * Set the properties for a "cmp" node.
 */
/*ARGSUSED*/
static int
opl_create_cpu_chip(dev_info_t *node, void *arg, uint_t flags)
{
	opl_probe_t	*probe;
	hwd_cpu_chip_t	*chip;
	opl_range_t	range;
	uint64_t	dummy_addr;
	int		ret;

	probe = arg;
	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];

	OPL_UPDATE_PROP(string, node, "name", OPL_CPU_CHIP_NODE);

	OPL_UPDATE_PROP(int, node, "portid", chip->chip_portid);
	OPL_UPDATE_PROP(int, node, "board#", probe->pr_board);

	dummy_addr = OPL_PROC_AS(probe->pr_board, probe->pr_cpu_chip);
	range.rg_addr_hi = OPL_HI(dummy_addr);
	range.rg_addr_lo = OPL_LO(dummy_addr);
	range.rg_size_hi = 0;
	range.rg_size_lo = 0;
	OPL_UPDATE_PROP_ARRAY(int, node, "reg", (int *)&range, 4);

	OPL_UPDATE_PROP(int, node, "#address-cells", 1);
	OPL_UPDATE_PROP(int, node, "#size-cells", 0);

	OPL_UPDATE_PROP(string, node, "status", "okay");

	return (DDI_WALK_TERMINATE);
}

/*
 * Create "cmp" nodes as child nodes of the root node.
 *
 * Create the branch below each "cmp" node.
 */
static int
opl_probe_cpu_chips(opl_probe_t *probe)
{
	int		i;
	dev_info_t	**cfg_cpu_chips;
	hwd_cpu_chip_t	*chips;
	dev_info_t	*node;

	cfg_cpu_chips = opl_boards[probe->pr_board].cfg_cpu_chips;
	chips = &probe->pr_sb->sb_cmu.cmu_cpu_chips[0];

	for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {

		ASSERT(cfg_cpu_chips[i] == NULL);

		if (!HWD_STATUS_OK(chips[i].chip_status))
			continue;

		probe->pr_parent = ddi_root_node();
		probe->pr_create = opl_create_cpu_chip;
		probe->pr_cpu_chip = i;
		probe->pr_hold = 1;
		node = opl_create_node(probe);
		if (node == NULL) {

			cmn_err(CE_WARN, "IKP: create chip (%d-%d) failed",
			    probe->pr_board, probe->pr_cpu_chip);
			return (-1);
		}

		cfg_cpu_chips[i] = node;

		/*
		 * Create "core" nodes below "cmp".
		 * We hold the "cmp" node. So, there is no need to hold
		 * the "core" and "cpu" nodes below it.
		 */
		probe->pr_parent = node;
		probe->pr_hold = 0;
		if (opl_probe_cores(probe) != 0)
			return (-1);
	}

	return (0);
}

/*
 * Set the properties for a "pseudo-mc" node.
 */
/*ARGSUSED*/
static int
opl_create_pseudo_mc(dev_info_t *node, void *arg, uint_t flags)
{
	opl_probe_t	*probe;
	int		board, portid;
	hwd_bank_t	*bank;
	hwd_memory_t	*mem;
	opl_range_t	range;
	opl_mc_addr_t	mc[HWD_BANKS_PER_CMU];
	int		status[2][7];
	int		i, j;
	int		ret;

	probe = arg;
	board = probe->pr_board;

	OPL_UPDATE_PROP(string, node, "name", OPL_PSEUDO_MC_NODE);
	OPL_UPDATE_PROP(string, node, "device_type", "memory-controller");
	OPL_UPDATE_PROP(string, node, "compatible", "FJSV,oplmc");

	portid = OPL_LSB_TO_PSEUDOMC_PORTID(board);
	OPL_UPDATE_PROP(int, node, "portid", portid);

	range.rg_addr_hi = OPL_HI(OPL_MC_AS(board));
	range.rg_addr_lo = 0x200;
	range.rg_size_hi = 0;
	range.rg_size_lo = 0;
	OPL_UPDATE_PROP_ARRAY(int, node, "reg", (int *)&range, 4);

	OPL_UPDATE_PROP(int, node, "board#", board);
	OPL_UPDATE_PROP(int, node, "physical-board#",
	    probe->pr_sb->sb_psb_number);

	OPL_UPDATE_PROP(int, node, "#address-cells", 1);
	OPL_UPDATE_PROP(int, node, "#size-cells", 2);

	mem = &probe->pr_sb->sb_cmu.cmu_memory;

	range.rg_addr_hi = OPL_HI(mem->mem_start_address);
	range.rg_addr_lo = OPL_LO(mem->mem_start_address);
	range.rg_size_hi = OPL_HI(mem->mem_size);
	range.rg_size_lo = OPL_LO(mem->mem_size);
	OPL_UPDATE_PROP_ARRAY(int, node, "sb-mem-ranges", (int *)&range, 4);

	bank = probe->pr_sb->sb_cmu.cmu_memory.mem_banks;
	for (i = 0, j = 0; i < HWD_BANKS_PER_CMU; i++) {

		if (!HWD_STATUS_OK(bank[i].bank_status))
			continue;

		mc[j].mc_bank = i;
		mc[j].mc_hi = OPL_HI(bank[i].bank_register_address);
		mc[j].mc_lo = OPL_LO(bank[i].bank_register_address);
		j++;
	}
	ASSERT(j > 0);
	OPL_UPDATE_PROP_ARRAY(int, node, "mc-addr", (int *)mc, j*3);

	OPL_UPDATE_PROP_ARRAY(byte, node, "cs0-mc-pa-trans-table",
	    mem->mem_cs[0].cs_pa_mac_table, 64);
	OPL_UPDATE_PROP_ARRAY(byte, node, "cs1-mc-pa-trans-table",
	    mem->mem_cs[1].cs_pa_mac_table, 64);

#define	CS_PER_MEM	2

	for (i = 0, j = 0; i < CS_PER_MEM; i++) {
		if (HWD_STATUS_OK(mem->mem_cs[i].cs_status) ||
		    HWD_STATUS_FAILED(mem->mem_cs[i].cs_status)) {
			status[j][0] = i;
			if (HWD_STATUS_OK(mem->mem_cs[i].cs_status))
				status[j][1] = 0;
			else
				status[j][1] = 1;
			status[j][2] =
			    OPL_HI(mem->mem_cs[i].cs_available_capacity);
			status[j][3] =
			    OPL_LO(mem->mem_cs[i].cs_available_capacity);
			status[j][4] = OPL_HI(mem->mem_cs[i].cs_dimm_capacity);
			status[j][5] = OPL_LO(mem->mem_cs[i].cs_dimm_capacity);
			status[j][6] = mem->mem_cs[i].cs_number_of_dimms;
			j++;
		}
	}
	ASSERT(j > 0);
	OPL_UPDATE_PROP_ARRAY(int, node, "cs-status", (int *)status,
	    j*7);

	return (DDI_WALK_TERMINATE);
}

/*
 * Create "pseudo-mc" nodes
 */
static int
opl_probe_memory(opl_probe_t *probe)
{
	int		board;
	opl_board_cfg_t	*board_cfg;
	dev_info_t	*node;

	board = probe->pr_board;
	board_cfg = &opl_boards[board];

	ASSERT(board_cfg->cfg_pseudo_mc == NULL);

	probe->pr_parent = ddi_root_node();
	probe->pr_create = opl_create_pseudo_mc;
	probe->pr_hold = 1;
	node = opl_create_node(probe);
	if (node == NULL) {

		cmn_err(CE_WARN, "IKP: create pseudo-mc (%d) failed", board);
		return (-1);
	}

	board_cfg->cfg_pseudo_mc = node;

	return (0);
}

/*
 * Allocate the fcode ops handle.
 */
/*ARGSUSED*/
static
fco_handle_t
opl_fc_ops_alloc_handle(dev_info_t *parent, dev_info_t *child,
    void *fcode, size_t fcode_size, char *unit_address,
    char *my_args)
{
	fco_handle_t	rp;
	phandle_t	h;
	char		*buf;

	rp = kmem_zalloc(sizeof (struct fc_resource_list), KM_SLEEP);
	rp->next_handle = fc_ops_alloc_handle(parent, child, fcode, fcode_size,
	    unit_address, NULL);
	rp->ap = parent;
	rp->child = child;
	rp->fcode = fcode;
	rp->fcode_size = fcode_size;
	rp->my_args = my_args;

	if (unit_address) {
		buf = kmem_zalloc(UNIT_ADDR_SIZE, KM_SLEEP);
		(void) strcpy(buf, unit_address);
		rp->unit_address = buf;
	}

	/*
	 * Add the child's nodeid to our table...
	 */
	h = ddi_get_nodeid(rp->child);
	fc_add_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child, h);

	return (rp);
}


static void
opl_fc_ops_free_handle(fco_handle_t rp)
{
	struct fc_resource	*resp, *nresp;

	ASSERT(rp);

	if (rp->next_handle)
		fc_ops_free_handle(rp->next_handle);
	if (rp->unit_address)
		kmem_free(rp->unit_address, UNIT_ADDR_SIZE);

	/*
	 * Release all the resources from the resource list
	 */
	for (resp = rp->head; resp != NULL; resp = nresp) {
		nresp = resp->next;
		switch (resp->type) {

		case RT_MAP:
			break;

		case RT_DMA:
			/*
			 * DMA has to be freed up at exit time.
			 */
			cmn_err(CE_CONT,
			    "opl_fc_ops_free_handle: Unexpected DMA seen!");
			break;

		case RT_CONTIGIOUS:
			FC_DEBUG2(1, CE_CONT, "opl_fc_ops_free: "
			    "Free claim-memory resource 0x%lx size 0x%x\n",
			    resp->fc_contig_virt, resp->fc_contig_len);

			(void) ndi_ra_free(ddi_root_node(),
			    (uint64_t)resp->fc_contig_virt,
			    resp->fc_contig_len, "opl-fcodemem",
			    NDI_RA_PASS);

			break;

		default:
			cmn_err(CE_CONT, "opl_fc_ops_free: "
			    "unknown resource type %d", resp->type);
			break;
		}
		fc_rem_resource(rp, resp);
		kmem_free(resp, sizeof (struct fc_resource));
	}

	kmem_free(rp, sizeof (struct fc_resource_list));
}
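
/*
 * The alloc/free routines above and opl_fc_do_op() below are used together
 * in opl_probe_leaf(): a handle is allocated with opl_fc_ops_alloc_handle(),
 * passed along with opl_fc_do_op to fcode_interpreter(), and released with
 * opl_fc_ops_free_handle() once interpretation is finished (or immediately
 * if it fails).
 */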
int
opl_fc_do_op(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
{
	opl_fc_ops_t	*op;
	char		*service = fc_cell2ptr(cp->svc_name);

	ASSERT(rp);

	FC_DEBUG1(1, CE_CONT, "opl_fc_do_op: <%s>\n", service);

	/*
	 * First try the generic fc_ops.
	 */
	if (fc_ops(ap, rp->next_handle, cp) == 0)
		return (0);

	/*
	 * Now try the Jupiter-specific ops.
	 */
	for (op = opl_fc_ops; op->fc_service != NULL; ++op)
		if (strcmp(op->fc_service, service) == 0)
			return (op->fc_op(ap, rp, cp));

	FC_DEBUG1(9, CE_CONT, "opl_fc_do_op: <%s> not serviced\n", service);

	return (-1);
}

/*
 * map-in  (phys.lo phys.hi size -- virt)
 */
static int
opl_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
{
	size_t			len;
	int			error;
	caddr_t			virt;
	struct fc_resource	*resp;
	struct regspec		rspec;
	ddi_device_acc_attr_t	acc;
	ddi_acc_handle_t	h;

	if (fc_cell2int(cp->nargs) != 3)
		return (fc_syntax_error(cp, "nargs must be 3"));

	if (fc_cell2int(cp->nresults) < 1)
		return (fc_syntax_error(cp, "nresults must be >= 1"));

	rspec.regspec_size = len = fc_cell2size(fc_arg(cp, 0));
	rspec.regspec_bustype = fc_cell2uint(fc_arg(cp, 1));
	rspec.regspec_addr = fc_cell2uint(fc_arg(cp, 2));

	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	acc.devacc_attr_endian_flags = DDI_STRUCTURE_BE_ACC;
	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	FC_DEBUG3(1, CE_CONT, "opl_map_in: attempting map in "
	    "address 0x%08x.%08x length %x\n", rspec.regspec_bustype,
	    rspec.regspec_addr, rspec.regspec_size);

	error = opl_map_phys(rp->child, &rspec, &virt, &acc, &h);

	if (error) {
		FC_DEBUG3(1, CE_CONT, "opl_map_in: map in failed - "
		    "address 0x%08x.%08x length %x\n", rspec.regspec_bustype,
		    rspec.regspec_addr, rspec.regspec_size);

		return (fc_priv_error(cp, "opl map-in failed"));
	}

	FC_DEBUG1(3, CE_CONT, "opl_map_in: returning virt %p\n", virt);

	cp->nresults = fc_int2cell(1);
	fc_result(cp, 0) = fc_ptr2cell(virt);

	/*
	 * Log this resource ...
	 */
	resp = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
	resp->type = RT_MAP;
	resp->fc_map_virt = virt;
	resp->fc_map_len = len;
	resp->fc_map_handle = h;
	fc_add_resource(rp, resp);

	return (fc_success_op(ap, rp, cp));
}

/*
 * map-out (virt size -- )
 */
static int
opl_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
{
	caddr_t			virt;
	size_t			len;
	struct fc_resource	*resp;

	if (fc_cell2int(cp->nargs) != 2)
		return (fc_syntax_error(cp, "nargs must be 2"));

	virt = fc_cell2ptr(fc_arg(cp, 1));

	len = fc_cell2size(fc_arg(cp, 0));

	FC_DEBUG2(1, CE_CONT, "opl_map_out: attempting map out %p %x\n",
	    virt, len);

	/*
	 * Find if this request matches a mapping resource we set up.
	 */
	fc_lock_resource_list(rp);
	for (resp = rp->head; resp != NULL; resp = resp->next) {
		if (resp->type != RT_MAP)
			continue;
		if (resp->fc_map_virt != virt)
			continue;
		if (resp->fc_map_len == len)
			break;
	}
	fc_unlock_resource_list(rp);

	if (resp == NULL)
		return (fc_priv_error(cp, "request doesn't match a "
		    "known mapping"));

	opl_unmap_phys(&resp->fc_map_handle);

	/*
	 * remove the resource from the list and release it.
	 */
	fc_rem_resource(rp, resp);
	kmem_free(resp, sizeof (struct fc_resource));

	cp->nresults = fc_int2cell(0);
	return (fc_success_op(ap, rp, cp));
}

static int
opl_register_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
{
	size_t			len;
	caddr_t			virt;
	int			error = 0;
	uint64_t		v;
	uint64_t		x;
	uint32_t		l;
	uint16_t		w;
	uint8_t			b;
	char			*service = fc_cell2ptr(cp->svc_name);
	struct fc_resource	*resp;

	if (fc_cell2int(cp->nargs) != 1)
		return (fc_syntax_error(cp, "nargs must be 1"));

	if (fc_cell2int(cp->nresults) < 1)
		return (fc_syntax_error(cp, "nresults must be >= 1"));

	virt = fc_cell2ptr(fc_arg(cp, 0));

	/*
	 * Determine the access width .. we can switch on the 2nd
	 * character of the name which is "rx@", "rl@", "rb@" or "rw@"
	 */
	switch (*(service + 1)) {
	case 'x':	len = sizeof (x);	break;
	case 'l':	len = sizeof (l);	break;
	case 'w':	len = sizeof (w);	break;
	case 'b':	len = sizeof (b);	break;
	}

	/*
	 * Check the alignment ...
	 */
	if (((intptr_t)virt & (len - 1)) != 0)
		return (fc_priv_error(cp, "unaligned access"));

	/*
	 * Find if this virt is 'within' a request we know about
	 */
	fc_lock_resource_list(rp);
	for (resp = rp->head; resp != NULL; resp = resp->next) {
		if (resp->type == RT_MAP) {
			if ((virt >= (caddr_t)resp->fc_map_virt) &&
			    ((virt + len) <=
			    ((caddr_t)resp->fc_map_virt + resp->fc_map_len)))
				break;
		} else if (resp->type == RT_CONTIGIOUS) {
			if ((virt >= (caddr_t)resp->fc_contig_virt) &&
			    ((virt + len) <=
			    ((caddr_t)resp->fc_contig_virt +
			    resp->fc_contig_len)))
				break;
		}
	}
	fc_unlock_resource_list(rp);

	if (resp == NULL) {
		return (fc_priv_error(cp, "request not within "
		    "known mappings"));
	}

	switch (len) {
	case sizeof (x):
		if (resp->type == RT_MAP)
			error = ddi_peek64(rp->child,
			    (int64_t *)virt, (int64_t *)&x);
		else /* RT_CONTIGIOUS */
			x = *(int64_t *)virt;
		v = x;
		break;
	case sizeof (l):
		if (resp->type == RT_MAP)
			error = ddi_peek32(rp->child,
			    (int32_t *)virt, (int32_t *)&l);
		else /* RT_CONTIGIOUS */
			l = *(int32_t *)virt;
		v = l;
		break;
	case sizeof (w):
		if (resp->type == RT_MAP)
			error = ddi_peek16(rp->child,
			    (int16_t *)virt, (int16_t *)&w);
		else /* RT_CONTIGIOUS */
			w = *(int16_t *)virt;
		v = w;
		break;
	case sizeof (b):
		if (resp->type == RT_MAP)
			error = ddi_peek8(rp->child,
			    (int8_t *)virt, (int8_t *)&b);
		else /* RT_CONTIGIOUS */
			b = *(int8_t *)virt;
		v = b;
		break;
	}

	if (error == DDI_FAILURE) {
		FC_DEBUG2(1, CE_CONT, "opl_register_fetch: access error "
		    "accessing virt %p len %d\n", virt, len);
		return (fc_priv_error(cp, "access error"));
	}

	FC_DEBUG3(1, CE_CONT, "register_fetch (%s) %llx %llx\n",
	    service, virt, v);

	cp->nresults = fc_int2cell(1);
	switch (len) {
	case sizeof (x): fc_result(cp, 0) = x; break;
	case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
	case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
	case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
	}
	return (fc_success_op(ap, rp, cp));
}

static int
opl_register_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
{
	size_t			len;
	caddr_t			virt;
	uint64_t		v;
	uint64_t		x;
	uint32_t		l;
	uint16_t		w;
	uint8_t			b;
	char			*service = fc_cell2ptr(cp->svc_name);
	struct fc_resource	*resp;
	int			error = 0;

	if (fc_cell2int(cp->nargs) != 2)
		return (fc_syntax_error(cp, "nargs must be 2"));

	virt = fc_cell2ptr(fc_arg(cp, 0));

	/*
	 * Determine the access width .. we can switch on the 2nd
	 * character of the name which is "rx!", "rl!", "rb!" or "rw!"
	 */
	switch (*(service + 1)) {
	case 'x':
		len = sizeof (x);
		x = fc_arg(cp, 1);
		v = x;
		break;
	case 'l':
		len = sizeof (l);
		l = fc_cell2uint32_t(fc_arg(cp, 1));
		v = l;
		break;
	case 'w':
		len = sizeof (w);
		w = fc_cell2uint16_t(fc_arg(cp, 1));
		v = w;
		break;
	case 'b':
		len = sizeof (b);
		b = fc_cell2uint8_t(fc_arg(cp, 1));
		v = b;
		break;
	}

	FC_DEBUG3(1, CE_CONT, "register_store (%s) %llx %llx\n",
	    service, virt, v);

	/*
	 * Check the alignment ...
	 */
	if (((intptr_t)virt & (len - 1)) != 0)
		return (fc_priv_error(cp, "unaligned access"));

	/*
	 * Find if this virt is 'within' a request we know about
	 */
	fc_lock_resource_list(rp);
	for (resp = rp->head; resp != NULL; resp = resp->next) {
		if (resp->type == RT_MAP) {
			if ((virt >= (caddr_t)resp->fc_map_virt) &&
			    ((virt + len) <=
			    ((caddr_t)resp->fc_map_virt + resp->fc_map_len)))
				break;
		} else if (resp->type == RT_CONTIGIOUS) {
			if ((virt >= (caddr_t)resp->fc_contig_virt) &&
			    ((virt + len) <=
			    ((caddr_t)resp->fc_contig_virt +
			    resp->fc_contig_len)))
				break;
		}
	}
	fc_unlock_resource_list(rp);

	if (resp == NULL)
		return (fc_priv_error(cp, "request not within "
		    "known mappings"));

	switch (len) {
	case sizeof (x):
		if (resp->type == RT_MAP)
			error = ddi_poke64(rp->child, (int64_t *)virt, x);
		else if (resp->type == RT_CONTIGIOUS)
			*(uint64_t *)virt = x;
		break;
	case sizeof (l):
		if (resp->type == RT_MAP)
			error = ddi_poke32(rp->child, (int32_t *)virt, l);
		else if (resp->type == RT_CONTIGIOUS)
			*(uint32_t *)virt = l;
		break;
	case sizeof (w):
		if (resp->type == RT_MAP)
			error = ddi_poke16(rp->child, (int16_t *)virt, w);
		else if (resp->type == RT_CONTIGIOUS)
			*(uint16_t *)virt = w;
		break;
	case sizeof (b):
		if (resp->type == RT_MAP)
			error = ddi_poke8(rp->child, (int8_t *)virt, b);
		else if (resp->type == RT_CONTIGIOUS)
			*(uint8_t *)virt = b;
		break;
	}

	if (error == DDI_FAILURE) {
		FC_DEBUG2(1, CE_CONT, "opl_register_store: access error "
		    "accessing virt %p len %d\n", virt, len);
		return (fc_priv_error(cp, "access error"));
	}

	cp->nresults = fc_int2cell(0);
	return (fc_success_op(ap, rp, cp));
}

/*
 * opl_claim_memory
 *
 * claim-memory (align size vhint -- vaddr)
 */
static int
opl_claim_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
{
	int			align, size, vhint;
	uint64_t		answer, alen;
	ndi_ra_request_t	request;
	struct fc_resource	*resp;

	if (fc_cell2int(cp->nargs) != 3)
		return (fc_syntax_error(cp, "nargs must be 3"));

	if (fc_cell2int(cp->nresults) < 1)
		return (fc_syntax_error(cp, "nresults must be >= 1"));

	vhint = fc_cell2int(fc_arg(cp, 2));
	size  = fc_cell2int(fc_arg(cp, 1));
	align = fc_cell2int(fc_arg(cp, 0));

	FC_DEBUG3(1, CE_CONT, "opl_claim_memory: align=0x%x size=0x%x "
	    "vhint=0x%x\n", align, size, vhint);

	if (size == 0) {
		cmn_err(CE_WARN, "opl_claim_memory - unable to allocate "
		    "contiguous memory of size zero\n");
		return (fc_priv_error(cp, "allocation error"));
	}

	if (vhint) {
		cmn_err(CE_WARN, "opl_claim_memory - vhint is not zero "
		    "vhint=0x%x - Ignoring Argument\n", vhint);
	}

	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
	request.ra_flags	= NDI_RA_ALLOC_BOUNDED;
	request.ra_boundbase	= 0;
	request.ra_boundlen	= 0xffffffff;
	request.ra_len		= size;
	request.ra_align_mask	= align - 1;

	if (ndi_ra_alloc(ddi_root_node(), &request, &answer, &alen,
	    "opl-fcodemem", NDI_RA_PASS) != NDI_SUCCESS) {
		cmn_err(CE_WARN, "opl_claim_memory - unable to allocate "
		    "contiguous memory\n");
		return (fc_priv_error(cp, "allocation error"));
	}

	FC_DEBUG2(1, CE_CONT, "opl_claim_memory: address allocated=0x%lx "
	    "size=0x%x\n", answer, alen);

	cp->nresults = fc_int2cell(1);
	fc_result(cp, 0) = answer;

	/*
	 * Log this resource ...
	 */
	resp = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
	resp->type = RT_CONTIGIOUS;
	resp->fc_contig_virt = (void *)answer;
	resp->fc_contig_len = size;
	fc_add_resource(rp, resp);

	return (fc_success_op(ap, rp, cp));
}

/*
 * opl_release_memory
 *
 * release-memory (size vaddr -- )
 */
static int
opl_release_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
{
	int32_t			vaddr, size;
	struct fc_resource	*resp;

	if (fc_cell2int(cp->nargs) != 2)
		return (fc_syntax_error(cp, "nargs must be 2"));

	if (fc_cell2int(cp->nresults) != 0)
		return (fc_syntax_error(cp, "nresults must be 0"));

	vaddr = fc_cell2int(fc_arg(cp, 1));
	size  = fc_cell2int(fc_arg(cp, 0));

	FC_DEBUG2(1, CE_CONT, "opl_release_memory: vaddr=0x%x size=0x%x\n",
	    vaddr, size);

	/*
	 * Find if this request matches a mapping resource we set up.
	 */
	fc_lock_resource_list(rp);
	for (resp = rp->head; resp != NULL; resp = resp->next) {
		if (resp->type != RT_CONTIGIOUS)
			continue;
		if (resp->fc_contig_virt != (void *)(uintptr_t)vaddr)
			continue;
		if (resp->fc_contig_len == size)
			break;
	}
	fc_unlock_resource_list(rp);

	if (resp == NULL)
		return (fc_priv_error(cp, "request doesn't match a "
		    "known mapping"));

	(void) ndi_ra_free(ddi_root_node(), vaddr, size,
	    "opl-fcodemem", NDI_RA_PASS);

	/*
	 * remove the resource from the list and release it.
	 */
	fc_rem_resource(rp, resp);
	kmem_free(resp, sizeof (struct fc_resource));

	cp->nresults = fc_int2cell(0);

	return (fc_success_op(ap, rp, cp));
}

/*
 * opl_vtop
 *
 * vtop (vaddr -- paddr.lo paddr.hi)
 */
static int
opl_vtop(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
{
	int			vaddr;
	uint64_t		paddr;
	struct fc_resource	*resp;

	if (fc_cell2int(cp->nargs) != 1)
		return (fc_syntax_error(cp, "nargs must be 1"));

	if (fc_cell2int(cp->nresults) >= 3)
		return (fc_syntax_error(cp, "nresults must be less than 3"));

	vaddr = fc_cell2int(fc_arg(cp, 0));

	/*
	 * Find if this request matches a mapping resource we set up.
	 */
	fc_lock_resource_list(rp);
	for (resp = rp->head; resp != NULL; resp = resp->next) {
		if (resp->type != RT_CONTIGIOUS)
			continue;
		if (resp->fc_contig_virt == (void *)(uintptr_t)vaddr)
			break;
	}
	fc_unlock_resource_list(rp);

	if (resp == NULL)
		return (fc_priv_error(cp, "request doesn't match a "
		    "known mapping"));

	paddr = va_to_pa((void *)(uintptr_t)vaddr);

	FC_DEBUG2(1, CE_CONT, "opl_vtop: vaddr=0x%x paddr=0x%x\n",
	    vaddr, paddr);

	cp->nresults = fc_int2cell(2);

	fc_result(cp, 0) = paddr;
	fc_result(cp, 1) = 0;

	return (fc_success_op(ap, rp, cp));
}

static int
opl_config_child(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
{
	fc_phandle_t	h;

	if (fc_cell2int(cp->nargs) != 0)
		return (fc_syntax_error(cp, "nargs must be 0"));

	if (fc_cell2int(cp->nresults) < 1)
		return (fc_syntax_error(cp, "nresults must be >= 1"));

	h = fc_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child);

	cp->nresults = fc_int2cell(1);
	fc_result(cp, 0) = fc_phandle2cell(h);

	return (fc_success_op(ap, rp, cp));
}

static int
opl_get_fcode(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
{
	caddr_t		dropin_name_virt, fcode_virt;
	char		*dropin_name, *fcode;
	int		fcode_len, status;

	if (fc_cell2int(cp->nargs) != 3)
		return (fc_syntax_error(cp, "nargs must be 3"));

	if (fc_cell2int(cp->nresults) < 1)
		return (fc_syntax_error(cp, "nresults must be >= 1"));

	dropin_name_virt = fc_cell2ptr(fc_arg(cp, 0));

	fcode_virt = fc_cell2ptr(fc_arg(cp, 1));

	fcode_len = fc_cell2int(fc_arg(cp, 2));

	dropin_name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);

	FC_DEBUG2(1, CE_CONT, "get_fcode: %x %d\n", fcode_virt, fcode_len);

	if (copyinstr(fc_cell2ptr(dropin_name_virt), dropin_name,
	    FC_SVC_NAME_LEN - 1, NULL)) {
		FC_DEBUG1(1, CE_CONT, "opl_get_fcode: "
		    "fault copying in drop in name %p\n", dropin_name_virt);
		status = 0;
	} else {
		FC_DEBUG1(1, CE_CONT, "get_fcode: %s\n", dropin_name);

		fcode = kmem_zalloc(fcode_len, KM_SLEEP);

		if ((status = prom_get_fcode(dropin_name, fcode)) != 0) {

			if (copyout((void *)fcode, (void *)fcode_virt,
			    fcode_len)) {
				cmn_err(CE_WARN, " opl_get_fcode: Unable "
				    "to copy out fcode image");
				status = 0;
			}
		}

		kmem_free(fcode, fcode_len);
	}

	kmem_free(dropin_name, FC_SVC_NAME_LEN);

	cp->nresults = fc_int2cell(1);
	fc_result(cp, 0) = status;

	return (fc_success_op(ap, rp, cp));
}

static int
opl_get_fcode_size(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
{
	caddr_t		virt;
	char		*dropin_name;
	int		len;

	if (fc_cell2int(cp->nargs) != 1)
		return (fc_syntax_error(cp, "nargs must be 1"));

	if (fc_cell2int(cp->nresults) < 1)
		return (fc_syntax_error(cp, "nresults must be >= 1"));

	virt = fc_cell2ptr(fc_arg(cp, 0));

	dropin_name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);

	FC_DEBUG0(1, CE_CONT, "opl_get_fcode_size:\n");

	if (copyinstr(fc_cell2ptr(virt), dropin_name,
	    FC_SVC_NAME_LEN - 1, NULL)) {
		FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: "
		    "fault copying in drop in name %p\n", virt);
		len = 0;
	} else {
		FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: %s\n", dropin_name);

		len = prom_get_fcode_size(dropin_name);
	}

	kmem_free(dropin_name, FC_SVC_NAME_LEN);

	FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: fcode_len = %d\n", len);

	cp->nresults = fc_int2cell(1);
	fc_result(cp, 0) = len;

	return (fc_success_op(ap, rp, cp));
}

static int
opl_map_phys(dev_info_t *dip, struct regspec *phys_spec,
    caddr_t *addrp, ddi_device_acc_attr_t *accattrp,
    ddi_acc_handle_t *handlep)
{
	ddi_map_req_t	mapreq;
	ddi_acc_hdl_t	*acc_handlep;
	int		result;
	struct regspec	*rspecp;

	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	acc_handlep = impl_acc_hdl_get(*handlep);
	acc_handlep->ah_vers = VERS_ACCHDL;
	acc_handlep->ah_dip = dip;
	acc_handlep->ah_rnumber = 0;
	acc_handlep->ah_offset = 0;
	acc_handlep->ah_len = 0;
	acc_handlep->ah_acc = *accattrp;
	rspecp = kmem_zalloc(sizeof (struct regspec), KM_SLEEP);
	*rspecp = *phys_spec;
	/*
	 * cache a copy of the reg spec
	 */
	acc_handlep->ah_bus_private = rspecp;

	mapreq.map_op = DDI_MO_MAP_LOCKED;
	mapreq.map_type = DDI_MT_REGSPEC;
	mapreq.map_obj.rp = (struct regspec *)phys_spec;
	mapreq.map_prot = PROT_READ | PROT_WRITE;
	mapreq.map_flags = DDI_MF_KERNEL_MAPPING;
	mapreq.map_handlep = acc_handlep;
	mapreq.map_vers = DDI_MAP_VERSION;

	result = ddi_map(dip, &mapreq, 0, 0, addrp);

	if (result != DDI_SUCCESS) {
		impl_acc_hdl_free(*handlep);
		*handlep = (ddi_acc_handle_t)NULL;
	} else {
		acc_handlep->ah_addr = *addrp;
	}

	return (result);
}

static void
opl_unmap_phys(ddi_acc_handle_t *handlep)
{
	ddi_map_req_t	mapreq;
	ddi_acc_hdl_t	*acc_handlep;
	struct regspec	*rspecp;

	acc_handlep = impl_acc_hdl_get(*handlep);
	ASSERT(acc_handlep);
	rspecp = acc_handlep->ah_bus_private;

	mapreq.map_op = DDI_MO_UNMAP;
	mapreq.map_type = DDI_MT_REGSPEC;
	mapreq.map_obj.rp = (struct regspec *)rspecp;
	mapreq.map_prot = PROT_READ | PROT_WRITE;
	mapreq.map_flags = DDI_MF_KERNEL_MAPPING;
	mapreq.map_handlep = acc_handlep;
	mapreq.map_vers = DDI_MAP_VERSION;

	(void) ddi_map(acc_handlep->ah_dip, &mapreq, acc_handlep->ah_offset,
	    acc_handlep->ah_len, &acc_handlep->ah_addr);

	impl_acc_hdl_free(*handlep);
	/*
	 * Free the cached copy
	 */
	kmem_free(rspecp, sizeof (struct regspec));
	*handlep = (ddi_acc_handle_t)NULL;
}
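
/*
 * get-hwd-va
 *
 * Copy the hardware descriptor data for the given portid (the CMU-CH
 * descriptor or the PCI-CH leaf descriptor) out to a caller-supplied
 * buffer.  The result cell is 1 on success and 0 if the copyout fails.
 */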
static int
opl_get_hwd_va(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
{
	uint32_t	portid;
	void		*hwd_virt;
	hwd_header_t	*hwd_h = NULL;
	hwd_sb_t	*hwd_sb = NULL;
	int		lsb, ch, leaf;
	int		status = 1;

	/* Check the argument */
	if (fc_cell2int(cp->nargs) != 2)
		return (fc_syntax_error(cp, "nargs must be 2"));

	if (fc_cell2int(cp->nresults) < 1)
		return (fc_syntax_error(cp, "nresults must be >= 1"));

	/* Get the parameters */
	portid = fc_cell2uint32_t(fc_arg(cp, 0));
	hwd_virt = (void *)fc_cell2ptr(fc_arg(cp, 1));

	/* Get the ID numbers */
	lsb = OPL_IO_PORTID_TO_LSB(portid);
	ch = OPL_PORTID_TO_CHANNEL(portid);
	leaf = OPL_PORTID_TO_LEAF(portid);
	ASSERT(OPL_IO_PORTID(lsb, ch, leaf) == portid);

	/* Set the pointer of hwd. */
	if ((hwd_h = (hwd_header_t *)opl_boards[lsb].cfg_hwd) == NULL) {
		return (fc_priv_error(cp, "null hwd header"));
	}
	/* Set the pointer of hwd sb. */
	if ((hwd_sb = (hwd_sb_t *)((char *)hwd_h + hwd_h->hdr_sb_info_offset))
	    == NULL) {
		return (fc_priv_error(cp, "null hwd sb"));
	}

	if (ch == OPL_CMU_CHANNEL) {
		/* Copyout CMU-CH HW Descriptor */
		if (copyout((void *)&hwd_sb->sb_cmu.cmu_ch,
		    (void *)hwd_virt, sizeof (hwd_cmu_chan_t))) {
			cmn_err(CE_WARN, "opl_get_hwd_va: "
			    "Unable to copy out cmuch descriptor for %x",
			    portid);
			status = 0;
		}
	} else {
		/* Copyout PCI-CH HW Descriptor */
		if (copyout((void *)&hwd_sb->sb_pci_ch[ch].pci_leaf[leaf],
		    (void *)hwd_virt, sizeof (hwd_leaf_t))) {
			cmn_err(CE_WARN, "opl_get_hwd_va: "
			    "Unable to copy out pcich descriptor for %x",
			    portid);
			status = 0;
		}
	}

	cp->nresults = fc_int2cell(1);
	fc_result(cp, 0) = status;

	return (fc_success_op(ap, rp, cp));
}

/*
 * Set the properties for a leaf node (Oberon leaf or CMU channel leaf).
 */
/*ARGSUSED*/
static int
opl_create_leaf(dev_info_t *node, void *arg, uint_t flags)
{
	int	ret;

	OPL_UPDATE_PROP(string, node, "name", OPL_PCI_LEAF_NODE);

	OPL_UPDATE_PROP(string, node, "status", "okay");

	return (DDI_WALK_TERMINATE);
}

static char *
opl_get_probe_string(opl_probe_t *probe, int channel, int leaf)
{
	char	*probe_string;
	int	portid;

	probe_string = kmem_zalloc(PROBE_STR_SIZE, KM_SLEEP);

	if (channel == OPL_CMU_CHANNEL)
		portid = probe->pr_sb->sb_cmu.cmu_ch.chan_portid;
	else
		portid = probe->
		    pr_sb->sb_pci_ch[channel].pci_leaf[leaf].leaf_port_id;

	(void) sprintf(probe_string, "%x", portid);

	return (probe_string);
}

static int
opl_probe_leaf(opl_probe_t *probe)
{
	int		channel, leaf, portid, error, circ;
	int		board;
	fco_handle_t	fco_handle, *cfg_handle;
	dev_info_t	*parent, *leaf_node;
	char		unit_address[UNIT_ADDR_SIZE];
	char		*probe_string;
	opl_board_cfg_t	*board_cfg;

	board = probe->pr_board;
	channel = probe->pr_channel;
	leaf = probe->pr_leaf;
	parent = ddi_root_node();
	board_cfg = &opl_boards[board];

	ASSERT(OPL_VALID_CHANNEL(channel));
	ASSERT(OPL_VALID_LEAF(leaf));

	if (channel == OPL_CMU_CHANNEL) {
		portid = probe->pr_sb->sb_cmu.cmu_ch.chan_portid;
		cfg_handle = &board_cfg->cfg_cmuch_handle;
	} else {
		portid = probe->
		    pr_sb->sb_pci_ch[channel].pci_leaf[leaf].leaf_port_id;
		cfg_handle = &board_cfg->cfg_pcich_handle[channel][leaf];
	}

	/*
	 * Prevent any changes to leaf_node until we have bound
	 * it to the correct driver.

static int
opl_probe_leaf(opl_probe_t *probe)
{
	int channel, leaf, portid, error, circ;
	int board;
	fco_handle_t fco_handle, *cfg_handle;
	dev_info_t *parent, *leaf_node;
	char unit_address[UNIT_ADDR_SIZE];
	char *probe_string;
	opl_board_cfg_t *board_cfg;

	board = probe->pr_board;
	channel = probe->pr_channel;
	leaf = probe->pr_leaf;
	parent = ddi_root_node();
	board_cfg = &opl_boards[board];

	ASSERT(OPL_VALID_CHANNEL(channel));
	ASSERT(OPL_VALID_LEAF(leaf));

	if (channel == OPL_CMU_CHANNEL) {
		portid = probe->pr_sb->sb_cmu.cmu_ch.chan_portid;
		cfg_handle = &board_cfg->cfg_cmuch_handle;
	} else {
		portid = probe->
		    pr_sb->sb_pci_ch[channel].pci_leaf[leaf].leaf_port_id;
		cfg_handle = &board_cfg->cfg_pcich_handle[channel][leaf];
	}

	/*
	 * Prevent any changes to leaf_node until we have bound
	 * it to the correct driver.
	 */
	ndi_devi_enter(parent, &circ);

	/*
	 * Ideally, fcode would be run from the "sid_branch_create"
	 * callback (that is the primary purpose of that callback).
	 * However, the fcode interpreter was written with the
	 * assumption that the "new_child" was linked into the
	 * device tree. The callback is invoked with the devinfo node
	 * in the DS_PROTO state. More investigation is needed before
	 * we can invoke the interpreter from the callback. For now,
	 * we create the "new_child" in the BOUND state, invoke the
	 * fcode interpreter and then rebind the dip to use any
	 * compatible properties created by fcode.
	 */

	probe->pr_parent = parent;
	probe->pr_create = opl_create_leaf;
	probe->pr_hold = 1;

	leaf_node = opl_create_node(probe);
	if (leaf_node == NULL) {

		cmn_err(CE_WARN, "IKP: create leaf (%d-%d-%d) failed",
		    probe->pr_board, probe->pr_channel, probe->pr_leaf);
		ndi_devi_exit(parent, circ);
		return (-1);
	}

	/*
	 * The platform DR interfaces created the dip in
	 * bound state. Bring devinfo node down to linked
	 * state and hold it there until compatible
	 * properties are created.
	 */
	e_ddi_branch_rele(leaf_node);
	(void) i_ndi_unconfig_node(leaf_node, DS_LINKED, 0);
	ASSERT(i_ddi_node_state(leaf_node) == DS_LINKED);
	e_ddi_branch_hold(leaf_node);

	mutex_enter(&DEVI(leaf_node)->devi_lock);
	DEVI(leaf_node)->devi_flags |= DEVI_NO_BIND;
	mutex_exit(&DEVI(leaf_node)->devi_lock);

	/*
	 * Drop the busy-hold on parent before calling
	 * fcode_interpreter to prevent potential deadlocks
	 */
	ndi_devi_exit(parent, circ);

	(void) sprintf(unit_address, "%x", portid);

	/*
	 * Get the probe string
	 */
	probe_string = opl_get_probe_string(probe, channel, leaf);

	/*
	 * The fcode pointer specified here is NULL and the fcode
	 * size specified here is 0. This causes the user-level
	 * fcode interpreter to issue a request to the fcode
	 * driver to get the Oberon/cmu-ch fcode.
	 */
	fco_handle = opl_fc_ops_alloc_handle(parent, leaf_node,
	    NULL, 0, unit_address, probe_string);

	error = fcode_interpreter(parent, &opl_fc_do_op, fco_handle);

	if (error != 0) {
		cmn_err(CE_WARN, "IKP: Unable to probe PCI leaf (%d-%d-%d)",
		    probe->pr_board, probe->pr_channel, probe->pr_leaf);

		opl_fc_ops_free_handle(fco_handle);

		if (probe_string != NULL)
			kmem_free(probe_string, PROBE_STR_SIZE);

		(void) opl_destroy_node(leaf_node);
	} else {
		*cfg_handle = fco_handle;

		if (channel == OPL_CMU_CHANNEL)
			board_cfg->cfg_cmuch_probe_str = probe_string;
		else
			board_cfg->cfg_pcich_probe_str[channel][leaf]
			    = probe_string;

		/*
		 * Compatible properties (if any) have been created,
		 * so bind driver.
		 */
		ndi_devi_enter(parent, &circ);
		ASSERT(i_ddi_node_state(leaf_node) <= DS_LINKED);

		mutex_enter(&DEVI(leaf_node)->devi_lock);
		DEVI(leaf_node)->devi_flags &= ~DEVI_NO_BIND;
		mutex_exit(&DEVI(leaf_node)->devi_lock);

		ndi_devi_exit(parent, circ);

		if (ndi_devi_bind_driver(leaf_node, 0) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "IKP: Unable to bind PCI leaf (%d-%d-%d)",
			    probe->pr_board, probe->pr_channel,
			    probe->pr_leaf);
		}
	}

	if ((error != 0) && (channel == OPL_CMU_CHANNEL))
		return (-1);

	return (0);
}
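
/*
 * Summary of the node-state sequence used by opl_probe_leaf() above:
 *
 *   1. opl_create_node() creates the leaf in the BOUND state via the
 *	platform DR branch interfaces.
 *   2. The node is demoted to DS_LINKED and DEVI_NO_BIND is set so that
 *	nothing can bind to it while its properties are incomplete.
 *   3. The user-level fcode interpreter runs against the node and creates
 *	the "compatible" (and other) properties.
 *   4. DEVI_NO_BIND is cleared and ndi_devi_bind_driver() rebinds the
 *	node using the newly created properties.
 *
 * On interpreter failure the node is destroyed; such a failure is fatal
 * to the board probe only for the CMU channel.
 */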

static void
opl_init_leaves(int myboard)
{
	dev_info_t *parent, *node;
	char *name;
	int circ, ret;
	int len, portid, board, channel, leaf;
	opl_board_cfg_t *cfg;

	parent = ddi_root_node();

	/*
	 * Hold parent node busy to walk its child list
	 */
	ndi_devi_enter(parent, &circ);

	for (node = ddi_get_child(parent);
	    (node != NULL);
	    node = ddi_get_next_sibling(node)) {

		ret = OPL_GET_PROP(string, node, "name", &name, &len);
		if (ret != DDI_PROP_SUCCESS) {
			/*
			 * The property does not exist for this node.
			 */
			continue;
		}

		if (strncmp(name, OPL_PCI_LEAF_NODE, len) == 0) {

			ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
			if (ret == DDI_PROP_SUCCESS) {

				ret = OPL_GET_PROP(int, node, "board#",
				    &board, -1);
				if ((ret != DDI_PROP_SUCCESS) ||
				    (board != myboard))
					continue;

				cfg = &opl_boards[board];
				channel = OPL_PORTID_TO_CHANNEL(portid);
				if (channel == OPL_CMU_CHANNEL) {

					if (cfg->cfg_cmuch_handle != NULL)
						cfg->cfg_cmuch_leaf = node;

				} else {

					leaf = OPL_PORTID_TO_LEAF(portid);
					if (cfg->cfg_pcich_handle
					    [channel][leaf] != NULL)
						cfg->cfg_pcich_leaf
						    [channel][leaf] = node;
				}
			}
		}

		kmem_free(name, len);
		if (ret != DDI_PROP_SUCCESS)
			break;
	}

	ndi_devi_exit(parent, circ);
}

/*
 * Create "pci" node and hierarchy for the Oberon channels and the
 * CMU channel.
 */
/*ARGSUSED*/
static int
opl_probe_io(opl_probe_t *probe)
{

	int i, j;
	hwd_pci_ch_t *channels;

	if (HWD_STATUS_OK(probe->pr_sb->sb_cmu.cmu_ch.chan_status)) {

		probe->pr_channel = HWD_CMU_CHANNEL;
		probe->pr_channel_status =
		    probe->pr_sb->sb_cmu.cmu_ch.chan_status;
		probe->pr_leaf = 0;
		probe->pr_leaf_status = probe->pr_channel_status;

		if (opl_probe_leaf(probe) != 0)
			return (-1);
	}

	channels = &probe->pr_sb->sb_pci_ch[0];

	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {

		if (!HWD_STATUS_OK(channels[i].pci_status))
			continue;

		probe->pr_channel = i;
		probe->pr_channel_status = channels[i].pci_status;

		for (j = 0; j < HWD_LEAVES_PER_PCI_CHANNEL; j++) {

			probe->pr_leaf = j;
			probe->pr_leaf_status =
			    channels[i].pci_leaf[j].leaf_status;

			if (!HWD_STATUS_OK(probe->pr_leaf_status))
				continue;

			(void) opl_probe_leaf(probe);
		}
	}
	opl_init_leaves(probe->pr_board);
	return (0);
}
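
/*
 * Illustrative sketch (not part of the driver): the probe/unprobe entry
 * points defined below are intended to be driven by platform DR code
 * roughly as follows.  The function name is hypothetical.
 *
 *	int
 *	connect_board(int board)
 *	{
 *		if (opl_probe_sb(board) != 0) {
 *			cmn_err(CE_WARN,
 *			    "IKP: probe of board %d failed", board);
 *			return (-1);
 *		}
 *		return (0);
 *	}
 *
 * Note that opl_probe_sb() already calls opl_unprobe_sb() when a probe
 * fails, so the caller does not need to unwind a failed probe itself.
 */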

/*
 * Perform the probe in the following order:
 *
 *	processors
 *	memory
 *	IO
 *
 * Each probe function returns 0 on success and a non-zero value on failure.
 * What constitutes a failure is determined by the implementor of the probe
 * function. For example, while probing CPUs, any error encountered during
 * probe is considered a failure and causes the whole probe operation to
 * fail. However, for I/O, an error encountered while probing one device
 * should not prevent other devices from being probed, and should not cause
 * the whole probe operation to fail.
 */
int
opl_probe_sb(int board)
{
	opl_probe_t *probe;
	int ret;

	if ((board < 0) || (board >= HWD_SBS_PER_DOMAIN))
		return (-1);

	ASSERT(opl_cfg_inited != 0);

	/*
	 * If the previous probe failed and left a partially configured
	 * board, we need to unprobe the board and start with a clean slate.
	 */
	if ((opl_boards[board].cfg_hwd != NULL) &&
	    (opl_unprobe_sb(board) != 0))
		return (-1);

	ret = 0;

	probe = kmem_zalloc(sizeof (opl_probe_t), KM_SLEEP);
	probe->pr_board = board;

	if ((opl_probe_init(probe) != 0) ||

	    (opl_probe_cpu_chips(probe) != 0) ||

	    (opl_probe_memory(probe) != 0) ||

	    (opl_probe_io(probe) != 0)) {

		/*
		 * Probe failed. Perform cleanup.
		 */
		(void) opl_unprobe_sb(board);
		ret = -1;
	}

	kmem_free(probe, sizeof (opl_probe_t));

	return (ret);
}

/*
 * This unprobing also includes CMU-CH.
 */
/*ARGSUSED*/
static int
opl_unprobe_io(int board)
{
	int i, j, ret;
	opl_board_cfg_t *board_cfg;
	dev_info_t **node;
	fco_handle_t *hand;
	char **probe_str;

	board_cfg = &opl_boards[board];

	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {

		for (j = 0; j < HWD_LEAVES_PER_PCI_CHANNEL; j++) {

			node = &board_cfg->cfg_pcich_leaf[i][j];
			hand = &board_cfg->cfg_pcich_handle[i][j];
			probe_str = &board_cfg->cfg_pcich_probe_str[i][j];

			if (*node == NULL)
				continue;

			if (*hand != NULL) {
				opl_fc_ops_free_handle(*hand);
				*hand = NULL;
			}

			if (*probe_str != NULL) {
				kmem_free(*probe_str, PROBE_STR_SIZE);
				*probe_str = NULL;
			}

			ret = opl_destroy_node(*node);
			if (ret != 0) {

				cmn_err(CE_WARN,
				    "IKP: destroy pci (%d-%d-%d) failed",
				    board, i, j);
				return (-1);
			}

			*node = NULL;

		}
	}

	node = &board_cfg->cfg_cmuch_leaf;
	hand = &board_cfg->cfg_cmuch_handle;
	probe_str = &board_cfg->cfg_cmuch_probe_str;

	if (*node == NULL)
		return (0);

	if (*hand != NULL) {
		opl_fc_ops_free_handle(*hand);
		*hand = NULL;
	}

	if (*probe_str != NULL) {
		kmem_free(*probe_str, PROBE_STR_SIZE);
		*probe_str = NULL;
	}

	if (opl_destroy_node(*node) != 0) {

		cmn_err(CE_WARN, "IKP: destroy pci (%d-%d-%d) failed",
		    board, OPL_CMU_CHANNEL, 0);
		return (-1);
	}

	*node = NULL;

	return (0);
}

/*
 * Destroy the "pseudo-mc" node for a board.
 */
2432 */ 2433 static int 2434 opl_unprobe_memory(int board) 2435 { 2436 opl_board_cfg_t *board_cfg; 2437 2438 board_cfg = &opl_boards[board]; 2439 2440 if (board_cfg->cfg_pseudo_mc == NULL) 2441 return (0); 2442 2443 if (opl_destroy_node(board_cfg->cfg_pseudo_mc) != 0) { 2444 2445 cmn_err(CE_WARN, "IKP: destroy pseudo-mc (%d) failed", board); 2446 return (-1); 2447 } 2448 2449 board_cfg->cfg_pseudo_mc = NULL; 2450 2451 return (0); 2452 } 2453 2454 /* 2455 * Destroy the "cmp" nodes for a board. This also destroys the "core" 2456 * and "cpu" nodes below the "cmp" nodes. 2457 */ 2458 static int 2459 opl_unprobe_processors(int board) 2460 { 2461 int i; 2462 dev_info_t **cfg_cpu_chips; 2463 2464 cfg_cpu_chips = opl_boards[board].cfg_cpu_chips; 2465 2466 for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) { 2467 2468 if (cfg_cpu_chips[i] == NULL) 2469 continue; 2470 2471 if (opl_destroy_node(cfg_cpu_chips[i]) != 0) { 2472 2473 cmn_err(CE_WARN, 2474 "IKP: destroy chip (%d-%d) failed", board, i); 2475 return (-1); 2476 } 2477 2478 cfg_cpu_chips[i] = NULL; 2479 } 2480 2481 return (0); 2482 } 2483 2484 /* 2485 * Perform the unprobe in the following order: 2486 * 2487 * IO 2488 * memory 2489 * processors 2490 */ 2491 int 2492 opl_unprobe_sb(int board) 2493 { 2494 if ((board < 0) || (board >= HWD_SBS_PER_DOMAIN)) 2495 return (-1); 2496 2497 ASSERT(opl_cfg_inited != 0); 2498 2499 if ((opl_unprobe_io(board) != 0) || 2500 2501 (opl_unprobe_memory(board) != 0) || 2502 2503 (opl_unprobe_processors(board) != 0)) 2504 2505 return (-1); 2506 2507 if (opl_boards[board].cfg_hwd != NULL) { 2508 #ifdef UCTEST 2509 size_t size = 0xA000; 2510 #endif 2511 /* Release the memory for the HWD */ 2512 void *hwdp = opl_boards[board].cfg_hwd; 2513 opl_boards[board].cfg_hwd = NULL; 2514 #ifdef UCTEST 2515 hwdp = (void *)((char *)hwdp - 0x1000); 2516 hat_unload(kas.a_hat, hwdp, size, HAT_UNLOAD_UNLOCK); 2517 vmem_free(heap_arena, hwdp, size); 2518 #else 2519 kmem_free(hwdp, HWD_DATA_SIZE); 2520 #endif 2521 } 2522 return (0); 2523 } 2524 2525 /* 2526 * For MAC patrol support, we need to update the PA-related properties 2527 * when there is a copy-rename event. This should be called after the 2528 * physical copy and rename has been done by DR, and before the MAC 2529 * patrol is restarted. 2530 */ 2531 int 2532 oplcfg_pa_swap(int from, int to) 2533 { 2534 dev_info_t *from_node = opl_boards[from].cfg_pseudo_mc; 2535 dev_info_t *to_node = opl_boards[to].cfg_pseudo_mc; 2536 opl_range_t *rangef, *ranget; 2537 int elems; 2538 int ret; 2539 2540 if ((OPL_GET_PROP_ARRAY(int, from_node, "sb-mem-ranges", rangef, 2541 elems) != DDI_SUCCESS) || (elems != 4)) { 2542 /* XXX -- bad news */ 2543 return (-1); 2544 } 2545 if ((OPL_GET_PROP_ARRAY(int, to_node, "sb-mem-ranges", ranget, 2546 elems) != DDI_SUCCESS) || (elems != 4)) { 2547 /* XXX -- bad news */ 2548 return (-1); 2549 } 2550 OPL_UPDATE_PROP_ARRAY(int, from_node, "sb-mem-ranges", (int *)ranget, 2551 4); 2552 OPL_UPDATE_PROP_ARRAY(int, to_node, "sb-mem-ranges", (int *)rangef, 2553 4); 2554 2555 OPL_FREE_PROP(ranget); 2556 OPL_FREE_PROP(rangef); 2557 2558 return (0); 2559 } 2560