1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright 2019 Joyent, Inc. 24 * Copyright 2019 Western Digital Corporation 25 * Copyright 2020 OmniOS Community Edition (OmniOSce) Association. 26 */ 27 28 /* 29 * PCI bus enumeration and device programming are done in several passes. The 30 * following is a high level overview of this process. 31 * 32 * pci_enumerate(reprogram=0) 33 * The main entry point to PCI bus enumeration is 34 * pci_enumerate(). This function is invoked 35 * twice, once to set up the PCI portion of the 36 * device tree, and then a second time to 37 * reprogram any devices which were not set up by 38 * the system firmware. On this first call, the 39 * reprogram parameter is set to 0. 40 * add_pci_fixes() 41 * enumerate_bus_devs(CONFIG_FIX) 42 * <foreach bus> 43 * process_devfunc(CONFIG_FIX) 44 * Some devices need a specific action taking in 45 * order for subsequent enumeration to be 46 * successful. add_pci_fixes() retrieves the 47 * vendor and device IDs for each item on the bus 48 * and applies fixes as required. It also creates 49 * a list which is used by undo_pci_fixes() to 50 * reverse the process later. 51 * pci_setup_tree() 52 * enumerate_bus_devs(CONFIG_INFO) 53 * <foreach bus> 54 * process_devfunc(CONFIG_INFO) 55 * <set up most device properties> 56 * The next stage is to enumerate the bus and set 57 * up the bulk of the properties for each device. 58 * This is where the generic properties such as 59 * 'device-id' are created. 60 * <if PPB device> 61 * add_ppb_props() 62 * For a PCI-to-PCI bridge (ppb) device, any 63 * memory ranges for IO, memory or pre-fetchable 64 * memory that have been programmed by the system 65 * firmware (BIOS/EFI) are retrieved and stored in 66 * bus-specific lists (pci_bus_res[bus].io_avail, 67 * mem_avail and pmem_avail). The contents of 68 * these lists are used to set the initial 'ranges' 69 * property on the ppb device. Later, as children 70 * are found for this bridge, resources will be 71 * removed from these avail lists as necessary. 72 * This is an initial pass so the ppb devices can 73 * still be reprogrammed later in fix_ppb_res(). 74 * <else> 75 * <add to list of non-PPB devices for the bus> 76 * Any non-PPB device on the bus is recorded in a 77 * bus-specific list, to be set up (and possibly 78 * reprogrammed) later. 79 * add_reg_props(CONFIG_INFO) 80 * The final step in this phase is to add the 81 * initial 'reg' and 'assigned-addresses' 82 * properties to all devices. At the same time, 83 * any IO or memory ranges which have been 84 * assigned to the bus are moved from the avail 85 * list to the corresponding used one. 
If no 86 * resources have been assigned to a device at 87 * this stage, then it is flagged for subsequent 88 * reprogramming. 89 * undo_pci_fixes() 90 * Any fixes which were applied in add_pci_fixes() 91 * are now undone before returning, using the 92 * undo list which was created earier. 93 * 94 * pci_enumerate(reprogram=1) 95 * The second bus enumeration pass is to take care 96 * of any devices that were not set up by the 97 * system firmware. These devices were flagged 98 * during the first pass. This pass is bracketed 99 * by the same pci fix application and removal as 100 * the first. 101 * add_pci_fixes() 102 * As for first pass. 103 * pci_reprogram() 104 * pci_scan_bbn() 105 * The ACPI namespace is scanned for top-level 106 * instances of _BBN in order to enumerate the 107 * root-bridges in the system. If a root bridge is 108 * found that has not been previously discovered 109 * (existence inferred through its children) then 110 * it is added to the system. 111 * <foreach ROOT bus> 112 * populate_bus_res() 113 * Find resources associated with this root bus 114 * from either ACPI or BIOS tables. See 115 * find_bus_res() in pci_resource.c 116 * <foreach bus> 117 * fix_ppb_res() 118 * Reprogram pci(e) bridges which have not already 119 * had resources assigned, or which are under a 120 * bus that has been flagged for reprogramming. 121 * If the parent bus has not been flagged, then IO 122 * space is reprogrammed only if there are no 123 * assigned IO resources. Memory space is 124 * reprogrammed only if there is both no assigned 125 * ordinary memory AND no assigned pre-fetchable 126 * memory. However, if memory reprogramming is 127 * necessary then both ordinary and prefetch are 128 * done together so that both memory ranges end up 129 * in the avail lists for add_reg_props() to find 130 * later. 131 * enumerate_bus_devs(CONFIG_NEW) 132 * <foreach non-PPB device on the bus> 133 * add_reg_props(CONFIG_NEW) 134 * Using the list of non-PPB devices on the bus 135 * which was assembled during the first pass, add 136 * or update the 'reg' and 'assigned-address' 137 * properties for these devices. For devices which 138 * have been flagged for reprogramming or have no 139 * assigned resources, this is where resources are 140 * finally assigned and programmed into the 141 * device. This can result in these properties 142 * changing from their previous values. 143 * <foreach bus> 144 * add_bus_available_prop() 145 * Finally, the 'available' properties is set on 146 * each device, representing that device's final 147 * unallocated (available) IO and memory ranges. 148 * undo_pci_fixes() 149 * As for first pass. 
150 */ 151 152 #include <sys/types.h> 153 #include <sys/stat.h> 154 #include <sys/sysmacros.h> 155 #include <sys/sunndi.h> 156 #include <sys/pci.h> 157 #include <sys/pci_impl.h> 158 #include <sys/pcie_impl.h> 159 #include <sys/memlist.h> 160 #include <sys/bootconf.h> 161 #include <io/pci/mps_table.h> 162 #include <sys/pci_cfgacc.h> 163 #include <sys/pci_cfgspace.h> 164 #include <sys/pci_cfgspace_impl.h> 165 #include <sys/psw.h> 166 #include "../../../../common/pci/pci_strings.h" 167 #include <sys/apic.h> 168 #include <io/pciex/pcie_nvidia.h> 169 #include <sys/hotplug/pci/pciehpc_acpi.h> 170 #include <sys/acpi/acpi.h> 171 #include <sys/acpica.h> 172 #include <sys/iommulib.h> 173 #include <sys/devcache.h> 174 #include <sys/pci_cfgacc_x86.h> 175 176 #define pci_getb (*pci_getb_func) 177 #define pci_getw (*pci_getw_func) 178 #define pci_getl (*pci_getl_func) 179 #define pci_putb (*pci_putb_func) 180 #define pci_putw (*pci_putw_func) 181 #define pci_putl (*pci_putl_func) 182 #define dcmn_err if (pci_boot_debug != 0) cmn_err 183 #define bus_debug(bus) (pci_boot_debug != 0 && pci_debug_bus_start != -1 && \ 184 pci_debug_bus_end != -1 && (bus) >= pci_debug_bus_start && \ 185 (bus) <= pci_debug_bus_end) 186 #define dump_memlists(tag, bus) \ 187 if (bus_debug((bus))) dump_memlists_impl((tag), (bus)) 188 189 #define CONFIG_INFO 0 190 #define CONFIG_UPDATE 1 191 #define CONFIG_NEW 2 192 #define CONFIG_FIX 3 193 #define COMPAT_BUFSIZE 512 194 195 #define PPB_IO_ALIGNMENT 0x1000 /* 4K aligned */ 196 #define PPB_MEM_ALIGNMENT 0x100000 /* 1M aligned */ 197 /* round down to nearest power of two */ 198 #define P2LE(align) \ 199 { \ 200 int i = 0; \ 201 while (align >>= 1) \ 202 i ++; \ 203 align = 1 << i; \ 204 } \ 205 206 /* for is_vga and list_is_vga_only */ 207 208 enum io_mem { 209 IO, 210 MEM 211 }; 212 213 /* for get_parbus_res */ 214 215 enum parbus_mem { 216 PB_IO, 217 PB_MEM, 218 PB_PMEM 219 }; 220 221 222 /* See AMD-8111 Datasheet Rev 3.03, Page 149: */ 223 #define LPC_IO_CONTROL_REG_1 0x40 224 #define AMD8111_ENABLENMI (uint8_t)0x80 225 #define DEVID_AMD8111_LPC 0x7468 226 227 struct pci_fixundo { 228 uint8_t bus; 229 uint8_t dev; 230 uint8_t fn; 231 void (*undofn)(uint8_t, uint8_t, uint8_t); 232 struct pci_fixundo *next; 233 }; 234 235 struct pci_devfunc { 236 struct pci_devfunc *next; 237 dev_info_t *dip; 238 uchar_t dev; 239 uchar_t func; 240 boolean_t reprogram; /* this device needs to be reprogrammed */ 241 }; 242 243 extern int apic_nvidia_io_max; 244 extern int pseudo_isa; 245 extern int pci_bios_maxbus; 246 static uchar_t max_dev_pci = 32; /* PCI standard */ 247 int pci_boot_debug = 0; 248 int pci_debug_bus_start = -1; 249 int pci_debug_bus_end = -1; 250 extern struct memlist *find_bus_res(int, int); 251 static struct pci_fixundo *undolist = NULL; 252 static int num_root_bus = 0; /* count of root buses */ 253 extern volatile int acpi_resource_discovery; 254 extern uint64_t mcfg_mem_base; 255 extern void pci_cfgacc_add_workaround(uint16_t, uchar_t, uchar_t); 256 extern dev_info_t *pcie_get_rc_dip(dev_info_t *); 257 258 /* 259 * Module prototypes 260 */ 261 static void enumerate_bus_devs(uchar_t bus, int config_op); 262 static void create_root_bus_dip(uchar_t bus); 263 static void process_devfunc(uchar_t, uchar_t, uchar_t, uchar_t, 264 ushort_t, int); 265 static void add_compatible(dev_info_t *, ushort_t, ushort_t, 266 ushort_t, ushort_t, uchar_t, uint_t, int); 267 static int add_reg_props(dev_info_t *, uchar_t, uchar_t, uchar_t, int, int); 268 static void add_ppb_props(dev_info_t *, 
uchar_t, uchar_t, uchar_t, int, 269 ushort_t); 270 static void add_model_prop(dev_info_t *, uint_t); 271 static void add_bus_range_prop(int); 272 static void add_bus_slot_names_prop(int); 273 static void add_ranges_prop(int, int); 274 static void add_bus_available_prop(int); 275 static int get_pci_cap(uchar_t bus, uchar_t dev, uchar_t func, uint8_t cap_id); 276 static void fix_ppb_res(uchar_t, boolean_t); 277 static void alloc_res_array(); 278 static void create_ioapic_node(int bus, int dev, int fn, ushort_t vendorid, 279 ushort_t deviceid); 280 static void pciex_slot_names_prop(dev_info_t *, ushort_t); 281 static void populate_bus_res(uchar_t bus); 282 static void memlist_remove_list(struct memlist **list, 283 struct memlist *remove_list); 284 static void ck804_fix_aer_ptr(dev_info_t *, pcie_req_id_t); 285 286 static void pci_scan_bbn(void); 287 static int pci_unitaddr_cache_valid(void); 288 static int pci_bus_unitaddr(int); 289 static void pci_unitaddr_cache_create(void); 290 291 static int pci_cache_unpack_nvlist(nvf_handle_t, nvlist_t *, char *); 292 static int pci_cache_pack_nvlist(nvf_handle_t, nvlist_t **); 293 static void pci_cache_free_list(nvf_handle_t); 294 295 extern int pci_slot_names_prop(int, char *, int); 296 297 /* set non-zero to force PCI peer-bus renumbering */ 298 int pci_bus_always_renumber = 0; 299 300 /* 301 * used to register ISA resource usage which must not be made 302 * "available" from other PCI node' resource maps 303 */ 304 static struct { 305 struct memlist *io_used; 306 struct memlist *mem_used; 307 } isa_res; 308 309 /* 310 * PCI unit-address cache management 311 */ 312 static nvf_ops_t pci_unitaddr_cache_ops = { 313 "/etc/devices/pci_unitaddr_persistent", /* path to cache */ 314 pci_cache_unpack_nvlist, /* read in nvlist form */ 315 pci_cache_pack_nvlist, /* convert to nvlist form */ 316 pci_cache_free_list, /* free data list */ 317 NULL /* write complete callback */ 318 }; 319 320 typedef struct { 321 list_node_t pua_nodes; 322 int pua_index; 323 int pua_addr; 324 } pua_node_t; 325 326 nvf_handle_t puafd_handle; 327 int pua_cache_valid = 0; 328 329 static void 330 dump_memlists_impl(const char *tag, int bus) 331 { 332 printf("Memlist dump at %s - bus %x\n", tag, bus); 333 if (pci_bus_res[bus].io_used != NULL) { 334 printf(" io_used "); 335 memlist_dump(pci_bus_res[bus].io_used); 336 } 337 if (pci_bus_res[bus].io_avail != NULL) { 338 printf(" io_avail "); 339 memlist_dump(pci_bus_res[bus].io_avail); 340 } 341 if (pci_bus_res[bus].mem_used != NULL) { 342 printf(" mem_used "); 343 memlist_dump(pci_bus_res[bus].mem_used); 344 } 345 if (pci_bus_res[bus].mem_avail != NULL) { 346 printf(" mem_avail "); 347 memlist_dump(pci_bus_res[bus].mem_avail); 348 } 349 if (pci_bus_res[bus].pmem_used != NULL) { 350 printf(" pmem_used "); 351 memlist_dump(pci_bus_res[bus].pmem_used); 352 } 353 if (pci_bus_res[bus].pmem_avail != NULL) { 354 printf(" pmem_avail "); 355 memlist_dump(pci_bus_res[bus].pmem_avail); 356 } 357 } 358 359 /*ARGSUSED*/ 360 static ACPI_STATUS 361 pci_process_acpi_device(ACPI_HANDLE hdl, UINT32 level, void *ctx, void **rv) 362 { 363 ACPI_DEVICE_INFO *adi; 364 int busnum; 365 366 /* 367 * Use AcpiGetObjectInfo() to find the device _HID 368 * If not a PCI root-bus, ignore this device and continue 369 * the walk 370 */ 371 if (ACPI_FAILURE(AcpiGetObjectInfo(hdl, &adi))) 372 return (AE_OK); 373 374 if (!(adi->Valid & ACPI_VALID_HID)) { 375 AcpiOsFree(adi); 376 return (AE_OK); 377 } 378 379 if (strncmp(adi->HardwareId.String, PCI_ROOT_HID_STRING, 380 sizeof 
(PCI_ROOT_HID_STRING)) && 381 strncmp(adi->HardwareId.String, PCI_EXPRESS_ROOT_HID_STRING, 382 sizeof (PCI_EXPRESS_ROOT_HID_STRING))) { 383 AcpiOsFree(adi); 384 return (AE_OK); 385 } 386 387 AcpiOsFree(adi); 388 389 /* 390 * acpica_get_busno() will check the presence of _BBN and 391 * fail if not present. It will then use the _CRS method to 392 * retrieve the actual bus number assigned, it will fall back 393 * to _BBN should the _CRS method fail. 394 */ 395 if (ACPI_SUCCESS(acpica_get_busno(hdl, &busnum))) { 396 /* 397 * Ignore invalid _BBN return values here (rather 398 * than panic) and emit a warning; something else 399 * may suffer failure as a result of the broken BIOS. 400 */ 401 if ((busnum < 0) || (busnum > pci_bios_maxbus)) { 402 dcmn_err(CE_NOTE, 403 "pci_process_acpi_device: invalid _BBN 0x%x", 404 busnum); 405 return (AE_CTRL_DEPTH); 406 } 407 408 /* PCI with valid _BBN */ 409 if (pci_bus_res[busnum].par_bus == (uchar_t)-1 && 410 pci_bus_res[busnum].dip == NULL) 411 create_root_bus_dip((uchar_t)busnum); 412 return (AE_CTRL_DEPTH); 413 } 414 415 /* PCI and no _BBN, continue walk */ 416 return (AE_OK); 417 } 418 419 /* 420 * Scan the ACPI namespace for all top-level instances of _BBN 421 * in order to discover childless root-bridges (which enumeration 422 * may not find; root-bridges are inferred by the existence of 423 * children). This scan should find all root-bridges that have 424 * been enumerated, and any childless root-bridges not enumerated. 425 * Root-bridge for bus 0 may not have a _BBN object. 426 */ 427 static void 428 pci_scan_bbn() 429 { 430 void *rv; 431 432 (void) AcpiGetDevices(NULL, pci_process_acpi_device, NULL, &rv); 433 } 434 435 static void 436 pci_unitaddr_cache_init(void) 437 { 438 439 puafd_handle = nvf_register_file(&pci_unitaddr_cache_ops); 440 ASSERT(puafd_handle); 441 442 list_create(nvf_list(puafd_handle), sizeof (pua_node_t), 443 offsetof(pua_node_t, pua_nodes)); 444 445 rw_enter(nvf_lock(puafd_handle), RW_WRITER); 446 (void) nvf_read_file(puafd_handle); 447 rw_exit(nvf_lock(puafd_handle)); 448 } 449 450 /* 451 * Format of /etc/devices/pci_unitaddr_persistent: 452 * 453 * The persistent record of unit-address assignments contains 454 * a list of name/value pairs, where name is a string representation 455 * of the "index value" of the PCI root-bus and the value is 456 * the assigned unit-address. 457 * 458 * The "index value" is simply the zero-based index of the PCI 459 * root-buses ordered by physical bus number; first PCI bus is 0, 460 * second is 1, and so on. 
461 */ 462 463 /*ARGSUSED*/ 464 static int 465 pci_cache_unpack_nvlist(nvf_handle_t hdl, nvlist_t *nvl, char *name) 466 { 467 long index; 468 int32_t value; 469 nvpair_t *np; 470 pua_node_t *node; 471 472 np = NULL; 473 while ((np = nvlist_next_nvpair(nvl, np)) != NULL) { 474 /* name of nvpair is index value */ 475 if (ddi_strtol(nvpair_name(np), NULL, 10, &index) != 0) 476 continue; 477 478 if (nvpair_value_int32(np, &value) != 0) 479 continue; 480 481 node = kmem_zalloc(sizeof (pua_node_t), KM_SLEEP); 482 node->pua_index = index; 483 node->pua_addr = value; 484 list_insert_tail(nvf_list(hdl), node); 485 } 486 487 pua_cache_valid = 1; 488 return (DDI_SUCCESS); 489 } 490 491 static int 492 pci_cache_pack_nvlist(nvf_handle_t hdl, nvlist_t **ret_nvl) 493 { 494 int rval; 495 nvlist_t *nvl, *sub_nvl; 496 list_t *listp; 497 pua_node_t *pua; 498 char buf[13]; 499 500 ASSERT(RW_WRITE_HELD(nvf_lock(hdl))); 501 502 rval = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP); 503 if (rval != DDI_SUCCESS) { 504 nvf_error("%s: nvlist alloc error %d\n", 505 nvf_cache_name(hdl), rval); 506 return (DDI_FAILURE); 507 } 508 509 sub_nvl = NULL; 510 rval = nvlist_alloc(&sub_nvl, NV_UNIQUE_NAME, KM_SLEEP); 511 if (rval != DDI_SUCCESS) 512 goto error; 513 514 listp = nvf_list(hdl); 515 for (pua = list_head(listp); pua != NULL; 516 pua = list_next(listp, pua)) { 517 (void) snprintf(buf, sizeof (buf), "%d", pua->pua_index); 518 rval = nvlist_add_int32(sub_nvl, buf, pua->pua_addr); 519 if (rval != DDI_SUCCESS) 520 goto error; 521 } 522 523 rval = nvlist_add_nvlist(nvl, "table", sub_nvl); 524 if (rval != DDI_SUCCESS) 525 goto error; 526 nvlist_free(sub_nvl); 527 528 *ret_nvl = nvl; 529 return (DDI_SUCCESS); 530 531 error: 532 nvlist_free(sub_nvl); 533 ASSERT(nvl); 534 nvlist_free(nvl); 535 *ret_nvl = NULL; 536 return (DDI_FAILURE); 537 } 538 539 static void 540 pci_cache_free_list(nvf_handle_t hdl) 541 { 542 list_t *listp; 543 pua_node_t *pua; 544 545 ASSERT(RW_WRITE_HELD(nvf_lock(hdl))); 546 547 listp = nvf_list(hdl); 548 for (pua = list_head(listp); pua != NULL; 549 pua = list_next(listp, pua)) { 550 list_remove(listp, pua); 551 kmem_free(pua, sizeof (pua_node_t)); 552 } 553 } 554 555 556 static int 557 pci_unitaddr_cache_valid(void) 558 { 559 560 /* read only, no need for rw lock */ 561 return (pua_cache_valid); 562 } 563 564 565 static int 566 pci_bus_unitaddr(int index) 567 { 568 pua_node_t *pua; 569 list_t *listp; 570 int addr; 571 572 rw_enter(nvf_lock(puafd_handle), RW_READER); 573 574 addr = -1; /* default return if no match */ 575 listp = nvf_list(puafd_handle); 576 for (pua = list_head(listp); pua != NULL; 577 pua = list_next(listp, pua)) { 578 if (pua->pua_index == index) { 579 addr = pua->pua_addr; 580 break; 581 } 582 } 583 584 rw_exit(nvf_lock(puafd_handle)); 585 return (addr); 586 } 587 588 static void 589 pci_unitaddr_cache_create(void) 590 { 591 int i, index; 592 pua_node_t *node; 593 list_t *listp; 594 595 rw_enter(nvf_lock(puafd_handle), RW_WRITER); 596 597 index = 0; 598 listp = nvf_list(puafd_handle); 599 for (i = 0; i <= pci_bios_maxbus; i++) { 600 /* skip non-root (peer) PCI busses */ 601 if ((pci_bus_res[i].par_bus != (uchar_t)-1) || 602 (pci_bus_res[i].dip == NULL)) 603 continue; 604 node = kmem_zalloc(sizeof (pua_node_t), KM_SLEEP); 605 node->pua_index = index++; 606 node->pua_addr = pci_bus_res[i].root_addr; 607 list_insert_tail(listp, node); 608 } 609 610 (void) nvf_mark_dirty(puafd_handle); 611 rw_exit(nvf_lock(puafd_handle)); 612 nvf_wake_daemon(); 613 } 614 615 616 /* 617 * Enumerate all 
PCI devices 618 */ 619 void 620 pci_setup_tree(void) 621 { 622 uint_t i, root_bus_addr = 0; 623 624 alloc_res_array(); 625 for (i = 0; i <= pci_bios_maxbus; i++) { 626 pci_bus_res[i].par_bus = (uchar_t)-1; 627 pci_bus_res[i].root_addr = (uchar_t)-1; 628 pci_bus_res[i].sub_bus = i; 629 } 630 631 pci_bus_res[0].root_addr = root_bus_addr++; 632 create_root_bus_dip(0); 633 enumerate_bus_devs(0, CONFIG_INFO); 634 635 /* 636 * Now enumerate peer busses 637 * 638 * We loop till pci_bios_maxbus. On most systems, there is 639 * one more bus at the high end, which implements the ISA 640 * compatibility bus. We don't care about that. 641 * 642 * Note: In the old (bootconf) enumeration, the peer bus 643 * address did not use the bus number, and there were 644 * too many peer busses created. The root_bus_addr is 645 * used to maintain the old peer bus address assignment. 646 * However, we stop enumerating phantom peers with no 647 * device below. 648 */ 649 for (i = 1; i <= pci_bios_maxbus; i++) { 650 if (pci_bus_res[i].dip == NULL) { 651 pci_bus_res[i].root_addr = root_bus_addr++; 652 } 653 enumerate_bus_devs(i, CONFIG_INFO); 654 655 /* add slot-names property for named pci hot-plug slots */ 656 add_bus_slot_names_prop(i); 657 } 658 } 659 660 /* 661 * >0 = present, 0 = not present, <0 = error 662 */ 663 static int 664 pci_bbn_present(int bus) 665 { 666 ACPI_HANDLE hdl; 667 int rv; 668 669 /* no dip means no _BBN */ 670 if (pci_bus_res[bus].dip == NULL) 671 return (0); 672 673 rv = -1; /* default return value in case of error below */ 674 if (ACPI_SUCCESS(acpica_get_handle(pci_bus_res[bus].dip, &hdl))) { 675 switch (AcpiEvaluateObject(hdl, "_BBN", NULL, NULL)) { 676 case AE_OK: 677 rv = 1; 678 break; 679 case AE_NOT_FOUND: 680 rv = 0; 681 break; 682 default: 683 break; 684 } 685 } 686 687 return (rv); 688 } 689 690 /* 691 * Return non-zero if any PCI bus in the system has an associated 692 * _BBN object, 0 otherwise. 693 */ 694 static int 695 pci_roots_have_bbn(void) 696 { 697 int i; 698 699 /* 700 * Scan the PCI busses and look for at least 1 _BBN 701 */ 702 for (i = 0; i <= pci_bios_maxbus; i++) { 703 /* skip non-root (peer) PCI busses */ 704 if (pci_bus_res[i].par_bus != (uchar_t)-1) 705 continue; 706 707 if (pci_bbn_present(i) > 0) 708 return (1); 709 } 710 return (0); 711 712 } 713 714 /* 715 * return non-zero if the machine is one on which we renumber 716 * the internal pci unit-addresses 717 */ 718 static int 719 pci_bus_renumber() 720 { 721 ACPI_TABLE_HEADER *fadt; 722 723 if (pci_bus_always_renumber) 724 return (1); 725 726 /* get the FADT */ 727 if (AcpiGetTable(ACPI_SIG_FADT, 1, (ACPI_TABLE_HEADER **)&fadt) != 728 AE_OK) 729 return (0); 730 731 /* compare OEM Table ID to "SUNm31" */ 732 if (strncmp("SUNm31", fadt->OemId, 6)) 733 return (0); 734 else 735 return (1); 736 } 737 738 /* 739 * Initial enumeration of the physical PCI bus hierarchy can 740 * leave 'gaps' in the order of peer PCI bus unit-addresses. 741 * Systems with more than one peer PCI bus *must* have an ACPI 742 * _BBN object associated with each peer bus; use the presence 743 * of this object to remove gaps in the numbering of the peer 744 * PCI bus unit-addresses - only peer busses with an associated 745 * _BBN are counted. 
746 */ 747 static void 748 pci_renumber_root_busses(void) 749 { 750 int pci_regs[] = {0, 0, 0}; 751 int i, root_addr = 0; 752 753 /* 754 * Currently, we only enable the re-numbering on specific 755 * Sun machines; this is a work-around for the more complicated 756 * issue of upgrade changing physical device paths 757 */ 758 if (!pci_bus_renumber()) 759 return; 760 761 /* 762 * If we find no _BBN objects at all, we either don't need 763 * to do anything or can't do anything anyway 764 */ 765 if (!pci_roots_have_bbn()) 766 return; 767 768 for (i = 0; i <= pci_bios_maxbus; i++) { 769 /* skip non-root (peer) PCI busses */ 770 if (pci_bus_res[i].par_bus != (uchar_t)-1) 771 continue; 772 773 if (pci_bbn_present(i) < 1) { 774 pci_bus_res[i].root_addr = (uchar_t)-1; 775 continue; 776 } 777 778 ASSERT(pci_bus_res[i].dip != NULL); 779 if (pci_bus_res[i].root_addr != root_addr) { 780 /* update reg property for node */ 781 pci_bus_res[i].root_addr = root_addr; 782 pci_regs[0] = pci_bus_res[i].root_addr; 783 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, 784 pci_bus_res[i].dip, "reg", (int *)pci_regs, 3); 785 } 786 root_addr++; 787 } 788 } 789 790 void 791 pci_register_isa_resources(int type, uint32_t base, uint32_t size) 792 { 793 (void) memlist_insert( 794 (type == 1) ? &isa_res.io_used : &isa_res.mem_used, 795 base, size); 796 } 797 798 /* 799 * Remove the resources which are already used by devices under a subtractive 800 * bridge from the bus's resources lists, because they're not available, and 801 * shouldn't be allocated to other buses. This is necessary because tracking 802 * resources for subtractive bridges is not complete. (Subtractive bridges only 803 * track some of their claimed resources, not "the rest of the address space" as 804 * they should, so that allocation to peer non-subtractive PPBs is easier. We 805 * need a fully-capable global resource allocator). 
806 */ 807 static void 808 remove_subtractive_res() 809 { 810 int i, j; 811 struct memlist *list; 812 813 for (i = 0; i <= pci_bios_maxbus; i++) { 814 if (pci_bus_res[i].subtractive) { 815 /* remove used io ports */ 816 list = pci_bus_res[i].io_used; 817 while (list) { 818 for (j = 0; j <= pci_bios_maxbus; j++) 819 (void) memlist_remove( 820 &pci_bus_res[j].io_avail, 821 list->ml_address, list->ml_size); 822 list = list->ml_next; 823 } 824 /* remove used mem resource */ 825 list = pci_bus_res[i].mem_used; 826 while (list) { 827 for (j = 0; j <= pci_bios_maxbus; j++) { 828 (void) memlist_remove( 829 &pci_bus_res[j].mem_avail, 830 list->ml_address, list->ml_size); 831 (void) memlist_remove( 832 &pci_bus_res[j].pmem_avail, 833 list->ml_address, list->ml_size); 834 } 835 list = list->ml_next; 836 } 837 /* remove used prefetchable mem resource */ 838 list = pci_bus_res[i].pmem_used; 839 while (list) { 840 for (j = 0; j <= pci_bios_maxbus; j++) { 841 (void) memlist_remove( 842 &pci_bus_res[j].pmem_avail, 843 list->ml_address, list->ml_size); 844 (void) memlist_remove( 845 &pci_bus_res[j].mem_avail, 846 list->ml_address, list->ml_size); 847 } 848 list = list->ml_next; 849 } 850 } 851 } 852 } 853 854 /* 855 * Set up (or complete the setup of) the bus_avail resource list 856 */ 857 static void 858 setup_bus_res(int bus) 859 { 860 uchar_t par_bus; 861 862 if (pci_bus_res[bus].dip == NULL) /* unused bus */ 863 return; 864 865 /* 866 * Set up bus_avail if not already filled in by populate_bus_res() 867 */ 868 if (pci_bus_res[bus].bus_avail == NULL) { 869 ASSERT(pci_bus_res[bus].sub_bus >= bus); 870 memlist_insert(&pci_bus_res[bus].bus_avail, bus, 871 pci_bus_res[bus].sub_bus - bus + 1); 872 } 873 874 ASSERT(pci_bus_res[bus].bus_avail != NULL); 875 876 /* 877 * Remove resources from parent bus node if this is not a 878 * root bus. 879 */ 880 par_bus = pci_bus_res[bus].par_bus; 881 if (par_bus != (uchar_t)-1) { 882 ASSERT(pci_bus_res[par_bus].bus_avail != NULL); 883 memlist_remove_list(&pci_bus_res[par_bus].bus_avail, 884 pci_bus_res[bus].bus_avail); 885 } 886 887 /* remove self from bus_avail */; 888 (void) memlist_remove(&pci_bus_res[bus].bus_avail, bus, 1); 889 } 890 891 /* 892 * Allocate a resource from the parent bus 893 */ 894 static uint64_t 895 get_parbus_res(uchar_t parbus, uchar_t bus, uint64_t size, uint64_t align, 896 enum parbus_mem mem) 897 { 898 uint64_t addr = 0; 899 uchar_t res_bus; 900 901 /* 902 * Skip root(peer) buses in multiple-root-bus systems when 903 * ACPI resource discovery was not successfully done; the 904 * initial resources set on each root bus might not be correctly 905 * accounted for in this case. 906 */ 907 if ((pci_bus_res[parbus].par_bus == (uchar_t)-1) && 908 (num_root_bus > 1) && (acpi_resource_discovery <= 0)) { 909 return (0); 910 } 911 912 /* 913 * Set res_bus to the bus from which resources should be allocated. 914 * A device under a subtractive PPB can allocate resources from its 915 * parent bus if there are no resources available on its own bus, so 916 * iterate up the chain until resources are found or the root is 917 * reached. 918 */ 919 res_bus = parbus; 920 while (pci_bus_res[res_bus].subtractive) { 921 if (mem == PB_IO && pci_bus_res[res_bus].io_avail != NULL) 922 break; 923 if (mem == PB_MEM && pci_bus_res[res_bus].mem_avail != NULL) 924 break; 925 if (mem == PB_PMEM && pci_bus_res[res_bus].pmem_avail != NULL) 926 break; 927 res_bus = pci_bus_res[res_bus].par_bus; 928 /* Has the root bus been reached? 
*/ 929 if (res_bus == (uchar_t)-1) 930 break; 931 } 932 933 switch (mem) { 934 case PB_IO: 935 if (pci_bus_res[res_bus].io_avail == NULL) 936 break; 937 addr = memlist_find(&pci_bus_res[res_bus].io_avail, 938 size, align); 939 if (addr > 0) { 940 memlist_insert(&pci_bus_res[res_bus].io_used, 941 addr, size); 942 943 /* free the old resource */ 944 memlist_free_all(&pci_bus_res[bus].io_avail); 945 memlist_free_all(&pci_bus_res[bus].io_used); 946 947 /* add the new resource */ 948 memlist_insert(&pci_bus_res[bus].io_avail, addr, size); 949 } 950 break; 951 case PB_MEM: 952 if (pci_bus_res[res_bus].mem_avail == NULL) 953 break; 954 addr = memlist_find(&pci_bus_res[res_bus].mem_avail, 955 size, align); 956 if (addr > 0) { 957 memlist_insert(&pci_bus_res[res_bus].mem_used, 958 addr, size); 959 (void) memlist_remove(&pci_bus_res[res_bus].pmem_avail, 960 addr, size); 961 962 /* free the old resource */ 963 memlist_free_all(&pci_bus_res[bus].mem_avail); 964 memlist_free_all(&pci_bus_res[bus].mem_used); 965 966 /* add the new resource */ 967 memlist_insert(&pci_bus_res[bus].mem_avail, addr, size); 968 } 969 break; 970 case PB_PMEM: 971 if (pci_bus_res[res_bus].pmem_avail == NULL) 972 break; 973 addr = memlist_find(&pci_bus_res[res_bus].pmem_avail, 974 size, align); 975 if (addr > 0) { 976 memlist_insert(&pci_bus_res[res_bus].pmem_used, 977 addr, size); 978 (void) memlist_remove(&pci_bus_res[res_bus].mem_avail, 979 addr, size); 980 981 /* free the old resource */ 982 memlist_free_all(&pci_bus_res[bus].pmem_avail); 983 memlist_free_all(&pci_bus_res[bus].pmem_used); 984 985 /* add the new resource */ 986 memlist_insert(&pci_bus_res[bus].pmem_avail, 987 addr, size); 988 } 989 break; 990 } 991 992 return (addr); 993 } 994 995 /* 996 * given a cap_id, return its cap_id location in config space 997 */ 998 static int 999 get_pci_cap(uchar_t bus, uchar_t dev, uchar_t func, uint8_t cap_id) 1000 { 1001 uint8_t curcap, cap_id_loc; 1002 uint16_t status; 1003 int location = -1; 1004 1005 /* 1006 * Need to check the Status register for ECP support first. 1007 * Also please note that for type 1 devices, the 1008 * offset could change. Should support type 1 next. 1009 */ 1010 status = pci_getw(bus, dev, func, PCI_CONF_STAT); 1011 if (!(status & PCI_STAT_CAP)) { 1012 return (-1); 1013 } 1014 cap_id_loc = pci_getb(bus, dev, func, PCI_CONF_CAP_PTR); 1015 1016 /* Walk the list of capabilities */ 1017 while (cap_id_loc && cap_id_loc != (uint8_t)-1) { 1018 curcap = pci_getb(bus, dev, func, cap_id_loc); 1019 1020 if (curcap == cap_id) { 1021 location = cap_id_loc; 1022 break; 1023 } 1024 cap_id_loc = pci_getb(bus, dev, func, cap_id_loc + 1); 1025 } 1026 return (location); 1027 } 1028 1029 /* 1030 * Does this resource element live in the legacy VGA range? 1031 */ 1032 1033 static boolean_t 1034 is_vga(struct memlist *elem, enum io_mem io) 1035 { 1036 if (io == IO) { 1037 if ((elem->ml_address == 0x3b0 && elem->ml_size == 0xc) || 1038 (elem->ml_address == 0x3c0 && elem->ml_size == 0x20)) 1039 return (B_TRUE); 1040 } else { 1041 if (elem->ml_address == 0xa0000 && elem->ml_size == 0x20000) 1042 return (B_TRUE); 1043 } 1044 return (B_FALSE); 1045 } 1046 1047 /* 1048 * Does this entire resource list consist only of legacy VGA resources? 
1049 */ 1050 1051 static boolean_t 1052 list_is_vga_only(struct memlist *l, enum io_mem io) 1053 { 1054 do { 1055 if (!is_vga(l, io)) 1056 return (B_FALSE); 1057 } while ((l = l->ml_next) != NULL); 1058 return (B_TRUE); 1059 } 1060 1061 /* 1062 * Find the start and end addresses that cover the range for all list entries, 1063 * excluding legacy VGA addresses. Relies on the list being sorted. 1064 */ 1065 static void 1066 pci_memlist_range(struct memlist *list, enum io_mem iomem, uint64_t *basep, 1067 uint64_t *limitp) 1068 { 1069 *limitp = *basep = 0; 1070 1071 for (; list != NULL; list = list->ml_next) { 1072 if (is_vga(list, iomem)) 1073 continue; 1074 1075 if (*basep == 0) 1076 *basep = list->ml_address; 1077 1078 if (list->ml_address + list->ml_size >= *limitp) 1079 *limitp = list->ml_address + list->ml_size - 1; 1080 } 1081 } 1082 1083 /* 1084 * Assign valid resources to unconfigured pci(e) bridges. We are trying 1085 * to reprogram the bridge when its 1086 * i) SECBUS == SUBBUS || 1087 * ii) IOBASE > IOLIM || 1088 * iii) MEMBASE > MEMLIM && PMEMBASE > PMEMLIM 1089 * This must be done after one full pass through the PCI tree to collect 1090 * all BIOS-configured resources, so that we know what resources are 1091 * free and available to assign to the unconfigured PPBs. 1092 */ 1093 static void 1094 fix_ppb_res(uchar_t secbus, boolean_t prog_sub) 1095 { 1096 uchar_t bus, dev, func; 1097 uchar_t parbus, subbus; 1098 uint_t io_base, io_limit, mem_base; 1099 uint_t io_size, io_align; 1100 uint64_t mem_size, mem_align, mem_limit; 1101 uint64_t pmem_size, pmem_base, pmem_limit; 1102 uint64_t addr = 0; 1103 int *regp = NULL; 1104 uint_t val, reglen; 1105 int rv, cap_ptr, physhi; 1106 dev_info_t *dip; 1107 uint16_t cmd_reg; 1108 struct memlist *scratch_list; 1109 boolean_t reprogram_mem; 1110 1111 /* skip root (peer) PCI busses */ 1112 if (pci_bus_res[secbus].par_bus == (uchar_t)-1) 1113 return; 1114 1115 /* skip subtractive PPB when prog_sub is not TRUE */ 1116 if (pci_bus_res[secbus].subtractive && !prog_sub) 1117 return; 1118 1119 /* some entries may be empty due to discontiguous bus numbering */ 1120 dip = pci_bus_res[secbus].dip; 1121 if (dip == NULL) 1122 return; 1123 1124 rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1125 "reg", ®p, ®len); 1126 if (rv != DDI_PROP_SUCCESS || reglen == 0) 1127 return; 1128 physhi = regp[0]; 1129 ddi_prop_free(regp); 1130 1131 func = (uchar_t)PCI_REG_FUNC_G(physhi); 1132 dev = (uchar_t)PCI_REG_DEV_G(physhi); 1133 bus = (uchar_t)PCI_REG_BUS_G(physhi); 1134 1135 dump_memlists("fix_ppb_res start bus", bus); 1136 dump_memlists("fix_ppb_res start secbus", secbus); 1137 1138 /* 1139 * If pcie bridge, check to see if link is enabled 1140 */ 1141 cap_ptr = get_pci_cap(bus, dev, func, PCI_CAP_ID_PCI_E); 1142 if (cap_ptr != -1) { 1143 cmd_reg = pci_getw(bus, dev, func, 1144 (uint16_t)cap_ptr + PCIE_LINKCTL); 1145 if (cmd_reg & PCIE_LINKCTL_LINK_DISABLE) { 1146 dcmn_err(CE_NOTE, 1147 "!fix_ppb_res: ppb[%x/%x/%x] link is disabled.", 1148 bus, dev, func); 1149 return; 1150 } 1151 } 1152 1153 subbus = pci_getb(bus, dev, func, PCI_BCNF_SUBBUS); 1154 parbus = pci_bus_res[secbus].par_bus; 1155 ASSERT(parbus == bus); 1156 cmd_reg = pci_getw(bus, dev, func, PCI_CONF_COMM); 1157 1158 /* 1159 * If we have a Cardbus bridge, but no bus space 1160 */ 1161 if (pci_bus_res[secbus].num_cbb != 0 && 1162 pci_bus_res[secbus].bus_avail == NULL) { 1163 uchar_t range; 1164 1165 /* normally there are 2 buses under a cardbus bridge */ 1166 range = 
pci_bus_res[secbus].num_cbb * 2; 1167 1168 /* 1169 * Try to find and allocate a bus-range starting at subbus+1 1170 * from the parent of the PPB. 1171 */ 1172 for (; range != 0; range--) { 1173 if (memlist_find_with_startaddr( 1174 &pci_bus_res[parbus].bus_avail, 1175 subbus + 1, range, 1) != 0) 1176 break; /* find bus range resource at parent */ 1177 } 1178 if (range != 0) { 1179 memlist_insert(&pci_bus_res[secbus].bus_avail, 1180 subbus + 1, range); 1181 subbus = subbus + range; 1182 pci_bus_res[secbus].sub_bus = subbus; 1183 pci_putb(bus, dev, func, PCI_BCNF_SUBBUS, subbus); 1184 add_bus_range_prop(secbus); 1185 1186 cmn_err(CE_NOTE, "!reprogram bus-range on ppb" 1187 "[%x/%x/%x]: %x ~ %x", bus, dev, func, 1188 secbus, subbus); 1189 } 1190 } 1191 1192 /* 1193 * Calculate required IO size and alignment 1194 * If bus io_size is zero, we are going to assign 512 bytes per bus, 1195 * otherwise, we'll choose the maximum value of such calculation and 1196 * bus io_size. The size needs to be 4K aligned. 1197 * 1198 * We calculate alignment as the largest power of two less than the 1199 * the sum of all children's IO size requirements, because this will 1200 * align to the size of the largest child request within that size 1201 * (which is always a power of two). 1202 */ 1203 io_size = (subbus - secbus + 1) * 0x200; 1204 if (io_size < pci_bus_res[secbus].io_size) 1205 io_size = pci_bus_res[secbus].io_size; 1206 io_size = P2ROUNDUP(io_size, PPB_IO_ALIGNMENT); 1207 io_align = io_size; 1208 P2LE(io_align); 1209 1210 /* 1211 * Calculate required MEM size and alignment 1212 * If bus mem_size is zero, we are going to assign 1M bytes per bus, 1213 * otherwise, we'll choose the maximum value of such calculation and 1214 * bus mem_size. The size needs to be 1M aligned. 1215 * 1216 * For the alignment, refer to the I/O comment above. 1217 */ 1218 mem_size = (subbus - secbus + 1) * PPB_MEM_ALIGNMENT; 1219 if (mem_size < pci_bus_res[secbus].mem_size) { 1220 mem_size = pci_bus_res[secbus].mem_size; 1221 mem_size = P2ROUNDUP(mem_size, PPB_MEM_ALIGNMENT); 1222 } 1223 mem_align = mem_size; 1224 P2LE(mem_align); 1225 1226 /* Subtractive bridge */ 1227 if (pci_bus_res[secbus].subtractive && prog_sub) { 1228 /* 1229 * We program an arbitrary amount of I/O and memory resource 1230 * for the subtractive bridge so that child dynamic-resource- 1231 * allocating devices (such as Cardbus bridges) have a chance 1232 * of success. Until we have full-tree resource rebalancing, 1233 * dynamic resource allocation (thru busra) only looks at the 1234 * parent bridge, so all PPBs must have some allocatable 1235 * resource. For non-subtractive bridges, the resources come 1236 * from the base/limit register "windows", but subtractive 1237 * bridges often don't program those (since they don't need to). 1238 * If we put all the remaining resources on the subtractive 1239 * bridge, then peer non-subtractive bridges can't allocate 1240 * more space (even though this is probably most correct). 1241 * If we put the resources only on the parent, then allocations 1242 * from children of subtractive bridges will fail without 1243 * special-case code for bypassing the subtractive bridge. 1244 * This solution is the middle-ground temporary solution until 1245 * we have fully-capable resource allocation. 
1246 */ 1247 1248 /* 1249 * Add an arbitrary I/O resource to the subtractive PPB 1250 */ 1251 if (pci_bus_res[secbus].io_avail == NULL) { 1252 addr = get_parbus_res(parbus, secbus, io_size, 1253 io_align, PB_IO); 1254 if (addr) { 1255 add_ranges_prop(secbus, 1); 1256 pci_bus_res[secbus].io_reprogram = 1257 pci_bus_res[parbus].io_reprogram; 1258 1259 cmn_err(CE_NOTE, "!add io-range on subtractive" 1260 " ppb[%x/%x/%x]: " 1261 "0x%"PRIx64" ~ 0x%"PRIx64"", 1262 bus, dev, func, addr, addr + io_size - 1); 1263 } 1264 } 1265 /* 1266 * Add an arbitrary memory resource to the subtractive PPB 1267 */ 1268 if (pci_bus_res[secbus].mem_avail == NULL) { 1269 addr = get_parbus_res(parbus, secbus, mem_size, 1270 mem_align, PB_MEM); 1271 if (addr) { 1272 add_ranges_prop(secbus, 1); 1273 pci_bus_res[secbus].mem_reprogram = 1274 pci_bus_res[parbus].mem_reprogram; 1275 1276 cmn_err(CE_NOTE, "!add mem-range on " 1277 "subtractive ppb[%x/%x/%x]: " 1278 "0x%"PRIx64" ~ 0x%"PRIx64"", 1279 bus, dev, func, 1280 addr, addr + mem_size - 1); 1281 } 1282 } 1283 1284 goto cmd_enable; 1285 } 1286 1287 /* 1288 * Check to see if we need to reprogram I/O space, either because the 1289 * parent bus needed reprogramming and so do we, or because I/O space is 1290 * disabled in base/limit or command register. 1291 */ 1292 val = io_base = pci_getb(bus, dev, func, PCI_BCNF_IO_BASE_LOW); 1293 io_limit = pci_getb(bus, dev, func, PCI_BCNF_IO_LIMIT_LOW); 1294 io_base = (io_base & PCI_BCNF_IO_MASK) << PCI_BCNF_IO_SHIFT; 1295 io_limit = ((io_limit & PCI_BCNF_IO_MASK) << PCI_BCNF_IO_SHIFT) | 0xfff; 1296 if ((val & PCI_BCNF_ADDR_MASK) == PCI_BCNF_IO_32BIT) { 1297 uint16_t io_base_hi, io_limit_hi; 1298 io_base_hi = pci_getw(bus, dev, func, PCI_BCNF_IO_BASE_HI); 1299 io_limit_hi = pci_getw(bus, dev, func, PCI_BCNF_IO_LIMIT_HI); 1300 1301 io_base |= (uint_t)io_base_hi << 16; 1302 io_limit |= (uint_t)io_limit_hi << 16; 1303 } 1304 1305 /* Form list of all resources passed (avail + used) */ 1306 scratch_list = memlist_dup(pci_bus_res[secbus].io_avail); 1307 memlist_merge(&pci_bus_res[secbus].io_used, &scratch_list); 1308 1309 if ((pci_bus_res[parbus].io_reprogram || 1310 (io_base > io_limit) || 1311 (!(cmd_reg & PCI_COMM_IO))) && 1312 !list_is_vga_only(scratch_list, IO)) { 1313 1314 if (pci_bus_res[secbus].io_used) { 1315 memlist_subsume(&pci_bus_res[secbus].io_used, 1316 &pci_bus_res[secbus].io_avail); 1317 } 1318 1319 if (pci_bus_res[secbus].io_avail && 1320 !pci_bus_res[parbus].io_reprogram && 1321 !pci_bus_res[parbus].subtractive) { 1322 /* re-choose old io ports info */ 1323 1324 uint64_t base, limit; 1325 1326 pci_memlist_range(pci_bus_res[secbus].io_avail, 1327 IO, &base, &limit); 1328 io_base = (uint_t)base; 1329 io_limit = (uint_t)limit; 1330 1331 /* 4K aligned */ 1332 io_base = P2ALIGN(base, PPB_IO_ALIGNMENT); 1333 io_limit = P2ROUNDUP(io_limit, PPB_IO_ALIGNMENT) - 1; 1334 io_size = io_limit - io_base + 1; 1335 ASSERT(io_base <= io_limit); 1336 memlist_free_all(&pci_bus_res[secbus].io_avail); 1337 memlist_insert(&pci_bus_res[secbus].io_avail, 1338 io_base, io_size); 1339 memlist_insert(&pci_bus_res[parbus].io_used, 1340 io_base, io_size); 1341 (void) memlist_remove(&pci_bus_res[parbus].io_avail, 1342 io_base, io_size); 1343 pci_bus_res[secbus].io_reprogram = B_TRUE; 1344 } else { 1345 /* get new io ports from parent bus */ 1346 addr = get_parbus_res(parbus, secbus, io_size, 1347 io_align, PB_IO); 1348 if (addr) { 1349 io_base = addr; 1350 io_limit = addr + io_size - 1; 1351 pci_bus_res[secbus].io_reprogram = B_TRUE; 1352 } 1353 } 
1354 if (pci_bus_res[secbus].io_reprogram) { 1355 /* reprogram PPB regs */ 1356 pci_putb(bus, dev, func, PCI_BCNF_IO_BASE_LOW, 1357 (uchar_t)((io_base>>8) & 0xf0)); 1358 pci_putb(bus, dev, func, PCI_BCNF_IO_LIMIT_LOW, 1359 (uchar_t)((io_limit>>8) & 0xf0)); 1360 pci_putb(bus, dev, func, PCI_BCNF_IO_BASE_HI, 0); 1361 pci_putb(bus, dev, func, PCI_BCNF_IO_LIMIT_HI, 0); 1362 add_ranges_prop(secbus, 1); 1363 1364 cmn_err(CE_NOTE, "!reprogram io-range on" 1365 " ppb[%x/%x/%x]: 0x%x ~ 0x%x", 1366 bus, dev, func, io_base, io_limit); 1367 } 1368 } 1369 memlist_free_all(&scratch_list); 1370 1371 /* 1372 * Check memory space as we did I/O space. 1373 */ 1374 1375 mem_base = (uint_t)pci_getw(bus, dev, func, PCI_BCNF_MEM_BASE); 1376 mem_base = (mem_base & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT; 1377 mem_limit = (uint_t)pci_getw(bus, dev, func, PCI_BCNF_MEM_LIMIT); 1378 mem_limit = ((mem_limit & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT) 1379 | 0xfffff; 1380 1381 val = (uint_t)pci_getw(bus, dev, func, PCI_BCNF_PF_LIMIT_LOW); 1382 pmem_limit = ((val & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT) | 1383 0xfffff; 1384 val = (uint_t)pci_getw(bus, dev, func, PCI_BCNF_PF_BASE_LOW); 1385 pmem_base = ((val & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT); 1386 1387 if ((val & PCI_BCNF_ADDR_MASK) == PCI_BCNF_PF_MEM_64BIT) { 1388 uint32_t pf_addr_hi, pf_limit_hi; 1389 1390 pf_addr_hi = pci_getl(bus, dev, func, PCI_BCNF_PF_BASE_HIGH); 1391 pf_limit_hi = pci_getl(bus, dev, func, PCI_BCNF_PF_LIMIT_HIGH); 1392 pmem_base |= (uint64_t)pf_addr_hi << 32; 1393 pmem_limit |= (uint64_t)pf_limit_hi << 32; 1394 } 1395 1396 /* 1397 * Reprogram memory if any of: 1398 * 1399 * - The parent bus is flagged for reprogramming; 1400 * - Mem space is currently disabled in the command register; 1401 * - Both mem and pmem space are disabled via base/limit. 1402 * 1403 * Always reprogram both mem and pmem together since this leaves 1404 * resources in the 'avail' list for add_reg_props() to subsequently 1405 * find and assign. 1406 */ 1407 reprogram_mem = pci_bus_res[parbus].mem_reprogram || 1408 !(cmd_reg & PCI_COMM_MAE) || 1409 (mem_base > mem_limit && pmem_base > pmem_limit); 1410 1411 scratch_list = memlist_dup(pci_bus_res[secbus].mem_avail); 1412 memlist_merge(&pci_bus_res[secbus].mem_used, &scratch_list); 1413 1414 if (reprogram_mem && !list_is_vga_only(scratch_list, MEM)) { 1415 1416 if (pci_bus_res[secbus].mem_used) { 1417 memlist_subsume(&pci_bus_res[secbus].mem_used, 1418 &pci_bus_res[secbus].mem_avail); 1419 } 1420 1421 /* 1422 * At this point, if the parent bus has not been 1423 * reprogrammed and there is memory in this bus' available 1424 * pool, then it can just be re-used. Otherwise a new range 1425 * is requested from the parent bus - note that 1426 * get_parbus_res() also takes care of constructing new 1427 * avail and used lists for the bus. 1428 * 1429 * For a subtractive parent bus, always request a fresh 1430 * memory range. 
1431 */ 1432 if (pci_bus_res[secbus].mem_avail && 1433 !pci_bus_res[parbus].mem_reprogram && 1434 !pci_bus_res[parbus].subtractive) { 1435 /* re-choose old mem resource */ 1436 1437 uint64_t base; 1438 1439 pci_memlist_range(pci_bus_res[secbus].mem_avail, 1440 MEM, &base, &mem_limit); 1441 mem_base = (uint_t)base; 1442 1443 mem_base = P2ALIGN(mem_base, PPB_MEM_ALIGNMENT); 1444 mem_limit = P2ROUNDUP(mem_limit, PPB_MEM_ALIGNMENT) - 1; 1445 mem_size = mem_limit + 1 - mem_base; 1446 ASSERT(mem_base <= mem_limit); 1447 memlist_free_all(&pci_bus_res[secbus].mem_avail); 1448 memlist_insert(&pci_bus_res[secbus].mem_avail, 1449 mem_base, mem_size); 1450 memlist_insert(&pci_bus_res[parbus].mem_used, 1451 mem_base, mem_size); 1452 (void) memlist_remove(&pci_bus_res[parbus].mem_avail, 1453 mem_base, mem_size); 1454 pci_bus_res[secbus].mem_reprogram = B_TRUE; 1455 } else { 1456 /* get new mem resource from parent bus */ 1457 addr = get_parbus_res(parbus, secbus, mem_size, 1458 mem_align, PB_MEM); 1459 if (addr) { 1460 mem_base = addr; 1461 mem_limit = addr + mem_size - 1; 1462 pci_bus_res[secbus].mem_reprogram = B_TRUE; 1463 } 1464 } 1465 } 1466 memlist_free_all(&scratch_list); 1467 1468 /* Prefetch memory */ 1469 1470 scratch_list = memlist_dup(pci_bus_res[secbus].pmem_avail); 1471 memlist_merge(&pci_bus_res[secbus].pmem_used, &scratch_list); 1472 1473 /* 1474 * Only reprogram prefetchable memory If the MEM access bit is 1475 * currently enabled. If it is not, then prefetchable memory will be 1476 * disabled anyway via base/limit below. 1477 */ 1478 if (reprogram_mem && !list_is_vga_only(scratch_list, MEM) && 1479 (cmd_reg & PCI_COMM_MAE)) { 1480 1481 if (pci_bus_res[secbus].pmem_used) { 1482 memlist_subsume(&pci_bus_res[secbus].pmem_used, 1483 &pci_bus_res[secbus].pmem_avail); 1484 } 1485 1486 /* Same logic as for non-prefetch memory, see above */ 1487 if (pci_bus_res[secbus].pmem_avail && 1488 !pci_bus_res[parbus].mem_reprogram && 1489 !pci_bus_res[parbus].subtractive) { 1490 /* re-choose old mem resource */ 1491 1492 pci_memlist_range(pci_bus_res[secbus].pmem_avail, 1493 MEM, &pmem_base, &pmem_limit); 1494 1495 pmem_base = P2ALIGN(pmem_base, PPB_MEM_ALIGNMENT); 1496 pmem_limit = P2ROUNDUP(pmem_limit, 1497 PPB_MEM_ALIGNMENT) - 1; 1498 pmem_size = pmem_limit + 1 - pmem_base; 1499 ASSERT(pmem_base <= pmem_limit); 1500 memlist_free_all(&pci_bus_res[secbus].pmem_avail); 1501 memlist_insert(&pci_bus_res[secbus].pmem_avail, 1502 pmem_base, pmem_size); 1503 memlist_insert(&pci_bus_res[parbus].pmem_used, 1504 pmem_base, pmem_size); 1505 (void) memlist_remove(&pci_bus_res[parbus].pmem_avail, 1506 pmem_base, pmem_size); 1507 pci_bus_res[secbus].mem_reprogram = B_TRUE; 1508 } else { 1509 /* get new mem resource from parent bus */ 1510 addr = get_parbus_res(parbus, secbus, mem_size, 1511 mem_align, PB_PMEM); 1512 if (addr) { 1513 pmem_base = addr; 1514 pmem_limit = addr + mem_size - 1; 1515 pci_bus_res[secbus].mem_reprogram = B_TRUE; 1516 } 1517 } 1518 } 1519 1520 memlist_free_all(&scratch_list); 1521 1522 if (pci_bus_res[secbus].mem_reprogram) { 1523 /* reprogram PPB MEM regs */ 1524 1525 pci_putw(bus, dev, func, PCI_BCNF_MEM_BASE, 1526 (uint16_t)((mem_base >> PCI_BCNF_MEM_SHIFT) & 1527 PCI_BCNF_MEM_MASK)); 1528 pci_putw(bus, dev, func, PCI_BCNF_MEM_LIMIT, 1529 (uint16_t)((mem_limit >> PCI_BCNF_MEM_SHIFT) & 1530 PCI_BCNF_MEM_MASK)); 1531 1532 cmn_err(CE_NOTE, "!reprogram mem-range on" 1533 " ppb[%x/%x/%x]: 0x%x ~ 0x%"PRIx64"", 1534 bus, dev, func, mem_base, mem_limit); 1535 1536 if (!(cmd_reg & PCI_COMM_MAE)) 
{ 1537 /* 1538 * If the MEM access bit is initially disabled by BIOS, 1539 * we disable the PMEM window manually by setting PMEM 1540 * base > PMEM limit here, in case there are incorrect 1541 * values in them from BIOS, so that we won't get in 1542 * trouble once the MEM access bit is enabled at the 1543 * end of this function. 1544 */ 1545 pci_putw(bus, dev, func, PCI_BCNF_PF_BASE_LOW, 0xfff0); 1546 pci_putw(bus, dev, func, PCI_BCNF_PF_LIMIT_LOW, 0x0); 1547 pci_putl(bus, dev, func, PCI_BCNF_PF_BASE_HIGH, 1548 0xffffffff); 1549 pci_putl(bus, dev, func, PCI_BCNF_PF_LIMIT_HIGH, 0x0); 1550 1551 } else { 1552 1553 pci_putw(bus, dev, func, PCI_BCNF_PF_BASE_LOW, 1554 ((pmem_base & 0xffffffff) >> PCI_BCNF_MEM_SHIFT) & 1555 PCI_BCNF_MEM_MASK); 1556 pci_putl(bus, dev, func, PCI_BCNF_PF_BASE_HIGH, 1557 pmem_base >> 32); 1558 1559 pci_putw(bus, dev, func, PCI_BCNF_PF_LIMIT_LOW, 1560 ((pmem_limit & 0xffffffff) >> PCI_BCNF_MEM_SHIFT) & 1561 PCI_BCNF_MEM_MASK); 1562 pci_putl(bus, dev, func, PCI_BCNF_PF_LIMIT_HIGH, 1563 pmem_limit >> 32); 1564 1565 cmn_err(CE_NOTE, "!reprogram pmem-range on" 1566 " ppb[%x/%x/%x]: 0x%"PRIx64" ~ 0x%"PRIx64"", 1567 bus, dev, func, pmem_base, pmem_limit); 1568 } 1569 1570 add_ranges_prop(secbus, 1); 1571 } 1572 1573 cmd_enable: 1574 dump_memlists("fix_ppb_res end bus", bus); 1575 dump_memlists("fix_ppb_res end secbus", secbus); 1576 1577 if (pci_bus_res[secbus].io_avail) 1578 cmd_reg |= PCI_COMM_IO | PCI_COMM_ME; 1579 if (pci_bus_res[secbus].mem_avail) 1580 cmd_reg |= PCI_COMM_MAE | PCI_COMM_ME; 1581 pci_putw(bus, dev, func, PCI_CONF_COMM, cmd_reg); 1582 } 1583 1584 void 1585 pci_reprogram(void) 1586 { 1587 int i, pci_reconfig = 1; 1588 char *onoff; 1589 int bus; 1590 1591 /* 1592 * Scan ACPI namespace for _BBN objects, make sure that 1593 * childless root-bridges appear in devinfo tree 1594 */ 1595 pci_scan_bbn(); 1596 pci_unitaddr_cache_init(); 1597 1598 /* 1599 * Fix-up unit-address assignments if cache is available 1600 */ 1601 if (pci_unitaddr_cache_valid()) { 1602 int pci_regs[] = {0, 0, 0}; 1603 int new_addr; 1604 int index = 0; 1605 1606 for (bus = 0; bus <= pci_bios_maxbus; bus++) { 1607 /* skip non-root (peer) PCI busses */ 1608 if ((pci_bus_res[bus].par_bus != (uchar_t)-1) || 1609 (pci_bus_res[bus].dip == NULL)) 1610 continue; 1611 1612 new_addr = pci_bus_unitaddr(index); 1613 if (pci_bus_res[bus].root_addr != new_addr) { 1614 /* update reg property for node */ 1615 pci_regs[0] = pci_bus_res[bus].root_addr = 1616 new_addr; 1617 (void) ndi_prop_update_int_array( 1618 DDI_DEV_T_NONE, pci_bus_res[bus].dip, 1619 "reg", (int *)pci_regs, 3); 1620 } 1621 index++; 1622 } 1623 } else { 1624 /* perform legacy processing */ 1625 pci_renumber_root_busses(); 1626 pci_unitaddr_cache_create(); 1627 } 1628 1629 /* 1630 * Do root-bus resource discovery 1631 */ 1632 for (bus = 0; bus <= pci_bios_maxbus; bus++) { 1633 /* skip non-root (peer) PCI busses */ 1634 if (pci_bus_res[bus].par_bus != (uchar_t)-1) 1635 continue; 1636 1637 /* 1638 * 1. find resources associated with this root bus 1639 */ 1640 populate_bus_res(bus); 1641 1642 1643 /* 1644 * 2. 
Remove used PCI and ISA resources from bus resource map 1645 */ 1646 1647 memlist_remove_list(&pci_bus_res[bus].io_avail, 1648 pci_bus_res[bus].io_used); 1649 memlist_remove_list(&pci_bus_res[bus].mem_avail, 1650 pci_bus_res[bus].mem_used); 1651 memlist_remove_list(&pci_bus_res[bus].pmem_avail, 1652 pci_bus_res[bus].pmem_used); 1653 memlist_remove_list(&pci_bus_res[bus].mem_avail, 1654 pci_bus_res[bus].pmem_used); 1655 memlist_remove_list(&pci_bus_res[bus].pmem_avail, 1656 pci_bus_res[bus].mem_used); 1657 1658 memlist_remove_list(&pci_bus_res[bus].io_avail, 1659 isa_res.io_used); 1660 memlist_remove_list(&pci_bus_res[bus].mem_avail, 1661 isa_res.mem_used); 1662 1663 /* 1664 * 3. Exclude <1M address range here in case below reserved 1665 * ranges for BIOS data area, ROM area etc are wrongly reported 1666 * in ACPI resource producer entries for PCI root bus. 1667 * 00000000 - 000003FF RAM 1668 * 00000400 - 000004FF BIOS data area 1669 * 00000500 - 0009FFFF RAM 1670 * 000A0000 - 000BFFFF VGA RAM 1671 * 000C0000 - 000FFFFF ROM area 1672 */ 1673 (void) memlist_remove(&pci_bus_res[bus].mem_avail, 0, 0x100000); 1674 (void) memlist_remove(&pci_bus_res[bus].pmem_avail, 1675 0, 0x100000); 1676 } 1677 1678 memlist_free_all(&isa_res.io_used); 1679 memlist_free_all(&isa_res.mem_used); 1680 1681 /* add bus-range property for root/peer bus nodes */ 1682 for (i = 0; i <= pci_bios_maxbus; i++) { 1683 /* create bus-range property on root/peer buses */ 1684 if (pci_bus_res[i].par_bus == (uchar_t)-1) 1685 add_bus_range_prop(i); 1686 1687 /* setup bus range resource on each bus */ 1688 setup_bus_res(i); 1689 } 1690 1691 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(), 1692 DDI_PROP_DONTPASS, "pci-reprog", &onoff) == DDI_SUCCESS) { 1693 if (strcmp(onoff, "off") == 0) { 1694 pci_reconfig = 0; 1695 cmn_err(CE_NOTE, "pci device reprogramming disabled"); 1696 } 1697 ddi_prop_free(onoff); 1698 } 1699 1700 remove_subtractive_res(); 1701 1702 /* reprogram the non-subtractive PPB */ 1703 if (pci_reconfig) 1704 for (i = 0; i <= pci_bios_maxbus; i++) 1705 fix_ppb_res(i, B_FALSE); 1706 1707 for (i = 0; i <= pci_bios_maxbus; i++) { 1708 /* configure devices not configured by BIOS */ 1709 if (pci_reconfig) { 1710 /* 1711 * Reprogram the subtractive PPB. At this time, all its 1712 * siblings should have got their resources already. 
1713 */ 1714 if (pci_bus_res[i].subtractive) 1715 fix_ppb_res(i, B_TRUE); 1716 enumerate_bus_devs(i, CONFIG_NEW); 1717 } 1718 } 1719 1720 /* All dev programmed, so we can create available prop */ 1721 for (i = 0; i <= pci_bios_maxbus; i++) 1722 add_bus_available_prop(i); 1723 } 1724 1725 /* 1726 * populate bus resources 1727 */ 1728 static void 1729 populate_bus_res(uchar_t bus) 1730 { 1731 1732 /* scan BIOS structures */ 1733 pci_bus_res[bus].pmem_avail = find_bus_res(bus, PREFETCH_TYPE); 1734 pci_bus_res[bus].mem_avail = find_bus_res(bus, MEM_TYPE); 1735 pci_bus_res[bus].io_avail = find_bus_res(bus, IO_TYPE); 1736 pci_bus_res[bus].bus_avail = find_bus_res(bus, BUSRANGE_TYPE); 1737 1738 /* 1739 * attempt to initialize sub_bus from the largest range-end 1740 * in the bus_avail list 1741 */ 1742 if (pci_bus_res[bus].bus_avail != NULL) { 1743 struct memlist *entry; 1744 int current; 1745 1746 entry = pci_bus_res[bus].bus_avail; 1747 while (entry != NULL) { 1748 current = entry->ml_address + entry->ml_size - 1; 1749 if (current > pci_bus_res[bus].sub_bus) 1750 pci_bus_res[bus].sub_bus = current; 1751 entry = entry->ml_next; 1752 } 1753 } 1754 1755 if (bus == 0) { 1756 /* 1757 * Special treatment of bus 0: 1758 * If no IO/MEM resource from ACPI/MPSPEC/HRT, copy 1759 * pcimem from boot and make I/O space the entire range 1760 * starting at 0x100. 1761 */ 1762 if (pci_bus_res[0].mem_avail == NULL) 1763 pci_bus_res[0].mem_avail = 1764 memlist_dup(bootops->boot_mem->pcimem); 1765 /* Exclude 0x00 to 0xff of the I/O space, used by all PCs */ 1766 if (pci_bus_res[0].io_avail == NULL) 1767 memlist_insert(&pci_bus_res[0].io_avail, 0x100, 0xffff); 1768 } 1769 1770 /* 1771 * Create 'ranges' property here before any resources are 1772 * removed from the resource lists 1773 */ 1774 add_ranges_prop(bus, 0); 1775 } 1776 1777 /* 1778 * Create top-level bus dips, i.e. /pci@0,0, /pci@1,0... 1779 */ 1780 static void 1781 create_root_bus_dip(uchar_t bus) 1782 { 1783 int pci_regs[] = {0, 0, 0}; 1784 dev_info_t *dip; 1785 1786 ASSERT(pci_bus_res[bus].par_bus == (uchar_t)-1); 1787 1788 num_root_bus++; 1789 ndi_devi_alloc_sleep(ddi_root_node(), "pci", 1790 (pnode_t)DEVI_SID_NODEID, &dip); 1791 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 1792 "#address-cells", 3); 1793 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 1794 "#size-cells", 2); 1795 pci_regs[0] = pci_bus_res[bus].root_addr; 1796 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, 1797 "reg", (int *)pci_regs, 3); 1798 1799 /* 1800 * If system has PCIe bus, then create different properties 1801 */ 1802 if (create_pcie_root_bus(bus, dip) == B_FALSE) 1803 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, 1804 "device_type", "pci"); 1805 1806 (void) ndi_devi_bind_driver(dip, 0); 1807 pci_bus_res[bus].dip = dip; 1808 } 1809 1810 /* 1811 * For any fixed configuration (often compatability) pci devices 1812 * and those with their own expansion rom, create device nodes 1813 * to hold the already configured device details. 
1814 */ 1815 void 1816 enumerate_bus_devs(uchar_t bus, int config_op) 1817 { 1818 uchar_t dev, func, nfunc, header; 1819 ushort_t venid; 1820 struct pci_devfunc *devlist = NULL, *entry; 1821 1822 if (pci_debug_bus_start == -1 || bus_debug(bus)) { 1823 if (config_op == CONFIG_NEW) { 1824 dcmn_err(CE_NOTE, "configuring pci bus 0x%x", bus); 1825 } else if (config_op == CONFIG_FIX) { 1826 dcmn_err(CE_NOTE, 1827 "fixing devices on pci bus 0x%x", bus); 1828 } else { 1829 dcmn_err(CE_NOTE, "enumerating pci bus 0x%x", bus); 1830 } 1831 } 1832 1833 if (config_op == CONFIG_NEW) { 1834 devlist = (struct pci_devfunc *)pci_bus_res[bus].privdata; 1835 while (devlist) { 1836 entry = devlist; 1837 devlist = entry->next; 1838 if (entry->reprogram || 1839 pci_bus_res[bus].io_reprogram || 1840 pci_bus_res[bus].mem_reprogram) { 1841 /* reprogram device(s) */ 1842 (void) add_reg_props(entry->dip, bus, 1843 entry->dev, entry->func, CONFIG_NEW, 0); 1844 } 1845 kmem_free(entry, sizeof (*entry)); 1846 } 1847 pci_bus_res[bus].privdata = NULL; 1848 return; 1849 } 1850 1851 for (dev = 0; dev < max_dev_pci; dev++) { 1852 nfunc = 1; 1853 for (func = 0; func < nfunc; func++) { 1854 1855 venid = pci_getw(bus, dev, func, PCI_CONF_VENID); 1856 1857 if ((venid == 0xffff) || (venid == 0)) { 1858 /* no function at this address */ 1859 continue; 1860 } 1861 1862 header = pci_getb(bus, dev, func, PCI_CONF_HEADER); 1863 if (header == 0xff) { 1864 continue; /* illegal value */ 1865 } 1866 1867 /* 1868 * according to some mail from Microsoft posted 1869 * to the pci-drivers alias, their only requirement 1870 * for a multifunction device is for the 1st 1871 * function to have to PCI_HEADER_MULTI bit set. 1872 */ 1873 if ((func == 0) && (header & PCI_HEADER_MULTI)) { 1874 nfunc = 8; 1875 } 1876 1877 if (config_op == CONFIG_FIX || 1878 config_op == CONFIG_INFO) { 1879 /* 1880 * Create the node, unconditionally, on the 1881 * first pass only. It may still need 1882 * resource assignment, which will be 1883 * done on the second, CONFIG_NEW, pass. 
1884 */ 1885 process_devfunc(bus, dev, func, header, 1886 venid, config_op); 1887 1888 } 1889 } 1890 } 1891 1892 /* percolate bus used resources up through parents to root */ 1893 if (config_op == CONFIG_INFO) { 1894 int par_bus; 1895 1896 par_bus = pci_bus_res[bus].par_bus; 1897 while (par_bus != (uchar_t)-1) { 1898 pci_bus_res[par_bus].io_size += 1899 pci_bus_res[bus].io_size; 1900 pci_bus_res[par_bus].mem_size += 1901 pci_bus_res[bus].mem_size; 1902 1903 if (pci_bus_res[bus].io_used) 1904 memlist_merge(&pci_bus_res[bus].io_used, 1905 &pci_bus_res[par_bus].io_used); 1906 1907 if (pci_bus_res[bus].mem_used) 1908 memlist_merge(&pci_bus_res[bus].mem_used, 1909 &pci_bus_res[par_bus].mem_used); 1910 1911 if (pci_bus_res[bus].pmem_used) 1912 memlist_merge(&pci_bus_res[bus].pmem_used, 1913 &pci_bus_res[par_bus].pmem_used); 1914 1915 bus = par_bus; 1916 par_bus = pci_bus_res[par_bus].par_bus; 1917 } 1918 } 1919 } 1920 1921 static int 1922 check_pciide_prop(uchar_t revid, ushort_t venid, ushort_t devid, 1923 ushort_t subvenid, ushort_t subdevid) 1924 { 1925 static int prop_exist = -1; 1926 static char *pciide_str; 1927 char compat[32]; 1928 1929 if (prop_exist == -1) { 1930 prop_exist = (ddi_prop_lookup_string(DDI_DEV_T_ANY, 1931 ddi_root_node(), DDI_PROP_DONTPASS, "pci-ide", 1932 &pciide_str) == DDI_SUCCESS); 1933 } 1934 1935 if (!prop_exist) 1936 return (0); 1937 1938 /* compare property value against various forms of compatible */ 1939 if (subvenid) { 1940 (void) snprintf(compat, sizeof (compat), "pci%x,%x.%x.%x.%x", 1941 venid, devid, subvenid, subdevid, revid); 1942 if (strcmp(pciide_str, compat) == 0) 1943 return (1); 1944 1945 (void) snprintf(compat, sizeof (compat), "pci%x,%x.%x.%x", 1946 venid, devid, subvenid, subdevid); 1947 if (strcmp(pciide_str, compat) == 0) 1948 return (1); 1949 1950 (void) snprintf(compat, sizeof (compat), "pci%x,%x", 1951 subvenid, subdevid); 1952 if (strcmp(pciide_str, compat) == 0) 1953 return (1); 1954 } 1955 (void) snprintf(compat, sizeof (compat), "pci%x,%x.%x", 1956 venid, devid, revid); 1957 if (strcmp(pciide_str, compat) == 0) 1958 return (1); 1959 1960 (void) snprintf(compat, sizeof (compat), "pci%x,%x", venid, devid); 1961 if (strcmp(pciide_str, compat) == 0) 1962 return (1); 1963 1964 return (0); 1965 } 1966 1967 static int 1968 is_pciide(uchar_t basecl, uchar_t subcl, uchar_t revid, 1969 ushort_t venid, ushort_t devid, ushort_t subvenid, ushort_t subdevid) 1970 { 1971 struct ide_table { /* table for PCI_MASS_OTHER */ 1972 ushort_t venid; 1973 ushort_t devid; 1974 } *entry; 1975 1976 /* XXX SATA and other devices: need a way to add dynamically */ 1977 static struct ide_table ide_other[] = { 1978 {0x1095, 0x3112}, 1979 {0x1095, 0x3114}, 1980 {0x1095, 0x3512}, 1981 {0x1095, 0x680}, /* Sil0680 */ 1982 {0x1283, 0x8211}, /* ITE 8211F is subcl PCI_MASS_OTHER */ 1983 {0, 0} 1984 }; 1985 1986 if (basecl != PCI_CLASS_MASS) 1987 return (0); 1988 1989 if (subcl == PCI_MASS_IDE) { 1990 return (1); 1991 } 1992 1993 if (check_pciide_prop(revid, venid, devid, subvenid, subdevid)) 1994 return (1); 1995 1996 if (subcl != PCI_MASS_OTHER && subcl != PCI_MASS_SATA) { 1997 return (0); 1998 } 1999 2000 entry = &ide_other[0]; 2001 while (entry->venid) { 2002 if (entry->venid == venid && entry->devid == devid) 2003 return (1); 2004 entry++; 2005 } 2006 return (0); 2007 } 2008 2009 static int 2010 is_display(uint_t classcode) 2011 { 2012 static uint_t disp_classes[] = { 2013 0x000100, 2014 0x030000, 2015 0x030001 2016 }; 2017 int i, nclasses = sizeof (disp_classes) / sizeof 
(uint_t); 2018 2019 for (i = 0; i < nclasses; i++) { 2020 if (classcode == disp_classes[i]) 2021 return (1); 2022 } 2023 return (0); 2024 } 2025 2026 static void 2027 add_undofix_entry(uint8_t bus, uint8_t dev, uint8_t fn, 2028 void (*undofn)(uint8_t, uint8_t, uint8_t)) 2029 { 2030 struct pci_fixundo *newundo; 2031 2032 newundo = kmem_alloc(sizeof (struct pci_fixundo), KM_SLEEP); 2033 2034 /* 2035 * Adding an item to this list means that we must turn its NMIENABLE 2036 * bit back on at a later time. 2037 */ 2038 newundo->bus = bus; 2039 newundo->dev = dev; 2040 newundo->fn = fn; 2041 newundo->undofn = undofn; 2042 newundo->next = undolist; 2043 2044 /* add to the undo list in LIFO order */ 2045 undolist = newundo; 2046 } 2047 2048 void 2049 add_pci_fixes(void) 2050 { 2051 int i; 2052 2053 for (i = 0; i <= pci_bios_maxbus; i++) { 2054 /* 2055 * For each bus, apply needed fixes to the appropriate devices. 2056 * This must be done before the main enumeration loop because 2057 * some fixes must be applied to devices normally encountered 2058 * later in the pci scan (e.g. if a fix to device 7 must be 2059 * applied before scanning device 6, applying fixes in the 2060 * normal enumeration loop would obviously be too late). 2061 */ 2062 enumerate_bus_devs(i, CONFIG_FIX); 2063 } 2064 } 2065 2066 void 2067 undo_pci_fixes(void) 2068 { 2069 struct pci_fixundo *nextundo; 2070 uint8_t bus, dev, fn; 2071 2072 /* 2073 * All fixes in the undo list are performed unconditionally. Future 2074 * fixes may require selective undo. 2075 */ 2076 while (undolist != NULL) { 2077 2078 bus = undolist->bus; 2079 dev = undolist->dev; 2080 fn = undolist->fn; 2081 2082 (*(undolist->undofn))(bus, dev, fn); 2083 2084 nextundo = undolist->next; 2085 kmem_free(undolist, sizeof (struct pci_fixundo)); 2086 undolist = nextundo; 2087 } 2088 } 2089 2090 static void 2091 undo_amd8111_pci_fix(uint8_t bus, uint8_t dev, uint8_t fn) 2092 { 2093 uint8_t val8; 2094 2095 val8 = pci_getb(bus, dev, fn, LPC_IO_CONTROL_REG_1); 2096 /* 2097 * The NMIONERR bit is turned back on to allow the SMM BIOS 2098 * to handle more critical PCI errors (e.g. PERR#). 2099 */ 2100 val8 |= AMD8111_ENABLENMI; 2101 pci_putb(bus, dev, fn, LPC_IO_CONTROL_REG_1, val8); 2102 } 2103 2104 static void 2105 pci_fix_amd8111(uint8_t bus, uint8_t dev, uint8_t fn) 2106 { 2107 uint8_t val8; 2108 2109 val8 = pci_getb(bus, dev, fn, LPC_IO_CONTROL_REG_1); 2110 2111 if ((val8 & AMD8111_ENABLENMI) == 0) 2112 return; 2113 2114 /* 2115 * We reset NMIONERR in the LPC because master-abort on the PCI 2116 * bridge side of the 8111 will cause NMI, which might cause SMI, 2117 * which sometimes prevents all devices from being enumerated. 2118 */ 2119 val8 &= ~AMD8111_ENABLENMI; 2120 2121 pci_putb(bus, dev, fn, LPC_IO_CONTROL_REG_1, val8); 2122 2123 add_undofix_entry(bus, dev, fn, undo_amd8111_pci_fix); 2124 } 2125 2126 static void 2127 set_devpm_d0(uchar_t bus, uchar_t dev, uchar_t func) 2128 { 2129 uint16_t status; 2130 uint8_t header; 2131 uint8_t cap_ptr; 2132 uint8_t cap_id; 2133 uint16_t pmcsr; 2134 2135 status = pci_getw(bus, dev, func, PCI_CONF_STAT); 2136 if (!(status & PCI_STAT_CAP)) 2137 return; /* No capabilities list */ 2138 2139 header = pci_getb(bus, dev, func, PCI_CONF_HEADER) & PCI_HEADER_TYPE_M; 2140 if (header == PCI_HEADER_CARDBUS) 2141 cap_ptr = pci_getb(bus, dev, func, PCI_CBUS_CAP_PTR); 2142 else 2143 cap_ptr = pci_getb(bus, dev, func, PCI_CONF_CAP_PTR); 2144 /* 2145 * Walk the capabilities list searching for a PM entry. 
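	 * Each entry in the list is a (capability ID, next pointer) pair;
	 * the bottom two bits of the next pointer are reserved, so it is
	 * masked with PCI_CAP_PTR_MASK before use. When the PM capability
	 * is found, the PMCSR power-state field is cleared and set to D0,
	 * so that the device is in its full-power state before its
	 * resources are programmed.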
2146 */ 2147 while (cap_ptr != PCI_CAP_NEXT_PTR_NULL && cap_ptr >= PCI_CAP_PTR_OFF) { 2148 cap_ptr &= PCI_CAP_PTR_MASK; 2149 cap_id = pci_getb(bus, dev, func, cap_ptr + PCI_CAP_ID); 2150 if (cap_id == PCI_CAP_ID_PM) { 2151 pmcsr = pci_getw(bus, dev, func, cap_ptr + PCI_PMCSR); 2152 pmcsr &= ~(PCI_PMCSR_STATE_MASK); 2153 pmcsr |= PCI_PMCSR_D0; /* D0 state */ 2154 pci_putw(bus, dev, func, cap_ptr + PCI_PMCSR, pmcsr); 2155 break; 2156 } 2157 cap_ptr = pci_getb(bus, dev, func, cap_ptr + PCI_CAP_NEXT_PTR); 2158 } 2159 2160 } 2161 2162 #define is_isa(bc, sc) \ 2163 (((bc) == PCI_CLASS_BRIDGE) && ((sc) == PCI_BRIDGE_ISA)) 2164 2165 static void 2166 process_devfunc(uchar_t bus, uchar_t dev, uchar_t func, uchar_t header, 2167 ushort_t vendorid, int config_op) 2168 { 2169 char nodename[32], unitaddr[5]; 2170 dev_info_t *dip; 2171 uchar_t basecl, subcl, progcl, intr, revid; 2172 ushort_t subvenid, subdevid, status; 2173 ushort_t slot_num; 2174 uint_t classcode, revclass; 2175 int reprogram = 0, pciide = 0; 2176 int power[2] = {1, 1}; 2177 int pciex = 0; 2178 ushort_t is_pci_bridge = 0; 2179 struct pci_devfunc *devlist = NULL, *entry = NULL; 2180 boolean_t slot_valid; 2181 gfx_entry_t *gfxp; 2182 pcie_req_id_t bdf; 2183 2184 ushort_t deviceid = pci_getw(bus, dev, func, PCI_CONF_DEVID); 2185 2186 switch (header & PCI_HEADER_TYPE_M) { 2187 case PCI_HEADER_ZERO: 2188 subvenid = pci_getw(bus, dev, func, PCI_CONF_SUBVENID); 2189 subdevid = pci_getw(bus, dev, func, PCI_CONF_SUBSYSID); 2190 break; 2191 case PCI_HEADER_CARDBUS: 2192 subvenid = pci_getw(bus, dev, func, PCI_CBUS_SUBVENID); 2193 subdevid = pci_getw(bus, dev, func, PCI_CBUS_SUBSYSID); 2194 /* Record the # of cardbus bridges found on the bus */ 2195 if (config_op == CONFIG_INFO) 2196 pci_bus_res[bus].num_cbb++; 2197 break; 2198 default: 2199 subvenid = 0; 2200 subdevid = 0; 2201 break; 2202 } 2203 2204 if (config_op == CONFIG_FIX) { 2205 if (vendorid == VENID_AMD && deviceid == DEVID_AMD8111_LPC) { 2206 pci_fix_amd8111(bus, dev, func); 2207 } 2208 return; 2209 } 2210 2211 /* XXX should be use generic names? derive from class? */ 2212 revclass = pci_getl(bus, dev, func, PCI_CONF_REVID); 2213 classcode = revclass >> 8; 2214 revid = revclass & 0xff; 2215 2216 /* figure out if this is pci-ide */ 2217 basecl = classcode >> 16; 2218 subcl = (classcode >> 8) & 0xff; 2219 progcl = classcode & 0xff; 2220 2221 2222 if (is_display(classcode)) 2223 (void) snprintf(nodename, sizeof (nodename), "display"); 2224 else if (!pseudo_isa && is_isa(basecl, subcl)) 2225 (void) snprintf(nodename, sizeof (nodename), "isa"); 2226 else if (subvenid != 0) 2227 (void) snprintf(nodename, sizeof (nodename), 2228 "pci%x,%x", subvenid, subdevid); 2229 else 2230 (void) snprintf(nodename, sizeof (nodename), 2231 "pci%x,%x", vendorid, deviceid); 2232 2233 /* make sure parent bus dip has been created */ 2234 if (pci_bus_res[bus].dip == NULL) 2235 create_root_bus_dip(bus); 2236 2237 ndi_devi_alloc_sleep(pci_bus_res[bus].dip, nodename, 2238 DEVI_SID_NODEID, &dip); 2239 2240 if (check_if_device_is_pciex(dip, bus, dev, func, &slot_valid, 2241 &slot_num, &is_pci_bridge) == B_TRUE) 2242 pciex = 1; 2243 2244 bdf = PCI_GETBDF(bus, dev, func); 2245 /* 2246 * Record BAD AMD bridges which don't support MMIO config access. 
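	 * For PCI-PCI bridges among these, the secondary and subordinate
	 * bus numbers are captured as well, so that the workaround
	 * registered with pci_cfgacc_add_workaround() can also cover the
	 * buses behind the bridge.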
2247 */ 2248 if (IS_BAD_AMD_NTBRIDGE(vendorid, deviceid) || 2249 IS_AMD_8132_CHIP(vendorid, deviceid)) { 2250 uchar_t secbus = 0; 2251 uchar_t subbus = 0; 2252 2253 if ((basecl == PCI_CLASS_BRIDGE) && 2254 (subcl == PCI_BRIDGE_PCI)) { 2255 secbus = pci_getb(bus, dev, func, PCI_BCNF_SECBUS); 2256 subbus = pci_getb(bus, dev, func, PCI_BCNF_SUBBUS); 2257 } 2258 pci_cfgacc_add_workaround(bdf, secbus, subbus); 2259 } 2260 2261 /* 2262 * Only populate bus_t if this device is sitting under a PCIE root 2263 * complex. Some particular machines have both a PCIE root complex and 2264 * a PCI hostbridge, in which case only devices under the PCIE root 2265 * complex will have their bus_t populated. 2266 */ 2267 if (pcie_get_rc_dip(dip) != NULL) { 2268 ck804_fix_aer_ptr(dip, bdf); 2269 (void) pcie_init_bus(dip, bdf, PCIE_BUS_INITIAL); 2270 } 2271 2272 /* add properties */ 2273 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "device-id", deviceid); 2274 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "vendor-id", vendorid); 2275 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "revision-id", revid); 2276 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 2277 "class-code", classcode); 2278 if (func == 0) 2279 (void) snprintf(unitaddr, sizeof (unitaddr), "%x", dev); 2280 else 2281 (void) snprintf(unitaddr, sizeof (unitaddr), 2282 "%x,%x", dev, func); 2283 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, 2284 "unit-address", unitaddr); 2285 2286 /* add device_type for display nodes */ 2287 if (is_display(classcode)) { 2288 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, 2289 "device_type", "display"); 2290 } 2291 /* add special stuff for header type */ 2292 if ((header & PCI_HEADER_TYPE_M) == PCI_HEADER_ZERO) { 2293 uchar_t mingrant = pci_getb(bus, dev, func, PCI_CONF_MIN_G); 2294 uchar_t maxlatency = pci_getb(bus, dev, func, PCI_CONF_MAX_L); 2295 2296 if (subvenid != 0) { 2297 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 2298 "subsystem-id", subdevid); 2299 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 2300 "subsystem-vendor-id", subvenid); 2301 } 2302 if (!pciex) 2303 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 2304 "min-grant", mingrant); 2305 if (!pciex) 2306 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 2307 "max-latency", maxlatency); 2308 } 2309 2310 /* interrupt, record if not 0 */ 2311 intr = pci_getb(bus, dev, func, PCI_CONF_IPIN); 2312 if (intr != 0) 2313 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 2314 "interrupts", intr); 2315 2316 /* 2317 * Add support for 133 mhz pci eventually 2318 */ 2319 status = pci_getw(bus, dev, func, PCI_CONF_STAT); 2320 2321 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 2322 "devsel-speed", (status & PCI_STAT_DEVSELT) >> 9); 2323 if (!pciex && (status & PCI_STAT_FBBC)) 2324 (void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip, 2325 "fast-back-to-back"); 2326 if (!pciex && (status & PCI_STAT_66MHZ)) 2327 (void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip, 2328 "66mhz-capable"); 2329 if (status & PCI_STAT_UDF) 2330 (void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip, 2331 "udf-supported"); 2332 if (pciex && slot_valid) { 2333 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 2334 "physical-slot#", slot_num); 2335 if (!is_pci_bridge) 2336 pciex_slot_names_prop(dip, slot_num); 2337 } 2338 2339 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, 2340 "power-consumption", power, 2); 2341 2342 /* Set the device PM state to D0 */ 2343 set_devpm_d0(bus, dev, func); 2344 2345 if ((basecl == PCI_CLASS_BRIDGE) && (subcl == PCI_BRIDGE_PCI)) 2346 add_ppb_props(dip, 
		    bus, dev, func, pciex, is_pci_bridge);
	else {
		/*
		 * Record the non-PPB devices on the bus for possible
		 * reprogramming during the second bus enumeration.
		 * Note: PPB reprogramming is done in fix_ppb_res().
		 */
		devlist = (struct pci_devfunc *)pci_bus_res[bus].privdata;
		entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
		entry->dip = dip;
		entry->dev = dev;
		entry->func = func;
		entry->next = devlist;
		pci_bus_res[bus].privdata = entry;
	}

	if (IS_CLASS_IOAPIC(basecl, subcl, progcl)) {
		create_ioapic_node(bus, dev, func, vendorid, deviceid);
	}

	/* check for NVIDIA CK8-04/MCP55 based LPC bridge */
	if (NVIDIA_IS_LPC_BRIDGE(vendorid, deviceid) && (dev == 1) &&
	    (func == 0)) {
		add_nvidia_isa_bridge_props(dip, bus, dev, func);
		/* each LPC bridge has an integrated IOAPIC */
		apic_nvidia_io_max++;
	}

	if (pciex && is_pci_bridge)
		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
		    (char *)"PCIe-PCI bridge");
	else
		add_model_prop(dip, classcode);

	add_compatible(dip, subvenid, subdevid, vendorid, deviceid,
	    revid, classcode, pciex);

	/*
	 * See if this device is a controller that advertises itself as a
	 * standard ATA task file controller, or one that has been
	 * hard-coded.
	 *
	 * If it is, check if any other higher-precedence driver listed in
	 * driver_aliases will claim the node by calling
	 * ddi_compatible_driver_major. If so, clear pciide and do not
	 * create a pci-ide node or do any other special handling.
	 *
	 * If another driver does not bind, set the node name to pci-ide
	 * and then let the special pci-ide handling for registers and
	 * child pci-ide nodes proceed below.
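	 *
	 * (ddi_compatible_driver_major() resolves a driver binding from the
	 * 'compatible' property created just above by add_compatible(),
	 * along with the node name, using the driver_aliases bindings.)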
2396 */ 2397 if (is_pciide(basecl, subcl, revid, vendorid, deviceid, 2398 subvenid, subdevid) == 1) { 2399 if (ddi_compatible_driver_major(dip, NULL) == (major_t)-1) { 2400 (void) ndi_devi_set_nodename(dip, "pci-ide", 0); 2401 pciide = 1; 2402 } 2403 } 2404 2405 DEVI_SET_PCI(dip); 2406 reprogram = add_reg_props(dip, bus, dev, func, config_op, pciide); 2407 (void) ndi_devi_bind_driver(dip, 0); 2408 2409 /* special handling for pci-ide */ 2410 if (pciide) { 2411 dev_info_t *cdip; 2412 2413 /* 2414 * Create properties specified by P1275 Working Group 2415 * Proposal #414 Version 1 2416 */ 2417 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, 2418 "device_type", "pci-ide"); 2419 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 2420 "#address-cells", 1); 2421 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 2422 "#size-cells", 0); 2423 2424 /* allocate two child nodes */ 2425 ndi_devi_alloc_sleep(dip, "ide", 2426 (pnode_t)DEVI_SID_NODEID, &cdip); 2427 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cdip, 2428 "reg", 0); 2429 (void) ndi_devi_bind_driver(cdip, 0); 2430 ndi_devi_alloc_sleep(dip, "ide", 2431 (pnode_t)DEVI_SID_NODEID, &cdip); 2432 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cdip, 2433 "reg", 1); 2434 (void) ndi_devi_bind_driver(cdip, 0); 2435 2436 reprogram = 0; /* don't reprogram pci-ide bridge */ 2437 } 2438 2439 if (is_display(classcode)) { 2440 gfxp = kmem_zalloc(sizeof (*gfxp), KM_SLEEP); 2441 gfxp->g_dip = dip; 2442 gfxp->g_prev = NULL; 2443 gfxp->g_next = gfx_devinfo_list; 2444 gfx_devinfo_list = gfxp; 2445 if (gfxp->g_next) 2446 gfxp->g_next->g_prev = gfxp; 2447 } 2448 2449 /* special handling for isa */ 2450 if (!pseudo_isa && is_isa(basecl, subcl)) { 2451 /* add device_type */ 2452 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, 2453 "device_type", "isa"); 2454 } 2455 2456 if (reprogram && (entry != NULL)) 2457 entry->reprogram = B_TRUE; 2458 2459 } 2460 2461 /* 2462 * Some vendors do not use unique subsystem IDs in their products, which 2463 * makes the use of form 2 compatible names (pciSSSS,ssss) inappropriate. 2464 * Allow for these compatible forms to be excluded on a per-device basis. 2465 */ 2466 /*ARGSUSED*/ 2467 static boolean_t 2468 subsys_compat_exclude(ushort_t venid, ushort_t devid, ushort_t subvenid, 2469 ushort_t subdevid, uchar_t revid, uint_t classcode) 2470 { 2471 /* Nvidia display adapters */ 2472 if ((venid == 0x10de) && (is_display(classcode))) 2473 return (B_TRUE); 2474 2475 /* 2476 * 8086,166 is the Ivy Bridge built-in graphics controller on some 2477 * models. Unfortunately 8086,2044 is the Skylake Server processor 2478 * memory channel device. The Ivy Bridge device uses the Skylake 2479 * ID as its sub-device ID. The GPU is not a memory controller DIMM 2480 * channel. 2481 */ 2482 if (venid == 0x8086 && devid == 0x166 && subvenid == 0x8086 && 2483 subdevid == 0x2044) { 2484 return (B_TRUE); 2485 } 2486 2487 return (B_FALSE); 2488 } 2489 2490 /* 2491 * Set the compatible property to a value compliant with rev 2.1 of the IEEE1275 2492 * PCI binding. This is also used for PCI express devices and we have our own 2493 * minor additions. 2494 * 2495 * pciVVVV,DDDD.SSSS.ssss.RR (0) 2496 * pciVVVV,DDDD.SSSS.ssss (1) 2497 * pciSSSS,ssss,s (2+) 2498 * pciSSSS,ssss (2) 2499 * pciVVVV,DDDD.RR (3) 2500 * pciVVVV,DDDD,p (4+) 2501 * pciVVVV,DDDD (4) 2502 * pciclass,CCSSPP (5) 2503 * pciclass,CCSS (6) 2504 * 2505 * The Subsystem (SSSS) forms are not inserted if subsystem-vendor-id is 0 or if 2506 * it is a case where we know that the IDs overlap. 
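 *
 * As a purely illustrative example, a PCIe device with vendor ID 0x1234,
 * device ID 0xabcd, subsystem 0x5678,0x9abc, revision 0x01 and class code
 * 0x010802 would receive, among others:
 *	pciex1234,abcd.5678.9abc.1	(form 0)
 *	pciex1234,abcd			(form 4)
 *	pci5678,9abc,s			(form 2+)
 *	pciexclass,010802		(form 5)
 * together with the remaining "pciex" and "pci" forms listed above.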
2507 * 2508 * NOTE: For PCI-Express devices "pci" is replaced with "pciex" in 0-6 above and 2509 * property 2 is not created as per "1275 bindings for PCI Express 2510 * Interconnect". 2511 * 2512 * Unlike on SPARC, we generate both the "pciex" and "pci" versions of the 2513 * above. The problem with property 2 is that it has an ambiguity with 2514 * property 4. To make sure that drivers can specify either form of 2 or 4 2515 * without ambiguity we add a suffix. The 'p' suffix represents the primary ID, 2516 * meaning that it is guaranteed to be form 4. The 's' suffix means that it is 2517 * sub-vendor and sub-device form, meaning it is guaranteed to be form 2. 2518 * 2519 * Set with setprop and \x00 between each to generate the encoded string array 2520 * form. 2521 */ 2522 void 2523 add_compatible(dev_info_t *dip, ushort_t subvenid, ushort_t subdevid, 2524 ushort_t vendorid, ushort_t deviceid, uchar_t revid, uint_t classcode, 2525 int pciex) 2526 { 2527 int i = 0; 2528 int size = COMPAT_BUFSIZE; 2529 char *compat[15]; 2530 char *buf, *curr; 2531 2532 curr = buf = kmem_alloc(size, KM_SLEEP); 2533 2534 if (pciex) { 2535 if (subvenid) { 2536 compat[i++] = curr; /* form 0 */ 2537 (void) snprintf(curr, size, "pciex%x,%x.%x.%x.%x", 2538 vendorid, deviceid, subvenid, subdevid, revid); 2539 size -= strlen(curr) + 1; 2540 curr += strlen(curr) + 1; 2541 2542 compat[i++] = curr; /* form 1 */ 2543 (void) snprintf(curr, size, "pciex%x,%x.%x.%x", 2544 vendorid, deviceid, subvenid, subdevid); 2545 size -= strlen(curr) + 1; 2546 curr += strlen(curr) + 1; 2547 2548 } 2549 compat[i++] = curr; /* form 3 */ 2550 (void) snprintf(curr, size, "pciex%x,%x.%x", 2551 vendorid, deviceid, revid); 2552 size -= strlen(curr) + 1; 2553 curr += strlen(curr) + 1; 2554 2555 compat[i++] = curr; /* form 4 */ 2556 (void) snprintf(curr, size, "pciex%x,%x", vendorid, deviceid); 2557 size -= strlen(curr) + 1; 2558 curr += strlen(curr) + 1; 2559 2560 compat[i++] = curr; /* form 5 */ 2561 (void) snprintf(curr, size, "pciexclass,%06x", classcode); 2562 size -= strlen(curr) + 1; 2563 curr += strlen(curr) + 1; 2564 2565 compat[i++] = curr; /* form 6 */ 2566 (void) snprintf(curr, size, "pciexclass,%04x", 2567 (classcode >> 8)); 2568 size -= strlen(curr) + 1; 2569 curr += strlen(curr) + 1; 2570 } 2571 2572 if (subvenid) { 2573 compat[i++] = curr; /* form 0 */ 2574 (void) snprintf(curr, size, "pci%x,%x.%x.%x.%x", 2575 vendorid, deviceid, subvenid, subdevid, revid); 2576 size -= strlen(curr) + 1; 2577 curr += strlen(curr) + 1; 2578 2579 compat[i++] = curr; /* form 1 */ 2580 (void) snprintf(curr, size, "pci%x,%x.%x.%x", 2581 vendorid, deviceid, subvenid, subdevid); 2582 size -= strlen(curr) + 1; 2583 curr += strlen(curr) + 1; 2584 2585 if (subsys_compat_exclude(vendorid, deviceid, subvenid, 2586 subdevid, revid, classcode) == B_FALSE) { 2587 compat[i++] = curr; /* form 2+ */ 2588 (void) snprintf(curr, size, "pci%x,%x,s", subvenid, 2589 subdevid); 2590 size -= strlen(curr) + 1; 2591 curr += strlen(curr) + 1; 2592 2593 compat[i++] = curr; /* form 2 */ 2594 (void) snprintf(curr, size, "pci%x,%x", subvenid, 2595 subdevid); 2596 size -= strlen(curr) + 1; 2597 curr += strlen(curr) + 1; 2598 } 2599 } 2600 compat[i++] = curr; /* form 3 */ 2601 (void) snprintf(curr, size, "pci%x,%x.%x", vendorid, deviceid, revid); 2602 size -= strlen(curr) + 1; 2603 curr += strlen(curr) + 1; 2604 2605 compat[i++] = curr; /* form 4+ */ 2606 (void) snprintf(curr, size, "pci%x,%x,p", vendorid, deviceid); 2607 size -= strlen(curr) + 1; 2608 curr += strlen(curr) + 1; 
2609 2610 compat[i++] = curr; /* form 4 */ 2611 (void) snprintf(curr, size, "pci%x,%x", vendorid, deviceid); 2612 size -= strlen(curr) + 1; 2613 curr += strlen(curr) + 1; 2614 2615 compat[i++] = curr; /* form 5 */ 2616 (void) snprintf(curr, size, "pciclass,%06x", classcode); 2617 size -= strlen(curr) + 1; 2618 curr += strlen(curr) + 1; 2619 2620 compat[i++] = curr; /* form 6 */ 2621 (void) snprintf(curr, size, "pciclass,%04x", (classcode >> 8)); 2622 size -= strlen(curr) + 1; 2623 curr += strlen(curr) + 1; 2624 2625 (void) ndi_prop_update_string_array(DDI_DEV_T_NONE, dip, 2626 "compatible", compat, i); 2627 kmem_free(buf, COMPAT_BUFSIZE); 2628 } 2629 2630 /* 2631 * Adjust the reg properties for a dual channel PCI-IDE device. 2632 * 2633 * NOTE: don't do anything that changes the order of the hard-decodes 2634 * and programmed BARs. The kernel driver depends on these values 2635 * being in this order regardless of whether they're for a 'native' 2636 * mode BAR or not. 2637 */ 2638 /* 2639 * config info for pci-ide devices 2640 */ 2641 static struct { 2642 uchar_t native_mask; /* 0 == 'compatibility' mode, 1 == native */ 2643 uchar_t bar_offset; /* offset for alt status register */ 2644 ushort_t addr; /* compatibility mode base address */ 2645 ushort_t length; /* number of ports for this BAR */ 2646 } pciide_bar[] = { 2647 { 0x01, 0, 0x1f0, 8 }, /* primary lower BAR */ 2648 { 0x01, 2, 0x3f6, 1 }, /* primary upper BAR */ 2649 { 0x04, 0, 0x170, 8 }, /* secondary lower BAR */ 2650 { 0x04, 2, 0x376, 1 } /* secondary upper BAR */ 2651 }; 2652 2653 static int 2654 pciIdeAdjustBAR(uchar_t progcl, int index, uint_t *basep, uint_t *lenp) 2655 { 2656 int hard_decode = 0; 2657 2658 /* 2659 * Adjust the base and len for the BARs of the PCI-IDE 2660 * device's primary and secondary controllers. The first 2661 * two BARs are for the primary controller and the next 2662 * two BARs are for the secondary controller. The fifth 2663 * and sixth bars are never adjusted. 
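	 *
	 * For example, if the primary channel's native-mode bit is clear in
	 * the programming-interface code, BAR0/BAR1 are forced to the legacy
	 * 0x1f0/0x3f6 decodes (and flagged as hard-decoded); if it is set,
	 * the firmware-programmed BAR value is used, with the upper BAR
	 * offset by 2 so that it points at the alternate-status register.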
2664 */ 2665 if (index >= 0 && index <= 3) { 2666 *lenp = pciide_bar[index].length; 2667 2668 if (progcl & pciide_bar[index].native_mask) { 2669 *basep += pciide_bar[index].bar_offset; 2670 } else { 2671 *basep = pciide_bar[index].addr; 2672 hard_decode = 1; 2673 } 2674 } 2675 2676 /* 2677 * if either base or len is zero make certain both are zero 2678 */ 2679 if (*basep == 0 || *lenp == 0) { 2680 *basep = 0; 2681 *lenp = 0; 2682 hard_decode = 0; 2683 } 2684 2685 return (hard_decode); 2686 } 2687 2688 2689 /* 2690 * Add the "reg" and "assigned-addresses" property 2691 */ 2692 static int 2693 add_reg_props(dev_info_t *dip, uchar_t bus, uchar_t dev, uchar_t func, 2694 int config_op, int pciide) 2695 { 2696 uchar_t baseclass, subclass, progclass, header; 2697 ushort_t bar_sz; 2698 uint64_t value = 0, fbase; 2699 uint_t devloc; 2700 uint_t base, base_hi, type; 2701 ushort_t offset, end; 2702 int max_basereg, j, reprogram = 0; 2703 uint_t phys_hi; 2704 struct memlist **io_avail, **io_used; 2705 struct memlist **mem_avail, **mem_used; 2706 struct memlist **pmem_avail, **pmem_used; 2707 uchar_t res_bus; 2708 2709 pci_regspec_t regs[16] = {{0}}; 2710 pci_regspec_t assigned[15] = {{0}}; 2711 int nreg, nasgn; 2712 2713 io_avail = &pci_bus_res[bus].io_avail; 2714 io_used = &pci_bus_res[bus].io_used; 2715 mem_avail = &pci_bus_res[bus].mem_avail; 2716 mem_used = &pci_bus_res[bus].mem_used; 2717 pmem_avail = &pci_bus_res[bus].pmem_avail; 2718 pmem_used = &pci_bus_res[bus].pmem_used; 2719 2720 dump_memlists("add_reg_props start", bus); 2721 2722 devloc = (uint_t)bus << 16 | (uint_t)dev << 11 | (uint_t)func << 8; 2723 regs[0].pci_phys_hi = devloc; 2724 nreg = 1; /* rest of regs[0] is all zero */ 2725 nasgn = 0; 2726 2727 baseclass = pci_getb(bus, dev, func, PCI_CONF_BASCLASS); 2728 subclass = pci_getb(bus, dev, func, PCI_CONF_SUBCLASS); 2729 progclass = pci_getb(bus, dev, func, PCI_CONF_PROGCLASS); 2730 header = pci_getb(bus, dev, func, PCI_CONF_HEADER) & PCI_HEADER_TYPE_M; 2731 2732 switch (header) { 2733 case PCI_HEADER_ZERO: 2734 max_basereg = PCI_BASE_NUM; 2735 break; 2736 case PCI_HEADER_PPB: 2737 max_basereg = PCI_BCNF_BASE_NUM; 2738 break; 2739 case PCI_HEADER_CARDBUS: 2740 max_basereg = PCI_CBUS_BASE_NUM; 2741 reprogram = 1; 2742 break; 2743 default: 2744 max_basereg = 0; 2745 break; 2746 } 2747 2748 /* 2749 * Create the register property by saving the current 2750 * value of the base register. Write 0xffffffff to the 2751 * base register. Read the value back to determine the 2752 * required size of the address space. Restore the base 2753 * register contents. 2754 * 2755 * Do not disable I/O and memory access for bridges; this 2756 * has the side-effect of making the bridge transparent to 2757 * secondary-bus activity (see sections 4.1-4.3 of the 2758 * PCI-PCI Bridge Spec V1.2). 
For non-bridges, disable 2759 * I/O and memory access to avoid difficulty with USB 2760 * emulation (see OHCI spec1.0a appendix B 2761 * "Host Controller Mapping") 2762 */ 2763 end = PCI_CONF_BASE0 + max_basereg * sizeof (uint_t); 2764 for (j = 0, offset = PCI_CONF_BASE0; offset < end; 2765 j++, offset += bar_sz) { 2766 uint_t command = 0; 2767 2768 /* determine the size of the address space */ 2769 base = pci_getl(bus, dev, func, offset); 2770 if (baseclass != PCI_CLASS_BRIDGE) { 2771 command = (uint_t)pci_getw(bus, dev, func, 2772 PCI_CONF_COMM); 2773 pci_putw(bus, dev, func, PCI_CONF_COMM, 2774 command & ~(PCI_COMM_MAE | PCI_COMM_IO)); 2775 } 2776 pci_putl(bus, dev, func, offset, 0xffffffff); 2777 value = pci_getl(bus, dev, func, offset); 2778 pci_putl(bus, dev, func, offset, base); 2779 if (baseclass != PCI_CLASS_BRIDGE) 2780 pci_putw(bus, dev, func, PCI_CONF_COMM, command); 2781 2782 /* construct phys hi,med.lo, size hi, lo */ 2783 if ((pciide && j < 4) || (base & PCI_BASE_SPACE_IO)) { 2784 int hard_decode = 0; 2785 uint_t len; 2786 2787 /* i/o space */ 2788 bar_sz = PCI_BAR_SZ_32; 2789 value &= PCI_BASE_IO_ADDR_M; 2790 len = ((value ^ (value-1)) + 1) >> 1; 2791 2792 /* XXX Adjust first 4 IDE registers */ 2793 if (pciide) { 2794 if (subclass != PCI_MASS_IDE) 2795 progclass = (PCI_IDE_IF_NATIVE_PRI | 2796 PCI_IDE_IF_NATIVE_SEC); 2797 hard_decode = pciIdeAdjustBAR(progclass, j, 2798 &base, &len); 2799 } else if (value == 0) { 2800 /* skip base regs with size of 0 */ 2801 continue; 2802 } 2803 2804 regs[nreg].pci_phys_hi = PCI_ADDR_IO | devloc | 2805 (hard_decode ? PCI_RELOCAT_B : offset); 2806 regs[nreg].pci_phys_low = hard_decode ? 2807 base & PCI_BASE_IO_ADDR_M : 0; 2808 assigned[nasgn].pci_phys_hi = 2809 PCI_RELOCAT_B | regs[nreg].pci_phys_hi; 2810 regs[nreg].pci_size_low = 2811 assigned[nasgn].pci_size_low = len; 2812 type = base & (~PCI_BASE_IO_ADDR_M); 2813 base &= PCI_BASE_IO_ADDR_M; 2814 /* 2815 * A device under a subtractive PPB can allocate 2816 * resources from its parent bus if there is no resource 2817 * available on its own bus. 2818 */ 2819 if ((config_op == CONFIG_NEW) && (*io_avail == NULL)) { 2820 res_bus = bus; 2821 while (pci_bus_res[res_bus].subtractive) { 2822 res_bus = pci_bus_res[res_bus].par_bus; 2823 if (res_bus == (uchar_t)-1) 2824 break; /* root bus already */ 2825 if (pci_bus_res[res_bus].io_avail) { 2826 io_avail = &pci_bus_res 2827 [res_bus].io_avail; 2828 break; 2829 } 2830 } 2831 } 2832 2833 /* 2834 * first pass - gather what's there 2835 * update/second pass - adjust/allocate regions 2836 * config - allocate regions 2837 */ 2838 if (config_op == CONFIG_INFO) { /* first pass */ 2839 /* take out of the resource map of the bus */ 2840 if (base != 0) { 2841 (void) memlist_remove(io_avail, base, 2842 len); 2843 memlist_insert(io_used, base, len); 2844 } else { 2845 reprogram = 1; 2846 } 2847 pci_bus_res[bus].io_size += len; 2848 } else if ((*io_avail && base == 0) || 2849 pci_bus_res[bus].io_reprogram) { 2850 base = (uint_t)memlist_find(io_avail, len, len); 2851 if (base != 0) { 2852 memlist_insert(io_used, base, len); 2853 /* XXX need to worry about 64-bit? 
*/ 2854 pci_putl(bus, dev, func, offset, 2855 base | type); 2856 base = pci_getl(bus, dev, func, offset); 2857 base &= PCI_BASE_IO_ADDR_M; 2858 } 2859 if (base == 0) { 2860 cmn_err(CE_WARN, "failed to program" 2861 " IO space [%d/%d/%d] BAR@0x%x" 2862 " length 0x%x", 2863 bus, dev, func, offset, len); 2864 } 2865 } 2866 assigned[nasgn].pci_phys_low = base; 2867 nreg++, nasgn++; 2868 2869 } else { 2870 uint64_t len; 2871 /* memory space */ 2872 if ((base & PCI_BASE_TYPE_M) == PCI_BASE_TYPE_ALL) { 2873 bar_sz = PCI_BAR_SZ_64; 2874 base_hi = pci_getl(bus, dev, func, offset + 4); 2875 pci_putl(bus, dev, func, offset + 4, 2876 0xffffffff); 2877 value |= (uint64_t)pci_getl(bus, dev, func, 2878 offset + 4) << 32; 2879 pci_putl(bus, dev, func, offset + 4, base_hi); 2880 phys_hi = PCI_ADDR_MEM64; 2881 value &= PCI_BASE_M_ADDR64_M; 2882 } else { 2883 bar_sz = PCI_BAR_SZ_32; 2884 base_hi = 0; 2885 phys_hi = PCI_ADDR_MEM32; 2886 value &= PCI_BASE_M_ADDR_M; 2887 } 2888 2889 /* skip base regs with size of 0 */ 2890 if (value == 0) 2891 continue; 2892 2893 len = ((value ^ (value-1)) + 1) >> 1; 2894 regs[nreg].pci_size_low = 2895 assigned[nasgn].pci_size_low = len & 0xffffffff; 2896 regs[nreg].pci_size_hi = 2897 assigned[nasgn].pci_size_hi = len >> 32; 2898 2899 phys_hi |= (devloc | offset); 2900 if (base & PCI_BASE_PREF_M) 2901 phys_hi |= PCI_PREFETCH_B; 2902 2903 /* 2904 * A device under a subtractive PPB can allocate 2905 * resources from its parent bus if there is no resource 2906 * available on its own bus. 2907 */ 2908 if ((config_op == CONFIG_NEW) && (*mem_avail == NULL)) { 2909 res_bus = bus; 2910 while (pci_bus_res[res_bus].subtractive) { 2911 res_bus = pci_bus_res[res_bus].par_bus; 2912 if (res_bus == (uchar_t)-1) 2913 break; /* root bus already */ 2914 mem_avail = 2915 &pci_bus_res[res_bus].mem_avail; 2916 pmem_avail = 2917 &pci_bus_res [res_bus].pmem_avail; 2918 /* 2919 * Break out as long as at least 2920 * mem_avail is available 2921 */ 2922 if ((*pmem_avail && 2923 (phys_hi & PCI_PREFETCH_B)) || 2924 *mem_avail) { 2925 break; 2926 } 2927 } 2928 } 2929 2930 regs[nreg].pci_phys_hi = 2931 assigned[nasgn].pci_phys_hi = phys_hi; 2932 assigned[nasgn].pci_phys_hi |= PCI_RELOCAT_B; 2933 type = base & ~PCI_BASE_M_ADDR_M; 2934 base &= PCI_BASE_M_ADDR_M; 2935 2936 fbase = (((uint64_t)base_hi) << 32) | base; 2937 2938 if (config_op == CONFIG_INFO) { 2939 /* take out of the resource map of the bus */ 2940 if (fbase != 0) { 2941 /* remove from PMEM and MEM space */ 2942 (void) memlist_remove(mem_avail, 2943 fbase, len); 2944 (void) memlist_remove(pmem_avail, 2945 fbase, len); 2946 /* only note as used in correct map */ 2947 if (phys_hi & PCI_PREFETCH_B) 2948 memlist_insert(pmem_used, 2949 fbase, len); 2950 else 2951 memlist_insert(mem_used, 2952 fbase, len); 2953 } else { 2954 reprogram = 1; 2955 } 2956 pci_bus_res[bus].mem_size += len; 2957 } else if (pci_bus_res[bus].mem_reprogram || 2958 (fbase == 0 && 2959 (*mem_avail != NULL || *pmem_avail != NULL))) { 2960 2961 fbase = 0; 2962 2963 /* 2964 * When desired, attempt a prefetchable 2965 * allocation first 2966 */ 2967 if ((phys_hi & PCI_PREFETCH_B) && 2968 *pmem_avail != NULL) { 2969 fbase = memlist_find(pmem_avail, 2970 len, len); 2971 if (fbase != 0) { 2972 memlist_insert(pmem_used, 2973 fbase, len); 2974 (void) memlist_remove( 2975 pmem_avail, fbase, len); 2976 cmn_err(CE_NOTE, "!program " 2977 "[%x/%x/%x] BAR@0x%x" 2978 " 0x%lx length 0x%lx", 2979 bus, dev, func, offset, 2980 fbase, len); 2981 } 2982 } 2983 /* 2984 * If prefetchable allocation was 
not 2985 * desired, or failed, attempt ordinary 2986 * memory allocation 2987 */ 2988 if (fbase == 0 && *mem_avail != NULL) { 2989 fbase = memlist_find(mem_avail, 2990 len, len); 2991 if (fbase != 0) { 2992 memlist_insert(mem_used, 2993 fbase, len); 2994 (void) memlist_remove( 2995 mem_avail, fbase, len); 2996 cmn_err(CE_NOTE, "!program " 2997 "[%x/%x/%x] BAR@0x%x" 2998 " 0x%lx length 0x%lx", 2999 bus, dev, func, offset, 3000 fbase, len); 3001 } 3002 } 3003 3004 base_hi = fbase >> 32; 3005 base = fbase & 0xffffffff; 3006 3007 if (fbase != 0) { 3008 pci_putl(bus, dev, func, offset, 3009 base | type); 3010 base = pci_getl(bus, dev, func, offset); 3011 3012 if (bar_sz == PCI_BAR_SZ_64) { 3013 pci_putl(bus, dev, func, 3014 offset + 4, base_hi); 3015 base_hi = pci_getl(bus, dev, 3016 func, offset + 4); 3017 } 3018 3019 base &= PCI_BASE_M_ADDR_M; 3020 } else { 3021 cmn_err(CE_WARN, "failed to program " 3022 "mem space [%x/%x/%x] BAR@0x%x" 3023 " length 0x%"PRIx64, 3024 bus, dev, func, offset, len); 3025 } 3026 } 3027 3028 assigned[nasgn].pci_phys_mid = base_hi; 3029 assigned[nasgn].pci_phys_low = base; 3030 3031 dcmn_err(CE_NOTE, 3032 "![%x/%x/%x] --- %08x.%x.%x.%x.%x", 3033 bus, dev, func, 3034 assigned[nasgn].pci_phys_hi, 3035 assigned[nasgn].pci_phys_mid, 3036 assigned[nasgn].pci_phys_low, 3037 assigned[nasgn].pci_size_hi, 3038 assigned[nasgn].pci_size_low); 3039 3040 nreg++, nasgn++; 3041 } 3042 } 3043 switch (header) { 3044 case PCI_HEADER_ZERO: 3045 offset = PCI_CONF_ROM; 3046 break; 3047 case PCI_HEADER_PPB: 3048 offset = PCI_BCNF_ROM; 3049 break; 3050 default: /* including PCI_HEADER_CARDBUS */ 3051 goto done; 3052 } 3053 3054 /* 3055 * Add the expansion rom memory space 3056 * Determine the size of the ROM base reg; don't write reserved bits 3057 * ROM isn't in the PCI memory space. 
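	 *
	 * The size probe below uses the same arithmetic as the regular BARs:
	 * ((value ^ (value - 1)) + 1) >> 1 converts the read-back address
	 * mask into the region size. As an illustrative example, a read-back
	 * value of 0xfffe0000 gives 0xfffe0000 ^ 0xfffdffff = 0x3ffff, plus
	 * one is 0x40000, shifted right once is 0x20000, i.e. a 128KB ROM.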
3058 */ 3059 base = pci_getl(bus, dev, func, offset); 3060 pci_putl(bus, dev, func, offset, PCI_BASE_ROM_ADDR_M); 3061 value = pci_getl(bus, dev, func, offset); 3062 pci_putl(bus, dev, func, offset, base); 3063 if (value & PCI_BASE_ROM_ENABLE) 3064 value &= PCI_BASE_ROM_ADDR_M; 3065 else 3066 value = 0; 3067 3068 if (value != 0) { 3069 uint_t len; 3070 3071 regs[nreg].pci_phys_hi = (PCI_ADDR_MEM32 | devloc) + offset; 3072 assigned[nasgn].pci_phys_hi = (PCI_RELOCAT_B | 3073 PCI_ADDR_MEM32 | devloc) + offset; 3074 base &= PCI_BASE_ROM_ADDR_M; 3075 assigned[nasgn].pci_phys_low = base; 3076 len = ((value ^ (value-1)) + 1) >> 1; 3077 regs[nreg].pci_size_low = assigned[nasgn].pci_size_low = len; 3078 nreg++, nasgn++; 3079 /* take it out of the memory resource */ 3080 if (base != 0) { 3081 (void) memlist_remove(mem_avail, base, len); 3082 memlist_insert(mem_used, base, len); 3083 pci_bus_res[bus].mem_size += len; 3084 } 3085 } 3086 3087 /* 3088 * Account for "legacy" (alias) video adapter resources 3089 */ 3090 3091 /* add the three hard-decode, aliased address spaces for VGA */ 3092 if ((baseclass == PCI_CLASS_DISPLAY && subclass == PCI_DISPLAY_VGA) || 3093 (baseclass == PCI_CLASS_NONE && subclass == PCI_NONE_VGA)) { 3094 3095 /* VGA hard decode 0x3b0-0x3bb */ 3096 regs[nreg].pci_phys_hi = assigned[nasgn].pci_phys_hi = 3097 (PCI_RELOCAT_B | PCI_ALIAS_B | PCI_ADDR_IO | devloc); 3098 regs[nreg].pci_phys_low = assigned[nasgn].pci_phys_low = 0x3b0; 3099 regs[nreg].pci_size_low = assigned[nasgn].pci_size_low = 0xc; 3100 nreg++, nasgn++; 3101 (void) memlist_remove(io_avail, 0x3b0, 0xc); 3102 memlist_insert(io_used, 0x3b0, 0xc); 3103 pci_bus_res[bus].io_size += 0xc; 3104 3105 /* VGA hard decode 0x3c0-0x3df */ 3106 regs[nreg].pci_phys_hi = assigned[nasgn].pci_phys_hi = 3107 (PCI_RELOCAT_B | PCI_ALIAS_B | PCI_ADDR_IO | devloc); 3108 regs[nreg].pci_phys_low = assigned[nasgn].pci_phys_low = 0x3c0; 3109 regs[nreg].pci_size_low = assigned[nasgn].pci_size_low = 0x20; 3110 nreg++, nasgn++; 3111 (void) memlist_remove(io_avail, 0x3c0, 0x20); 3112 memlist_insert(io_used, 0x3c0, 0x20); 3113 pci_bus_res[bus].io_size += 0x20; 3114 3115 /* Video memory */ 3116 regs[nreg].pci_phys_hi = assigned[nasgn].pci_phys_hi = 3117 (PCI_RELOCAT_B | PCI_ALIAS_B | PCI_ADDR_MEM32 | devloc); 3118 regs[nreg].pci_phys_low = 3119 assigned[nasgn].pci_phys_low = 0xa0000; 3120 regs[nreg].pci_size_low = 3121 assigned[nasgn].pci_size_low = 0x20000; 3122 nreg++, nasgn++; 3123 /* remove from MEM and PMEM space */ 3124 (void) memlist_remove(mem_avail, 0xa0000, 0x20000); 3125 (void) memlist_remove(pmem_avail, 0xa0000, 0x20000); 3126 memlist_insert(mem_used, 0xa0000, 0x20000); 3127 pci_bus_res[bus].mem_size += 0x20000; 3128 } 3129 3130 /* add the hard-decode, aliased address spaces for 8514 */ 3131 if ((baseclass == PCI_CLASS_DISPLAY) && 3132 (subclass == PCI_DISPLAY_VGA) && 3133 (progclass & PCI_DISPLAY_IF_8514)) { 3134 3135 /* hard decode 0x2e8 */ 3136 regs[nreg].pci_phys_hi = assigned[nasgn].pci_phys_hi = 3137 (PCI_RELOCAT_B | PCI_ALIAS_B | PCI_ADDR_IO | devloc); 3138 regs[nreg].pci_phys_low = assigned[nasgn].pci_phys_low = 0x2e8; 3139 regs[nreg].pci_size_low = assigned[nasgn].pci_size_low = 0x1; 3140 nreg++, nasgn++; 3141 (void) memlist_remove(io_avail, 0x2e8, 0x1); 3142 memlist_insert(io_used, 0x2e8, 0x1); 3143 pci_bus_res[bus].io_size += 0x1; 3144 3145 /* hard decode 0x2ea-0x2ef */ 3146 regs[nreg].pci_phys_hi = assigned[nasgn].pci_phys_hi = 3147 (PCI_RELOCAT_B | PCI_ALIAS_B | PCI_ADDR_IO | devloc); 3148 regs[nreg].pci_phys_low = 
assigned[nasgn].pci_phys_low = 0x2ea; 3149 regs[nreg].pci_size_low = assigned[nasgn].pci_size_low = 0x6; 3150 nreg++, nasgn++; 3151 (void) memlist_remove(io_avail, 0x2ea, 0x6); 3152 memlist_insert(io_used, 0x2ea, 0x6); 3153 pci_bus_res[bus].io_size += 0x6; 3154 } 3155 3156 done: 3157 dump_memlists("add_reg_props end", bus); 3158 3159 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "reg", 3160 (int *)regs, nreg * sizeof (pci_regspec_t) / sizeof (int)); 3161 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, 3162 "assigned-addresses", 3163 (int *)assigned, nasgn * sizeof (pci_regspec_t) / sizeof (int)); 3164 3165 return (reprogram); 3166 } 3167 3168 static void 3169 add_ppb_props(dev_info_t *dip, uchar_t bus, uchar_t dev, uchar_t func, 3170 int pciex, ushort_t is_pci_bridge) 3171 { 3172 char *dev_type; 3173 int i; 3174 uint_t val; 3175 uint64_t io_range[2], mem_range[2], pmem_range[2]; 3176 uchar_t secbus = pci_getb(bus, dev, func, PCI_BCNF_SECBUS); 3177 uchar_t subbus = pci_getb(bus, dev, func, PCI_BCNF_SUBBUS); 3178 uchar_t progclass; 3179 3180 ASSERT(secbus <= subbus); 3181 3182 dump_memlists("add_ppb_props start bus", bus); 3183 dump_memlists("add_ppb_props start secbus", secbus); 3184 3185 /* 3186 * Check if it's a subtractive PPB. 3187 */ 3188 progclass = pci_getb(bus, dev, func, PCI_CONF_PROGCLASS); 3189 if (progclass == PCI_BRIDGE_PCI_IF_SUBDECODE) 3190 pci_bus_res[secbus].subtractive = B_TRUE; 3191 3192 /* 3193 * Some BIOSes lie about max pci busses, we allow for 3194 * such mistakes here 3195 */ 3196 if (subbus > pci_bios_maxbus) { 3197 pci_bios_maxbus = subbus; 3198 alloc_res_array(); 3199 } 3200 3201 ASSERT(pci_bus_res[secbus].dip == NULL); 3202 pci_bus_res[secbus].dip = dip; 3203 pci_bus_res[secbus].par_bus = bus; 3204 3205 dev_type = (pciex && !is_pci_bridge) ? "pciex" : "pci"; 3206 3207 /* setup bus number hierarchy */ 3208 pci_bus_res[secbus].sub_bus = subbus; 3209 /* 3210 * Keep track of the largest subordinate bus number (this is essential 3211 * for peer busses because there is no other way of determining its 3212 * subordinate bus number). 3213 */ 3214 if (subbus > pci_bus_res[bus].sub_bus) 3215 pci_bus_res[bus].sub_bus = subbus; 3216 /* 3217 * Loop through subordinate busses, initializing their parent bus 3218 * field to this bridge's parent. The subordinate busses' parent 3219 * fields may very well be further refined later, as child bridges 3220 * are enumerated. (The value is to note that the subordinate busses 3221 * are not peer busses by changing their par_bus fields to anything 3222 * other than -1.) 3223 */ 3224 for (i = secbus + 1; i <= subbus; i++) 3225 pci_bus_res[i].par_bus = bus; 3226 3227 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, 3228 "device_type", dev_type); 3229 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 3230 "#address-cells", 3); 3231 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 3232 "#size-cells", 2); 3233 3234 /* 3235 * Collect bridge window specifications, and use them to populate 3236 * the "avail" resources for the bus. Not all of those resources will 3237 * end up being available; this is done top-down, and so the initial 3238 * collection of windows populates the 'ranges' property for the 3239 * bus node. Later, as children are found, resources are removed from 3240 * the 'avail' list, so that it becomes the freelist for 3241 * this point in the tree. ranges may be set again after bridge 3242 * reprogramming in fix_ppb_res(), in which case it's set from 3243 * used + avail. 
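	 *
	 * The window decodes below follow the PCI-PCI bridge specification:
	 * the I/O base/limit registers hold address bits 15:12 (4KB
	 * granularity, hence the 0xfff ORed into the limit), while the
	 * memory and prefetchable-memory registers hold bits 31:20 (1MB
	 * granularity, hence the 0xfffff), with optional upper halves for
	 * 32-bit I/O and 64-bit prefetch decoding.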
3244 * 3245 * According to PPB spec, the base register should be programmed 3246 * with a value bigger than the limit register when there are 3247 * no resources available. This applies to io, memory, and 3248 * prefetchable memory. 3249 */ 3250 3251 /* 3252 * io range 3253 * We determine i/o windows that are left unconfigured by BIOS 3254 * through its i/o enable bit as Microsoft recommends OEMs to do. 3255 * If it is unset, we disable i/o and mark it for reconfiguration in 3256 * later passes by setting the base > limit 3257 */ 3258 val = (uint_t)pci_getw(bus, dev, func, PCI_CONF_COMM); 3259 if (val & PCI_COMM_IO) { 3260 val = (uint_t)pci_getb(bus, dev, func, PCI_BCNF_IO_LIMIT_LOW); 3261 io_range[1] = ((val & PCI_BCNF_IO_MASK) << PCI_BCNF_IO_SHIFT) | 3262 0xfff; 3263 val = (uint_t)pci_getb(bus, dev, func, PCI_BCNF_IO_BASE_LOW); 3264 io_range[0] = ((val & PCI_BCNF_IO_MASK) << PCI_BCNF_IO_SHIFT); 3265 if ((val & PCI_BCNF_ADDR_MASK) == PCI_BCNF_IO_32BIT) { 3266 uint16_t io_base_hi, io_limit_hi; 3267 io_base_hi = pci_getw(bus, dev, func, 3268 PCI_BCNF_IO_BASE_HI); 3269 io_limit_hi = pci_getw(bus, dev, func, 3270 PCI_BCNF_IO_LIMIT_HI); 3271 3272 io_range[0] |= (uint32_t)io_base_hi << 16; 3273 io_range[1] |= (uint32_t)io_limit_hi << 16; 3274 } 3275 } else { 3276 io_range[0] = 0x9fff; 3277 io_range[1] = 0x1000; 3278 pci_putb(bus, dev, func, PCI_BCNF_IO_BASE_LOW, 3279 (uint8_t)((io_range[0] >> 8) & 0xf0)); 3280 pci_putb(bus, dev, func, PCI_BCNF_IO_LIMIT_LOW, 3281 (uint8_t)((io_range[1] >> 8) & 0xf0)); 3282 pci_putw(bus, dev, func, PCI_BCNF_IO_BASE_HI, 0); 3283 pci_putw(bus, dev, func, PCI_BCNF_IO_LIMIT_HI, 0); 3284 } 3285 3286 if (io_range[0] != 0 && io_range[0] < io_range[1]) { 3287 memlist_insert(&pci_bus_res[secbus].io_avail, 3288 io_range[0], (io_range[1] - io_range[0] + 1)); 3289 memlist_insert(&pci_bus_res[bus].io_used, 3290 io_range[0], (io_range[1] - io_range[0] + 1)); 3291 if (pci_bus_res[bus].io_avail != NULL) { 3292 (void) memlist_remove(&pci_bus_res[bus].io_avail, 3293 io_range[0], (io_range[1] - io_range[0] + 1)); 3294 } 3295 dcmn_err(CE_NOTE, "bus %x io-range: 0x%" PRIx64 "-%" PRIx64, 3296 secbus, io_range[0], io_range[1]); 3297 } 3298 3299 /* mem range */ 3300 val = (uint_t)pci_getw(bus, dev, func, PCI_BCNF_MEM_BASE); 3301 mem_range[0] = ((val & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT); 3302 val = (uint_t)pci_getw(bus, dev, func, PCI_BCNF_MEM_LIMIT); 3303 mem_range[1] = ((val & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT) | 3304 0xfffff; 3305 if (mem_range[0] != 0 && mem_range[0] < mem_range[1]) { 3306 memlist_insert(&pci_bus_res[secbus].mem_avail, 3307 mem_range[0], mem_range[1] - mem_range[0] + 1); 3308 memlist_insert(&pci_bus_res[bus].mem_used, 3309 mem_range[0], mem_range[1] - mem_range[0] + 1); 3310 /* remove from parent resource list */ 3311 (void) memlist_remove(&pci_bus_res[bus].mem_avail, 3312 mem_range[0], mem_range[1] - mem_range[0] + 1); 3313 (void) memlist_remove(&pci_bus_res[bus].pmem_avail, 3314 mem_range[0], mem_range[1] - mem_range[0] + 1); 3315 dcmn_err(CE_NOTE, "bus %x mem-range: 0x%" PRIx64 "-%" PRIx64, 3316 secbus, mem_range[0], mem_range[1]); 3317 } 3318 3319 /* prefetchable memory range */ 3320 val = (uint_t)pci_getw(bus, dev, func, PCI_BCNF_PF_LIMIT_LOW); 3321 pmem_range[1] = ((val & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT) | 3322 0xfffff; 3323 val = (uint_t)pci_getw(bus, dev, func, PCI_BCNF_PF_BASE_LOW); 3324 pmem_range[0] = ((val & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT); 3325 if ((val & PCI_BCNF_ADDR_MASK) == PCI_BCNF_PF_MEM_64BIT) { 3326 uint32_t 
pf_addr_hi, pf_limit_hi; 3327 pf_addr_hi = pci_getl(bus, dev, func, PCI_BCNF_PF_BASE_HIGH); 3328 pf_limit_hi = pci_getl(bus, dev, func, PCI_BCNF_PF_LIMIT_HIGH); 3329 pmem_range[0] |= (uint64_t)pf_addr_hi << 32; 3330 pmem_range[1] |= (uint64_t)pf_limit_hi << 32; 3331 } 3332 if (pmem_range[0] != 0 && pmem_range[0] < pmem_range[1]) { 3333 memlist_insert(&pci_bus_res[secbus].pmem_avail, 3334 pmem_range[0], pmem_range[1] - pmem_range[0] + 1); 3335 memlist_insert(&pci_bus_res[bus].pmem_used, 3336 pmem_range[0], pmem_range[1] - pmem_range[0] + 1); 3337 /* remove from parent resource list */ 3338 (void) memlist_remove(&pci_bus_res[bus].pmem_avail, 3339 pmem_range[0], pmem_range[1] - pmem_range[0] + 1); 3340 (void) memlist_remove(&pci_bus_res[bus].mem_avail, 3341 pmem_range[0], pmem_range[1] - pmem_range[0] + 1); 3342 dcmn_err(CE_NOTE, "bus %x pmem-range: 0x%" PRIx64 "-%" PRIx64, 3343 secbus, pmem_range[0], pmem_range[1]); 3344 } 3345 3346 /* 3347 * Add VGA legacy resources to the bridge's pci_bus_res if it 3348 * has VGA_ENABLE set. Note that we put them in 'avail', 3349 * because that's used to populate the ranges prop; they'll be 3350 * removed from there by the VGA device once it's found. Also, 3351 * remove them from the parent's available list and note them as 3352 * used in the parent. 3353 */ 3354 3355 if (pci_getw(bus, dev, func, PCI_BCNF_BCNTRL) & 3356 PCI_BCNF_BCNTRL_VGA_ENABLE) { 3357 3358 memlist_insert(&pci_bus_res[secbus].io_avail, 0x3b0, 0xc); 3359 3360 memlist_insert(&pci_bus_res[bus].io_used, 0x3b0, 0xc); 3361 if (pci_bus_res[bus].io_avail != NULL) { 3362 (void) memlist_remove(&pci_bus_res[bus].io_avail, 3363 0x3b0, 0xc); 3364 } 3365 3366 memlist_insert(&pci_bus_res[secbus].io_avail, 0x3c0, 0x20); 3367 3368 memlist_insert(&pci_bus_res[bus].io_used, 0x3c0, 0x20); 3369 if (pci_bus_res[bus].io_avail != NULL) { 3370 (void) memlist_remove(&pci_bus_res[bus].io_avail, 3371 0x3c0, 0x20); 3372 } 3373 3374 memlist_insert(&pci_bus_res[secbus].mem_avail, 0xa0000, 3375 0x20000); 3376 3377 memlist_insert(&pci_bus_res[bus].mem_used, 0xa0000, 0x20000); 3378 if (pci_bus_res[bus].mem_avail != NULL) { 3379 (void) memlist_remove(&pci_bus_res[bus].mem_avail, 3380 0xa0000, 0x20000); 3381 } 3382 } 3383 add_bus_range_prop(secbus); 3384 add_ranges_prop(secbus, 1); 3385 3386 dump_memlists("add_ppb_props end bus", bus); 3387 dump_memlists("add_ppb_props end secbus", secbus); 3388 } 3389 3390 extern const struct pci_class_strings_s class_pci[]; 3391 extern int class_pci_items; 3392 3393 static void 3394 add_model_prop(dev_info_t *dip, uint_t classcode) 3395 { 3396 const char *desc; 3397 int i; 3398 uchar_t baseclass = classcode >> 16; 3399 uchar_t subclass = (classcode >> 8) & 0xff; 3400 uchar_t progclass = classcode & 0xff; 3401 3402 if ((baseclass == PCI_CLASS_MASS) && (subclass == PCI_MASS_IDE)) { 3403 desc = "IDE controller"; 3404 } else { 3405 for (desc = 0, i = 0; i < class_pci_items; i++) { 3406 if ((baseclass == class_pci[i].base_class) && 3407 (subclass == class_pci[i].sub_class) && 3408 (progclass == class_pci[i].prog_class)) { 3409 desc = class_pci[i].actual_desc; 3410 break; 3411 } 3412 } 3413 if (i == class_pci_items) 3414 desc = "Unknown class of pci/pnpbios device"; 3415 } 3416 3417 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, "model", 3418 (char *)desc); 3419 } 3420 3421 static void 3422 add_bus_range_prop(int bus) 3423 { 3424 int bus_range[2]; 3425 3426 if (pci_bus_res[bus].dip == NULL) 3427 return; 3428 bus_range[0] = bus; 3429 bus_range[1] = pci_bus_res[bus].sub_bus; 3430 (void) 
ndi_prop_update_int_array(DDI_DEV_T_NONE, pci_bus_res[bus].dip,
	    "bus-range", (int *)bus_range, 2);
}

/*
 * Add the slot-names property for any named PCI hot-plug slots.
 */
static void
add_bus_slot_names_prop(int bus)
{
	char slotprop[256];
	int len;
	extern int pci_irq_nroutes;
	char *slotcap_name;

	/*
	 * If there is no IRQ routing table, then go with the slot-names as
	 * set up in pciex_slot_names_prop() from the slot capability
	 * register (if any).
	 */
	if (pci_irq_nroutes == 0)
		return;

	/*
	 * Otherwise delete the slot-names we already have and use the IRQ
	 * routing table values as returned by pci_slot_names_prop() instead,
	 * but keep any property of value "pcie0" as that can't be represented
	 * in the IRQ routing table.
	 */
	if (pci_bus_res[bus].dip != NULL) {
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pci_bus_res[bus].dip,
		    DDI_PROP_DONTPASS, "slot-names", &slotcap_name) !=
		    DDI_SUCCESS || strcmp(slotcap_name, "pcie0") != 0)
			(void) ndi_prop_remove(DDI_DEV_T_NONE,
			    pci_bus_res[bus].dip, "slot-names");
	}

	len = pci_slot_names_prop(bus, slotprop, sizeof (slotprop));
	if (len > 0) {
		/*
		 * Only create a peer bus node if this bus may be a peer bus.
		 * It may be a peer bus if the dip is NULL and if par_bus is
		 * -1 (par_bus is -1 if this bus was not found to be
		 * subordinate to any PCI-PCI bridge).
		 * If it's not a peer bus, then the ACPI BBN-handling code
		 * will remove it later.
		 */
		if (pci_bus_res[bus].par_bus == (uchar_t)-1 &&
		    pci_bus_res[bus].dip == NULL) {

			create_root_bus_dip(bus);
		}
		if (pci_bus_res[bus].dip != NULL) {
			ASSERT((len % sizeof (int)) == 0);
			(void) ndi_prop_update_int_array(DDI_DEV_T_NONE,
			    pci_bus_res[bus].dip, "slot-names",
			    (int *)slotprop, len / sizeof (int));
		} else {
			cmn_err(CE_NOTE, "!BIOS BUG: Invalid bus number in PCI "
			    "IRQ routing table; Not adding slot-names "
			    "property for incorrect bus %d", bus);
		}
	}
}

/*
 * Handle both PCI root and PCI-PCI bridge range properties;
 * a non-zero 'ppb' argument selects PCI-PCI bridges versus root.
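 *
 * Each ranges entry maps a child (secondary-side) range to its parent
 * address; the mapping built here is 1:1, so the parent fields are simply
 * copied from the memlist entry. A 32-bit memory entry whose address or
 * size reaches UINT32_MAX is promoted to the 64-bit address type.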
3497 */ 3498 static void 3499 memlist_to_ranges(void **rp, struct memlist *entry, uint_t type, int ppb) 3500 { 3501 ppb_ranges_t *ppb_rp = *rp; 3502 pci_ranges_t *pci_rp = *rp; 3503 3504 while (entry != NULL) { 3505 uint_t atype = type; 3506 if ((type & PCI_REG_ADDR_M) == PCI_ADDR_MEM32 && 3507 (entry->ml_address >= UINT32_MAX || 3508 entry->ml_size >= UINT32_MAX)) { 3509 atype &= ~PCI_ADDR_MEM32; 3510 atype |= PCI_ADDR_MEM64; 3511 } 3512 if (ppb) { 3513 ppb_rp->child_high = ppb_rp->parent_high = atype; 3514 ppb_rp->child_mid = ppb_rp->parent_mid = 3515 (uint32_t)(entry->ml_address >> 32); 3516 ppb_rp->child_low = ppb_rp->parent_low = 3517 (uint32_t)entry->ml_address; 3518 ppb_rp->size_high = 3519 (uint32_t)(entry->ml_size >> 32); 3520 ppb_rp->size_low = (uint32_t)entry->ml_size; 3521 *rp = ++ppb_rp; 3522 } else { 3523 pci_rp->child_high = atype; 3524 pci_rp->child_mid = pci_rp->parent_high = 3525 (uint32_t)(entry->ml_address >> 32); 3526 pci_rp->child_low = pci_rp->parent_low = 3527 (uint32_t)entry->ml_address; 3528 pci_rp->size_high = 3529 (uint32_t)(entry->ml_size >> 32); 3530 pci_rp->size_low = (uint32_t)entry->ml_size; 3531 *rp = ++pci_rp; 3532 } 3533 entry = entry->ml_next; 3534 } 3535 } 3536 3537 static void 3538 add_ranges_prop(int bus, int ppb) 3539 { 3540 int total, alloc_size; 3541 void *rp, *next_rp; 3542 struct memlist *iolist, *memlist, *pmemlist; 3543 3544 /* no devinfo node - unused bus, return */ 3545 if (pci_bus_res[bus].dip == NULL) 3546 return; 3547 3548 dump_memlists("add_ranges_prop", bus); 3549 3550 iolist = memlist = pmemlist = (struct memlist *)NULL; 3551 3552 memlist_merge(&pci_bus_res[bus].io_avail, &iolist); 3553 memlist_merge(&pci_bus_res[bus].io_used, &iolist); 3554 memlist_merge(&pci_bus_res[bus].mem_avail, &memlist); 3555 memlist_merge(&pci_bus_res[bus].mem_used, &memlist); 3556 memlist_merge(&pci_bus_res[bus].pmem_avail, &pmemlist); 3557 memlist_merge(&pci_bus_res[bus].pmem_used, &pmemlist); 3558 3559 total = memlist_count(iolist); 3560 total += memlist_count(memlist); 3561 total += memlist_count(pmemlist); 3562 3563 /* no property is created if no ranges are present */ 3564 if (total == 0) 3565 return; 3566 3567 alloc_size = total * 3568 (ppb ? 
sizeof (ppb_ranges_t) : sizeof (pci_ranges_t)); 3569 3570 next_rp = rp = kmem_alloc(alloc_size, KM_SLEEP); 3571 3572 memlist_to_ranges(&next_rp, iolist, PCI_ADDR_IO | PCI_REG_REL_M, ppb); 3573 memlist_to_ranges(&next_rp, memlist, 3574 PCI_ADDR_MEM32 | PCI_REG_REL_M, ppb); 3575 memlist_to_ranges(&next_rp, pmemlist, 3576 PCI_ADDR_MEM32 | PCI_REG_REL_M | PCI_REG_PF_M, ppb); 3577 3578 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, pci_bus_res[bus].dip, 3579 "ranges", (int *)rp, alloc_size / sizeof (int)); 3580 3581 kmem_free(rp, alloc_size); 3582 memlist_free_all(&iolist); 3583 memlist_free_all(&memlist); 3584 memlist_free_all(&pmemlist); 3585 } 3586 3587 static void 3588 memlist_remove_list(struct memlist **list, struct memlist *remove_list) 3589 { 3590 while (list && *list && remove_list) { 3591 (void) memlist_remove(list, remove_list->ml_address, 3592 remove_list->ml_size); 3593 remove_list = remove_list->ml_next; 3594 } 3595 } 3596 3597 static int 3598 memlist_to_spec(struct pci_phys_spec *sp, struct memlist *list, int type) 3599 { 3600 int i = 0; 3601 3602 while (list) { 3603 /* assume 32-bit addresses */ 3604 sp->pci_phys_hi = type; 3605 sp->pci_phys_mid = 0; 3606 sp->pci_phys_low = (uint32_t)list->ml_address; 3607 sp->pci_size_hi = 0; 3608 sp->pci_size_low = (uint32_t)list->ml_size; 3609 3610 list = list->ml_next; 3611 sp++, i++; 3612 } 3613 return (i); 3614 } 3615 3616 static void 3617 add_bus_available_prop(int bus) 3618 { 3619 int i, count; 3620 struct pci_phys_spec *sp; 3621 3622 /* no devinfo node - unused bus, return */ 3623 if (pci_bus_res[bus].dip == NULL) 3624 return; 3625 3626 count = memlist_count(pci_bus_res[bus].io_avail) + 3627 memlist_count(pci_bus_res[bus].mem_avail) + 3628 memlist_count(pci_bus_res[bus].pmem_avail); 3629 3630 if (count == 0) /* nothing available */ 3631 return; 3632 3633 sp = kmem_alloc(count * sizeof (*sp), KM_SLEEP); 3634 i = memlist_to_spec(&sp[0], pci_bus_res[bus].io_avail, 3635 PCI_ADDR_IO | PCI_REG_REL_M); 3636 i += memlist_to_spec(&sp[i], pci_bus_res[bus].mem_avail, 3637 PCI_ADDR_MEM32 | PCI_REG_REL_M); 3638 i += memlist_to_spec(&sp[i], pci_bus_res[bus].pmem_avail, 3639 PCI_ADDR_MEM32 | PCI_REG_REL_M | PCI_REG_PF_M); 3640 ASSERT(i == count); 3641 3642 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, pci_bus_res[bus].dip, 3643 "available", (int *)sp, 3644 i * sizeof (struct pci_phys_spec) / sizeof (int)); 3645 kmem_free(sp, count * sizeof (*sp)); 3646 } 3647 3648 static void 3649 alloc_res_array(void) 3650 { 3651 static int array_size = 0; 3652 int old_size; 3653 void *old_res; 3654 3655 if (array_size > pci_bios_maxbus + 1) 3656 return; /* array is big enough */ 3657 3658 old_size = array_size; 3659 old_res = pci_bus_res; 3660 3661 if (array_size == 0) 3662 array_size = 16; /* start with a reasonable number */ 3663 3664 while (array_size <= pci_bios_maxbus + 1) 3665 array_size <<= 1; 3666 pci_bus_res = (struct pci_bus_resource *)kmem_zalloc( 3667 array_size * sizeof (struct pci_bus_resource), KM_SLEEP); 3668 3669 if (old_res) { /* copy content and free old array */ 3670 bcopy(old_res, pci_bus_res, 3671 old_size * sizeof (struct pci_bus_resource)); 3672 kmem_free(old_res, old_size * sizeof (struct pci_bus_resource)); 3673 } 3674 } 3675 3676 static void 3677 create_ioapic_node(int bus, int dev, int fn, ushort_t vendorid, 3678 ushort_t deviceid) 3679 { 3680 static dev_info_t *ioapicsnode = NULL; 3681 static int numioapics = 0; 3682 dev_info_t *ioapic_node; 3683 uint64_t physaddr; 3684 uint32_t lobase, hibase = 0; 3685 3686 /* BAR 0 contains the 
IOAPIC's memory-mapped I/O address */ 3687 lobase = (*pci_getl_func)(bus, dev, fn, PCI_CONF_BASE0); 3688 3689 /* We (and the rest of the world) only support memory-mapped IOAPICs */ 3690 if ((lobase & PCI_BASE_SPACE_M) != PCI_BASE_SPACE_MEM) 3691 return; 3692 3693 if ((lobase & PCI_BASE_TYPE_M) == PCI_BASE_TYPE_ALL) 3694 hibase = (*pci_getl_func)(bus, dev, fn, PCI_CONF_BASE0 + 4); 3695 3696 lobase &= PCI_BASE_M_ADDR_M; 3697 3698 physaddr = (((uint64_t)hibase) << 32) | lobase; 3699 3700 /* 3701 * Create a nexus node for all IOAPICs under the root node. 3702 */ 3703 if (ioapicsnode == NULL) { 3704 if (ndi_devi_alloc(ddi_root_node(), IOAPICS_NODE_NAME, 3705 (pnode_t)DEVI_SID_NODEID, &ioapicsnode) != NDI_SUCCESS) { 3706 return; 3707 } 3708 (void) ndi_devi_online(ioapicsnode, 0); 3709 } 3710 3711 /* 3712 * Create a child node for this IOAPIC 3713 */ 3714 ioapic_node = ddi_add_child(ioapicsnode, IOAPICS_CHILD_NAME, 3715 DEVI_SID_NODEID, numioapics++); 3716 if (ioapic_node == NULL) { 3717 return; 3718 } 3719 3720 /* Vendor and Device ID */ 3721 (void) ndi_prop_update_int(DDI_DEV_T_NONE, ioapic_node, 3722 IOAPICS_PROP_VENID, vendorid); 3723 (void) ndi_prop_update_int(DDI_DEV_T_NONE, ioapic_node, 3724 IOAPICS_PROP_DEVID, deviceid); 3725 3726 /* device_type */ 3727 (void) ndi_prop_update_string(DDI_DEV_T_NONE, ioapic_node, 3728 "device_type", IOAPICS_DEV_TYPE); 3729 3730 /* reg */ 3731 (void) ndi_prop_update_int64(DDI_DEV_T_NONE, ioapic_node, 3732 "reg", physaddr); 3733 } 3734 3735 /* 3736 * NOTE: For PCIe slots, the name is generated from the slot number 3737 * information obtained from Slot Capabilities register. 3738 * For non-PCIe slots, it is generated based on the slot number 3739 * information in the PCI IRQ table. 3740 */ 3741 static void 3742 pciex_slot_names_prop(dev_info_t *dip, ushort_t slot_num) 3743 { 3744 char slotprop[256]; 3745 int len; 3746 3747 bzero(slotprop, sizeof (slotprop)); 3748 3749 /* set mask to 1 as there is only one slot (i.e dev 0) */ 3750 *(uint32_t *)slotprop = 1; 3751 len = 4; 3752 (void) snprintf(slotprop + len, sizeof (slotprop) - len, "pcie%d", 3753 slot_num); 3754 len += strlen(slotprop + len) + 1; 3755 len += len % 4; 3756 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "slot-names", 3757 (int *)slotprop, len / sizeof (int)); 3758 } 3759 3760 /* 3761 * Enable reporting of AER capability next pointer. 3762 * This needs to be done only for CK8-04 devices 3763 * by setting NV_XVR_VEND_CYA1 (offset 0xf40) bit 13 3764 * NOTE: BIOS is disabling this, it needs to be enabled temporarily 3765 * 3766 * This function is adapted from npe_ck804_fix_aer_ptr(), and is 3767 * called from pci_boot.c. 3768 */ 3769 static void 3770 ck804_fix_aer_ptr(dev_info_t *dip, pcie_req_id_t bdf) 3771 { 3772 dev_info_t *rcdip; 3773 ushort_t cya1; 3774 3775 rcdip = pcie_get_rc_dip(dip); 3776 ASSERT(rcdip != NULL); 3777 3778 if ((pci_cfgacc_get16(rcdip, bdf, PCI_CONF_VENID) == 3779 NVIDIA_VENDOR_ID) && 3780 (pci_cfgacc_get16(rcdip, bdf, PCI_CONF_DEVID) == 3781 NVIDIA_CK804_DEVICE_ID) && 3782 (pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID) >= 3783 NVIDIA_CK804_AER_VALID_REVID)) { 3784 cya1 = pci_cfgacc_get16(rcdip, bdf, NVIDIA_CK804_VEND_CYA1_OFF); 3785 if (!(cya1 & ~NVIDIA_CK804_VEND_CYA1_ERPT_MASK)) 3786 (void) pci_cfgacc_put16(rcdip, bdf, 3787 NVIDIA_CK804_VEND_CYA1_OFF, 3788 cya1 | NVIDIA_CK804_VEND_CYA1_ERPT_VAL); 3789 } 3790 } 3791