/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2019 Western Digital Corporation
 * Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
 * Copyright 2023 Oxide Computer Company
 */

/*
 * PCI bus enumeration and device programming are done in several passes. The
 * following is a high level overview of this process.
 *
 * pci_enumerate(reprogram=0)
 *			The main entry point to PCI bus enumeration is
 *			pci_enumerate(). This function is invoked
 *			twice, once to set up the PCI portion of the
 *			device tree, and then a second time to
 *			reprogram any devices which were not set up by
 *			the system firmware. On this first call, the
 *			reprogram parameter is set to 0.
 *   add_pci_fixes()
 *     enumerate_bus_devs(CONFIG_FIX)
 *       <foreach bus>
 *         process_devfunc(CONFIG_FIX)
 *			Some devices need a specific action taken in
 *			order for subsequent enumeration to be
 *			successful. add_pci_fixes() retrieves the
 *			vendor and device IDs for each item on the bus
 *			and applies fixes as required. It also creates
 *			a list which is used by undo_pci_fixes() to
 *			reverse the process later.
 *   pci_setup_tree()
 *     enumerate_bus_devs(CONFIG_INFO)
 *       <foreach bus>
 *         process_devfunc(CONFIG_INFO)
 *           <set up most device properties>
 *			The next stage is to enumerate the bus and set
 *			up the bulk of the properties for each device.
 *			This is where the generic properties such as
 *			'device-id' are created.
 *           <if PPB device>
 *             add_ppb_props()
 *			For a PCI-to-PCI bridge (ppb) device, any
 *			memory ranges for IO, memory or pre-fetchable
 *			memory that have been programmed by the system
 *			firmware (BIOS/EFI) are retrieved and stored in
 *			bus-specific lists (pci_bus_res[bus].io_avail,
 *			mem_avail and pmem_avail). The contents of
 *			these lists are used to set the initial 'ranges'
 *			property on the ppb device. Later, as children
 *			are found for this bridge, resources will be
 *			removed from these avail lists as necessary.
 *
 *			If the IO or memory ranges have not been
 *			programmed by this point, indicated by the
 *			appropriate bit in the control register being
 *			unset or, in the memory case only, by the base
 *			address being 0, then the range is explicitly
 *			disabled here by setting base > limit for
 *			the resource. Since a zero address is
 *			technically valid for the IO case, the base
 *			address is not checked for IO.
 *
 *			This is an initial pass so the ppb devices can
 *			still be reprogrammed later in fix_ppb_res().
 *           <else>
 *             <add to list of non-PPB devices for the bus>
 *			Any non-PPB device on the bus is recorded in a
 *			bus-specific list, to be set up (and possibly
 *			reprogrammed) later.
 *           add_reg_props(CONFIG_INFO)
 *			The final step in this phase is to add the
 *			initial 'reg' and 'assigned-addresses'
 *			properties to all devices. At the same time,
 *			any IO or memory ranges which have been
 *			assigned to the bus are moved from the avail
 *			list to the corresponding used one. If no
 *			resources have been assigned to a device at
 *			this stage, then it is flagged for subsequent
 *			reprogramming.
 *   undo_pci_fixes()
 *			Any fixes which were applied in add_pci_fixes()
 *			are now undone before returning, using the
 *			undo list which was created earlier.
 *
 * pci_enumerate(reprogram=1)
 *			The second bus enumeration pass is to take care
 *			of any devices that were not set up by the
 *			system firmware. These devices were flagged
 *			during the first pass. This pass is bracketed
 *			by the same pci fix application and removal as
 *			the first.
 *   add_pci_fixes()
 *			As for first pass.
 *   pci_reprogram()
 *     pci_prd_root_complex_iter()
 *			The platform is asked to tell us of all root
 *			complexes that it knows about (e.g. using the
 *			_BBN method via ACPI). This will include buses
 *			that we've already discovered and those that we
 *			potentially haven't. Anything that has not been
 *			previously discovered (or inferred to exist) is
 *			then added to the system.
 *     <foreach ROOT bus>
 *       populate_bus_res()
 *			Find resources associated with this root bus
 *			based on what the platform provides through the
 *			pci platform interfaces defined in
 *			sys/plat/pci_prd.h. On i86pc this is driven by
 *			ACPI and BIOS tables.
 *     <foreach bus>
 *       fix_ppb_res()
 *			Reprogram pci(e) bridges which have not already
 *			had resources assigned, or which are under a
 *			bus that has been flagged for reprogramming.
 *			If the parent bus has not been flagged, then IO
 *			space is reprogrammed only if there are no
 *			assigned IO resources. Memory space is
 *			reprogrammed only if there is both no assigned
 *			ordinary memory AND no assigned pre-fetchable
 *			memory. However, if memory reprogramming is
 *			necessary then both ordinary and prefetch are
 *			done together so that both memory ranges end up
 *			in the avail lists for add_reg_props() to find
 *			later.
 *       enumerate_bus_devs(CONFIG_NEW)
 *         <foreach non-PPB device on the bus>
 *           add_reg_props(CONFIG_NEW)
 *			Using the list of non-PPB devices on the bus
 *			which was assembled during the first pass, add
 *			or update the 'reg' and 'assigned-addresses'
 *			properties for these devices. For devices which
 *			have been flagged for reprogramming or have no
 *			assigned resources, this is where resources are
 *			finally assigned and programmed into the
 *			device. This can result in these properties
 *			changing from their previous values.
 *     <foreach bus>
 *       add_bus_available_prop()
 *			Finally, the 'available' property is set on
 *			each device, representing that device's final
 *			unallocated (available) IO and memory ranges.
 *   undo_pci_fixes()
 *			As for first pass.
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/sunndi.h>
#include <sys/pci.h>
#include <sys/pci_impl.h>
#include <sys/pcie_impl.h>
#include <sys/pci_props.h>
#include <sys/memlist.h>
#include <sys/bootconf.h>
#include <sys/pci_cfgacc.h>
#include <sys/pci_cfgspace.h>
#include <sys/pci_cfgspace_impl.h>
#include <sys/psw.h>
#include "../../../../common/pci/pci_strings.h"
#include <sys/apic.h>
#include <io/pciex/pcie_nvidia.h>
#include <sys/hotplug/pci/pciehpc_acpi.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/iommulib.h>
#include <sys/devcache.h>
#include <sys/pci_cfgacc_x86.h>
#include <sys/plat/pci_prd.h>

#define	pci_getb	(*pci_getb_func)
#define	pci_getw	(*pci_getw_func)
#define	pci_getl	(*pci_getl_func)
#define	pci_putb	(*pci_putb_func)
#define	pci_putw	(*pci_putw_func)
#define	pci_putl	(*pci_putl_func)
#define	dcmn_err	if (pci_boot_debug != 0) cmn_err
#define	bus_debug(bus)	(pci_boot_debug != 0 && pci_debug_bus_start != -1 && \
	    pci_debug_bus_end != -1 && (bus) >= pci_debug_bus_start && \
	    (bus) <= pci_debug_bus_end)
#define	dump_memlists(tag, bus) \
	if (bus_debug((bus))) dump_memlists_impl((tag), (bus))
#define	MSGHDR		"!%s[%02x/%02x/%x]: "

#define	CONFIG_INFO	0
#define	CONFIG_UPDATE	1
#define	CONFIG_NEW	2
#define	CONFIG_FIX	3
#define	COMPAT_BUFSIZE	512

#define	PPB_IO_ALIGNMENT	0x1000		/* 4K aligned */
#define	PPB_MEM_ALIGNMENT	0x100000	/* 1M aligned */
/* round down to nearest power of two */
#define	P2LE(align) \
	{ \
		uint_t i = 0; \
		while (align >>= 1) \
			i++; \
		align = 1 << i; \
	} \

/*
 * Determining the size of a PCI BAR is done by writing all 1s to the base
 * register and then reading the value back. The retrieved value will either
 * be zero, indicating that the BAR is unimplemented, or a mask in which
 * the significant bits for the required memory space are 0.
 * For example, a 32-bit BAR could return 0xfff00000 which equates to a
 * length of 0x100000 (1MiB). The following macro does that conversion.
 * The input value must have already had the lower encoding bits cleared.
 */
#define	BARMASKTOLEN(value) ((((value) ^ ((value) - 1)) + 1) >> 1)

typedef enum {
	RES_IO,
	RES_MEM,
	RES_PMEM
} mem_res_t;

/*
 * In order to disable an IO or memory range on a bridge, the range's base must
 * be set to a value greater than its limit. The following values are used for
 * this purpose.
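 *
 * For example, taking the I/O pair below: after the bridge applies its
 * 4KiB granularity the decoded base (0x9000) is still above the decoded
 * limit (0x1fff), so no address falls within the window. The memory pair
 * disables its range in the same way at 1MiB granularity. (This is an
 * illustrative reading of the values; any base > limit pair would do.)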
 */
#define	PPB_DISABLE_IORANGE_BASE	0x9fff
#define	PPB_DISABLE_IORANGE_LIMIT	0x1000
#define	PPB_DISABLE_MEMRANGE_BASE	0x9ff00000
#define	PPB_DISABLE_MEMRANGE_LIMIT	0x100fffff

/* See AMD-8111 Datasheet Rev 3.03, Page 149: */
#define	LPC_IO_CONTROL_REG_1	0x40
#define	AMD8111_ENABLENMI	(uint8_t)0x80
#define	DEVID_AMD8111_LPC	0x7468

struct pci_fixundo {
	uint8_t			bus;
	uint8_t			dev;
	uint8_t			fn;
	void			(*undofn)(uint8_t, uint8_t, uint8_t);
	struct pci_fixundo	*next;
};

struct pci_devfunc {
	struct pci_devfunc *next;
	dev_info_t *dip;
	uchar_t dev;
	uchar_t func;
	boolean_t reprogram;	/* this device needs to be reprogrammed */
};

extern int apic_nvidia_io_max;
static uchar_t max_dev_pci = 32;	/* PCI standard */
int pci_boot_maxbus;

int pci_boot_debug = 0;
int pci_debug_bus_start = -1;
int pci_debug_bus_end = -1;

static struct pci_fixundo *undolist = NULL;
static int num_root_bus = 0;	/* count of root buses */
extern void pci_cfgacc_add_workaround(uint16_t, uchar_t, uchar_t);
extern dev_info_t *pcie_get_rc_dip(dev_info_t *);

/*
 * Module prototypes
 */
static void enumerate_bus_devs(uchar_t bus, int config_op);
static void create_root_bus_dip(uchar_t bus);
static void process_devfunc(uchar_t, uchar_t, uchar_t, int);
static boolean_t add_reg_props(dev_info_t *, uchar_t, uchar_t, uchar_t, int,
    boolean_t);
static void add_ppb_props(dev_info_t *, uchar_t, uchar_t, uchar_t, boolean_t,
    boolean_t);
static void add_bus_range_prop(int);
static void add_ranges_prop(int, boolean_t);
static void add_bus_available_prop(int);
static int get_pci_cap(uchar_t bus, uchar_t dev, uchar_t func, uint8_t cap_id);
static void fix_ppb_res(uchar_t, boolean_t);
static void alloc_res_array(void);
static void create_ioapic_node(int bus, int dev, int fn, ushort_t vendorid,
    ushort_t deviceid);
static void populate_bus_res(uchar_t bus);
static void memlist_remove_list(struct memlist **list,
    struct memlist *remove_list);
static void ck804_fix_aer_ptr(dev_info_t *, pcie_req_id_t);

static int pci_unitaddr_cache_valid(void);
static int pci_bus_unitaddr(int);
static void pci_unitaddr_cache_create(void);

static int pci_cache_unpack_nvlist(nvf_handle_t, nvlist_t *, char *);
static int pci_cache_pack_nvlist(nvf_handle_t, nvlist_t **);
static void pci_cache_free_list(nvf_handle_t);

/* set non-zero to force PCI peer-bus renumbering */
int pci_bus_always_renumber = 0;

/*
 * used to register ISA resource usage which must not be made
 * "available" from other PCI nodes' resource maps
 */
static struct {
	struct memlist *io_used;
	struct memlist *mem_used;
} isa_res;

/*
 * PCI unit-address cache management
 */
static nvf_ops_t pci_unitaddr_cache_ops = {
	"/etc/devices/pci_unitaddr_persistent",	/* path to cache */
	pci_cache_unpack_nvlist,		/* read in nvlist form */
	pci_cache_pack_nvlist,			/* convert to nvlist form */
	pci_cache_free_list,			/* free data list */
	NULL					/* write complete callback */
};

typedef struct {
	list_node_t	pua_nodes;
	int		pua_index;
	int		pua_addr;
} pua_node_t;

nvf_handle_t	puafd_handle;
int		pua_cache_valid = 0;

dev_info_t *
pci_boot_bus_to_dip(uint32_t busno)
{
	ASSERT3U(busno, <=, pci_boot_maxbus);
	return (pci_bus_res[busno].dip);
}

static void
dump_memlists_impl(const char *tag, int bus)
{
	printf("Memlist dump at %s - bus %x\n", tag, bus);
	if (pci_bus_res[bus].io_used != NULL) {
		printf("  io_used ");
		memlist_dump(pci_bus_res[bus].io_used);
	}
	if (pci_bus_res[bus].io_avail != NULL) {
		printf("  io_avail ");
		memlist_dump(pci_bus_res[bus].io_avail);
	}
	if (pci_bus_res[bus].mem_used != NULL) {
		printf("  mem_used ");
		memlist_dump(pci_bus_res[bus].mem_used);
	}
	if (pci_bus_res[bus].mem_avail != NULL) {
		printf("  mem_avail ");
		memlist_dump(pci_bus_res[bus].mem_avail);
	}
	if (pci_bus_res[bus].pmem_used != NULL) {
		printf("  pmem_used ");
		memlist_dump(pci_bus_res[bus].pmem_used);
	}
	if (pci_bus_res[bus].pmem_avail != NULL) {
		printf("  pmem_avail ");
		memlist_dump(pci_bus_res[bus].pmem_avail);
	}
}

static boolean_t
pci_rc_scan_cb(uint32_t busno, void *arg)
{
	if (busno > pci_boot_maxbus) {
		dcmn_err(CE_NOTE, "platform root complex scan returned bus "
		    "with invalid bus id: 0x%x", busno);
		return (B_TRUE);
	}

	if (pci_bus_res[busno].par_bus == (uchar_t)-1 &&
	    pci_bus_res[busno].dip == NULL) {
		create_root_bus_dip((uchar_t)busno);
	}

	return (B_TRUE);
}

static void
pci_unitaddr_cache_init(void)
{

	puafd_handle = nvf_register_file(&pci_unitaddr_cache_ops);
	ASSERT(puafd_handle);

	list_create(nvf_list(puafd_handle), sizeof (pua_node_t),
	    offsetof(pua_node_t, pua_nodes));

	rw_enter(nvf_lock(puafd_handle), RW_WRITER);
	(void) nvf_read_file(puafd_handle);
	rw_exit(nvf_lock(puafd_handle));
}

/*
 * Format of /etc/devices/pci_unitaddr_persistent:
 *
 * The persistent record of unit-address assignments contains
 * a list of name/value pairs, where name is a string representation
 * of the "index value" of the PCI root-bus and the value is
 * the assigned unit-address.
 *
 * The "index value" is simply the zero-based index of the PCI
 * root-buses ordered by physical bus number; first PCI bus is 0,
 * second is 1, and so on.
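 *
 * As a purely illustrative example, a machine with three root buses
 * might record the pairs "0" = 0, "1" = 2 and "2" = 4 (stored under the
 * "table" nvlist written by pci_cache_pack_nvlist() below), meaning the
 * first root bus keeps unit-address 0, the second is assigned 2 and the
 * third 4.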
 */

static int
pci_cache_unpack_nvlist(nvf_handle_t hdl, nvlist_t *nvl, char *name)
{
	long index;
	int32_t value;
	nvpair_t *np;
	pua_node_t *node;

	np = NULL;
	while ((np = nvlist_next_nvpair(nvl, np)) != NULL) {
		/* name of nvpair is index value */
		if (ddi_strtol(nvpair_name(np), NULL, 10, &index) != 0)
			continue;

		if (nvpair_value_int32(np, &value) != 0)
			continue;

		node = kmem_zalloc(sizeof (pua_node_t), KM_SLEEP);
		node->pua_index = index;
		node->pua_addr = value;
		list_insert_tail(nvf_list(hdl), node);
	}

	pua_cache_valid = 1;
	return (DDI_SUCCESS);
}

static int
pci_cache_pack_nvlist(nvf_handle_t hdl, nvlist_t **ret_nvl)
{
	int rval;
	nvlist_t *nvl, *sub_nvl;
	list_t *listp;
	pua_node_t *pua;
	char buf[13];

	ASSERT(RW_WRITE_HELD(nvf_lock(hdl)));

	rval = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
	if (rval != DDI_SUCCESS) {
		nvf_error("%s: nvlist alloc error %d\n",
		    nvf_cache_name(hdl), rval);
		return (DDI_FAILURE);
	}

	sub_nvl = NULL;
	rval = nvlist_alloc(&sub_nvl, NV_UNIQUE_NAME, KM_SLEEP);
	if (rval != DDI_SUCCESS)
		goto error;

	listp = nvf_list(hdl);
	for (pua = list_head(listp); pua != NULL;
	    pua = list_next(listp, pua)) {
		(void) snprintf(buf, sizeof (buf), "%d", pua->pua_index);
		rval = nvlist_add_int32(sub_nvl, buf, pua->pua_addr);
		if (rval != DDI_SUCCESS)
			goto error;
	}

	rval = nvlist_add_nvlist(nvl, "table", sub_nvl);
	if (rval != DDI_SUCCESS)
		goto error;
	nvlist_free(sub_nvl);

	*ret_nvl = nvl;
	return (DDI_SUCCESS);

error:
	nvlist_free(sub_nvl);
	ASSERT(nvl);
	nvlist_free(nvl);
	*ret_nvl = NULL;
	return (DDI_FAILURE);
}

static void
pci_cache_free_list(nvf_handle_t hdl)
{
	list_t *listp;
	pua_node_t *pua;

	ASSERT(RW_WRITE_HELD(nvf_lock(hdl)));

	listp = nvf_list(hdl);
	for (pua = list_head(listp); pua != NULL;
	    pua = list_next(listp, pua)) {
		list_remove(listp, pua);
		kmem_free(pua, sizeof (pua_node_t));
	}
}


static int
pci_unitaddr_cache_valid(void)
{

	/* read only, no need for rw lock */
	return (pua_cache_valid);
}


static int
pci_bus_unitaddr(int index)
{
	pua_node_t *pua;
	list_t *listp;
	int addr;

	rw_enter(nvf_lock(puafd_handle), RW_READER);

	addr = -1;	/* default return if no match */
	listp = nvf_list(puafd_handle);
	for (pua = list_head(listp); pua != NULL;
	    pua = list_next(listp, pua)) {
		if (pua->pua_index == index) {
			addr = pua->pua_addr;
			break;
		}
	}

	rw_exit(nvf_lock(puafd_handle));
	return (addr);
}

static void
pci_unitaddr_cache_create(void)
{
	int i, index;
	pua_node_t *node;
	list_t *listp;

	rw_enter(nvf_lock(puafd_handle), RW_WRITER);

	index = 0;
	listp = nvf_list(puafd_handle);
	for (i = 0; i <= pci_boot_maxbus; i++) {
		/* skip non-root (peer) PCI busses */
		if ((pci_bus_res[i].par_bus != (uchar_t)-1) ||
		    pci_bus_res[i].dip == NULL)
			continue;
		node = kmem_zalloc(sizeof (pua_node_t), KM_SLEEP);
		node->pua_index = index++;
		node->pua_addr = pci_bus_res[i].root_addr;
		list_insert_tail(listp, node);
	}

	(void) nvf_mark_dirty(puafd_handle);
	rw_exit(nvf_lock(puafd_handle));
	nvf_wake_daemon();
}


/*
 * Enumerate all PCI devices
 */
void
pci_setup_tree(void)
{
	uint_t i, root_bus_addr = 0;

	alloc_res_array();
	for (i = 0; i <= pci_boot_maxbus; i++) {
		pci_bus_res[i].par_bus = (uchar_t)-1;
		pci_bus_res[i].root_addr = (uchar_t)-1;
		pci_bus_res[i].sub_bus = i;
	}

	pci_bus_res[0].root_addr = root_bus_addr++;
	create_root_bus_dip(0);
	enumerate_bus_devs(0, CONFIG_INFO);

	/*
	 * Now enumerate peer busses
	 *
	 * We loop till pci_boot_maxbus. On most systems, there is
	 * one more bus at the high end, which implements the ISA
	 * compatibility bus. We don't care about that.
	 *
	 * Note: In the old (bootconf) enumeration, the peer bus
	 *	address did not use the bus number, and there were
	 *	too many peer busses created. The root_bus_addr is
	 *	used to maintain the old peer bus address assignment.
	 *	However, we stop enumerating phantom peers with no
	 *	device below.
	 */
	for (i = 1; i <= pci_boot_maxbus; i++) {
		if (pci_bus_res[i].dip == NULL) {
			pci_bus_res[i].root_addr = root_bus_addr++;
		}
		enumerate_bus_devs(i, CONFIG_INFO);
	}
}

void
pci_register_isa_resources(int type, uint32_t base, uint32_t size)
{
	(void) memlist_insert(
	    (type == 1) ? &isa_res.io_used : &isa_res.mem_used,
	    base, size);
}

/*
 * Remove the resources which are already used by devices under a subtractive
 * bridge from the bus's resource lists, because they're not available, and
 * shouldn't be allocated to other buses. This is necessary because tracking
 * resources for subtractive bridges is not complete. (Subtractive bridges only
 * track some of their claimed resources, not "the rest of the address space" as
 * they should, so that allocation to peer non-subtractive PPBs is easier. We
 * need a fully-capable global resource allocator).
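 *
 * As an illustration, if a device behind a subtractive bridge on bus 2
 * has claimed I/O ports 0x2000-0x203f, that range is removed below from
 * the io_avail list of every bus so that it cannot also be handed out
 * elsewhere in the tree. (Hypothetical numbers, for exposition only.)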
 */
static void
remove_subtractive_res(void)
{
	int i, j;
	struct memlist *list;

	for (i = 0; i <= pci_boot_maxbus; i++) {
		if (pci_bus_res[i].subtractive) {
			/* remove used io ports */
			list = pci_bus_res[i].io_used;
			while (list) {
				for (j = 0; j <= pci_boot_maxbus; j++)
					(void) memlist_remove(
					    &pci_bus_res[j].io_avail,
					    list->ml_address, list->ml_size);
				list = list->ml_next;
			}
			/* remove used mem resource */
			list = pci_bus_res[i].mem_used;
			while (list) {
				for (j = 0; j <= pci_boot_maxbus; j++) {
					(void) memlist_remove(
					    &pci_bus_res[j].mem_avail,
					    list->ml_address, list->ml_size);
					(void) memlist_remove(
					    &pci_bus_res[j].pmem_avail,
					    list->ml_address, list->ml_size);
				}
				list = list->ml_next;
			}
			/* remove used prefetchable mem resource */
			list = pci_bus_res[i].pmem_used;
			while (list) {
				for (j = 0; j <= pci_boot_maxbus; j++) {
					(void) memlist_remove(
					    &pci_bus_res[j].pmem_avail,
					    list->ml_address, list->ml_size);
					(void) memlist_remove(
					    &pci_bus_res[j].mem_avail,
					    list->ml_address, list->ml_size);
				}
				list = list->ml_next;
			}
		}
	}
}

/*
 * Set up (or complete the setup of) the bus_avail resource list
 */
static void
setup_bus_res(int bus)
{
	uchar_t par_bus;

	if (pci_bus_res[bus].dip == NULL)	/* unused bus */
		return;

	/*
	 * Set up bus_avail if not already filled in by populate_bus_res()
	 */
	if (pci_bus_res[bus].bus_avail == NULL) {
		ASSERT(pci_bus_res[bus].sub_bus >= bus);
		memlist_insert(&pci_bus_res[bus].bus_avail, bus,
		    pci_bus_res[bus].sub_bus - bus + 1);
	}

	ASSERT(pci_bus_res[bus].bus_avail != NULL);

	/*
	 * Remove resources from parent bus node if this is not a
	 * root bus.
	 */
	par_bus = pci_bus_res[bus].par_bus;
	if (par_bus != (uchar_t)-1) {
		ASSERT(pci_bus_res[par_bus].bus_avail != NULL);
		memlist_remove_list(&pci_bus_res[par_bus].bus_avail,
		    pci_bus_res[bus].bus_avail);
	}

	/* remove self from bus_avail */
	(void) memlist_remove(&pci_bus_res[bus].bus_avail, bus, 1);
}

/*
 * Return the bus from which resources should be allocated. A device under a
 * subtractive PPB can allocate resources from its parent bus if there are no
 * resources available on its own bus, so iterate up the chain until resources
 * are found or the root is reached.
 */
static uchar_t
resolve_alloc_bus(uchar_t bus, mem_res_t type)
{
	while (pci_bus_res[bus].subtractive) {
		if (type == RES_IO && pci_bus_res[bus].io_avail != NULL)
			break;
		if (type == RES_MEM && pci_bus_res[bus].mem_avail != NULL)
			break;
		if (type == RES_PMEM && pci_bus_res[bus].pmem_avail != NULL)
			break;
		/* Has the root bus been reached? */
		if (pci_bus_res[bus].par_bus == (uchar_t)-1)
			break;
		bus = pci_bus_res[bus].par_bus;
	}

	return (bus);
}

/*
 * Each root port has a record of the number of PCIe bridges that are under it
 * and the amount of memory that it has available which is not otherwise
 * required for BARs.
 *
 * This function finds the root port for a given bus and returns the amount of
 * spare memory that is available for allocation to any one of its bridges. In
 * general, not all bridges end up being reprogrammed, so this is usually an
 * underestimate. A smarter allocator could account for this by building up a
 * better picture of the topology.
 */
static uint64_t
get_per_bridge_avail(uchar_t bus)
{
	uchar_t par_bus;

	par_bus = pci_bus_res[bus].par_bus;
	while (par_bus != (uchar_t)-1) {
		bus = par_bus;
		par_bus = pci_bus_res[par_bus].par_bus;
	}

	if (pci_bus_res[bus].mem_buffer == 0 ||
	    pci_bus_res[bus].num_bridge == 0) {
		return (0);
	}

	return (pci_bus_res[bus].mem_buffer / pci_bus_res[bus].num_bridge);
}

static uint64_t
lookup_parbus_res(uchar_t parbus, uint64_t size, uint64_t align, mem_res_t type)
{
	struct memlist **list;
	uint64_t addr;

	/*
	 * Skip root (peer) buses in multiple-root-bus systems when
	 * ACPI resource discovery was not successfully done; the
	 * initial resources set on each root bus might not be correctly
	 * accounted for in this case.
	 */
	if (pci_bus_res[parbus].par_bus == (uchar_t)-1 &&
	    num_root_bus > 1 && !pci_prd_multi_root_ok()) {
		return (0);
	}

	parbus = resolve_alloc_bus(parbus, type);

	switch (type) {
	case RES_IO:
		list = &pci_bus_res[parbus].io_avail;
		break;
	case RES_MEM:
		list = &pci_bus_res[parbus].mem_avail;
		break;
	case RES_PMEM:
		list = &pci_bus_res[parbus].pmem_avail;
		break;
	default:
		panic("Invalid resource type %d", type);
	}

	if (*list == NULL)
		return (0);

	addr = memlist_find(list, size, align);

	return (addr);
}

/*
 * Allocate a resource from the parent bus
 */
static uint64_t
get_parbus_res(uchar_t parbus, uchar_t bus, uint64_t size, uint64_t align,
    mem_res_t type)
{
	struct memlist **par_avail, **par_used, **avail, **used;
	uint64_t addr;

	parbus = resolve_alloc_bus(parbus, type);

	switch (type) {
	case RES_IO:
		par_avail = &pci_bus_res[parbus].io_avail;
		par_used = &pci_bus_res[parbus].io_used;
		avail = &pci_bus_res[bus].io_avail;
		used = &pci_bus_res[bus].io_used;
		break;
	case RES_MEM:
		par_avail = &pci_bus_res[parbus].mem_avail;
		par_used = &pci_bus_res[parbus].mem_used;
		avail = &pci_bus_res[bus].mem_avail;
		used = &pci_bus_res[bus].mem_used;
		break;
	case RES_PMEM:
		par_avail = &pci_bus_res[parbus].pmem_avail;
		par_used = &pci_bus_res[parbus].pmem_used;
		avail = &pci_bus_res[bus].pmem_avail;
		used = &pci_bus_res[bus].pmem_used;
		break;
	default:
		panic("Invalid resource type %d", type);
	}

	/* Return any existing resources to the parent bus */
	memlist_subsume(used, avail);
	for (struct memlist *m = *avail; m != NULL; m = m->ml_next) {
		(void) memlist_remove(par_used, m->ml_address, m->ml_size);
		memlist_insert(par_avail, m->ml_address, m->ml_size);
	}
	memlist_free_all(avail);

	addr = lookup_parbus_res(parbus, size, align, type);

	/*
	 * The system may have provided a 64-bit non-PF memory region to the
	 * parent bus, but we cannot use that for programming a bridge. Since
	 * the memlists are kept sorted by base address and searched in order,
	 * if we received a 64-bit address here we know that the request
	 * is unsatisfiable from the available 32-bit ranges.
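	 * For instance, if the only remaining chunk in the parent's
	 * mem_avail list starts at 0x600000000, memlist_find() will return
	 * that base and the check below rejects it rather than programming
	 * a 32-bit window with a truncated address. (Illustrative value.)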
	 */
	if (type == RES_MEM &&
	    (addr >= UINT32_MAX || addr >= UINT32_MAX - size)) {
		return (0);
	}

	if (addr != 0) {
		memlist_insert(par_used, addr, size);
		(void) memlist_remove(par_avail, addr, size);
		memlist_insert(avail, addr, size);
	}

	return (addr);
}

/*
 * given a cap_id, return its cap_id location in config space
 */
static int
get_pci_cap(uchar_t bus, uchar_t dev, uchar_t func, uint8_t cap_id)
{
	uint8_t curcap, cap_id_loc;
	uint16_t status;
	int location = -1;

	/*
	 * Need to check the Status register for ECP support first.
	 * Also please note that for type 1 devices, the
	 * offset could change. Should support type 1 next.
	 */
	status = pci_getw(bus, dev, func, PCI_CONF_STAT);
	if (!(status & PCI_STAT_CAP)) {
		return (-1);
	}
	cap_id_loc = pci_getb(bus, dev, func, PCI_CONF_CAP_PTR);

	/* Walk the list of capabilities */
	while (cap_id_loc && cap_id_loc != (uint8_t)-1) {
		curcap = pci_getb(bus, dev, func, cap_id_loc);

		if (curcap == cap_id) {
			location = cap_id_loc;
			break;
		}
		cap_id_loc = pci_getb(bus, dev, func, cap_id_loc + 1);
	}
	return (location);
}

/*
 * Does this resource element live in the legacy VGA range?
 */

static boolean_t
is_vga(struct memlist *elem, mem_res_t type)
{
	switch (type) {
	case RES_IO:
		if ((elem->ml_address == 0x3b0 && elem->ml_size == 0xc) ||
		    (elem->ml_address == 0x3c0 && elem->ml_size == 0x20)) {
			return (B_TRUE);
		}
		break;
	case RES_MEM:
		if (elem->ml_address == 0xa0000 && elem->ml_size == 0x20000)
			return (B_TRUE);
		break;
	case RES_PMEM:
		break;
	}
	return (B_FALSE);
}

/*
 * Does this entire resource list consist only of legacy VGA resources?
 */

static boolean_t
list_is_vga_only(struct memlist *l, mem_res_t type)
{
	if (l == NULL) {
		return (B_FALSE);
	}

	do {
		if (!is_vga(l, type))
			return (B_FALSE);
	} while ((l = l->ml_next) != NULL);
	return (B_TRUE);
}

/*
 * Find the start and end addresses that cover the range for all list entries,
 * excluding legacy VGA addresses. Relies on the list being sorted.
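 *
 * For example (hypothetical list), entries of 0xa0000/0x20000 (legacy
 * VGA), 0xc0000000/0x100000 and 0xc0200000/0x100000 would yield a base
 * of 0xc0000000 and a limit of 0xc02fffff.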
 */
static void
pci_memlist_range(struct memlist *list, mem_res_t type, uint64_t *basep,
    uint64_t *limitp)
{
	*limitp = *basep = 0;

	for (; list != NULL; list = list->ml_next) {
		if (is_vga(list, type))
			continue;

		if (*basep == 0)
			*basep = list->ml_address;

		if (list->ml_address + list->ml_size >= *limitp)
			*limitp = list->ml_address + list->ml_size - 1;
	}
}

static void
set_ppb_res(uchar_t bus, uchar_t dev, uchar_t func, mem_res_t type,
    uint64_t base, uint64_t limit)
{
	char *tag;

	switch (type) {
	case RES_IO: {
		VERIFY0(base >> 32);
		VERIFY0(limit >> 32);

		pci_putb(bus, dev, func, PCI_BCNF_IO_BASE_LOW,
		    (uint8_t)((base >> PCI_BCNF_IO_SHIFT) & PCI_BCNF_IO_MASK));
		pci_putb(bus, dev, func, PCI_BCNF_IO_LIMIT_LOW,
		    (uint8_t)((limit >> PCI_BCNF_IO_SHIFT) & PCI_BCNF_IO_MASK));

		uint8_t val = pci_getb(bus, dev, func, PCI_BCNF_IO_BASE_LOW);
		if ((val & PCI_BCNF_ADDR_MASK) == PCI_BCNF_IO_32BIT) {
			pci_putw(bus, dev, func, PCI_BCNF_IO_BASE_HI,
			    base >> 16);
			pci_putw(bus, dev, func, PCI_BCNF_IO_LIMIT_HI,
			    limit >> 16);
		} else {
			VERIFY0(base >> 16);
			VERIFY0(limit >> 16);
		}

		tag = "I/O";
		break;
	}

	case RES_MEM:
		VERIFY0(base >> 32);
		VERIFY0(limit >> 32);

		pci_putw(bus, dev, func, PCI_BCNF_MEM_BASE,
		    (uint16_t)((base >> PCI_BCNF_MEM_SHIFT) &
		    PCI_BCNF_MEM_MASK));
		pci_putw(bus, dev, func, PCI_BCNF_MEM_LIMIT,
		    (uint16_t)((limit >> PCI_BCNF_MEM_SHIFT) &
		    PCI_BCNF_MEM_MASK));

		tag = "MEM";
		break;

	case RES_PMEM: {
		pci_putw(bus, dev, func, PCI_BCNF_PF_BASE_LOW,
		    (uint16_t)((base >> PCI_BCNF_MEM_SHIFT) &
		    PCI_BCNF_MEM_MASK));
		pci_putw(bus, dev, func, PCI_BCNF_PF_LIMIT_LOW,
		    (uint16_t)((limit >> PCI_BCNF_MEM_SHIFT) &
		    PCI_BCNF_MEM_MASK));

		uint16_t val = pci_getw(bus, dev, func, PCI_BCNF_PF_BASE_LOW);
		if ((val & PCI_BCNF_ADDR_MASK) == PCI_BCNF_PF_MEM_64BIT) {
			pci_putl(bus, dev, func, PCI_BCNF_PF_BASE_HIGH,
			    base >> 32);
			pci_putl(bus, dev, func, PCI_BCNF_PF_LIMIT_HIGH,
			    limit >> 32);
		} else {
			VERIFY0(base >> 32);
			VERIFY0(limit >> 32);
		}

		tag = "PMEM";
		break;
	}

	default:
		panic("Invalid resource type %d", type);
	}

	if (base > limit) {
		cmn_err(CE_NOTE, MSGHDR "DISABLE %4s range",
		    "ppb", bus, dev, func, tag);
	} else {
		cmn_err(CE_NOTE,
		    MSGHDR "PROGRAM %4s range 0x%lx ~ 0x%lx",
		    "ppb", bus, dev, func, tag, base, limit);
	}
}

static void
fetch_ppb_res(uchar_t bus, uchar_t dev, uchar_t func, mem_res_t type,
    uint64_t *basep, uint64_t *limitp)
{
	uint64_t val, base, limit;

	switch (type) {
	case RES_IO:
		val = pci_getb(bus, dev, func, PCI_BCNF_IO_LIMIT_LOW);
		limit = ((val & PCI_BCNF_IO_MASK) << PCI_BCNF_IO_SHIFT) |
		    PCI_BCNF_IO_LIMIT_BITS;
		val = pci_getb(bus, dev, func, PCI_BCNF_IO_BASE_LOW);
		base = ((val & PCI_BCNF_IO_MASK) << PCI_BCNF_IO_SHIFT);

		if ((val & PCI_BCNF_ADDR_MASK) == PCI_BCNF_IO_32BIT) {
			val = pci_getw(bus, dev, func, PCI_BCNF_IO_BASE_HI);
			base |= val << 16;
			val = pci_getw(bus, dev, func, PCI_BCNF_IO_LIMIT_HI);
			limit |= val << 16;
		}
		VERIFY0(base >> 32);
		break;

	case RES_MEM:
		val = pci_getw(bus, dev, func, PCI_BCNF_MEM_LIMIT);
		limit = ((val & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT) |
		    PCI_BCNF_MEM_LIMIT_BITS;
		val = pci_getw(bus, dev, func, PCI_BCNF_MEM_BASE);
		base = ((val & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT);
		VERIFY0(base >> 32);
		break;

	case RES_PMEM:
		val = pci_getw(bus, dev, func, PCI_BCNF_PF_LIMIT_LOW);
		limit = ((val & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT) |
		    PCI_BCNF_MEM_LIMIT_BITS;
		val = pci_getw(bus, dev, func, PCI_BCNF_PF_BASE_LOW);
		base = ((val & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT);

		if ((val & PCI_BCNF_ADDR_MASK) == PCI_BCNF_PF_MEM_64BIT) {
			val = pci_getl(bus, dev, func, PCI_BCNF_PF_BASE_HIGH);
			base |= val << 32;
			val = pci_getl(bus, dev, func, PCI_BCNF_PF_LIMIT_HIGH);
			limit |= val << 32;
		}
		break;
	default:
		panic("Invalid resource type %d", type);
	}

	*basep = base;
	*limitp = limit;
}

/*
 * Assign valid resources to unconfigured pci(e) bridges. We are trying
 * to reprogram the bridge when its
 *	i) SECBUS == SUBBUS	||
 *	ii) IOBASE > IOLIM	||
 *	iii) MEMBASE > MEMLIM && PMEMBASE > PMEMLIM
 * This must be done after one full pass through the PCI tree to collect
 * all firmware-configured resources, so that we know what resources are
 * free and available to assign to the unconfigured PPBs.
 */
static void
fix_ppb_res(uchar_t secbus, boolean_t prog_sub)
{
	uchar_t bus, dev, func;
	uchar_t parbus, subbus;
	struct {
		uint64_t base;
		uint64_t limit;
		uint64_t size;
		uint64_t align;
	} io, mem, pmem;
	uint64_t addr = 0;
	int *regp = NULL;
	uint_t reglen, buscount;
	int rv, cap_ptr, physhi;
	dev_info_t *dip;
	uint16_t cmd_reg;
	struct memlist *scratch_list;
	boolean_t reprogram_io, reprogram_mem;

	/* skip root (peer) PCI busses */
	if (pci_bus_res[secbus].par_bus == (uchar_t)-1)
		return;

	/* skip subtractive PPB when prog_sub is not TRUE */
	if (pci_bus_res[secbus].subtractive && !prog_sub)
		return;

	/* some entries may be empty due to discontiguous bus numbering */
	dip = pci_bus_res[secbus].dip;
	if (dip == NULL)
		return;

	rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", &regp, &reglen);
	if (rv != DDI_PROP_SUCCESS || reglen == 0)
		return;
	physhi = regp[0];
	ddi_prop_free(regp);

	func = (uchar_t)PCI_REG_FUNC_G(physhi);
	dev = (uchar_t)PCI_REG_DEV_G(physhi);
	bus = (uchar_t)PCI_REG_BUS_G(physhi);

	dump_memlists("fix_ppb_res start bus", bus);
	dump_memlists("fix_ppb_res start secbus", secbus);

	/*
	 * If pcie bridge, check to see if link is enabled
	 */
	cap_ptr = get_pci_cap(bus, dev, func, PCI_CAP_ID_PCI_E);
	if (cap_ptr != -1) {
		uint16_t reg = pci_getw(bus, dev, func,
		    (uint16_t)cap_ptr + PCIE_LINKCTL);
		if ((reg & PCIE_LINKCTL_LINK_DISABLE) != 0) {
			dcmn_err(CE_NOTE, MSGHDR "link is disabled",
			    "ppb", bus, dev, func);
			return;
		}
	}

	subbus = pci_getb(bus, dev, func, PCI_BCNF_SUBBUS);
	parbus = pci_bus_res[secbus].par_bus;
	ASSERT(parbus == bus);
	cmd_reg = pci_getw(bus, dev, func, PCI_CONF_COMM);

	/*
	 * If we have a Cardbus bridge, but no bus space
	 */
	if (pci_bus_res[secbus].num_cbb != 0 &&
	    pci_bus_res[secbus].bus_avail == NULL) {
		uchar_t range;

		/* normally there are 2 buses under a cardbus bridge */
		range = pci_bus_res[secbus].num_cbb * 2;

		/*
		 * Try to find and allocate a bus-range starting at subbus+1
		 * from the parent of the PPB.
		 */
		for (; range != 0; range--) {
			if (memlist_find_with_startaddr(
			    &pci_bus_res[parbus].bus_avail,
			    subbus + 1, range, 1) != 0) {
				break;	/* find bus range resource at parent */
			}
		}
		if (range != 0) {
			memlist_insert(&pci_bus_res[secbus].bus_avail,
			    subbus + 1, range);
			subbus = subbus + range;
			pci_bus_res[secbus].sub_bus = subbus;
			pci_putb(bus, dev, func, PCI_BCNF_SUBBUS, subbus);
			add_bus_range_prop(secbus);

			cmn_err(CE_NOTE,
			    MSGHDR "PROGRAM cardbus buses 0x%x ~ 0x%x",
			    "cbb", bus, dev, func, secbus, subbus);
		}
	}

	buscount = subbus - secbus + 1;

	dcmn_err(CE_NOTE, MSGHDR
	    "secbus 0x%x existing sizes I/O 0x%x, MEM 0x%lx, PMEM 0x%lx",
	    "ppb", bus, dev, func, secbus,
	    pci_bus_res[secbus].io_size, pci_bus_res[secbus].mem_size,
	    pci_bus_res[secbus].pmem_size);

	/*
	 * If the bridge's I/O range needs to be reprogrammed, then the
	 * bridge is going to be allocated the greater of:
	 *  - 512 bytes per downstream bus;
	 *  - the amount required by its current children.
	 * rounded up to the next 4K.
	 */
	io.size = MAX(pci_bus_res[secbus].io_size, buscount * 0x200);

	/*
	 * Similarly if the memory ranges need to be reprogrammed, then we'd
	 * like to assign some extra memory to the bridge in case there is
	 * anything hotplugged underneath later.
	 *
	 * We use the information gathered earlier relating to the number of
	 * bridges that must share the resource of this bus' root port, and how
	 * much memory is available that isn't already accounted for to
	 * determine how much to use.
	 *
	 * At least the existing `mem_size` must be allocated as that has been
	 * gleaned from enumeration.
	 */
	uint64_t avail = get_per_bridge_avail(bus);

	mem.size = 0;
	if (avail > 0) {
		/* Try 32MiB first, then adjust down until it fits */
		for (uint_t i = 32; i > 0; i >>= 1) {
			if (avail >= buscount * PPB_MEM_ALIGNMENT * i) {
				mem.size = buscount * PPB_MEM_ALIGNMENT * i;
				dcmn_err(CE_NOTE, MSGHDR
				    "Allocating %uMiB",
				    "ppb", bus, dev, func, i);
				break;
			}
		}
	}
	mem.size = MAX(pci_bus_res[secbus].mem_size, mem.size);

	/*
	 * For the PF memory range, illumos has not historically handed out
	 * any additional memory to bridges. However there are some
	 * hotpluggable devices which need 64-bit PF space and so we now always
	 * attempt to allocate at least 32 MiB. If there is enough space
	 * available from a parent then we will increase this to 512MiB.
	 * If we're later unable to find memory to satisfy this, we just move
	 * on and are no worse off than before.
	 */
	pmem.size = MAX(pci_bus_res[secbus].pmem_size,
	    buscount * PPB_MEM_ALIGNMENT * 32);

	/*
	 * Check if the parent bus could allocate a 64-bit sized PF
	 * range and bump the minimum pmem.size to 512MB if so.
	 */
	if (lookup_parbus_res(parbus, 1ULL << 32, PPB_MEM_ALIGNMENT,
	    RES_PMEM) > 0) {
		pmem.size = MAX(pci_bus_res[secbus].pmem_size,
		    buscount * PPB_MEM_ALIGNMENT * 512);
	}

	/*
	 * I/O space needs to be 4KiB aligned, Memory space needs to be 1MiB
	 * aligned.
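	 * For example (illustrative numbers), an io.size of 0xa00 is rounded
	 * up to 0x1000 and a mem.size of 0x180000 to 0x200000 by the
	 * P2ROUNDUP() calls below.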
	 *
	 * We calculate alignment as the largest power of two less than the
	 * sum of all children's size requirements, because this will
	 * align to the size of the largest child request within that size
	 * (which is always a power of two).
	 */
	io.size = P2ROUNDUP(io.size, PPB_IO_ALIGNMENT);
	mem.size = P2ROUNDUP(mem.size, PPB_MEM_ALIGNMENT);
	pmem.size = P2ROUNDUP(pmem.size, PPB_MEM_ALIGNMENT);

	io.align = io.size;
	P2LE(io.align);
	mem.align = mem.size;
	P2LE(mem.align);
	pmem.align = pmem.size;
	P2LE(pmem.align);

	/* Subtractive bridge */
	if (pci_bus_res[secbus].subtractive && prog_sub) {
		/*
		 * We program an arbitrary amount of I/O and memory resource
		 * for the subtractive bridge so that child dynamic-resource-
		 * allocating devices (such as Cardbus bridges) have a chance
		 * of success. Until we have full-tree resource rebalancing,
		 * dynamic resource allocation (thru busra) only looks at the
		 * parent bridge, so all PPBs must have some allocatable
		 * resource. For non-subtractive bridges, the resources come
		 * from the base/limit register "windows", but subtractive
		 * bridges often don't program those (since they don't need to).
		 * If we put all the remaining resources on the subtractive
		 * bridge, then peer non-subtractive bridges can't allocate
		 * more space (even though this is probably most correct).
		 * If we put the resources only on the parent, then allocations
		 * from children of subtractive bridges will fail without
		 * special-case code for bypassing the subtractive bridge.
		 * This solution is the middle-ground temporary solution until
		 * we have fully-capable resource allocation.
		 */

		/*
		 * Add an arbitrary I/O resource to the subtractive PPB
		 */
		if (pci_bus_res[secbus].io_avail == NULL) {
			addr = get_parbus_res(parbus, secbus, io.size,
			    io.align, RES_IO);
			if (addr != 0) {
				add_ranges_prop(secbus, B_TRUE);
				pci_bus_res[secbus].io_reprogram =
				    pci_bus_res[parbus].io_reprogram;

				cmn_err(CE_NOTE,
				    MSGHDR "PROGRAM I/O range 0x%lx ~ 0x%lx "
				    "(subtractive bridge)",
				    "ppb", bus, dev, func,
				    addr, addr + io.size - 1);
			}
		}
		/*
		 * Add an arbitrary memory resource to the subtractive PPB
		 */
		if (pci_bus_res[secbus].mem_avail == NULL) {
			addr = get_parbus_res(parbus, secbus, mem.size,
			    mem.align, RES_MEM);
			if (addr != 0) {
				add_ranges_prop(secbus, B_TRUE);
				pci_bus_res[secbus].mem_reprogram =
				    pci_bus_res[parbus].mem_reprogram;

				cmn_err(CE_NOTE,
				    MSGHDR "PROGRAM MEM range 0x%lx ~ 0x%lx "
				    "(subtractive bridge)",
				    "ppb", bus, dev, func,
				    addr, addr + mem.size - 1);
			}
		}

		goto cmd_enable;
	}

	/*
	 * Retrieve the various configured ranges from the bridge.
	 */

	fetch_ppb_res(bus, dev, func, RES_IO, &io.base, &io.limit);
	fetch_ppb_res(bus, dev, func, RES_MEM, &mem.base, &mem.limit);
	fetch_ppb_res(bus, dev, func, RES_PMEM, &pmem.base, &pmem.limit);

	/*
	 * Reprogram IO if:
	 *
	 * - The list does not consist entirely of legacy VGA resources;
	 *
	 * and any of
	 *
	 * - The parent bus is flagged for reprogramming;
	 * - IO space is currently disabled in the command register;
	 * - IO space is disabled via base/limit.
	 */
	scratch_list = memlist_dup(pci_bus_res[secbus].io_avail);
	memlist_merge(&pci_bus_res[secbus].io_used, &scratch_list);

	reprogram_io = !list_is_vga_only(scratch_list, RES_IO) &&
	    (pci_bus_res[parbus].io_reprogram ||
	    (cmd_reg & PCI_COMM_IO) == 0 ||
	    io.base > io.limit);

	memlist_free_all(&scratch_list);

	if (reprogram_io) {
		if (pci_bus_res[secbus].io_used != NULL) {
			memlist_subsume(&pci_bus_res[secbus].io_used,
			    &pci_bus_res[secbus].io_avail);
		}

		if (pci_bus_res[secbus].io_avail != NULL &&
		    !pci_bus_res[parbus].io_reprogram &&
		    !pci_bus_res[parbus].subtractive) {
			/* re-choose old io ports info */

			uint64_t base, limit;

			pci_memlist_range(pci_bus_res[secbus].io_avail,
			    RES_IO, &base, &limit);
			io.base = (uint_t)base;
			io.limit = (uint_t)limit;

			/* 4K aligned */
			io.base = P2ALIGN(base, PPB_IO_ALIGNMENT);
			io.limit = P2ROUNDUP(io.limit, PPB_IO_ALIGNMENT) - 1;
			io.size = io.limit - io.base + 1;
			ASSERT3U(io.base, <=, io.limit);
			memlist_free_all(&pci_bus_res[secbus].io_avail);
			memlist_insert(&pci_bus_res[secbus].io_avail,
			    io.base, io.size);
			memlist_insert(&pci_bus_res[parbus].io_used,
			    io.base, io.size);
			(void) memlist_remove(&pci_bus_res[parbus].io_avail,
			    io.base, io.size);
			pci_bus_res[secbus].io_reprogram = B_TRUE;
		} else {
			/* get new io ports from parent bus */
			addr = get_parbus_res(parbus, secbus, io.size,
			    io.align, RES_IO);
			if (addr != 0) {
				io.base = addr;
				io.limit = addr + io.size - 1;
				pci_bus_res[secbus].io_reprogram = B_TRUE;
			}
		}

		if (pci_bus_res[secbus].io_reprogram) {
			/* reprogram PPB regs */
			set_ppb_res(bus, dev, func, RES_IO, io.base, io.limit);
			add_ranges_prop(secbus, B_TRUE);
		}
	}

	/*
	 * Reprogram memory if:
	 *
	 * - The list does not consist entirely of legacy VGA resources;
	 *
	 * and any of
	 *
	 * - The parent bus is flagged for reprogramming;
	 * - Mem space is currently disabled in the command register;
	 * - Both mem and pmem space are disabled via base/limit.
	 *
	 * Always reprogram both mem and pmem together since this leaves
	 * resources in the 'avail' list for add_reg_props() to subsequently
	 * find and assign.
	 */
	scratch_list = memlist_dup(pci_bus_res[secbus].mem_avail);
	memlist_merge(&pci_bus_res[secbus].mem_used, &scratch_list);

	reprogram_mem = !list_is_vga_only(scratch_list, RES_MEM) &&
	    (pci_bus_res[parbus].mem_reprogram ||
	    (cmd_reg & PCI_COMM_MAE) == 0 ||
	    (mem.base > mem.limit && pmem.base > pmem.limit));

	memlist_free_all(&scratch_list);

	if (reprogram_mem) {
		/* Mem range */
		if (pci_bus_res[secbus].mem_used != NULL) {
			memlist_subsume(&pci_bus_res[secbus].mem_used,
			    &pci_bus_res[secbus].mem_avail);
		}

		/*
		 * At this point, if the parent bus has not been
		 * reprogrammed and there is memory in this bus' available
		 * pool, then it can just be re-used. Otherwise a new range
		 * is requested from the parent bus - note that
		 * get_parbus_res() also takes care of constructing new
		 * avail and used lists for the bus.
		 *
		 * For a subtractive parent bus, always request a fresh
		 * memory range.
		 */
		if (pci_bus_res[secbus].mem_avail != NULL &&
		    !pci_bus_res[parbus].mem_reprogram &&
		    !pci_bus_res[parbus].subtractive) {
			/* re-choose old mem resource */
			pci_memlist_range(pci_bus_res[secbus].mem_avail,
			    RES_MEM, &mem.base, &mem.limit);

			mem.base = P2ALIGN(mem.base, PPB_MEM_ALIGNMENT);
			mem.limit = P2ROUNDUP(mem.limit, PPB_MEM_ALIGNMENT) - 1;
			mem.size = mem.limit + 1 - mem.base;
			ASSERT3U(mem.base, <=, mem.limit);
			memlist_free_all(&pci_bus_res[secbus].mem_avail);
			memlist_insert(&pci_bus_res[secbus].mem_avail,
			    mem.base, mem.size);
			memlist_insert(&pci_bus_res[parbus].mem_used,
			    mem.base, mem.size);
			(void) memlist_remove(&pci_bus_res[parbus].mem_avail,
			    mem.base, mem.size);
			pci_bus_res[secbus].mem_reprogram = B_TRUE;
		} else {
			/* get new mem resource from parent bus */
			addr = get_parbus_res(parbus, secbus, mem.size,
			    mem.align, RES_MEM);
			if (addr != 0) {
				mem.base = addr;
				mem.limit = addr + mem.size - 1;
				pci_bus_res[secbus].mem_reprogram = B_TRUE;
			}
		}

		/* Prefetch mem */
		if (pci_bus_res[secbus].pmem_used != NULL) {
			memlist_subsume(&pci_bus_res[secbus].pmem_used,
			    &pci_bus_res[secbus].pmem_avail);
		}

		/* Same logic as for non-prefetch memory, see above */
		if (pci_bus_res[secbus].pmem_avail != NULL &&
		    !pci_bus_res[parbus].mem_reprogram &&
		    !pci_bus_res[parbus].subtractive) {
			/* re-choose old mem resource */

			pci_memlist_range(pci_bus_res[secbus].pmem_avail,
			    RES_PMEM, &pmem.base, &pmem.limit);

			pmem.base = P2ALIGN(pmem.base, PPB_MEM_ALIGNMENT);
			pmem.limit = P2ROUNDUP(pmem.limit, PPB_MEM_ALIGNMENT)
			    - 1;
			pmem.size = pmem.limit + 1 - pmem.base;
			ASSERT3U(pmem.base, <=, pmem.limit);
			memlist_free_all(&pci_bus_res[secbus].pmem_avail);
			memlist_insert(&pci_bus_res[secbus].pmem_avail,
			    pmem.base, pmem.size);
			memlist_insert(&pci_bus_res[parbus].pmem_used,
			    pmem.base, pmem.size);
			(void) memlist_remove(&pci_bus_res[parbus].pmem_avail,
			    pmem.base, pmem.size);
			pci_bus_res[secbus].mem_reprogram = B_TRUE;
		} else {
			/* get new mem resource from parent bus */
			addr = get_parbus_res(parbus, secbus, pmem.size,
			    pmem.align, RES_PMEM);
			if (addr != 0) {
				pmem.base = addr;
				pmem.limit = addr + pmem.size - 1;
				pci_bus_res[secbus].mem_reprogram = B_TRUE;
			}
		}

		if (pci_bus_res[secbus].mem_reprogram) {
			set_ppb_res(bus, dev, func,
			    RES_MEM, mem.base, mem.limit);
			set_ppb_res(bus, dev, func,
			    RES_PMEM, pmem.base, pmem.limit);
			add_ranges_prop(secbus, B_TRUE);
		}
	}

cmd_enable:
	dump_memlists("fix_ppb_res end bus", bus);
	dump_memlists("fix_ppb_res end secbus", secbus);

	if (pci_bus_res[secbus].io_avail != NULL)
		cmd_reg |= PCI_COMM_IO | PCI_COMM_ME;
	if (pci_bus_res[secbus].mem_avail != NULL ||
	    pci_bus_res[secbus].pmem_avail != NULL) {
		cmd_reg |= PCI_COMM_MAE | PCI_COMM_ME;
	}
	pci_putw(bus, dev, func, PCI_CONF_COMM, cmd_reg);
}

void
pci_reprogram(void)
{
	int i, pci_reconfig = 1;
	char *onoff;
	int bus;

	/*
	 * Ask platform code for all of the root complexes it knows about in
	 * case we have missed anything in the scan. This is to ensure that we
	 * have them show up in the devinfo tree. This scan should find any
	 * existing entries as well. After this, go through each bus and
	 * ask the platform if it wants to change the name of the slot.
	 */
	pci_prd_root_complex_iter(pci_rc_scan_cb, NULL);
	for (bus = 0; bus <= pci_boot_maxbus; bus++) {
		pci_prd_slot_name(bus, pci_bus_res[bus].dip);
	}
	pci_unitaddr_cache_init();

	/*
	 * Fix-up unit-address assignments if cache is available
	 */
	if (pci_unitaddr_cache_valid()) {
		int pci_regs[] = {0, 0, 0};
		int new_addr;
		int index = 0;

		for (bus = 0; bus <= pci_boot_maxbus; bus++) {
			/* skip non-root (peer) PCI busses */
			if ((pci_bus_res[bus].par_bus != (uchar_t)-1) ||
			    (pci_bus_res[bus].dip == NULL))
				continue;

			new_addr = pci_bus_unitaddr(index);
			if (pci_bus_res[bus].root_addr != new_addr) {
				/* update reg property for node */
				pci_regs[0] = pci_bus_res[bus].root_addr =
				    new_addr;
				(void) ndi_prop_update_int_array(
				    DDI_DEV_T_NONE, pci_bus_res[bus].dip,
				    "reg", (int *)pci_regs, 3);
			}
			index++;
		}
	} else {
		/* perform legacy processing */
		pci_unitaddr_cache_create();
	}

	/*
	 * Do root-bus resource discovery
	 */
	for (bus = 0; bus <= pci_boot_maxbus; bus++) {
		/* skip non-root (peer) PCI busses */
		if (pci_bus_res[bus].par_bus != (uchar_t)-1)
			continue;

		/*
		 * 1. find resources associated with this root bus
		 */
		populate_bus_res(bus);

		/*
		 * 2. Exclude <1M address range here in case below reserved
		 * ranges for BIOS data area, ROM area etc are wrongly reported
		 * in ACPI resource producer entries for PCI root bus.
		 *	00000000 - 000003FF	RAM
		 *	00000400 - 000004FF	BIOS data area
		 *	00000500 - 0009FFFF	RAM
		 *	000A0000 - 000BFFFF	VGA RAM
		 *	000C0000 - 000FFFFF	ROM area
		 */
		(void) memlist_remove(&pci_bus_res[bus].mem_avail, 0, 0x100000);
		(void) memlist_remove(&pci_bus_res[bus].pmem_avail,
		    0, 0x100000);

		/*
		 * 3. Calculate the amount of "spare" 32-bit memory so that we
		 * can use that later to determine how much additional memory
		 * to allocate to bridges in order that they have a better
		 * chance of supporting a device being hotplugged under them.
		 *
		 * This is a root bus and the previous CONFIG_INFO pass has
		 * populated `mem_size` with the sum of all of the BAR sizes
		 * for all devices underneath, possibly adjusted up to allow
		 * for alignment when it is later allocated. This pass has also
		 * recorded the number of child bridges found under this bus in
		 * `num_bridge`. To calculate the memory which can be used for
		 * additional bridge allocations we sum up the contents of the
		 * `mem_avail` list and subtract `mem_size`.
		 *
		 * When programming child bridges later in fix_ppb_res(), the
		 * bridge count and spare memory values cached against the
		 * relevant root port are used to determine how much memory
		 * should be allocated.
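		 *
		 * As a hypothetical example, a root bus with 0x6000000 bytes
		 * of 32-bit mem_avail, a mem_size of 0x2000000 and two child
		 * bridges ends up with a mem_buffer of 0x4000000, so
		 * get_per_bridge_avail() will later offer each bridge a
		 * 0x2000000 budget.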
		 */
		if (pci_bus_res[bus].num_bridge > 0) {
			uint64_t mem = 0;

			for (struct memlist *ml = pci_bus_res[bus].mem_avail;
			    ml != NULL; ml = ml->ml_next) {
				if (ml->ml_address < UINT32_MAX)
					mem += ml->ml_size;
			}

			if (mem > pci_bus_res[bus].mem_size)
				mem -= pci_bus_res[bus].mem_size;
			else
				mem = 0;

			pci_bus_res[bus].mem_buffer = mem;

			dcmn_err(CE_NOTE,
			    "Bus 0x%02x, bridges 0x%x, buffer mem 0x%lx",
			    bus, pci_bus_res[bus].num_bridge, mem);
		}

		/*
		 * 4. Remove used PCI and ISA resources from bus resource map
		 */

		memlist_remove_list(&pci_bus_res[bus].io_avail,
		    pci_bus_res[bus].io_used);
		memlist_remove_list(&pci_bus_res[bus].mem_avail,
		    pci_bus_res[bus].mem_used);
		memlist_remove_list(&pci_bus_res[bus].pmem_avail,
		    pci_bus_res[bus].pmem_used);
		memlist_remove_list(&pci_bus_res[bus].mem_avail,
		    pci_bus_res[bus].pmem_used);
		memlist_remove_list(&pci_bus_res[bus].pmem_avail,
		    pci_bus_res[bus].mem_used);

		memlist_remove_list(&pci_bus_res[bus].io_avail,
		    isa_res.io_used);
		memlist_remove_list(&pci_bus_res[bus].mem_avail,
		    isa_res.mem_used);
	}

	memlist_free_all(&isa_res.io_used);
	memlist_free_all(&isa_res.mem_used);

	/* add bus-range property for root/peer bus nodes */
	for (i = 0; i <= pci_boot_maxbus; i++) {
		/* create bus-range property on root/peer buses */
		if (pci_bus_res[i].par_bus == (uchar_t)-1)
			add_bus_range_prop(i);

		/* setup bus range resource on each bus */
		setup_bus_res(i);
	}

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "pci-reprog", &onoff) == DDI_SUCCESS) {
		if (strcmp(onoff, "off") == 0) {
			pci_reconfig = 0;
			cmn_err(CE_NOTE, "pci device reprogramming disabled");
		}
		ddi_prop_free(onoff);
	}

	remove_subtractive_res();

	/* reprogram the non-subtractive PPB */
	if (pci_reconfig)
		for (i = 0; i <= pci_boot_maxbus; i++)
			fix_ppb_res(i, B_FALSE);

	for (i = 0; i <= pci_boot_maxbus; i++) {
		/* configure devices not configured by firmware */
		if (pci_reconfig) {
			/*
			 * Reprogram the subtractive PPB. At this time, all its
			 * siblings should have got their resources already.
			 */
			if (pci_bus_res[i].subtractive)
				fix_ppb_res(i, B_TRUE);
			enumerate_bus_devs(i, CONFIG_NEW);
		}
	}

	/* All dev programmed, so we can create available prop */
	for (i = 0; i <= pci_boot_maxbus; i++)
		add_bus_available_prop(i);
}

/*
 * populate bus resources
 */
static void
populate_bus_res(uchar_t bus)
{
	pci_bus_res[bus].pmem_avail = pci_prd_find_resource(bus,
	    PCI_PRD_R_PREFETCH);
	pci_bus_res[bus].mem_avail = pci_prd_find_resource(bus, PCI_PRD_R_MMIO);
	pci_bus_res[bus].io_avail = pci_prd_find_resource(bus, PCI_PRD_R_IO);
	pci_bus_res[bus].bus_avail = pci_prd_find_resource(bus, PCI_PRD_R_BUS);

	dump_memlists("populate_bus_res", bus);

	/*
	 * attempt to initialize sub_bus from the largest range-end
	 * in the bus_avail list
	 */
	if (pci_bus_res[bus].bus_avail != NULL) {
		struct memlist *entry;
		int current;

		entry = pci_bus_res[bus].bus_avail;
		while (entry != NULL) {
			current = entry->ml_address + entry->ml_size - 1;
			if (current > pci_bus_res[bus].sub_bus)
				pci_bus_res[bus].sub_bus = current;
			entry = entry->ml_next;
		}
	}

	if (bus == 0) {
		/*
		 * Special treatment of bus 0:
		 * If no IO/MEM resource from ACPI/MPSPEC/HRT, copy
		 * pcimem from boot and make I/O space the entire range
		 * starting at 0x100.
		 */
		if (pci_bus_res[0].mem_avail == NULL) {
			pci_bus_res[0].mem_avail =
			    memlist_dup(bootops->boot_mem->pcimem);
		}
		/* Exclude 0x00 to 0xff of the I/O space, used by all PCs */
		if (pci_bus_res[0].io_avail == NULL)
			memlist_insert(&pci_bus_res[0].io_avail, 0x100, 0xffff);
	}

	/*
	 * Create 'ranges' property here before any resources are
	 * removed from the resource lists
	 */
	add_ranges_prop(bus, B_FALSE);
}

/*
 * Create top-level bus dips, i.e. /pci@0,0, /pci@1,0...
 */
static void
create_root_bus_dip(uchar_t bus)
{
	int pci_regs[] = {0, 0, 0};
	dev_info_t *dip;

	ASSERT(pci_bus_res[bus].par_bus == (uchar_t)-1);

	num_root_bus++;
	ndi_devi_alloc_sleep(ddi_root_node(), "pci",
	    (pnode_t)DEVI_SID_NODEID, &dip);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "#address-cells", 3);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "#size-cells", 2);
	pci_regs[0] = pci_bus_res[bus].root_addr;
	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
	    "reg", (int *)pci_regs, 3);

	/*
	 * If system has PCIe bus, then create different properties
	 */
	if (create_pcie_root_bus(bus, dip) == B_FALSE)
		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "device_type", "pci");

	(void) ndi_devi_bind_driver(dip, 0);
	pci_bus_res[bus].dip = dip;
}

/*
 * For any fixed configuration (often compatibility) pci devices
 * and those with their own expansion rom, create device nodes
 * to hold the already configured device details.
1870 */
1871 void
1872 enumerate_bus_devs(uchar_t bus, int config_op)
1873 {
1874 uchar_t dev, func, nfunc, header;
1875 ushort_t venid;
1876 struct pci_devfunc *devlist = NULL, *entry;
1877
1878 if (bus_debug(bus)) {
1879 if (config_op == CONFIG_NEW) {
1880 dcmn_err(CE_NOTE, "configuring pci bus 0x%x", bus);
1881 } else if (config_op == CONFIG_FIX) {
1882 dcmn_err(CE_NOTE,
1883 "fixing devices on pci bus 0x%x", bus);
1884 } else {
1885 dcmn_err(CE_NOTE, "enumerating pci bus 0x%x", bus);
1886 }
1887 }
1888
1889 if (config_op == CONFIG_NEW) {
1890 devlist = (struct pci_devfunc *)pci_bus_res[bus].privdata;
1891 while (devlist) {
1892 entry = devlist;
1893 devlist = entry->next;
1894 if (entry->reprogram ||
1895 pci_bus_res[bus].io_reprogram ||
1896 pci_bus_res[bus].mem_reprogram) {
1897 /* reprogram device(s) */
1898 (void) add_reg_props(entry->dip, bus,
1899 entry->dev, entry->func, CONFIG_NEW, 0);
1900 }
1901 kmem_free(entry, sizeof (*entry));
1902 }
1903 pci_bus_res[bus].privdata = NULL;
1904 return;
1905 }
1906
1907 for (dev = 0; dev < max_dev_pci; dev++) {
1908 nfunc = 1;
1909 for (func = 0; func < nfunc; func++) {
1910
1911 venid = pci_getw(bus, dev, func, PCI_CONF_VENID);
1912
1913 if ((venid == 0xffff) || (venid == 0)) {
1914 /* no function at this address */
1915 continue;
1916 }
1917
1918 header = pci_getb(bus, dev, func, PCI_CONF_HEADER);
1919 if (header == 0xff) {
1920 continue; /* illegal value */
1921 }
1922
1923 /*
1924 * according to some mail from Microsoft posted
1925 * to the pci-drivers alias, their only requirement
1926 * for a multifunction device is for the 1st
1927 * function to have the PCI_HEADER_MULTI bit set.
1928 */
1929 if ((func == 0) && (header & PCI_HEADER_MULTI)) {
1930 nfunc = 8;
1931 }
1932
1933 if (config_op == CONFIG_FIX ||
1934 config_op == CONFIG_INFO) {
1935 /*
1936 * Create the node, unconditionally, on the
1937 * first pass only. It may still need
1938 * resource assignment, which will be
1939 * done on the second, CONFIG_NEW, pass.
1940 */
1941 process_devfunc(bus, dev, func, config_op);
1942
1943 }
1944 }
1945 }
1946
1947 /* percolate bus used resources up through parents to root */
1948 if (config_op == CONFIG_INFO) {
1949 int par_bus;
1950
1951 par_bus = pci_bus_res[bus].par_bus;
1952 while (par_bus != (uchar_t)-1) {
1953 pci_bus_res[par_bus].io_size +=
1954 pci_bus_res[bus].io_size;
1955 pci_bus_res[par_bus].mem_size +=
1956 pci_bus_res[bus].mem_size;
1957 pci_bus_res[par_bus].pmem_size +=
1958 pci_bus_res[bus].pmem_size;
1959
1960 if (pci_bus_res[bus].io_used != NULL) {
1961 memlist_merge(&pci_bus_res[bus].io_used,
1962 &pci_bus_res[par_bus].io_used);
1963 }
1964
1965 if (pci_bus_res[bus].mem_used != NULL) {
1966 memlist_merge(&pci_bus_res[bus].mem_used,
1967 &pci_bus_res[par_bus].mem_used);
1968 }
1969
1970 if (pci_bus_res[bus].pmem_used != NULL) {
1971 memlist_merge(&pci_bus_res[bus].pmem_used,
1972 &pci_bus_res[par_bus].pmem_used);
1973 }
1974
1975 pci_bus_res[par_bus].num_bridge +=
1976 pci_bus_res[bus].num_bridge;
1977
1978 bus = par_bus;
1979 par_bus = pci_bus_res[par_bus].par_bus;
1980 }
1981 }
1982 }
1983
1984 /*
1985 * As a workaround for devices which is_pciide() (below, which see) would not
1986 * match due to device issues, check an undocumented device tree property
1987 * 'pci-ide', the value of which is a 1275 device identifier.
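* For example, setting the property to 'pci1095,3112' matches a
* Silicon Image 3112 controller by vendor and device ID alone; the
* longer forms that include the subsystem IDs and revision are also
* accepted.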
1988 *
1989 * Should a device matching this (in normal 'compatible' order) be found, and
1990 * the device not otherwise bound, it will have its node name changed to
1991 * 'pci-ide' so the pci-ide driver will attach.
1992 *
1993 * This can be set via `eeprom pci-ide=pciXXXX,YYYY` (see eeprom(8)) or
1994 * otherwise added to bootenv.rc.
1995 */
1996 static boolean_t
1997 check_pciide_prop(uchar_t revid, ushort_t venid, ushort_t devid,
1998 ushort_t subvenid, ushort_t subdevid)
1999 {
2000 static int prop_exist = -1;
2001 static char *pciide_str;
2002 char compat[32];
2003
2004 if (prop_exist == -1) {
2005 prop_exist = (ddi_prop_lookup_string(DDI_DEV_T_ANY,
2006 ddi_root_node(), DDI_PROP_DONTPASS, "pci-ide",
2007 &pciide_str) == DDI_SUCCESS);
2008 }
2009
2010 if (!prop_exist)
2011 return (B_FALSE);
2012
2013 /* compare property value against various forms of compatible */
2014 if (subvenid) {
2015 (void) snprintf(compat, sizeof (compat), "pci%x,%x.%x.%x.%x",
2016 venid, devid, subvenid, subdevid, revid);
2017 if (strcmp(pciide_str, compat) == 0)
2018 return (B_TRUE);
2019
2020 (void) snprintf(compat, sizeof (compat), "pci%x,%x.%x.%x",
2021 venid, devid, subvenid, subdevid);
2022 if (strcmp(pciide_str, compat) == 0)
2023 return (B_TRUE);
2024
2025 (void) snprintf(compat, sizeof (compat), "pci%x,%x",
2026 subvenid, subdevid);
2027 if (strcmp(pciide_str, compat) == 0)
2028 return (B_TRUE);
2029 }
2030 (void) snprintf(compat, sizeof (compat), "pci%x,%x.%x",
2031 venid, devid, revid);
2032 if (strcmp(pciide_str, compat) == 0)
2033 return (B_TRUE);
2034
2035 (void) snprintf(compat, sizeof (compat), "pci%x,%x", venid, devid);
2036 if (strcmp(pciide_str, compat) == 0)
2037 return (B_TRUE);
2038
2039 return (B_FALSE);
2040 }
2041
2042 static boolean_t
2043 is_pciide(const pci_prop_data_t *prop)
2044 {
2045 struct ide_table {
2046 ushort_t venid;
2047 ushort_t devid;
2048 };
2049
2050 /*
2051 * Devices which need to be matched specially as pci-ide because of
2052 * various device issues. Commonly they declare themselves as
2053 * PCI_MASS_OTHER or PCI_MASS_SATA despite our using them in ATA mode.
2054 */
2055 static struct ide_table ide_other[] = {
2056 {0x1095, 0x3112}, /* Silicon Image 3112 SATALink/SATARaid */
2057 {0x1095, 0x3114}, /* Silicon Image 3114 SATALink/SATARaid */
2058 {0x1095, 0x3512}, /* Silicon Image 3512 SATALink/SATARaid */
2059 {0x1095, 0x680}, /* Silicon Image PCI0680 Ultra ATA-133 */
2060 {0x1283, 0x8211} /* Integrated Technology Express 8211F */
2061 };
2062
2063 if (prop->ppd_class != PCI_CLASS_MASS)
2064 return (B_FALSE);
2065
2066 if (prop->ppd_subclass == PCI_MASS_IDE) {
2067 return (B_TRUE);
2068 }
2069
2070 if (check_pciide_prop(prop->ppd_rev, prop->ppd_vendid,
2071 prop->ppd_devid, prop->ppd_subvid, prop->ppd_subsys)) {
2072 return (B_TRUE);
2073 }
2074
2075 if (prop->ppd_subclass != PCI_MASS_OTHER &&
2076 prop->ppd_subclass != PCI_MASS_SATA) {
2077 return (B_FALSE);
2078 }
2079
2080 for (size_t i = 0; i < ARRAY_SIZE(ide_other); i++) {
2081 if (ide_other[i].venid == prop->ppd_vendid &&
2082 ide_other[i].devid == prop->ppd_devid)
2083 return (B_TRUE);
2084 }
2085 return (B_FALSE);
2086 }
2087
2088 static void
2089 add_undofix_entry(uint8_t bus, uint8_t dev, uint8_t fn,
2090 void (*undofn)(uint8_t, uint8_t, uint8_t))
2091 {
2092 struct pci_fixundo *newundo;
2093
2094 newundo = kmem_alloc(sizeof (struct pci_fixundo), KM_SLEEP);
2095
2096 /*
2097 * Adding an item to this list means that we must turn its NMIENABLE
2098 * bit back on at a later time.
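* The undo functions are run, in LIFO order, by undo_pci_fixes().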
2099 */ 2100 newundo->bus = bus; 2101 newundo->dev = dev; 2102 newundo->fn = fn; 2103 newundo->undofn = undofn; 2104 newundo->next = undolist; 2105 2106 /* add to the undo list in LIFO order */ 2107 undolist = newundo; 2108 } 2109 2110 void 2111 add_pci_fixes(void) 2112 { 2113 int i; 2114 2115 for (i = 0; i <= pci_boot_maxbus; i++) { 2116 /* 2117 * For each bus, apply needed fixes to the appropriate devices. 2118 * This must be done before the main enumeration loop because 2119 * some fixes must be applied to devices normally encountered 2120 * later in the pci scan (e.g. if a fix to device 7 must be 2121 * applied before scanning device 6, applying fixes in the 2122 * normal enumeration loop would obviously be too late). 2123 */ 2124 enumerate_bus_devs(i, CONFIG_FIX); 2125 } 2126 } 2127 2128 void 2129 undo_pci_fixes(void) 2130 { 2131 struct pci_fixundo *nextundo; 2132 uint8_t bus, dev, fn; 2133 2134 /* 2135 * All fixes in the undo list are performed unconditionally. Future 2136 * fixes may require selective undo. 2137 */ 2138 while (undolist != NULL) { 2139 2140 bus = undolist->bus; 2141 dev = undolist->dev; 2142 fn = undolist->fn; 2143 2144 (*(undolist->undofn))(bus, dev, fn); 2145 2146 nextundo = undolist->next; 2147 kmem_free(undolist, sizeof (struct pci_fixundo)); 2148 undolist = nextundo; 2149 } 2150 } 2151 2152 static void 2153 undo_amd8111_pci_fix(uint8_t bus, uint8_t dev, uint8_t fn) 2154 { 2155 uint8_t val8; 2156 2157 val8 = pci_getb(bus, dev, fn, LPC_IO_CONTROL_REG_1); 2158 /* 2159 * The NMIONERR bit is turned back on to allow the SMM BIOS 2160 * to handle more critical PCI errors (e.g. PERR#). 2161 */ 2162 val8 |= AMD8111_ENABLENMI; 2163 pci_putb(bus, dev, fn, LPC_IO_CONTROL_REG_1, val8); 2164 } 2165 2166 static void 2167 pci_fix_amd8111(uint8_t bus, uint8_t dev, uint8_t fn) 2168 { 2169 uint8_t val8; 2170 2171 val8 = pci_getb(bus, dev, fn, LPC_IO_CONTROL_REG_1); 2172 2173 if ((val8 & AMD8111_ENABLENMI) == 0) 2174 return; 2175 2176 /* 2177 * We reset NMIONERR in the LPC because master-abort on the PCI 2178 * bridge side of the 8111 will cause NMI, which might cause SMI, 2179 * which sometimes prevents all devices from being enumerated. 2180 */ 2181 val8 &= ~AMD8111_ENABLENMI; 2182 2183 pci_putb(bus, dev, fn, LPC_IO_CONTROL_REG_1, val8); 2184 2185 add_undofix_entry(bus, dev, fn, undo_amd8111_pci_fix); 2186 } 2187 2188 static void 2189 set_devpm_d0(uchar_t bus, uchar_t dev, uchar_t func) 2190 { 2191 uint16_t status; 2192 uint8_t header; 2193 uint8_t cap_ptr; 2194 uint8_t cap_id; 2195 uint16_t pmcsr; 2196 2197 status = pci_getw(bus, dev, func, PCI_CONF_STAT); 2198 if (!(status & PCI_STAT_CAP)) 2199 return; /* No capabilities list */ 2200 2201 header = pci_getb(bus, dev, func, PCI_CONF_HEADER) & PCI_HEADER_TYPE_M; 2202 if (header == PCI_HEADER_CARDBUS) 2203 cap_ptr = pci_getb(bus, dev, func, PCI_CBUS_CAP_PTR); 2204 else 2205 cap_ptr = pci_getb(bus, dev, func, PCI_CONF_CAP_PTR); 2206 /* 2207 * Walk the capabilities list searching for a PM entry. 
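* If one is found, the function is placed into the D0 (fully-on) power
* state by clearing the PowerState field of its PMCSR register.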
2208 */
2209 while (cap_ptr != PCI_CAP_NEXT_PTR_NULL && cap_ptr >= PCI_CAP_PTR_OFF) {
2210 cap_ptr &= PCI_CAP_PTR_MASK;
2211 cap_id = pci_getb(bus, dev, func, cap_ptr + PCI_CAP_ID);
2212 if (cap_id == PCI_CAP_ID_PM) {
2213 pmcsr = pci_getw(bus, dev, func, cap_ptr + PCI_PMCSR);
2214 pmcsr &= ~(PCI_PMCSR_STATE_MASK);
2215 pmcsr |= PCI_PMCSR_D0; /* D0 state */
2216 pci_putw(bus, dev, func, cap_ptr + PCI_PMCSR, pmcsr);
2217 break;
2218 }
2219 cap_ptr = pci_getb(bus, dev, func, cap_ptr + PCI_CAP_NEXT_PTR);
2220 }
2221
2222 }
2223
2224 static void
2225 process_devfunc(uchar_t bus, uchar_t dev, uchar_t func, int config_op)
2226 {
2227 pci_prop_data_t prop_data;
2228 pci_prop_failure_t prop_ret;
2229 dev_info_t *dip;
2230 boolean_t reprogram = B_FALSE;
2231 boolean_t pciide = B_FALSE;
2232 int power[2] = {1, 1};
2233 struct pci_devfunc *devlist = NULL, *entry = NULL;
2234 gfx_entry_t *gfxp;
2235 pcie_req_id_t bdf;
2236
2237 prop_ret = pci_prop_data_fill(NULL, bus, dev, func, &prop_data);
2238 if (prop_ret != PCI_PROP_OK) {
2239 cmn_err(CE_WARN, MSGHDR "failed to get basic PCI data: 0x%x",
2240 "pci", bus, dev, func, prop_ret);
2241 return;
2242 }
2243
2244 if (prop_data.ppd_header == PCI_HEADER_CARDBUS &&
2245 config_op == CONFIG_INFO) {
2246 /* Record the # of cardbus bridges found on the bus */
2247 pci_bus_res[bus].num_cbb++;
2248 }
2249
2250 if (config_op == CONFIG_FIX) {
2251 if (prop_data.ppd_vendid == VENID_AMD &&
2252 prop_data.ppd_devid == DEVID_AMD8111_LPC) {
2253 pci_fix_amd8111(bus, dev, func);
2254 }
2255 return;
2256 }
2257
2258 /* make sure parent bus dip has been created */
2259 if (pci_bus_res[bus].dip == NULL)
2260 create_root_bus_dip(bus);
2261
2262 ndi_devi_alloc_sleep(pci_bus_res[bus].dip, DEVI_PSEUDO_NEXNAME,
2263 DEVI_SID_NODEID, &dip);
2264 prop_ret = pci_prop_name_node(dip, &prop_data);
2265 if (prop_ret != PCI_PROP_OK) {
2266 cmn_err(CE_WARN, MSGHDR "failed to set node name: 0x%x; "
2267 "devinfo node not created", "pci", bus, dev, func,
2268 prop_ret);
2269 (void) ndi_devi_free(dip);
2270 return;
2271 }
2272
2273 bdf = PCI_GETBDF(bus, dev, func);
2274 /*
2275 * Record BAD AMD bridges which don't support MMIO config access.
2276 */
2277 if (IS_BAD_AMD_NTBRIDGE(prop_data.ppd_vendid, prop_data.ppd_devid) ||
2278 IS_AMD_8132_CHIP(prop_data.ppd_vendid, prop_data.ppd_devid)) {
2279 uchar_t secbus = 0;
2280 uchar_t subbus = 0;
2281
2282 if (pci_prop_class_is_pcibridge(&prop_data)) {
2283 secbus = pci_getb(bus, dev, func, PCI_BCNF_SECBUS);
2284 subbus = pci_getb(bus, dev, func, PCI_BCNF_SUBBUS);
2285 }
2286 pci_cfgacc_add_workaround(bdf, secbus, subbus);
2287 }
2288
2289 /*
2290 * Only populate bus_t if this device is sitting under a PCIE root
2291 * complex. Some particular machines have both a PCIE root complex and
2292 * a PCI hostbridge, in which case only devices under the PCIE root
2293 * complex will have their bus_t populated.
2294 */
2295 if (pcie_get_rc_dip(dip) != NULL) {
2296 ck804_fix_aer_ptr(dip, bdf);
2297 (void) pcie_init_bus(dip, bdf, PCIE_BUS_INITIAL);
2298 }
2299
2300 /*
2301 * Go through and set all of the devinfo properties on this function.
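* These are the standard PCI binding properties (vendor and device IDs,
* class code and so on) derived from configuration space.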
2302 */ 2303 prop_ret = pci_prop_set_common_props(dip, &prop_data); 2304 if (prop_ret != PCI_PROP_OK) { 2305 cmn_err(CE_WARN, MSGHDR "failed to set properties: 0x%x; " 2306 "devinfo node not created", "pci", bus, dev, func, 2307 prop_ret); 2308 if (pcie_get_rc_dip(dip) != NULL) { 2309 pcie_fini_bus(dip, PCIE_BUS_FINAL); 2310 } 2311 (void) ndi_devi_free(dip); 2312 return; 2313 } 2314 2315 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, 2316 "power-consumption", power, 2); 2317 2318 /* Set the device PM state to D0 */ 2319 set_devpm_d0(bus, dev, func); 2320 2321 if (pci_prop_class_is_pcibridge(&prop_data)) { 2322 boolean_t pciex = (prop_data.ppd_flags & PCI_PROP_F_PCIE) != 0; 2323 boolean_t is_pci_bridge = pciex && 2324 prop_data.ppd_pcie_type == PCIE_PCIECAP_DEV_TYPE_PCIE2PCI; 2325 add_ppb_props(dip, bus, dev, func, pciex, is_pci_bridge); 2326 } else { 2327 /* 2328 * Record the non-PPB devices on the bus for possible 2329 * reprogramming at 2nd bus enumeration. 2330 * Note: PPB reprogramming is done in fix_ppb_res() 2331 */ 2332 devlist = (struct pci_devfunc *)pci_bus_res[bus].privdata; 2333 entry = kmem_zalloc(sizeof (*entry), KM_SLEEP); 2334 entry->dip = dip; 2335 entry->dev = dev; 2336 entry->func = func; 2337 entry->next = devlist; 2338 pci_bus_res[bus].privdata = entry; 2339 } 2340 2341 if (pci_prop_class_is_ioapic(&prop_data)) { 2342 create_ioapic_node(bus, dev, func, prop_data.ppd_vendid, 2343 prop_data.ppd_devid); 2344 } 2345 2346 /* check for NVIDIA CK8-04/MCP55 based LPC bridge */ 2347 if (NVIDIA_IS_LPC_BRIDGE(prop_data.ppd_vendid, prop_data.ppd_devid) && 2348 dev == 1 && func == 0) { 2349 add_nvidia_isa_bridge_props(dip, bus, dev, func); 2350 /* each LPC bridge has an integrated IOAPIC */ 2351 apic_nvidia_io_max++; 2352 } 2353 2354 prop_ret = pci_prop_set_compatible(dip, &prop_data); 2355 if (prop_ret != PCI_PROP_OK) { 2356 cmn_err(CE_WARN, MSGHDR "failed to set compatible property: " 2357 "0x%x; device may not bind to a driver", "pci", bus, dev, 2358 func, prop_ret); 2359 } 2360 2361 /* 2362 * See if this device is a controller that advertises 2363 * itself to be a standard ATA task file controller, or one that 2364 * has been hard coded. 2365 * 2366 * If it is, check if any other higher precedence driver listed in 2367 * driver_aliases will claim the node by calling 2368 * ddi_compatible_driver_major. If so, clear pciide and do not 2369 * create a pci-ide node or any other special handling. 2370 * 2371 * If another driver does not bind, set the node name to pci-ide 2372 * and then let the special pci-ide handling for registers and 2373 * child pci-ide nodes proceed below. 
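* (ddi_compatible_driver_major() returns (major_t)-1 when no installed
* driver matches any of the node's 'compatible' forms.)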
2374 */ 2375 if (is_pciide(&prop_data)) { 2376 if (ddi_compatible_driver_major(dip, NULL) == (major_t)-1) { 2377 (void) ndi_devi_set_nodename(dip, "pci-ide", 0); 2378 pciide = B_TRUE; 2379 } 2380 } 2381 2382 DEVI_SET_PCI(dip); 2383 reprogram = add_reg_props(dip, bus, dev, func, config_op, pciide); 2384 (void) ndi_devi_bind_driver(dip, 0); 2385 2386 /* special handling for pci-ide */ 2387 if (pciide) { 2388 dev_info_t *cdip; 2389 2390 /* 2391 * Create properties specified by P1275 Working Group 2392 * Proposal #414 Version 1 2393 */ 2394 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, 2395 "device_type", "pci-ide"); 2396 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 2397 "#address-cells", 1); 2398 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 2399 "#size-cells", 0); 2400 2401 /* allocate two child nodes */ 2402 ndi_devi_alloc_sleep(dip, "ide", 2403 (pnode_t)DEVI_SID_NODEID, &cdip); 2404 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cdip, 2405 "reg", 0); 2406 (void) ndi_devi_bind_driver(cdip, 0); 2407 ndi_devi_alloc_sleep(dip, "ide", 2408 (pnode_t)DEVI_SID_NODEID, &cdip); 2409 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cdip, 2410 "reg", 1); 2411 (void) ndi_devi_bind_driver(cdip, 0); 2412 2413 reprogram = B_FALSE; /* don't reprogram pci-ide bridge */ 2414 } 2415 2416 if (pci_prop_class_is_vga(&prop_data)) { 2417 gfxp = kmem_zalloc(sizeof (*gfxp), KM_SLEEP); 2418 gfxp->g_dip = dip; 2419 gfxp->g_prev = NULL; 2420 gfxp->g_next = gfx_devinfo_list; 2421 gfx_devinfo_list = gfxp; 2422 if (gfxp->g_next) 2423 gfxp->g_next->g_prev = gfxp; 2424 } 2425 2426 if (reprogram && (entry != NULL)) 2427 entry->reprogram = B_TRUE; 2428 } 2429 2430 /* 2431 * Adjust the reg properties for a dual channel PCI-IDE device. 2432 * 2433 * NOTE: don't do anything that changes the order of the hard-decodes 2434 * and programmed BARs. The kernel driver depends on these values 2435 * being in this order regardless of whether they're for a 'native' 2436 * mode BAR or not. 2437 */ 2438 /* 2439 * config info for pci-ide devices 2440 */ 2441 static struct { 2442 uchar_t native_mask; /* 0 == 'compatibility' mode, 1 == native */ 2443 uchar_t bar_offset; /* offset for alt status register */ 2444 ushort_t addr; /* compatibility mode base address */ 2445 ushort_t length; /* number of ports for this BAR */ 2446 } pciide_bar[] = { 2447 { 0x01, 0, 0x1f0, 8 }, /* primary lower BAR */ 2448 { 0x01, 2, 0x3f6, 1 }, /* primary upper BAR */ 2449 { 0x04, 0, 0x170, 8 }, /* secondary lower BAR */ 2450 { 0x04, 2, 0x376, 1 } /* secondary upper BAR */ 2451 }; 2452 2453 static boolean_t 2454 pciide_adjust_bar(uchar_t progcl, uint_t bar, uint_t *basep, uint_t *lenp) 2455 { 2456 boolean_t hard_decode = B_FALSE; 2457 2458 /* 2459 * Adjust the base and len for the BARs of the PCI-IDE 2460 * device's primary and secondary controllers. The first 2461 * two BARs are for the primary controller and the next 2462 * two BARs are for the secondary controller. The fifth 2463 * and sixth bars are never adjusted. 
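* For example, when the primary channel is in compatibility mode (its
* native_mask bit is clear in the programming interface), BAR0 is
* forced to the 8-port legacy range at 0x1f0 and BAR1 to the single
* port at 0x3f6, with B_TRUE returned in each case to flag the
* hard-decoded legacy address.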
2464 */ 2465 if (bar <= 3) { 2466 *lenp = pciide_bar[bar].length; 2467 2468 if (progcl & pciide_bar[bar].native_mask) { 2469 *basep += pciide_bar[bar].bar_offset; 2470 } else { 2471 *basep = pciide_bar[bar].addr; 2472 hard_decode = B_TRUE; 2473 } 2474 } 2475 2476 /* 2477 * if either base or len is zero make certain both are zero 2478 */ 2479 if (*basep == 0 || *lenp == 0) { 2480 *basep = 0; 2481 *lenp = 0; 2482 hard_decode = B_FALSE; 2483 } 2484 2485 return (hard_decode); 2486 } 2487 2488 /* 2489 * Where op is one of: 2490 * CONFIG_INFO - first pass, gather what is there. 2491 * CONFIG_UPDATE - second pass, adjust/allocate regions. 2492 * CONFIG_NEW - third pass, allocate regions. 2493 * 2494 * Returns: 2495 * -1 Skip this BAR 2496 * 0 Properties have been assigned 2497 * 1 Properties have been assigned, reprogramming required 2498 */ 2499 static int 2500 add_bar_reg_props(int op, uchar_t bus, uchar_t dev, uchar_t func, uint_t bar, 2501 ushort_t offset, pci_regspec_t *regs, pci_regspec_t *assigned, 2502 ushort_t *bar_sz, boolean_t pciide) 2503 { 2504 uint8_t baseclass, subclass, progclass; 2505 uint32_t base, devloc; 2506 uint16_t command = 0; 2507 int reprogram = 0; 2508 uint64_t value; 2509 2510 devloc = PCI_REG_MAKE_BDFR(bus, dev, func, 0); 2511 baseclass = pci_getb(bus, dev, func, PCI_CONF_BASCLASS); 2512 subclass = pci_getb(bus, dev, func, PCI_CONF_SUBCLASS); 2513 progclass = pci_getb(bus, dev, func, PCI_CONF_PROGCLASS); 2514 2515 /* 2516 * Determine the size of the BAR by writing 0xffffffff to the base 2517 * register and reading the value back before restoring the original. 2518 * 2519 * For non-bridges, disable I/O and Memory access while doing this to 2520 * avoid difficulty with USB emulation (see OHCI spec1.0a appendix B 2521 * "Host Controller Mapping"). Doing this for bridges would have the 2522 * side-effect of making the bridge transparent to secondary-bus 2523 * activity (see sections 4.1-4.3 of the PCI-PCI Bridge Spec V1.2). 
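* For example, a 32-bit memory BAR whose masked read-back is
* 0xfffff000 decodes a 4KB window; BARMASKTOLEN() converts that mask
* into the corresponding length.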
2524 */ 2525 base = pci_getl(bus, dev, func, offset); 2526 2527 if (baseclass != PCI_CLASS_BRIDGE) { 2528 command = (uint_t)pci_getw(bus, dev, func, PCI_CONF_COMM); 2529 pci_putw(bus, dev, func, PCI_CONF_COMM, 2530 command & ~(PCI_COMM_MAE | PCI_COMM_IO)); 2531 } 2532 2533 pci_putl(bus, dev, func, offset, 0xffffffff); 2534 value = pci_getl(bus, dev, func, offset); 2535 pci_putl(bus, dev, func, offset, base); 2536 2537 if (baseclass != PCI_CLASS_BRIDGE) 2538 pci_putw(bus, dev, func, PCI_CONF_COMM, command); 2539 2540 /* I/O Space */ 2541 if ((pciide && bar < 4) || (base & PCI_BASE_SPACE_IO) != 0) { 2542 struct memlist **io_avail = &pci_bus_res[bus].io_avail; 2543 struct memlist **io_used = &pci_bus_res[bus].io_used; 2544 boolean_t hard_decode = B_FALSE; 2545 uint_t type, len; 2546 2547 *bar_sz = PCI_BAR_SZ_32; 2548 value &= PCI_BASE_IO_ADDR_M; 2549 len = BARMASKTOLEN(value); 2550 2551 /* XXX Adjust first 4 IDE registers */ 2552 if (pciide) { 2553 if (subclass != PCI_MASS_IDE) { 2554 progclass = (PCI_IDE_IF_NATIVE_PRI | 2555 PCI_IDE_IF_NATIVE_SEC); 2556 } 2557 hard_decode = pciide_adjust_bar(progclass, bar, 2558 &base, &len); 2559 } else if (value == 0) { 2560 /* skip base regs with size of 0 */ 2561 return (-1); 2562 } 2563 2564 regs->pci_phys_hi = PCI_ADDR_IO | devloc; 2565 if (hard_decode) { 2566 regs->pci_phys_hi |= PCI_RELOCAT_B; 2567 regs->pci_phys_low = base & PCI_BASE_IO_ADDR_M; 2568 } else { 2569 regs->pci_phys_hi |= offset; 2570 regs->pci_phys_low = 0; 2571 } 2572 assigned->pci_phys_hi = PCI_RELOCAT_B | regs->pci_phys_hi; 2573 regs->pci_size_low = assigned->pci_size_low = len; 2574 2575 /* 2576 * 'type' holds the non-address part of the base to be re-added 2577 * to any new address in the programming step below. 2578 */ 2579 type = base & ~PCI_BASE_IO_ADDR_M; 2580 base &= PCI_BASE_IO_ADDR_M; 2581 2582 /* 2583 * A device under a subtractive PPB can allocate resources from 2584 * its parent bus if there is no resource available on its own 2585 * bus. 2586 */ 2587 if (op == CONFIG_NEW && pci_bus_res[bus].subtractive && 2588 *io_avail == NULL) { 2589 uchar_t res_bus; 2590 2591 res_bus = resolve_alloc_bus(bus, RES_IO); 2592 io_avail = &pci_bus_res[res_bus].io_avail; 2593 } 2594 2595 if (op == CONFIG_INFO) { /* first pass */ 2596 /* take out of the resource map of the bus */ 2597 if (base != 0) { 2598 (void) memlist_remove(io_avail, base, len); 2599 memlist_insert(io_used, base, len); 2600 } else { 2601 reprogram = 1; 2602 } 2603 dcmn_err(CE_NOTE, 2604 MSGHDR "BAR%u I/O FWINIT 0x%x ~ 0x%x", 2605 "pci", bus, dev, func, bar, base, len); 2606 pci_bus_res[bus].io_size += len; 2607 } else if ((*io_avail != NULL && base == 0) || 2608 pci_bus_res[bus].io_reprogram) { 2609 base = memlist_find(io_avail, len, len); 2610 if (base == 0) { 2611 cmn_err(CE_WARN, MSGHDR "BAR%u I/O " 2612 "failed to find length 0x%x", 2613 "pci", bus, dev, func, bar, len); 2614 } else { 2615 uint32_t nbase; 2616 2617 cmn_err(CE_NOTE, "!" MSGHDR "BAR%u " 2618 "I/O REPROG 0x%x ~ 0x%x", 2619 "pci", bus, dev, func, 2620 bar, base, len); 2621 pci_putl(bus, dev, func, offset, base | type); 2622 nbase = pci_getl(bus, dev, func, offset); 2623 nbase &= PCI_BASE_IO_ADDR_M; 2624 2625 if (base != nbase) { 2626 cmn_err(CE_NOTE, "!" 
MSGHDR "BAR%u " 2627 "I/O REPROG 0x%x ~ 0x%x " 2628 "FAILED READBACK 0x%x", 2629 "pci", bus, dev, func, 2630 bar, base, len, nbase); 2631 pci_putl(bus, dev, func, offset, 0); 2632 if (baseclass != PCI_CLASS_BRIDGE) { 2633 /* Disable PCI_COMM_IO bit */ 2634 command = pci_getw(bus, dev, 2635 func, PCI_CONF_COMM); 2636 command &= ~PCI_COMM_IO; 2637 pci_putw(bus, dev, func, 2638 PCI_CONF_COMM, command); 2639 } 2640 memlist_insert(io_avail, base, len); 2641 base = 0; 2642 } else { 2643 memlist_insert(io_used, base, len); 2644 } 2645 } 2646 } 2647 assigned->pci_phys_low = base; 2648 2649 } else { /* Memory space */ 2650 struct memlist **mem_avail = &pci_bus_res[bus].mem_avail; 2651 struct memlist **mem_used = &pci_bus_res[bus].mem_used; 2652 struct memlist **pmem_avail = &pci_bus_res[bus].pmem_avail; 2653 struct memlist **pmem_used = &pci_bus_res[bus].pmem_used; 2654 uint_t type, base_hi, phys_hi; 2655 uint64_t len, fbase; 2656 2657 if ((base & PCI_BASE_TYPE_M) == PCI_BASE_TYPE_ALL) { 2658 *bar_sz = PCI_BAR_SZ_64; 2659 base_hi = pci_getl(bus, dev, func, offset + 4); 2660 pci_putl(bus, dev, func, offset + 4, 2661 0xffffffff); 2662 value |= (uint64_t)pci_getl(bus, dev, func, 2663 offset + 4) << 32; 2664 pci_putl(bus, dev, func, offset + 4, base_hi); 2665 phys_hi = PCI_ADDR_MEM64; 2666 value &= PCI_BASE_M_ADDR64_M; 2667 } else { 2668 *bar_sz = PCI_BAR_SZ_32; 2669 base_hi = 0; 2670 phys_hi = PCI_ADDR_MEM32; 2671 value &= PCI_BASE_M_ADDR_M; 2672 } 2673 2674 /* skip base regs with size of 0 */ 2675 if (value == 0) 2676 return (-1); 2677 2678 len = BARMASKTOLEN(value); 2679 regs->pci_size_low = assigned->pci_size_low = len & 0xffffffff; 2680 regs->pci_size_hi = assigned->pci_size_hi = len >> 32; 2681 2682 phys_hi |= devloc | offset; 2683 if (base & PCI_BASE_PREF_M) 2684 phys_hi |= PCI_PREFETCH_B; 2685 2686 /* 2687 * A device under a subtractive PPB can allocate resources from 2688 * its parent bus if there is no resource available on its own 2689 * bus. 2690 */ 2691 if (op == CONFIG_NEW && pci_bus_res[bus].subtractive) { 2692 uchar_t res_bus = bus; 2693 2694 if ((phys_hi & PCI_PREFETCH_B) != 0 && 2695 *pmem_avail == NULL) { 2696 res_bus = resolve_alloc_bus(bus, RES_PMEM); 2697 pmem_avail = &pci_bus_res[res_bus].pmem_avail; 2698 mem_avail = &pci_bus_res[res_bus].mem_avail; 2699 } else if (*mem_avail == NULL) { 2700 res_bus = resolve_alloc_bus(bus, RES_MEM); 2701 pmem_avail = &pci_bus_res[res_bus].pmem_avail; 2702 mem_avail = &pci_bus_res[res_bus].mem_avail; 2703 } 2704 } 2705 2706 regs->pci_phys_hi = assigned->pci_phys_hi = phys_hi; 2707 assigned->pci_phys_hi |= PCI_RELOCAT_B; 2708 2709 /* 2710 * 'type' holds the non-address part of the base to be re-added 2711 * to any new address in the programming step below. 2712 */ 2713 type = base & ~PCI_BASE_M_ADDR_M; 2714 base &= PCI_BASE_M_ADDR_M; 2715 2716 fbase = (((uint64_t)base_hi) << 32) | base; 2717 2718 if (op == CONFIG_INFO) { 2719 2720 dcmn_err(CE_NOTE, 2721 MSGHDR "BAR%u %sMEM FWINIT 0x%lx ~ 0x%lx%s", 2722 "pci", bus, dev, func, bar, 2723 (phys_hi & PCI_PREFETCH_B) ? "P" : " ", 2724 fbase, len, 2725 *bar_sz == PCI_BAR_SZ_64 ? 
" (64-bit)" : ""); 2726 2727 /* take out of the resource map of the bus */ 2728 if (fbase != 0) { 2729 /* remove from PMEM and MEM space */ 2730 (void) memlist_remove(mem_avail, fbase, len); 2731 (void) memlist_remove(pmem_avail, fbase, len); 2732 /* only note as used in correct map */ 2733 if ((phys_hi & PCI_PREFETCH_B) != 0) 2734 memlist_insert(pmem_used, fbase, len); 2735 else 2736 memlist_insert(mem_used, fbase, len); 2737 } else { 2738 reprogram = 1; 2739 /* 2740 * If we need to reprogram this because we 2741 * don't have a BAR assigned, we need to 2742 * actually increase the amount of memory that 2743 * we request to take into account alignment. 2744 * This is a bit gross, but by doubling the 2745 * request size we are more likely to get the 2746 * size that we need. A more involved fix would 2747 * require a smarter and more involved 2748 * allocator (something we will need 2749 * eventually). 2750 */ 2751 len *= 2; 2752 } 2753 2754 if (phys_hi & PCI_PREFETCH_B) 2755 pci_bus_res[bus].pmem_size += len; 2756 else 2757 pci_bus_res[bus].mem_size += len; 2758 } else if (pci_bus_res[bus].mem_reprogram || (fbase == 0 && 2759 (*mem_avail != NULL || *pmem_avail != NULL))) { 2760 boolean_t pf = B_FALSE; 2761 fbase = 0; 2762 2763 /* 2764 * When desired, attempt a prefetchable allocation first 2765 */ 2766 if ((phys_hi & PCI_PREFETCH_B) != 0 && 2767 *pmem_avail != NULL) { 2768 fbase = memlist_find(pmem_avail, len, len); 2769 if (fbase != 0) 2770 pf = B_TRUE; 2771 } 2772 /* 2773 * If prefetchable allocation was not desired, or 2774 * failed, attempt ordinary memory allocation. 2775 */ 2776 if (fbase == 0 && *mem_avail != NULL) 2777 fbase = memlist_find(mem_avail, len, len); 2778 2779 base_hi = fbase >> 32; 2780 base = fbase & 0xffffffff; 2781 2782 if (fbase == 0) { 2783 cmn_err(CE_WARN, MSGHDR "BAR%u MEM " 2784 "failed to find length 0x%lx", 2785 "pci", bus, dev, func, bar, len); 2786 } else { 2787 uint64_t nbase, nbase_hi = 0; 2788 2789 cmn_err(CE_NOTE, "!" MSGHDR "BAR%u " 2790 "%s%s REPROG 0x%lx ~ 0x%lx", 2791 "pci", bus, dev, func, bar, 2792 pf ? "PMEM" : "MEM", 2793 *bar_sz == PCI_BAR_SZ_64 ? "64" : "", 2794 fbase, len); 2795 pci_putl(bus, dev, func, offset, base | type); 2796 nbase = pci_getl(bus, dev, func, offset); 2797 2798 if (*bar_sz == PCI_BAR_SZ_64) { 2799 pci_putl(bus, dev, func, 2800 offset + 4, base_hi); 2801 nbase_hi = pci_getl(bus, dev, func, 2802 offset + 4); 2803 } 2804 2805 nbase &= PCI_BASE_M_ADDR_M; 2806 2807 if (base != nbase || base_hi != nbase_hi) { 2808 cmn_err(CE_NOTE, "!" MSGHDR "BAR%u " 2809 "%s%s REPROG 0x%lx ~ 0x%lx " 2810 "FAILED READBACK 0x%lx", 2811 "pci", bus, dev, func, bar, 2812 pf ? "PMEM" : "MEM", 2813 *bar_sz == PCI_BAR_SZ_64 ? 2814 "64" : "", 2815 fbase, len, 2816 nbase_hi << 32 | nbase); 2817 2818 pci_putl(bus, dev, func, offset, 0); 2819 if (*bar_sz == PCI_BAR_SZ_64) { 2820 pci_putl(bus, dev, func, 2821 offset + 4, 0); 2822 } 2823 2824 if (baseclass != PCI_CLASS_BRIDGE) { 2825 /* Disable PCI_COMM_MAE bit */ 2826 command = pci_getw(bus, dev, 2827 func, PCI_CONF_COMM); 2828 command &= ~PCI_COMM_MAE; 2829 pci_putw(bus, dev, func, 2830 PCI_CONF_COMM, command); 2831 } 2832 2833 memlist_insert( 2834 pf ? 
pmem_avail : mem_avail, 2835 base, len); 2836 base = base_hi = 0; 2837 } else { 2838 if (pf) { 2839 memlist_insert(pmem_used, 2840 fbase, len); 2841 (void) memlist_remove( 2842 pmem_avail, fbase, len); 2843 } else { 2844 memlist_insert(mem_used, 2845 fbase, len); 2846 (void) memlist_remove( 2847 mem_avail, fbase, len); 2848 } 2849 } 2850 } 2851 } 2852 2853 assigned->pci_phys_mid = base_hi; 2854 assigned->pci_phys_low = base; 2855 } 2856 2857 dcmn_err(CE_NOTE, MSGHDR "BAR%u ---- %08x.%x.%x.%x.%x", 2858 "pci", bus, dev, func, bar, 2859 assigned->pci_phys_hi, 2860 assigned->pci_phys_mid, 2861 assigned->pci_phys_low, 2862 assigned->pci_size_hi, 2863 assigned->pci_size_low); 2864 2865 return (reprogram); 2866 } 2867 2868 /* 2869 * Add the "reg" and "assigned-addresses" property 2870 */ 2871 static boolean_t 2872 add_reg_props(dev_info_t *dip, uchar_t bus, uchar_t dev, uchar_t func, 2873 int op, boolean_t pciide) 2874 { 2875 uchar_t baseclass, subclass, progclass, header; 2876 uint_t bar, value, devloc, base; 2877 ushort_t bar_sz, offset, end; 2878 int max_basereg, reprogram = B_FALSE; 2879 2880 struct memlist **io_avail, **io_used; 2881 struct memlist **mem_avail, **mem_used; 2882 struct memlist **pmem_avail; 2883 2884 pci_regspec_t regs[16] = {{0}}; 2885 pci_regspec_t assigned[15] = {{0}}; 2886 int nreg, nasgn; 2887 2888 io_avail = &pci_bus_res[bus].io_avail; 2889 io_used = &pci_bus_res[bus].io_used; 2890 mem_avail = &pci_bus_res[bus].mem_avail; 2891 mem_used = &pci_bus_res[bus].mem_used; 2892 pmem_avail = &pci_bus_res[bus].pmem_avail; 2893 2894 dump_memlists("add_reg_props start", bus); 2895 2896 devloc = PCI_REG_MAKE_BDFR(bus, dev, func, 0); 2897 regs[0].pci_phys_hi = devloc; 2898 nreg = 1; /* rest of regs[0] is all zero */ 2899 nasgn = 0; 2900 2901 baseclass = pci_getb(bus, dev, func, PCI_CONF_BASCLASS); 2902 subclass = pci_getb(bus, dev, func, PCI_CONF_SUBCLASS); 2903 progclass = pci_getb(bus, dev, func, PCI_CONF_PROGCLASS); 2904 header = pci_getb(bus, dev, func, PCI_CONF_HEADER) & PCI_HEADER_TYPE_M; 2905 2906 switch (header) { 2907 case PCI_HEADER_ZERO: 2908 max_basereg = PCI_BASE_NUM; 2909 break; 2910 case PCI_HEADER_PPB: 2911 max_basereg = PCI_BCNF_BASE_NUM; 2912 break; 2913 case PCI_HEADER_CARDBUS: 2914 max_basereg = PCI_CBUS_BASE_NUM; 2915 reprogram = B_TRUE; 2916 break; 2917 default: 2918 max_basereg = 0; 2919 break; 2920 } 2921 2922 end = PCI_CONF_BASE0 + max_basereg * sizeof (uint_t); 2923 for (bar = 0, offset = PCI_CONF_BASE0; offset < end; 2924 bar++, offset += bar_sz) { 2925 int ret; 2926 2927 ret = add_bar_reg_props(op, bus, dev, func, bar, offset, 2928 ®s[nreg], &assigned[nasgn], &bar_sz, pciide); 2929 2930 if (bar_sz == PCI_BAR_SZ_64) 2931 bar++; 2932 2933 if (ret == -1) /* Skip BAR */ 2934 continue; 2935 2936 if (ret == 1) 2937 reprogram = B_TRUE; 2938 2939 nreg++; 2940 nasgn++; 2941 } 2942 2943 switch (header) { 2944 case PCI_HEADER_ZERO: 2945 offset = PCI_CONF_ROM; 2946 break; 2947 case PCI_HEADER_PPB: 2948 offset = PCI_BCNF_ROM; 2949 break; 2950 default: /* including PCI_HEADER_CARDBUS */ 2951 goto done; 2952 } 2953 2954 /* 2955 * Add the expansion rom memory space 2956 * Determine the size of the ROM base reg; don't write reserved bits 2957 * ROM isn't in the PCI memory space. 
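* The original value is restored once the size has been probed, and
* the range is only recorded below (and removed from the available
* memory list) if the ROM reports itself as enabled.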
2958 */ 2959 base = pci_getl(bus, dev, func, offset); 2960 pci_putl(bus, dev, func, offset, PCI_BASE_ROM_ADDR_M); 2961 value = pci_getl(bus, dev, func, offset); 2962 pci_putl(bus, dev, func, offset, base); 2963 if (value & PCI_BASE_ROM_ENABLE) 2964 value &= PCI_BASE_ROM_ADDR_M; 2965 else 2966 value = 0; 2967 2968 if (value != 0) { 2969 uint_t len; 2970 2971 regs[nreg].pci_phys_hi = (PCI_ADDR_MEM32 | devloc) + offset; 2972 assigned[nasgn].pci_phys_hi = (PCI_RELOCAT_B | 2973 PCI_ADDR_MEM32 | devloc) + offset; 2974 base &= PCI_BASE_ROM_ADDR_M; 2975 assigned[nasgn].pci_phys_low = base; 2976 len = BARMASKTOLEN(value); 2977 regs[nreg].pci_size_low = assigned[nasgn].pci_size_low = len; 2978 nreg++, nasgn++; 2979 /* take it out of the memory resource */ 2980 if (base != 0) { 2981 (void) memlist_remove(mem_avail, base, len); 2982 memlist_insert(mem_used, base, len); 2983 pci_bus_res[bus].mem_size += len; 2984 } 2985 } 2986 2987 /* 2988 * Account for "legacy" (alias) video adapter resources 2989 */ 2990 2991 /* add the three hard-decode, aliased address spaces for VGA */ 2992 if ((baseclass == PCI_CLASS_DISPLAY && subclass == PCI_DISPLAY_VGA) || 2993 (baseclass == PCI_CLASS_NONE && subclass == PCI_NONE_VGA)) { 2994 2995 /* VGA hard decode 0x3b0-0x3bb */ 2996 regs[nreg].pci_phys_hi = assigned[nasgn].pci_phys_hi = 2997 (PCI_RELOCAT_B | PCI_ALIAS_B | PCI_ADDR_IO | devloc); 2998 regs[nreg].pci_phys_low = assigned[nasgn].pci_phys_low = 0x3b0; 2999 regs[nreg].pci_size_low = assigned[nasgn].pci_size_low = 0xc; 3000 nreg++, nasgn++; 3001 (void) memlist_remove(io_avail, 0x3b0, 0xc); 3002 memlist_insert(io_used, 0x3b0, 0xc); 3003 pci_bus_res[bus].io_size += 0xc; 3004 3005 /* VGA hard decode 0x3c0-0x3df */ 3006 regs[nreg].pci_phys_hi = assigned[nasgn].pci_phys_hi = 3007 (PCI_RELOCAT_B | PCI_ALIAS_B | PCI_ADDR_IO | devloc); 3008 regs[nreg].pci_phys_low = assigned[nasgn].pci_phys_low = 0x3c0; 3009 regs[nreg].pci_size_low = assigned[nasgn].pci_size_low = 0x20; 3010 nreg++, nasgn++; 3011 (void) memlist_remove(io_avail, 0x3c0, 0x20); 3012 memlist_insert(io_used, 0x3c0, 0x20); 3013 pci_bus_res[bus].io_size += 0x20; 3014 3015 /* Video memory */ 3016 regs[nreg].pci_phys_hi = assigned[nasgn].pci_phys_hi = 3017 (PCI_RELOCAT_B | PCI_ALIAS_B | PCI_ADDR_MEM32 | devloc); 3018 regs[nreg].pci_phys_low = 3019 assigned[nasgn].pci_phys_low = 0xa0000; 3020 regs[nreg].pci_size_low = 3021 assigned[nasgn].pci_size_low = 0x20000; 3022 nreg++, nasgn++; 3023 /* remove from MEM and PMEM space */ 3024 (void) memlist_remove(mem_avail, 0xa0000, 0x20000); 3025 (void) memlist_remove(pmem_avail, 0xa0000, 0x20000); 3026 memlist_insert(mem_used, 0xa0000, 0x20000); 3027 pci_bus_res[bus].mem_size += 0x20000; 3028 } 3029 3030 /* add the hard-decode, aliased address spaces for 8514 */ 3031 if ((baseclass == PCI_CLASS_DISPLAY) && 3032 (subclass == PCI_DISPLAY_VGA) && 3033 (progclass & PCI_DISPLAY_IF_8514)) { 3034 3035 /* hard decode 0x2e8 */ 3036 regs[nreg].pci_phys_hi = assigned[nasgn].pci_phys_hi = 3037 (PCI_RELOCAT_B | PCI_ALIAS_B | PCI_ADDR_IO | devloc); 3038 regs[nreg].pci_phys_low = assigned[nasgn].pci_phys_low = 0x2e8; 3039 regs[nreg].pci_size_low = assigned[nasgn].pci_size_low = 0x1; 3040 nreg++, nasgn++; 3041 (void) memlist_remove(io_avail, 0x2e8, 0x1); 3042 memlist_insert(io_used, 0x2e8, 0x1); 3043 pci_bus_res[bus].io_size += 0x1; 3044 3045 /* hard decode 0x2ea-0x2ef */ 3046 regs[nreg].pci_phys_hi = assigned[nasgn].pci_phys_hi = 3047 (PCI_RELOCAT_B | PCI_ALIAS_B | PCI_ADDR_IO | devloc); 3048 regs[nreg].pci_phys_low = 
assigned[nasgn].pci_phys_low = 0x2ea; 3049 regs[nreg].pci_size_low = assigned[nasgn].pci_size_low = 0x6; 3050 nreg++, nasgn++; 3051 (void) memlist_remove(io_avail, 0x2ea, 0x6); 3052 memlist_insert(io_used, 0x2ea, 0x6); 3053 pci_bus_res[bus].io_size += 0x6; 3054 } 3055 3056 done: 3057 dump_memlists("add_reg_props end", bus); 3058 3059 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "reg", 3060 (int *)regs, nreg * sizeof (pci_regspec_t) / sizeof (int)); 3061 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, 3062 "assigned-addresses", 3063 (int *)assigned, nasgn * sizeof (pci_regspec_t) / sizeof (int)); 3064 3065 return (reprogram); 3066 } 3067 3068 static void 3069 add_ppb_props(dev_info_t *dip, uchar_t bus, uchar_t dev, uchar_t func, 3070 boolean_t pciex, boolean_t is_pci_bridge) 3071 { 3072 char *dev_type; 3073 int i; 3074 uint_t cmd_reg; 3075 struct { 3076 uint64_t base; 3077 uint64_t limit; 3078 } io, mem, pmem; 3079 uchar_t secbus, subbus; 3080 uchar_t progclass; 3081 3082 secbus = pci_getb(bus, dev, func, PCI_BCNF_SECBUS); 3083 subbus = pci_getb(bus, dev, func, PCI_BCNF_SUBBUS); 3084 ASSERT3U(secbus, <=, subbus); 3085 3086 dump_memlists("add_ppb_props start bus", bus); 3087 dump_memlists("add_ppb_props start secbus", secbus); 3088 3089 /* 3090 * Check if it's a subtractive PPB. 3091 */ 3092 progclass = pci_getb(bus, dev, func, PCI_CONF_PROGCLASS); 3093 if (progclass == PCI_BRIDGE_PCI_IF_SUBDECODE) 3094 pci_bus_res[secbus].subtractive = B_TRUE; 3095 3096 /* 3097 * Some firmware lies about max pci busses, we allow for 3098 * such mistakes here 3099 */ 3100 if (subbus > pci_boot_maxbus) { 3101 pci_boot_maxbus = subbus; 3102 alloc_res_array(); 3103 } 3104 3105 ASSERT(pci_bus_res[secbus].dip == NULL); 3106 pci_bus_res[secbus].dip = dip; 3107 pci_bus_res[secbus].par_bus = bus; 3108 3109 dev_type = (pciex && !is_pci_bridge) ? "pciex" : "pci"; 3110 3111 /* set up bus number hierarchy */ 3112 pci_bus_res[secbus].sub_bus = subbus; 3113 /* 3114 * Keep track of the largest subordinate bus number (this is essential 3115 * for peer busses because there is no other way of determining its 3116 * subordinate bus number). 3117 */ 3118 if (subbus > pci_bus_res[bus].sub_bus) 3119 pci_bus_res[bus].sub_bus = subbus; 3120 /* 3121 * Loop through subordinate busses, initializing their parent bus 3122 * field to this bridge's parent. The subordinate busses' parent 3123 * fields may very well be further refined later, as child bridges 3124 * are enumerated. (The value is to note that the subordinate busses 3125 * are not peer busses by changing their par_bus fields to anything 3126 * other than -1.) 3127 */ 3128 for (i = secbus + 1; i <= subbus; i++) 3129 pci_bus_res[i].par_bus = bus; 3130 3131 /* 3132 * Update the number of bridges on the bus. 3133 */ 3134 if (!is_pci_bridge) 3135 pci_bus_res[bus].num_bridge++; 3136 3137 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, 3138 "device_type", dev_type); 3139 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 3140 "#address-cells", 3); 3141 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 3142 "#size-cells", 2); 3143 3144 /* 3145 * Collect bridge window specifications, and use them to populate 3146 * the "avail" resources for the bus. Not all of those resources will 3147 * end up being available; this is done top-down, and so the initial 3148 * collection of windows populates the 'ranges' property for the 3149 * bus node. 
Later, as children are found, resources are removed from
3150 * the 'avail' list, so that it becomes the freelist for
3151 * this point in the tree. ranges may be set again after bridge
3152 * reprogramming in fix_ppb_res(), in which case it's set from
3153 * used + avail.
3154 *
3155 * According to PPB spec, the base register should be programmed
3156 * with a value bigger than the limit register when there are
3157 * no resources available. This applies to io, memory, and
3158 * prefetchable memory.
3159 */
3160
3161 cmd_reg = (uint_t)pci_getw(bus, dev, func, PCI_CONF_COMM);
3162 fetch_ppb_res(bus, dev, func, RES_IO, &io.base, &io.limit);
3163 fetch_ppb_res(bus, dev, func, RES_MEM, &mem.base, &mem.limit);
3164 fetch_ppb_res(bus, dev, func, RES_PMEM, &pmem.base, &pmem.limit);
3165
3166 if (pci_boot_debug != 0) {
3167 dcmn_err(CE_NOTE, MSGHDR " I/O FWINIT 0x%lx ~ 0x%lx%s",
3168 "ppb", bus, dev, func, io.base, io.limit,
3169 io.base > io.limit ? " (disabled)" : "");
3170 dcmn_err(CE_NOTE, MSGHDR " MEM FWINIT 0x%lx ~ 0x%lx%s",
3171 "ppb", bus, dev, func, mem.base, mem.limit,
3172 mem.base > mem.limit ? " (disabled)" : "");
3173 dcmn_err(CE_NOTE, MSGHDR "PMEM FWINIT 0x%lx ~ 0x%lx%s",
3174 "ppb", bus, dev, func, pmem.base, pmem.limit,
3175 pmem.base > pmem.limit ? " (disabled)" : "");
3176 }
3177
3178 /*
3179 * I/O range
3180 *
3181 * If the command register I/O enable bit is not set then we assume
3182 * that the I/O windows have been left unconfigured by system firmware.
3183 * In that case we leave it disabled and additionally set base > limit
3184 * to indicate there are no initial resources available and
3185 * to trigger later reconfiguration.
3186 */
3187 if ((cmd_reg & PCI_COMM_IO) == 0) {
3188 io.base = PPB_DISABLE_IORANGE_BASE;
3189 io.limit = PPB_DISABLE_IORANGE_LIMIT;
3190 set_ppb_res(bus, dev, func, RES_IO, io.base, io.limit);
3191 } else if (io.base < io.limit) {
3192 uint64_t size = io.limit - io.base + 1;
3193
3194 memlist_insert(&pci_bus_res[secbus].io_avail, io.base, size);
3195 memlist_insert(&pci_bus_res[bus].io_used, io.base, size);
3196
3197 if (pci_bus_res[bus].io_avail != NULL) {
3198 (void) memlist_remove(&pci_bus_res[bus].io_avail,
3199 io.base, size);
3200 }
3201 }
3202
3203 /*
3204 * Memory range
3205 *
3206 * It is possible that the mem range will also have been left
3207 * unconfigured by system firmware. As for the I/O range, we check for
3208 * this by looking at the relevant bit in the command register (Memory
3209 * Access Enable in this case) but we also check if the base address is
3210 * 0, indicating that it is still at PCIe defaults. While 0 technically
3211 * could be a valid base address, it is unlikely.
3212 */
3213 if ((cmd_reg & PCI_COMM_MAE) == 0 || mem.base == 0) {
3214 mem.base = PPB_DISABLE_MEMRANGE_BASE;
3215 mem.limit = PPB_DISABLE_MEMRANGE_LIMIT;
3216 set_ppb_res(bus, dev, func, RES_MEM, mem.base, mem.limit);
3217 } else if (mem.base < mem.limit) {
3218 uint64_t size = mem.limit - mem.base + 1;
3219
3220 memlist_insert(&pci_bus_res[secbus].mem_avail, mem.base, size);
3221 memlist_insert(&pci_bus_res[bus].mem_used, mem.base, size);
3222 /* remove from parent resource list */
3223 (void) memlist_remove(&pci_bus_res[bus].mem_avail,
3224 mem.base, size);
3225 (void) memlist_remove(&pci_bus_res[bus].pmem_avail,
3226 mem.base, size);
3227 }
3228
3229 /*
3230 * Prefetchable range - as per MEM range above.
3231 */ 3232 if ((cmd_reg & PCI_COMM_MAE) == 0 || pmem.base == 0) { 3233 pmem.base = PPB_DISABLE_MEMRANGE_BASE; 3234 pmem.limit = PPB_DISABLE_MEMRANGE_LIMIT; 3235 set_ppb_res(bus, dev, func, RES_PMEM, pmem.base, pmem.limit); 3236 } else if (pmem.base < pmem.limit) { 3237 uint64_t size = pmem.limit - pmem.base + 1; 3238 3239 memlist_insert(&pci_bus_res[secbus].pmem_avail, 3240 pmem.base, size); 3241 memlist_insert(&pci_bus_res[bus].pmem_used, pmem.base, size); 3242 /* remove from parent resource list */ 3243 (void) memlist_remove(&pci_bus_res[bus].pmem_avail, 3244 pmem.base, size); 3245 (void) memlist_remove(&pci_bus_res[bus].mem_avail, 3246 pmem.base, size); 3247 } 3248 3249 /* 3250 * Add VGA legacy resources to the bridge's pci_bus_res if it 3251 * has VGA_ENABLE set. Note that we put them in 'avail', 3252 * because that's used to populate the ranges prop; they'll be 3253 * removed from there by the VGA device once it's found. Also, 3254 * remove them from the parent's available list and note them as 3255 * used in the parent. 3256 */ 3257 3258 if (pci_getw(bus, dev, func, PCI_BCNF_BCNTRL) & 3259 PCI_BCNF_BCNTRL_VGA_ENABLE) { 3260 3261 memlist_insert(&pci_bus_res[secbus].io_avail, 0x3b0, 0xc); 3262 3263 memlist_insert(&pci_bus_res[bus].io_used, 0x3b0, 0xc); 3264 if (pci_bus_res[bus].io_avail != NULL) { 3265 (void) memlist_remove(&pci_bus_res[bus].io_avail, 3266 0x3b0, 0xc); 3267 } 3268 3269 memlist_insert(&pci_bus_res[secbus].io_avail, 0x3c0, 0x20); 3270 3271 memlist_insert(&pci_bus_res[bus].io_used, 0x3c0, 0x20); 3272 if (pci_bus_res[bus].io_avail != NULL) { 3273 (void) memlist_remove(&pci_bus_res[bus].io_avail, 3274 0x3c0, 0x20); 3275 } 3276 3277 memlist_insert(&pci_bus_res[secbus].mem_avail, 0xa0000, 3278 0x20000); 3279 3280 memlist_insert(&pci_bus_res[bus].mem_used, 0xa0000, 0x20000); 3281 if (pci_bus_res[bus].mem_avail != NULL) { 3282 (void) memlist_remove(&pci_bus_res[bus].mem_avail, 3283 0xa0000, 0x20000); 3284 } 3285 } 3286 add_bus_range_prop(secbus); 3287 add_ranges_prop(secbus, B_TRUE); 3288 3289 dump_memlists("add_ppb_props end bus", bus); 3290 dump_memlists("add_ppb_props end secbus", secbus); 3291 } 3292 3293 static void 3294 add_bus_range_prop(int bus) 3295 { 3296 int bus_range[2]; 3297 3298 if (pci_bus_res[bus].dip == NULL) 3299 return; 3300 bus_range[0] = bus; 3301 bus_range[1] = pci_bus_res[bus].sub_bus; 3302 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, pci_bus_res[bus].dip, 3303 "bus-range", (int *)bus_range, 2); 3304 } 3305 3306 /* 3307 * Handle both PCI root and PCI-PCI bridge range properties; 3308 * the 'ppb' argument selects PCI-PCI bridges versus root. 3309 */ 3310 static void 3311 memlist_to_ranges(void **rp, struct memlist *list, const int bus, 3312 const uint32_t type, boolean_t ppb) 3313 { 3314 ppb_ranges_t *ppb_rp = *rp; 3315 pci_ranges_t *pci_rp = *rp; 3316 3317 while (list != NULL) { 3318 uint32_t newtype = type; 3319 3320 /* 3321 * If this is in fact a 64-bit address, adjust the address 3322 * type code to match. 
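* For example, a memory window that extends above 4GB is emitted with
* the PCI_ADDR_MEM64 type code rather than PCI_ADDR_MEM32; a 64-bit
* I/O range is invalid and is skipped with a warning.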
3323 */ 3324 if (list->ml_address + (list->ml_size - 1) > UINT32_MAX) { 3325 if ((type & PCI_ADDR_MASK) == PCI_ADDR_IO) { 3326 cmn_err(CE_WARN, "Found invalid 64-bit I/O " 3327 "space address 0x%lx+0x%lx on bus %x", 3328 list->ml_address, list->ml_size, bus); 3329 list = list->ml_next; 3330 continue; 3331 } 3332 newtype &= ~PCI_ADDR_MASK; 3333 newtype |= PCI_ADDR_MEM64; 3334 } 3335 3336 if (ppb) { 3337 ppb_rp->child_high = ppb_rp->parent_high = newtype; 3338 ppb_rp->child_mid = ppb_rp->parent_mid = 3339 (uint32_t)(list->ml_address >> 32); 3340 ppb_rp->child_low = ppb_rp->parent_low = 3341 (uint32_t)list->ml_address; 3342 ppb_rp->size_high = (uint32_t)(list->ml_size >> 32); 3343 ppb_rp->size_low = (uint32_t)list->ml_size; 3344 *rp = ++ppb_rp; 3345 } else { 3346 pci_rp->child_high = newtype; 3347 pci_rp->child_mid = pci_rp->parent_high = 3348 (uint32_t)(list->ml_address >> 32); 3349 pci_rp->child_low = pci_rp->parent_low = 3350 (uint32_t)list->ml_address; 3351 pci_rp->size_high = (uint32_t)(list->ml_size >> 32); 3352 pci_rp->size_low = (uint32_t)list->ml_size; 3353 *rp = ++pci_rp; 3354 } 3355 list = list->ml_next; 3356 } 3357 } 3358 3359 static void 3360 add_ranges_prop(int bus, boolean_t ppb) 3361 { 3362 int total, alloc_size; 3363 void *rp, *next_rp; 3364 struct memlist *iolist, *memlist, *pmemlist; 3365 3366 /* no devinfo node - unused bus, return */ 3367 if (pci_bus_res[bus].dip == NULL) 3368 return; 3369 3370 dump_memlists("add_ranges_prop", bus); 3371 3372 iolist = memlist = pmemlist = (struct memlist *)NULL; 3373 3374 memlist_merge(&pci_bus_res[bus].io_avail, &iolist); 3375 memlist_merge(&pci_bus_res[bus].io_used, &iolist); 3376 memlist_merge(&pci_bus_res[bus].mem_avail, &memlist); 3377 memlist_merge(&pci_bus_res[bus].mem_used, &memlist); 3378 memlist_merge(&pci_bus_res[bus].pmem_avail, &pmemlist); 3379 memlist_merge(&pci_bus_res[bus].pmem_used, &pmemlist); 3380 3381 total = memlist_count(iolist); 3382 total += memlist_count(memlist); 3383 total += memlist_count(pmemlist); 3384 3385 /* no property is created if no ranges are present */ 3386 if (total == 0) 3387 return; 3388 3389 alloc_size = total * 3390 (ppb ? sizeof (ppb_ranges_t) : sizeof (pci_ranges_t)); 3391 3392 next_rp = rp = kmem_alloc(alloc_size, KM_SLEEP); 3393 3394 memlist_to_ranges(&next_rp, iolist, bus, 3395 PCI_ADDR_IO | PCI_RELOCAT_B, ppb); 3396 memlist_to_ranges(&next_rp, memlist, bus, 3397 PCI_ADDR_MEM32 | PCI_RELOCAT_B, ppb); 3398 memlist_to_ranges(&next_rp, pmemlist, bus, 3399 PCI_ADDR_MEM32 | PCI_RELOCAT_B | PCI_PREFETCH_B, ppb); 3400 3401 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, pci_bus_res[bus].dip, 3402 "ranges", (int *)rp, alloc_size / sizeof (int)); 3403 3404 kmem_free(rp, alloc_size); 3405 memlist_free_all(&iolist); 3406 memlist_free_all(&memlist); 3407 memlist_free_all(&pmemlist); 3408 } 3409 3410 static void 3411 memlist_remove_list(struct memlist **list, struct memlist *remove_list) 3412 { 3413 while (list && *list && remove_list) { 3414 (void) memlist_remove(list, remove_list->ml_address, 3415 remove_list->ml_size); 3416 remove_list = remove_list->ml_next; 3417 } 3418 } 3419 3420 static int 3421 memlist_to_spec(struct pci_phys_spec *sp, const int bus, struct memlist *list, 3422 const uint32_t type) 3423 { 3424 uint_t i = 0; 3425 3426 while (list != NULL) { 3427 uint32_t newtype = type; 3428 3429 /* 3430 * If this is in fact a 64-bit address, adjust the address 3431 * type code to match. 
3432 */ 3433 if (list->ml_address + (list->ml_size - 1) > UINT32_MAX) { 3434 if ((type & PCI_ADDR_MASK) == PCI_ADDR_IO) { 3435 cmn_err(CE_WARN, "Found invalid 64-bit I/O " 3436 "space address 0x%lx+0x%lx on bus %x", 3437 list->ml_address, list->ml_size, bus); 3438 list = list->ml_next; 3439 continue; 3440 } 3441 newtype &= ~PCI_ADDR_MASK; 3442 newtype |= PCI_ADDR_MEM64; 3443 } 3444 3445 sp->pci_phys_hi = newtype; 3446 sp->pci_phys_mid = (uint32_t)(list->ml_address >> 32); 3447 sp->pci_phys_low = (uint32_t)list->ml_address; 3448 sp->pci_size_hi = (uint32_t)(list->ml_size >> 32); 3449 sp->pci_size_low = (uint32_t)list->ml_size; 3450 3451 list = list->ml_next; 3452 sp++, i++; 3453 } 3454 return (i); 3455 } 3456 3457 static void 3458 add_bus_available_prop(int bus) 3459 { 3460 int i, count; 3461 struct pci_phys_spec *sp; 3462 3463 /* no devinfo node - unused bus, return */ 3464 if (pci_bus_res[bus].dip == NULL) 3465 return; 3466 3467 count = memlist_count(pci_bus_res[bus].io_avail) + 3468 memlist_count(pci_bus_res[bus].mem_avail) + 3469 memlist_count(pci_bus_res[bus].pmem_avail); 3470 3471 if (count == 0) /* nothing available */ 3472 return; 3473 3474 sp = kmem_alloc(count * sizeof (*sp), KM_SLEEP); 3475 i = memlist_to_spec(&sp[0], bus, pci_bus_res[bus].io_avail, 3476 PCI_ADDR_IO | PCI_RELOCAT_B); 3477 i += memlist_to_spec(&sp[i], bus, pci_bus_res[bus].mem_avail, 3478 PCI_ADDR_MEM32 | PCI_RELOCAT_B); 3479 i += memlist_to_spec(&sp[i], bus, pci_bus_res[bus].pmem_avail, 3480 PCI_ADDR_MEM32 | PCI_RELOCAT_B | PCI_PREFETCH_B); 3481 ASSERT(i == count); 3482 3483 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, pci_bus_res[bus].dip, 3484 "available", (int *)sp, 3485 i * sizeof (struct pci_phys_spec) / sizeof (int)); 3486 kmem_free(sp, count * sizeof (*sp)); 3487 } 3488 3489 static void 3490 alloc_res_array(void) 3491 { 3492 static uint_t array_size = 0; 3493 uint_t old_size; 3494 void *old_res; 3495 3496 if (array_size > pci_boot_maxbus + 1) 3497 return; /* array is big enough */ 3498 3499 old_size = array_size; 3500 old_res = pci_bus_res; 3501 3502 if (array_size == 0) 3503 array_size = 16; /* start with a reasonable number */ 3504 3505 while (array_size <= pci_boot_maxbus + 1) 3506 array_size <<= 1; 3507 pci_bus_res = (struct pci_bus_resource *)kmem_zalloc( 3508 array_size * sizeof (struct pci_bus_resource), KM_SLEEP); 3509 3510 if (old_res) { /* copy content and free old array */ 3511 bcopy(old_res, pci_bus_res, 3512 old_size * sizeof (struct pci_bus_resource)); 3513 kmem_free(old_res, old_size * sizeof (struct pci_bus_resource)); 3514 } 3515 } 3516 3517 static void 3518 create_ioapic_node(int bus, int dev, int fn, ushort_t vendorid, 3519 ushort_t deviceid) 3520 { 3521 static dev_info_t *ioapicsnode = NULL; 3522 static int numioapics = 0; 3523 dev_info_t *ioapic_node; 3524 uint64_t physaddr; 3525 uint32_t lobase, hibase = 0; 3526 3527 /* BAR 0 contains the IOAPIC's memory-mapped I/O address */ 3528 lobase = (*pci_getl_func)(bus, dev, fn, PCI_CONF_BASE0); 3529 3530 /* We (and the rest of the world) only support memory-mapped IOAPICs */ 3531 if ((lobase & PCI_BASE_SPACE_M) != PCI_BASE_SPACE_MEM) 3532 return; 3533 3534 if ((lobase & PCI_BASE_TYPE_M) == PCI_BASE_TYPE_ALL) 3535 hibase = (*pci_getl_func)(bus, dev, fn, PCI_CONF_BASE0 + 4); 3536 3537 lobase &= PCI_BASE_M_ADDR_M; 3538 3539 physaddr = (((uint64_t)hibase) << 32) | lobase; 3540 3541 /* 3542 * Create a nexus node for all IOAPICs under the root node. 
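* A single such node is created lazily when the first IOAPIC is
* discovered and is then shared by any IOAPICs found subsequently.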
3543 */ 3544 if (ioapicsnode == NULL) { 3545 if (ndi_devi_alloc(ddi_root_node(), IOAPICS_NODE_NAME, 3546 (pnode_t)DEVI_SID_NODEID, &ioapicsnode) != NDI_SUCCESS) { 3547 return; 3548 } 3549 (void) ndi_devi_online(ioapicsnode, 0); 3550 } 3551 3552 /* 3553 * Create a child node for this IOAPIC 3554 */ 3555 ioapic_node = ddi_add_child(ioapicsnode, IOAPICS_CHILD_NAME, 3556 DEVI_SID_NODEID, numioapics++); 3557 if (ioapic_node == NULL) { 3558 return; 3559 } 3560 3561 /* Vendor and Device ID */ 3562 (void) ndi_prop_update_int(DDI_DEV_T_NONE, ioapic_node, 3563 IOAPICS_PROP_VENID, vendorid); 3564 (void) ndi_prop_update_int(DDI_DEV_T_NONE, ioapic_node, 3565 IOAPICS_PROP_DEVID, deviceid); 3566 3567 /* device_type */ 3568 (void) ndi_prop_update_string(DDI_DEV_T_NONE, ioapic_node, 3569 "device_type", IOAPICS_DEV_TYPE); 3570 3571 /* reg */ 3572 (void) ndi_prop_update_int64(DDI_DEV_T_NONE, ioapic_node, 3573 "reg", physaddr); 3574 } 3575 3576 /* 3577 * Enable reporting of AER capability next pointer. 3578 * This needs to be done only for CK8-04 devices 3579 * by setting NV_XVR_VEND_CYA1 (offset 0xf40) bit 13 3580 * NOTE: BIOS is disabling this, it needs to be enabled temporarily 3581 * 3582 * This function is adapted from npe_ck804_fix_aer_ptr(), and is 3583 * called from pci_boot.c. 3584 */ 3585 static void 3586 ck804_fix_aer_ptr(dev_info_t *dip, pcie_req_id_t bdf) 3587 { 3588 dev_info_t *rcdip; 3589 ushort_t cya1; 3590 3591 rcdip = pcie_get_rc_dip(dip); 3592 ASSERT(rcdip != NULL); 3593 3594 if ((pci_cfgacc_get16(rcdip, bdf, PCI_CONF_VENID) == 3595 NVIDIA_VENDOR_ID) && 3596 (pci_cfgacc_get16(rcdip, bdf, PCI_CONF_DEVID) == 3597 NVIDIA_CK804_DEVICE_ID) && 3598 (pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID) >= 3599 NVIDIA_CK804_AER_VALID_REVID)) { 3600 cya1 = pci_cfgacc_get16(rcdip, bdf, NVIDIA_CK804_VEND_CYA1_OFF); 3601 if (!(cya1 & ~NVIDIA_CK804_VEND_CYA1_ERPT_MASK)) 3602 (void) pci_cfgacc_put16(rcdip, bdf, 3603 NVIDIA_CK804_VEND_CYA1_OFF, 3604 cya1 | NVIDIA_CK804_VEND_CYA1_ERPT_VAL); 3605 } 3606 } 3607