// SPDX-License-Identifier: GPL-2.0+
/*
 * PCI <-> OF mapping helpers
 *
 * Copyright 2011 IBM Corp.
 */
#define pr_fmt(fmt)	"PCI: OF: " fmt

#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include "pci.h"

#ifdef CONFIG_PCI
/**
 * pci_set_of_node - Find and set device's DT device_node
 * @dev: the PCI device structure to fill
 *
 * Returns 0 on success with of_node set or when no device is described in the
 * DT. Returns -ENODEV if the device is present, but disabled in the DT.
 */
int pci_set_of_node(struct pci_dev *dev)
{
	struct device_node *node;

	if (!dev->bus->dev.of_node)
		return 0;

	node = of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn);
	if (!node)
		return 0;

	if (!of_device_is_available(node)) {
		of_node_put(node);
		return -ENODEV;
	}

	device_set_node(&dev->dev, of_fwnode_handle(node));
	return 0;
}

void pci_release_of_node(struct pci_dev *dev)
{
	of_node_put(dev->dev.of_node);
	device_set_node(&dev->dev, NULL);
}

void pci_set_bus_of_node(struct pci_bus *bus)
{
	struct device_node *node;

	if (bus->self == NULL) {
		node = pcibios_get_phb_of_node(bus);
	} else {
		node = of_node_get(bus->self->dev.of_node);
		if (node && of_property_read_bool(node, "external-facing"))
			bus->self->external_facing = true;
	}

	device_set_node(&bus->dev, of_fwnode_handle(node));
}

void pci_release_bus_of_node(struct pci_bus *bus)
{
	of_node_put(bus->dev.of_node);
	device_set_node(&bus->dev, NULL);
}

struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
{
	/* This should only be called for PHBs */
	if (WARN_ON(bus->self || bus->parent))
		return NULL;

	/*
	 * Look for a node pointer in either the intermediary device we
	 * create above the root bus or its own parent. Normally only
	 * the latter is populated.
	 */
	if (bus->bridge->of_node)
		return of_node_get(bus->bridge->of_node);
	if (bus->bridge->parent && bus->bridge->parent->of_node)
		return of_node_get(bus->bridge->parent->of_node);
	return NULL;
}

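/*
 * Illustrative sketch (not part of this file): the DT layout that
 * pci_set_of_node() relies on. Child nodes of the host bridge (or of a
 * PCI-PCI bridge) are matched purely by the devfn encoded in the first
 * "reg" cell; the names and values below are made up for the example.
 *
 *	pcie@d0070000 {
 *		compatible = "vendor,example-pcie";	// hypothetical
 *		device_type = "pci";
 *		...
 *		// device 3, function 0: devfn = 0x18 in reg[0] bits [15:8]
 *		ethernet@3,0 {
 *			reg = <0x1800 0 0 0 0>;
 *		};
 *	};
 */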
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
{
#ifdef CONFIG_IRQ_DOMAIN
	struct irq_domain *d;

	if (!bus->dev.of_node)
		return NULL;

	/* Start looking for a phandle to an MSI controller. */
	d = of_msi_get_domain(&bus->dev, bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	/*
	 * If we don't have an msi-parent property, look for a domain
	 * directly attached to the host bridge.
	 */
	d = irq_find_matching_host(bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	return irq_find_host(bus->dev.of_node);
#else
	return NULL;
#endif
}

bool pci_host_of_has_msi_map(struct device *dev)
{
	if (dev && dev->of_node)
		return of_get_property(dev->of_node, "msi-map", NULL);
	return false;
}

static inline int __of_pci_pci_compare(struct device_node *node,
				       unsigned int data)
{
	int devfn;

	devfn = of_pci_get_devfn(node);
	if (devfn < 0)
		return 0;

	return devfn == data;
}

struct device_node *of_pci_find_child_device(struct device_node *parent,
					     unsigned int devfn)
{
	struct device_node *node, *node2;

	for_each_child_of_node(parent, node) {
		if (__of_pci_pci_compare(node, devfn))
			return node;
		/*
		 * Some OFs create a parent node "multifunc-device" as
		 * a fake root for all functions of a multi-function
		 * device; we go down them as well.
		 */
		if (of_node_name_eq(node, "multifunc-device")) {
			for_each_child_of_node(node, node2) {
				if (__of_pci_pci_compare(node2, devfn)) {
					of_node_put(node);
					return node2;
				}
			}
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(of_pci_find_child_device);

/**
 * of_pci_get_devfn() - Get device and function numbers for a device node
 * @np: device node
 *
 * Parses a standard 5-cell PCI resource and returns an 8-bit value that can
 * be passed to the PCI_SLOT() and PCI_FUNC() macros to extract the device
 * and function numbers respectively. On error a negative error code is
 * returned.
 */
int of_pci_get_devfn(struct device_node *np)
{
	u32 reg[5];
	int error;

	error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
	if (error)
		return error;

	return (reg[0] >> 8) & 0xff;
}
EXPORT_SYMBOL_GPL(of_pci_get_devfn);

/**
 * of_pci_parse_bus_range() - parse the bus-range property of a PCI device
 * @node: device node
 * @res: address to a struct resource to return the bus-range
 *
 * Returns 0 on success or a negative error-code on failure.
 */
int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
{
	u32 bus_range[2];
	int error;

	error = of_property_read_u32_array(node, "bus-range", bus_range,
					   ARRAY_SIZE(bus_range));
	if (error)
		return error;

	res->name = node->name;
	res->start = bus_range[0];
	res->end = bus_range[1];
	res->flags = IORESOURCE_BUS;

	return 0;
}
EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);

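/*
 * Usage sketch (illustrative only, not called from this file): how a caller
 * might decode the two helpers above. "np" is assumed to be a child node
 * such as the ethernet@3,0 example earlier, "bridge_np" its parent bridge.
 *
 *	int devfn = of_pci_get_devfn(np);
 *
 *	if (devfn >= 0)
 *		pr_info("device %d, function %d\n",
 *			PCI_SLOT(devfn), PCI_FUNC(devfn));
 *
 *	struct resource bus_range;
 *
 *	if (!of_pci_parse_bus_range(bridge_np, &bus_range))
 *		pr_info("buses %pR\n", &bus_range);
 */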
/**
 * of_get_pci_domain_nr - Find the host bridge domain number of the given
 *			  device node.
 * @node: Device tree node with the domain information.
 *
 * This function will try to obtain the host bridge domain number by finding
 * a property called "linux,pci-domain" of the given device node.
 *
 * Return:
 * * > 0	- On success, an associated domain number.
 * * -EINVAL	- The property "linux,pci-domain" does not exist.
 * * -ENODATA	- The "linux,pci-domain" property does not have a value.
 * * -EOVERFLOW	- Invalid "linux,pci-domain" property value.
 *
 * Returns the associated domain number from DT in the range [0-0xffff], or
 * a negative value if the required property is not found.
 */
int of_get_pci_domain_nr(struct device_node *node)
{
	u32 domain;
	int error;

	error = of_property_read_u32(node, "linux,pci-domain", &domain);
	if (error)
		return error;

	return (u16)domain;
}
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);

/**
 * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
 *			     is present and valid
 */
void of_pci_check_probe_only(void)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chosen, "linux,pci-probe-only", &val);
	if (ret) {
		if (ret == -ENODATA || ret == -EOVERFLOW)
			pr_warn("linux,pci-probe-only without valid value, ignoring\n");
		return;
	}

	if (val)
		pci_add_flags(PCI_PROBE_ONLY);
	else
		pci_clear_flags(PCI_PROBE_ONLY);

	pr_info("PROBE_ONLY %s\n", val ? "enabled" : "disabled");
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);

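/*
 * Illustrative DT fragment (not from this file) for the property consumed
 * by of_pci_check_probe_only() above; the value is read from /chosen:
 *
 *	chosen {
 *		linux,pci-probe-only = <1>;	// keep firmware BAR setup
 *	};
 *
 * A value of 0 clears PCI_PROBE_ONLY instead, letting Linux reassign
 * resources itself.
 */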
/**
 * devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
 *					      host bridge resources from DT
 * @dev: host bridge device
 * @busno: bus number associated with the bridge root bus
 * @bus_max: maximum number of buses for this bridge
 * @resources: list where the range of resources will be added after DT parsing
 * @ib_resources: list where the range of inbound resources (with addresses
 *		  from 'dma-ranges') will be added after DT parsing
 * @io_base: pointer to a variable that will contain on return the physical
 *	     address for the start of the I/O range. Can be NULL if the caller
 *	     doesn't expect I/O ranges to be present in the device tree.
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping based on its content. It is expected
 * that the property conforms with the Power ePAPR document.
 *
 * It returns zero if the range parsing has been successful or a standard error
 * value if it failed.
 */
static int devm_of_pci_get_host_bridge_resources(struct device *dev,
			unsigned char busno, unsigned char bus_max,
			struct list_head *resources,
			struct list_head *ib_resources,
			resource_size_t *io_base)
{
	struct device_node *dev_node = dev->of_node;
	struct resource *res, tmp_res;
	struct resource *bus_range;
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	const char *range_type;
	int err;

	if (io_base)
		*io_base = (resource_size_t)OF_BAD_ADDR;

	bus_range = devm_kzalloc(dev, sizeof(*bus_range), GFP_KERNEL);
	if (!bus_range)
		return -ENOMEM;

	dev_info(dev, "host bridge %pOF ranges:\n", dev_node);

	err = of_pci_parse_bus_range(dev_node, bus_range);
	if (err) {
		bus_range->start = busno;
		bus_range->end = bus_max;
		bus_range->flags = IORESOURCE_BUS;
		dev_info(dev, "  No bus range found for %pOF, using %pR\n",
			 dev_node, bus_range);
	} else {
		if (bus_range->end > bus_range->start + bus_max)
			bus_range->end = bus_range->start + bus_max;
	}
	pci_add_resource(resources, bus_range);

	/* Check for ranges property */
	err = of_pci_range_parser_init(&parser, dev_node);
	if (err)
		return 0;

	dev_dbg(dev, "Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/* Read next ranges element */
		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
			range_type = "IO";
		else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
			range_type = "MEM";
		else
			range_type = "err";
		dev_info(dev, "  %6s %#012llx..%#012llx -> %#012llx\n",
			 range_type, range.cpu_addr,
			 range.cpu_addr + range.size - 1, range.pci_addr);

		/*
		 * If we failed translation or got a zero-sized region
		 * then skip this range
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
		if (err)
			continue;

		res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
		if (!res) {
			err = -ENOMEM;
			goto failed;
		}

		if (resource_type(res) == IORESOURCE_IO) {
			if (!io_base) {
				dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
					dev_node);
				err = -EINVAL;
				goto failed;
			}
			if (*io_base != (resource_size_t)OF_BAD_ADDR)
				dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
					 dev_node);
			*io_base = range.cpu_addr;
		} else if (resource_type(res) == IORESOURCE_MEM) {
			res->flags &= ~IORESOURCE_MEM_64;
		}

		pci_add_resource_offset(resources, res, res->start - range.pci_addr);
	}

	/* Check for dma-ranges property */
	if (!ib_resources)
		return 0;
	err = of_pci_dma_range_parser_init(&parser, dev_node);
	if (err)
		return 0;

	dev_dbg(dev, "Parsing dma-ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/*
		 * If we failed translation or got a zero-sized region
		 * then skip this range
		 */
		if (((range.flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM) ||
		    range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		dev_info(dev, "  %6s %#012llx..%#012llx -> %#012llx\n",
			 "IB MEM", range.cpu_addr,
			 range.cpu_addr + range.size - 1, range.pci_addr);

		err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
		if (err)
			continue;

		res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
		if (!res) {
			err = -ENOMEM;
			goto failed;
		}

		pci_add_resource_offset(ib_resources, res,
					res->start - range.pci_addr);
	}

	return 0;

failed:
	pci_free_resource_list(resources);
	return err;
}

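/*
 * Illustrative "ranges"/"dma-ranges" fragment (made-up addresses) of the
 * shape parsed above: one 64K I/O window, one 32-bit non-prefetchable
 * memory window, and an inbound window covering the first 2 GiB of RAM.
 *
 *	ranges = <0x01000000 0 0x00000000  0 0xf9000000  0 0x00010000>,
 *		 <0x02000000 0 0xe0000000  0 0xe0000000  0 0x10000000>;
 *	dma-ranges = <0x02000000 0 0x00000000  0 0x00000000  0 0x80000000>;
 */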
#if IS_ENABLED(CONFIG_OF_IRQ)
/**
 * of_irq_parse_pci - Resolve the interrupt for a PCI device
 * @pdev:	the device whose interrupt is to be resolved
 * @out_irq:	structure of_phandle_args filled by this function
 *
 * This function resolves the PCI interrupt for a given PCI device. If a
 * device-node exists for a given pci_dev, it will use normal OF tree
 * walking. If not, it will implement standard swizzling and walk up the
 * PCI tree until a device-node is found, at which point it will finish
 * resolving using the OF tree walking.
 */
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
	struct device_node *dn, *ppnode = NULL;
	struct pci_dev *ppdev;
	__be32 laddr[3];
	u8 pin;
	int rc;

	/*
	 * Check if we have a device node, if yes, fallback to standard
	 * device tree parsing
	 */
	dn = pci_device_to_OF_node(pdev);
	if (dn) {
		rc = of_irq_parse_one(dn, 0, out_irq);
		if (!rc)
			return rc;
	}

	/*
	 * Ok, we don't, time to have fun. Let's start by building up an
	 * interrupt spec. We assume #interrupt-cells is 1, which is standard
	 * for PCI. If you do it differently, then don't use this routine.
	 */
	rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
	if (rc != 0)
		goto err;
	/* No pin, exit with no error message. */
	if (pin == 0)
		return -ENODEV;

	/* Local interrupt-map in the device node? Use it! */
	if (of_property_present(dn, "interrupt-map")) {
		pin = pci_swizzle_interrupt_pin(pdev, pin);
		ppnode = dn;
	}

	/* Now we walk up the PCI tree */
	while (!ppnode) {
		/* Get the pci_dev of our parent */
		ppdev = pdev->bus->self;

		/* Ouch, it's a host bridge... */
		if (ppdev == NULL) {
			ppnode = pci_bus_to_OF_node(pdev->bus);

			/* No node for host bridge ? give up */
			if (ppnode == NULL) {
				rc = -EINVAL;
				goto err;
			}
		} else {
			/* We found a P2P bridge, check if it has a node */
			ppnode = pci_device_to_OF_node(ppdev);
		}

		/*
		 * Ok, we have found a parent with a device-node, hand over to
		 * the OF parsing code.
		 * We build a unit address from the linux device to be used for
		 * resolution. Note that we use the linux bus number which may
		 * not match your firmware bus numbering.
		 * Fortunately, in most cases, interrupt-map-mask doesn't
		 * include the bus number as part of the matching.
		 * You should still be careful about that though if you intend
		 * to rely on this function (you ship a firmware that doesn't
		 * create device nodes for all PCI devices).
		 */
		if (ppnode)
			break;

		/*
		 * We can only get here if we hit a P2P bridge with no node;
		 * let's do standard swizzling and try again
		 */
		pin = pci_swizzle_interrupt_pin(pdev, pin);
		pdev = ppdev;
	}

	out_irq->np = ppnode;
	out_irq->args_count = 1;
	out_irq->args[0] = pin;
	laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
	laddr[1] = laddr[2] = cpu_to_be32(0);
	rc = of_irq_parse_raw(laddr, out_irq);
	if (rc)
		goto err;
	return 0;
err:
	if (rc == -ENOENT) {
		dev_warn(&pdev->dev,
			"%s: no interrupt-map found, INTx interrupts not available\n",
			__func__);
		pr_warn_once("%s: possibly some PCI slots don't have level triggered interrupts capability\n",
			__func__);
	} else {
		dev_err(&pdev->dev, "%s: failed with rc=%d\n", __func__, rc);
	}
	return rc;
}

/**
 * of_irq_parse_and_map_pci() - Decode a PCI IRQ from the device tree and map to a VIRQ
 * @dev: The PCI device needing an IRQ
 * @slot: PCI slot number; passed when used as map_irq callback. Unused
 * @pin: PCI IRQ pin number; passed when used as map_irq callback. Unused
 *
 * @slot and @pin are unused, but included in the function so that this
 * function can be used directly as the map_irq callback to
 * pci_assign_irq() and struct pci_host_bridge.map_irq pointer
 */
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct of_phandle_args oirq;
	int ret;

	ret = of_irq_parse_pci(dev, &oirq);
	if (ret)
		return 0; /* Proper return code 0 == NO_IRQ */

	return irq_create_of_mapping(&oirq);
}
EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
#endif	/* CONFIG_OF_IRQ */

static int pci_parse_request_of_pci_ranges(struct device *dev,
					   struct pci_host_bridge *bridge)
{
	int err, res_valid = 0;
	resource_size_t iobase;
	struct resource_entry *win, *tmp;

	INIT_LIST_HEAD(&bridge->windows);
	INIT_LIST_HEAD(&bridge->dma_ranges);

	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &bridge->windows,
						    &bridge->dma_ranges, &iobase);
	if (err)
		return err;

	err = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (err)
		return err;

	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
		struct resource *res = win->res;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			err = devm_pci_remap_iospace(dev, res, iobase);
			if (err) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, res);
				resource_list_destroy_entry(win);
			}
			break;
		case IORESOURCE_MEM:
			res_valid |= !(res->flags & IORESOURCE_PREFETCH);

			if (!(res->flags & IORESOURCE_PREFETCH))
				if (upper_32_bits(resource_size(res)))
					dev_warn(dev, "Memory resource size exceeds max for 32 bits\n");

			break;
		}
	}

	if (!res_valid)
		dev_warn(dev, "non-prefetchable memory resource required\n");

	return 0;
}

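/*
 * Sketch of how the DT glue below is typically reached (illustrative and
 * simplified; "my_pcie_probe" and "my_pcie_ops" are hypothetical and not
 * defined in this file):
 *
 *	static int my_pcie_probe(struct platform_device *pdev)
 *	{
 *		struct pci_host_bridge *bridge;
 *
 *		bridge = devm_pci_alloc_host_bridge(&pdev->dev, 0);
 *		if (!bridge)
 *			return -ENOMEM;
 *		// devm_pci_alloc_host_bridge() is expected to invoke
 *		// devm_of_pci_bridge_init(), filling bridge->windows,
 *		// bridge->dma_ranges, ->map_irq and ->swizzle_irq from DT.
 *		bridge->ops = &my_pcie_ops;
 *		return pci_host_probe(bridge);
 *	}
 */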
int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge)
{
	if (!dev->of_node)
		return 0;

	bridge->swizzle_irq = pci_common_swizzle;
	bridge->map_irq = of_irq_parse_and_map_pci;

	return pci_parse_request_of_pci_ranges(dev, bridge);
}

#endif /* CONFIG_PCI */

/**
 * of_pci_get_max_link_speed - Find the maximum link speed of the given device node.
 * @node: Device tree node with the maximum link speed information.
 *
 * This function will try to find the limitation of link speed by finding
 * a property called "max-link-speed" of the given device node.
 *
 * Return:
 * * > 0	- On success, a maximum link speed.
 * * -EINVAL	- Invalid "max-link-speed" property value, or failure to access
 *		  the property of the device tree node.
 *
 * Returns the associated max link speed from DT, or a negative value if the
 * required property is not found or is invalid.
 */
int of_pci_get_max_link_speed(struct device_node *node)
{
	u32 max_link_speed;

	if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
	    max_link_speed == 0 || max_link_speed > 4)
		return -EINVAL;

	return max_link_speed;
}
EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);

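/*
 * Illustrative use of of_pci_get_max_link_speed() above (not from this
 * file): a controller driver capping its link from DT. The property value
 * maps to a PCIe generation (1..4 accepted here); the "pcie" structure
 * and its fields below are made up.
 *
 *	// DT:  max-link-speed = <2>;	// limit to Gen2 (5 GT/s)
 *
 *	int speed = of_pci_get_max_link_speed(pcie->dev->of_node);
 *
 *	if (speed > 0)
 *		pcie->max_gen = speed;	// otherwise keep hardware default
 */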
/**
 * of_pci_get_slot_power_limit - Parses the "slot-power-limit-milliwatt"
 *				 property.
 *
 * @node: device tree node with the slot power limit information
 * @slot_power_limit_value: pointer where the value should be stored in
 *			    PCIe Slot Capabilities Register format
 * @slot_power_limit_scale: pointer where the scale should be stored in
 *			    PCIe Slot Capabilities Register format
 *
 * Returns the slot power limit in milliwatts and if @slot_power_limit_value
 * and @slot_power_limit_scale pointers are non-NULL, fills in the value and
 * scale in format used by PCIe Slot Capabilities Register.
 *
 * If the property is not found or is invalid, returns 0.
 */
u32 of_pci_get_slot_power_limit(struct device_node *node,
				u8 *slot_power_limit_value,
				u8 *slot_power_limit_scale)
{
	u32 slot_power_limit_mw;
	u8 value, scale;

	if (of_property_read_u32(node, "slot-power-limit-milliwatt",
				 &slot_power_limit_mw))
		slot_power_limit_mw = 0;

	/* Calculate Slot Power Limit Value and Slot Power Limit Scale */
	if (slot_power_limit_mw == 0) {
		value = 0x00;
		scale = 0;
	} else if (slot_power_limit_mw <= 255) {
		value = slot_power_limit_mw;
		scale = 3;
	} else if (slot_power_limit_mw <= 255*10) {
		value = slot_power_limit_mw / 10;
		scale = 2;
		slot_power_limit_mw = slot_power_limit_mw / 10 * 10;
	} else if (slot_power_limit_mw <= 255*100) {
		value = slot_power_limit_mw / 100;
		scale = 1;
		slot_power_limit_mw = slot_power_limit_mw / 100 * 100;
	} else if (slot_power_limit_mw <= 239*1000) {
		value = slot_power_limit_mw / 1000;
		scale = 0;
		slot_power_limit_mw = slot_power_limit_mw / 1000 * 1000;
	} else if (slot_power_limit_mw < 250*1000) {
		value = 0xEF;
		scale = 0;
		slot_power_limit_mw = 239*1000;
	} else if (slot_power_limit_mw <= 600*1000) {
		value = 0xF0 + (slot_power_limit_mw / 1000 - 250) / 25;
		scale = 0;
		slot_power_limit_mw = slot_power_limit_mw / (1000*25) * (1000*25);
	} else {
		value = 0xFE;
		scale = 0;
		slot_power_limit_mw = 600*1000;
	}

	if (slot_power_limit_value)
		*slot_power_limit_value = value;

	if (slot_power_limit_scale)
		*slot_power_limit_scale = scale;

	return slot_power_limit_mw;
}
EXPORT_SYMBOL_GPL(of_pci_get_slot_power_limit);
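/*
 * Worked example for the encoding above (illustrative, made-up value):
 * slot-power-limit-milliwatt = <27500> (27.5 W) exceeds the 255*100 mW
 * bracket, so it is encoded in 1.0 W units: value = 27, scale = 0, and
 * the function returns the rounded-down 27000 mW.
 *
 *	u8 val, sc;
 *	u32 mw = of_pci_get_slot_power_limit(np, &val, &sc);
 *	// mw == 27000, val == 27, sc == 0 for the fragment above
 */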