// SPDX-License-Identifier: GPL-2.0+
/*
 * PCI <-> OF mapping helpers
 *
 * Copyright 2011 IBM Corp.
 */
#define pr_fmt(fmt)	"PCI: OF: " fmt

#include <linux/cleanup.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include "pci.h"

#ifdef CONFIG_PCI
/**
 * pci_set_of_node - Find and set device's DT device_node
 * @dev: the PCI device structure to fill
 *
 * Returns 0 on success with of_node set or when no device is described in the
 * DT. Returns -ENODEV if the device is present, but disabled in the DT.
 */
int pci_set_of_node(struct pci_dev *dev)
{
	if (!dev->bus->dev.of_node)
		return 0;

	struct device_node *node __free(device_node) =
		of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn);
	if (!node)
		return 0;

	struct device *pdev __free(put_device) =
		bus_find_device_by_of_node(&platform_bus_type, node);
	if (pdev)
		dev->bus->dev.of_node_reused = true;

	device_set_node(&dev->dev, of_fwnode_handle(no_free_ptr(node)));
	return 0;
}

void pci_release_of_node(struct pci_dev *dev)
{
	of_node_put(dev->dev.of_node);
	device_set_node(&dev->dev, NULL);
}

void pci_set_bus_of_node(struct pci_bus *bus)
{
	struct device_node *node;

	if (bus->self == NULL) {
		node = pcibios_get_phb_of_node(bus);
	} else {
		node = of_node_get(bus->self->dev.of_node);
		if (node && of_property_read_bool(node, "external-facing"))
			bus->self->external_facing = true;
	}

	device_set_node(&bus->dev, of_fwnode_handle(node));
}
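
/*
 * Example (illustrative only, not taken from a real board file): the
 * "external-facing" handling in pci_set_bus_of_node() above is driven by a
 * bridge node such as this hypothetical root port, letting the PCI core
 * treat devices behind it as untrusted:
 *
 *	pcie@0,0 {
 *		reg = <0x0000 0 0 0 0>;
 *		external-facing;
 *	};
 */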

void pci_release_bus_of_node(struct pci_bus *bus)
{
	of_node_put(bus->dev.of_node);
	device_set_node(&bus->dev, NULL);
}

struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
{
	/* This should only be called for PHBs */
	if (WARN_ON(bus->self || bus->parent))
		return NULL;

	/*
	 * Look for a node pointer in either the intermediary device we
	 * create above the root bus or its own parent. Normally only
	 * the latter is populated.
	 */
	if (bus->bridge->of_node)
		return of_node_get(bus->bridge->of_node);
	if (bus->bridge->parent && bus->bridge->parent->of_node)
		return of_node_get(bus->bridge->parent->of_node);
	return NULL;
}

struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
{
#ifdef CONFIG_IRQ_DOMAIN
	struct irq_domain *d;

	if (!bus->dev.of_node)
		return NULL;

	/* Start looking for a phandle to an MSI controller. */
	d = of_msi_get_domain(&bus->dev, bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	/*
	 * If we don't have an msi-parent property, look for a domain
	 * directly attached to the host bridge.
	 */
	d = irq_find_matching_host(bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	return irq_find_host(bus->dev.of_node);
#else
	return NULL;
#endif
}

bool pci_host_of_has_msi_map(struct device *dev)
{
	if (dev && dev->of_node)
		return of_get_property(dev->of_node, "msi-map", NULL);
	return false;
}

static inline int __of_pci_pci_compare(struct device_node *node,
				       unsigned int data)
{
	int devfn;

	devfn = of_pci_get_devfn(node);
	if (devfn < 0)
		return 0;

	return devfn == data;
}

struct device_node *of_pci_find_child_device(struct device_node *parent,
					     unsigned int devfn)
{
	struct device_node *node, *node2;

	for_each_child_of_node(parent, node) {
		if (__of_pci_pci_compare(node, devfn))
			return node;
		/*
		 * Some OFs create a parent node "multifunc-device" as
		 * a fake root for all functions of a multi-function
		 * device; we walk down into those nodes as well.
		 */
		if (of_node_name_eq(node, "multifunc-device")) {
			for_each_child_of_node(node, node2) {
				if (__of_pci_pci_compare(node2, devfn)) {
					of_node_put(node);
					return node2;
				}
			}
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(of_pci_find_child_device);

/**
 * of_pci_get_devfn() - Get device and function numbers for a device node
 * @np: device node
 *
 * Parses a standard 5-cell PCI resource and returns an 8-bit value that can
 * be passed to the PCI_SLOT() and PCI_FUNC() macros to extract the device
 * and function numbers respectively. On error a negative error code is
 * returned.
 */
int of_pci_get_devfn(struct device_node *np)
{
	u32 reg[5];
	int error;

	error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
	if (error)
		return error;

	return (reg[0] >> 8) & 0xff;
}
EXPORT_SYMBOL_GPL(of_pci_get_devfn);

/**
 * of_pci_parse_bus_range() - parse the bus-range property of a PCI device
 * @node: device node
 * @res: address to a struct resource to return the bus-range
 *
 * Returns 0 on success or a negative error-code on failure.
 */
int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
{
	u32 bus_range[2];
	int error;

	error = of_property_read_u32_array(node, "bus-range", bus_range,
					   ARRAY_SIZE(bus_range));
	if (error)
		return error;

	res->name = node->name;
	res->start = bus_range[0];
	res->end = bus_range[1];
	res->flags = IORESOURCE_BUS;

	return 0;
}
EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
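
/*
 * Example (illustrative only): for a child node with a standard 5-cell "reg"
 * entry such as the hypothetical one below, of_pci_get_devfn() returns
 * (0x1800 >> 8) & 0xff = 0x18, so PCI_SLOT() is 3 and PCI_FUNC() is 0.
 * Likewise, a host bridge property "bus-range = <0x00 0xff>;" is what
 * of_pci_parse_bus_range() turns into an IORESOURCE_BUS resource.
 *
 *	ethernet@3,0 {
 *		reg = <0x1800 0 0 0 0>;
 *	};
 */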

/**
 * of_get_pci_domain_nr - Find the host bridge domain number
 *			  of the given device node.
 * @node: Device tree node with the domain information.
 *
 * This function will try to obtain the host bridge domain number by finding
 * a property called "linux,pci-domain" of the given device node.
 *
 * Return:
 * * >= 0	- On success, the associated domain number.
 * * -EINVAL	- The property "linux,pci-domain" does not exist.
 * * -ENODATA	- The "linux,pci-domain" property does not have a value.
 * * -EOVERFLOW	- Invalid "linux,pci-domain" property value.
 *
 * Returns the associated domain number from DT in the range [0-0xffff], or
 * a negative value if the required property is not found.
 */
int of_get_pci_domain_nr(struct device_node *node)
{
	u32 domain;
	int error;

	error = of_property_read_u32(node, "linux,pci-domain", &domain);
	if (error)
		return error;

	return (u16)domain;
}
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);

/**
 * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
 *			     is present and valid
 */
void of_pci_check_probe_only(void)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chosen, "linux,pci-probe-only", &val);
	if (ret) {
		if (ret == -ENODATA || ret == -EOVERFLOW)
			pr_warn("linux,pci-probe-only without valid value, ignoring\n");
		return;
	}

	if (val)
		pci_add_flags(PCI_PROBE_ONLY);
	else
		pci_clear_flags(PCI_PROBE_ONLY);

	pr_info("PROBE_ONLY %s\n", val ? "enabled" : "disabled");
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
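
/*
 * Example (illustrative only): the two helpers above consume firmware
 * properties such as these hypothetical snippets. "linux,pci-domain" lives
 * in the host bridge node, "linux,pci-probe-only" in /chosen:
 *
 *	pcie@10000000 {
 *		linux,pci-domain = <0>;
 *	};
 *
 *	chosen {
 *		linux,pci-probe-only = <1>;
 *	};
 */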

/**
 * devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
 *					     host bridge resources from DT
 * @dev: host bridge device
 * @busno: bus number associated with the bridge root bus
 * @bus_max: maximum number of buses for this bridge
 * @resources: list where the range of resources will be added after DT parsing
 * @ib_resources: list where the range of inbound resources (with addresses
 *		  from 'dma-ranges') will be added after DT parsing
 * @io_base: pointer to a variable that will contain on return the physical
 *	     address for the start of the I/O range. Can be NULL if the caller
 *	     doesn't expect I/O ranges to be present in the device tree.
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping based on its content. It is expected
 * that the property conforms with the Power ePAPR document.
 *
 * It returns zero if the range parsing has been successful or a standard error
 * value if it failed.
 */
static int devm_of_pci_get_host_bridge_resources(struct device *dev,
			unsigned char busno, unsigned char bus_max,
			struct list_head *resources,
			struct list_head *ib_resources,
			resource_size_t *io_base)
{
	struct device_node *dev_node = dev->of_node;
	struct resource *res, tmp_res;
	struct resource *bus_range;
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	const char *range_type;
	int err;

	if (io_base)
		*io_base = (resource_size_t)OF_BAD_ADDR;

	bus_range = devm_kzalloc(dev, sizeof(*bus_range), GFP_KERNEL);
	if (!bus_range)
		return -ENOMEM;

	dev_info(dev, "host bridge %pOF ranges:\n", dev_node);

	err = of_pci_parse_bus_range(dev_node, bus_range);
	if (err) {
		bus_range->start = busno;
		bus_range->end = bus_max;
		bus_range->flags = IORESOURCE_BUS;
		dev_info(dev, "  No bus range found for %pOF, using %pR\n",
			 dev_node, bus_range);
	} else {
		if (bus_range->end > bus_range->start + bus_max)
			bus_range->end = bus_range->start + bus_max;
	}
	pci_add_resource(resources, bus_range);

	/* Check for ranges property */
	err = of_pci_range_parser_init(&parser, dev_node);
	if (err)
		return 0;

	dev_dbg(dev, "Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/* Read next ranges element */
		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
			range_type = "IO";
		else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
			range_type = "MEM";
		else
			range_type = "err";
		dev_info(dev, "  %6s %#012llx..%#012llx -> %#012llx\n",
			 range_type, range.cpu_addr,
			 range.cpu_addr + range.size - 1, range.pci_addr);

		/*
		 * If we failed translation or got a zero-sized region
		 * then skip this range
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
		if (err)
			continue;

		res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
		if (!res) {
			err = -ENOMEM;
			goto failed;
		}

		if (resource_type(res) == IORESOURCE_IO) {
			if (!io_base) {
				dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
					dev_node);
				err = -EINVAL;
				goto failed;
			}
			if (*io_base != (resource_size_t)OF_BAD_ADDR)
				dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
					 dev_node);
			*io_base = range.cpu_addr;
		} else if (resource_type(res) == IORESOURCE_MEM) {
			res->flags &= ~IORESOURCE_MEM_64;
		}

		pci_add_resource_offset(resources, res, res->start - range.pci_addr);
	}

	/* Check for dma-ranges property */
	if (!ib_resources)
		return 0;
	err = of_pci_dma_range_parser_init(&parser, dev_node);
	if (err)
		return 0;

	dev_dbg(dev, "Parsing dma-ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/*
		 * If we failed translation or got a zero-sized region
		 * then skip this range
		 */
		if (((range.flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM) ||
		    range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		dev_info(dev, "  %6s %#012llx..%#012llx -> %#012llx\n",
			 "IB MEM", range.cpu_addr,
			 range.cpu_addr + range.size - 1, range.pci_addr);

		err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
		if (err)
			continue;

		res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
		if (!res) {
			err = -ENOMEM;
			goto failed;
		}

		pci_add_resource_offset(ib_resources, res,
					res->start - range.pci_addr);
	}

	return 0;

failed:
	pci_free_resource_list(resources);
	return err;
}

#if IS_ENABLED(CONFIG_OF_IRQ)
/**
 * of_irq_parse_pci - Resolve the interrupt for a PCI device
 * @pdev:	the device whose interrupt is to be resolved
 * @out_irq:	structure of_phandle_args filled by this function
 *
 * This function resolves the PCI interrupt for a given PCI device. If a
 * device-node exists for a given pci_dev, it will use normal OF tree
 * walking. If not, it will implement standard swizzling and walk up the
 * PCI tree until a device-node is found, at which point it will finish
 * resolving using the OF tree walking.
 */
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
	struct device_node *dn, *ppnode = NULL;
	struct pci_dev *ppdev;
	__be32 laddr[3];
	u8 pin;
	int rc;

	/*
	 * Check if we have a device node; if yes, fall back to standard
	 * device tree parsing.
	 */
	dn = pci_device_to_OF_node(pdev);
	if (dn) {
		rc = of_irq_parse_one(dn, 0, out_irq);
		if (!rc)
			return rc;
	}

	/*
	 * Ok, we don't, time to have fun. Let's start by building up an
	 * interrupt spec. We assume #interrupt-cells is 1, which is standard
	 * for PCI. If yours differs, don't use this routine.
	 */
	rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
	if (rc != 0)
		goto err;
	/* No pin, exit with no error message. */
	if (pin == 0)
		return -ENODEV;

	/* Local interrupt-map in the device node? Use it! */
	if (of_property_present(dn, "interrupt-map")) {
		pin = pci_swizzle_interrupt_pin(pdev, pin);
		ppnode = dn;
	}

	/* Now we walk up the PCI tree */
	while (!ppnode) {
		/* Get the pci_dev of our parent */
		ppdev = pdev->bus->self;

		/* Ouch, it's a host bridge... */
		if (ppdev == NULL) {
			ppnode = pci_bus_to_OF_node(pdev->bus);

			/* No node for the host bridge? Give up. */
			if (ppnode == NULL) {
				rc = -EINVAL;
				goto err;
			}
		} else {
			/* We found a P2P bridge, check if it has a node */
			ppnode = pci_device_to_OF_node(ppdev);
		}

		/*
		 * Ok, we have found a parent with a device-node, hand over to
		 * the OF parsing code.
		 * We build a unit address from the Linux device to be used
		 * for resolution. Note that we use the Linux bus number,
		 * which may not match your firmware bus numbering.
		 * Fortunately, in most cases, interrupt-map-mask doesn't
		 * include the bus number as part of the matching.
		 * You should still be careful about that, though, if you
		 * intend to rely on this function (i.e. you ship a firmware
		 * that doesn't create device nodes for all PCI devices).
		 */
		if (ppnode)
			break;

		/*
		 * We can only get here if we hit a P2P bridge with no node;
		 * let's do standard swizzling and try again.
		 */
		pin = pci_swizzle_interrupt_pin(pdev, pin);
		pdev = ppdev;
	}

	out_irq->np = ppnode;
	out_irq->args_count = 1;
	out_irq->args[0] = pin;
	laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
	laddr[1] = laddr[2] = cpu_to_be32(0);
	rc = of_irq_parse_raw(laddr, out_irq);
	if (rc)
		goto err;
	return 0;
err:
	if (rc == -ENOENT) {
		dev_warn(&pdev->dev,
			 "%s: no interrupt-map found, INTx interrupts not available\n",
			 __func__);
		pr_warn_once("%s: possibly some PCI slots don't have level triggered interrupts capability\n",
			     __func__);
	} else {
		dev_err(&pdev->dev, "%s: failed with rc=%d\n", __func__, rc);
	}
	return rc;
}

/**
 * of_irq_parse_and_map_pci() - Decode a PCI IRQ from the device tree and map to a VIRQ
 * @dev: The PCI device needing an IRQ
 * @slot: PCI slot number; passed when used as map_irq callback. Unused
 * @pin: PCI IRQ pin number; passed when used as map_irq callback. Unused
 *
 * @slot and @pin are unused, but included in the function so that this
 * function can be used directly as the map_irq callback to
 * pci_assign_irq() and the struct pci_host_bridge.map_irq pointer.
 */
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct of_phandle_args oirq;
	int ret;

	ret = of_irq_parse_pci(dev, &oirq);
	if (ret)
		return 0; /* Proper return code 0 == NO_IRQ */

	return irq_create_of_mapping(&oirq);
}
EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
#endif /* CONFIG_OF_IRQ */

static int pci_parse_request_of_pci_ranges(struct device *dev,
					   struct pci_host_bridge *bridge)
{
	int err, res_valid = 0;
	resource_size_t iobase;
	struct resource_entry *win, *tmp;

	INIT_LIST_HEAD(&bridge->windows);
	INIT_LIST_HEAD(&bridge->dma_ranges);

	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &bridge->windows,
						    &bridge->dma_ranges, &iobase);
	if (err)
		return err;

	err = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (err)
		return err;

	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
		struct resource *res = win->res;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			err = devm_pci_remap_iospace(dev, res, iobase);
			if (err) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, res);
				resource_list_destroy_entry(win);
			}
			break;
		case IORESOURCE_MEM:
			res_valid |= !(res->flags & IORESOURCE_PREFETCH);

			if (!(res->flags & IORESOURCE_PREFETCH))
				if (upper_32_bits(resource_size(res)))
					dev_warn(dev, "Memory resource size exceeds max for 32 bits\n");

			break;
		}
	}

	if (!res_valid)
		dev_warn(dev, "non-prefetchable memory resource required\n");

	return 0;
}
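
/*
 * Example (illustrative only, made-up addresses): the parsing above is
 * driven by host bridge "ranges"/"dma-ranges" properties along these lines,
 * assuming two parent address cells and two size cells: one 64 KB I/O
 * window, one 256 MB 32-bit non-prefetchable memory window, and one 4 GB
 * inbound window described by dma-ranges.
 *
 *	ranges = <0x01000000 0x0 0x00000000  0x0 0x4f000000  0x0 0x00010000>,
 *		 <0x02000000 0x0 0x50000000  0x0 0x50000000  0x0 0x10000000>;
 *	dma-ranges = <0x02000000 0x0 0x00000000  0x0 0x00000000  0x1 0x00000000>;
 */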

int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge)
{
	if (!dev->of_node)
		return 0;

	bridge->swizzle_irq = pci_common_swizzle;
	bridge->map_irq = of_irq_parse_and_map_pci;

	return pci_parse_request_of_pci_ranges(dev, bridge);
}

#ifdef CONFIG_PCI_DYNAMIC_OF_NODES

void of_pci_remove_node(struct pci_dev *pdev)
{
	struct device_node *np;

	np = pci_device_to_OF_node(pdev);
	if (!np || !of_node_check_flag(np, OF_DYNAMIC))
		return;
	pdev->dev.of_node = NULL;

	of_changeset_revert(np->data);
	of_changeset_destroy(np->data);
	of_node_put(np);
}

void of_pci_make_dev_node(struct pci_dev *pdev)
{
	struct device_node *ppnode, *np = NULL;
	const char *pci_type;
	struct of_changeset *cset;
	const char *name;
	int ret;

	/*
	 * If there is already a device tree node linked to this device,
	 * return immediately.
	 */
	if (pci_device_to_OF_node(pdev))
		return;

	/* Check if there is a device tree node for the parent device */
	if (!pdev->bus->self)
		ppnode = pdev->bus->dev.of_node;
	else
		ppnode = pdev->bus->self->dev.of_node;
	if (!ppnode)
		return;

	if (pci_is_bridge(pdev))
		pci_type = "pci";
	else
		pci_type = "dev";

	name = kasprintf(GFP_KERNEL, "%s@%x,%x", pci_type,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
	if (!name)
		return;

	cset = kmalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		goto out_free_name;
	of_changeset_init(cset);

	np = of_changeset_create_node(cset, ppnode, name);
	if (!np)
		goto out_destroy_cset;

	ret = of_pci_add_properties(pdev, cset, np);
	if (ret)
		goto out_free_node;

	ret = of_changeset_apply(cset);
	if (ret)
		goto out_free_node;

	np->data = cset;
	pdev->dev.of_node = np;
	kfree(name);

	return;

out_free_node:
	of_node_put(np);
out_destroy_cset:
	of_changeset_destroy(cset);
	kfree(cset);
out_free_name:
	kfree(name);
}
#endif

#endif /* CONFIG_PCI */

/**
 * of_pci_get_max_link_speed - Find the maximum link speed of the given device node.
 * @node: Device tree node with the maximum link speed information.
 *
 * This function will try to find the limitation of link speed by finding
 * a property called "max-link-speed" of the given device node.
 *
 * Return:
 * * > 0	- On success, a maximum link speed.
 * * -EINVAL	- Invalid "max-link-speed" property value, or failure to access
 *		  the property of the device tree node.
 *
 * Returns the associated max link speed from DT, or a negative value if the
 * required property is not found or is invalid.
 */
int of_pci_get_max_link_speed(struct device_node *node)
{
	u32 max_link_speed;

	if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
	    max_link_speed == 0 || max_link_speed > 4)
		return -EINVAL;

	return max_link_speed;
}
EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
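
/*
 * Example (illustrative only): a controller node may cap the link speed with
 * a property such as the one below; of_pci_get_max_link_speed() then returns
 * 2 (5 GT/s), while a missing property or a value outside 1..4 yields
 * -EINVAL.
 *
 *	max-link-speed = <2>;
 */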

/**
 * of_pci_get_slot_power_limit - Parses the "slot-power-limit-milliwatt"
 *				 property.
 *
 * @node: device tree node with the slot power limit information
 * @slot_power_limit_value: pointer where the value should be stored in
 *			    PCIe Slot Capabilities Register format
 * @slot_power_limit_scale: pointer where the scale should be stored in
 *			    PCIe Slot Capabilities Register format
 *
 * Returns the slot power limit in milliwatts and, if @slot_power_limit_value
 * and @slot_power_limit_scale pointers are non-NULL, fills in the value and
 * scale in the format used by the PCIe Slot Capabilities Register.
 *
 * If the property is not found or is invalid, returns 0.
 */
u32 of_pci_get_slot_power_limit(struct device_node *node,
				u8 *slot_power_limit_value,
				u8 *slot_power_limit_scale)
{
	u32 slot_power_limit_mw;
	u8 value, scale;

	if (of_property_read_u32(node, "slot-power-limit-milliwatt",
				 &slot_power_limit_mw))
		slot_power_limit_mw = 0;

	/* Calculate Slot Power Limit Value and Slot Power Limit Scale */
	if (slot_power_limit_mw == 0) {
		value = 0x00;
		scale = 0;
	} else if (slot_power_limit_mw <= 255) {
		value = slot_power_limit_mw;
		scale = 3;
	} else if (slot_power_limit_mw <= 255*10) {
		value = slot_power_limit_mw / 10;
		scale = 2;
		slot_power_limit_mw = slot_power_limit_mw / 10 * 10;
	} else if (slot_power_limit_mw <= 255*100) {
		value = slot_power_limit_mw / 100;
		scale = 1;
		slot_power_limit_mw = slot_power_limit_mw / 100 * 100;
	} else if (slot_power_limit_mw <= 239*1000) {
		value = slot_power_limit_mw / 1000;
		scale = 0;
		slot_power_limit_mw = slot_power_limit_mw / 1000 * 1000;
	} else if (slot_power_limit_mw < 250*1000) {
		value = 0xEF;
		scale = 0;
		slot_power_limit_mw = 239*1000;
	} else if (slot_power_limit_mw <= 600*1000) {
		value = 0xF0 + (slot_power_limit_mw / 1000 - 250) / 25;
		scale = 0;
		slot_power_limit_mw = slot_power_limit_mw / (1000*25) * (1000*25);
	} else {
		value = 0xFE;
		scale = 0;
		slot_power_limit_mw = 600*1000;
	}

	if (slot_power_limit_value)
		*slot_power_limit_value = value;

	if (slot_power_limit_scale)
		*slot_power_limit_scale = scale;

	return slot_power_limit_mw;
}
EXPORT_SYMBOL_GPL(of_pci_get_slot_power_limit);
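
/*
 * Worked example (illustrative only): "slot-power-limit-milliwatt = <75000>;"
 * falls into the <= 239 W branch above, so value = 75 and scale = 0
 * (75 x 1.0 W), and 75000 mW is returned. A hypothetical 1545 mW limit hits
 * the <= 2550 mW branch instead: value = 154, scale = 2 (154 x 0.01 W), and
 * the returned limit is rounded down to 1540 mW.
 */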