// SPDX-License-Identifier: GPL-2.0
/*
 * From setup-res.c, by:
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *	Ivan Kokshaysky (ink@jurassic.park.msu.ru)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>

#include "pci.h"

void pci_add_resource_offset(struct list_head *resources, struct resource *res,
			     resource_size_t offset)
{
	struct resource_entry *entry;

	entry = resource_list_create_entry(res, 0);
	if (!entry) {
		pr_err("PCI: can't add host bridge window %pR\n", res);
		return;
	}

	entry->offset = offset;
	resource_list_add_tail(entry, resources);
}
EXPORT_SYMBOL(pci_add_resource_offset);

void pci_add_resource(struct list_head *resources, struct resource *res)
{
	pci_add_resource_offset(resources, res, 0);
}
EXPORT_SYMBOL(pci_add_resource);

void pci_free_resource_list(struct list_head *resources)
{
	resource_list_free(resources);
}
EXPORT_SYMBOL(pci_free_resource_list);

void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
			  unsigned int flags)
{
	struct pci_bus_resource *bus_res;

	bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
	if (!bus_res) {
		dev_err(&bus->dev, "can't add %pR resource\n", res);
		return;
	}

	bus_res->res = res;
	bus_res->flags = flags;
	list_add_tail(&bus_res->list, &bus->resources);
}

struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
{
	struct pci_bus_resource *bus_res;

	if (n < PCI_BRIDGE_RESOURCE_NUM)
		return bus->resource[n];

	n -= PCI_BRIDGE_RESOURCE_NUM;
	list_for_each_entry(bus_res, &bus->resources, list) {
		if (n-- == 0)
			return bus_res->res;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_bus_resource_n);

void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res)
{
	struct pci_bus_resource *bus_res, *tmp;
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		if (bus->resource[i] == res) {
			bus->resource[i] = NULL;
			return;
		}
	}

	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
		if (bus_res->res == res) {
			list_del(&bus_res->list);
			kfree(bus_res);
			return;
		}
	}
}

void pci_bus_remove_resources(struct pci_bus *bus)
{
	int i;
	struct pci_bus_resource *bus_res, *tmp;

	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		bus->resource[i] = NULL;

	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
		list_del(&bus_res->list);
		kfree(bus_res);
	}
}

int devm_request_pci_bus_resources(struct device *dev,
				   struct list_head *resources)
{
	struct resource_entry *win;
	struct resource *parent, *res;
	int err;

	resource_list_for_each_entry(win, resources) {
		res = win->res;
		switch (resource_type(res)) {
		case IORESOURCE_IO:
			parent = &ioport_resource;
			break;
		case IORESOURCE_MEM:
			parent = &iomem_resource;
			break;
		default:
			continue;
		}

		err = devm_request_resource(dev, parent, res);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources);
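
/*
 * Illustrative sketch (not part of this file): a host bridge driver would
 * typically build its window list with pci_add_resource_offset() and then
 * claim the windows with devm_request_pci_bus_resources().  The names
 * "bridge_dev", "mem_res" and "bus_base" below are hypothetical:
 *
 *	LIST_HEAD(resources);
 *
 *	pci_add_resource(&resources, &ioport_resource);
 *	pci_add_resource_offset(&resources, &mem_res,
 *				mem_res.start - bus_base);
 *
 *	err = devm_request_pci_bus_resources(bridge_dev, &resources);
 *	if (err)
 *		pci_free_resource_list(&resources);
 */
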
static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct pci_bus_region pci_64_bit = {0,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
#endif

/*
 * @res contains CPU addresses.  Clip it so the corresponding bus addresses
 * on @bus are entirely within @region.  This is used to control the bus
 * addresses of resources we allocate, e.g., we may need a resource that
 * can be mapped by a 32-bit BAR.
 */
static void pci_clip_resource_to_region(struct pci_bus *bus,
					struct resource *res,
					struct pci_bus_region *region)
{
	struct pci_bus_region r;

	pcibios_resource_to_bus(bus, &r, res);
	if (r.start < region->start)
		r.start = region->start;
	if (r.end > region->end)
		r.end = region->end;

	if (r.end < r.start)
		res->end = res->start - 1;
	else
		pcibios_bus_to_resource(bus, res, &r);
}
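
/*
 * Illustrative example (hypothetical numbers, not taken from this code):
 * suppose a host bridge window translates CPU addresses
 * 0x400000000-0x47fffffff to bus addresses 0x00000000-0x7fffffff.
 * Clipping that window to pci_32_bit leaves it unchanged, since its bus
 * addresses already lie below 4GB; clipping it to pci_high empties it,
 * which pci_clip_resource_to_region() signals by making res->end smaller
 * than res->start.
 */
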
static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_size_t (*alignf)(void *,
					  const struct resource *,
					  resource_size_t,
					  resource_size_t),
		void *alignf_data,
		struct pci_bus_region *region)
{
	struct resource *r, avail;
	resource_size_t max;
	int ret;

	type_mask |= IORESOURCE_TYPE_BITS;

	pci_bus_for_each_resource(bus, r) {
		resource_size_t min_used = min;

		if (!r)
			continue;

		/* type_mask must match */
		if ((res->flags ^ r->flags) & type_mask)
			continue;

		/*
		 * We cannot allocate a non-prefetchable resource
		 * from a prefetchable window.
		 */
		if ((r->flags & IORESOURCE_PREFETCH) &&
		    !(res->flags & IORESOURCE_PREFETCH))
			continue;

		avail = *r;
		pci_clip_resource_to_region(bus, &avail, region);

		/*
		 * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
		 * protect badly documented motherboard resources, but if
		 * this is an already-configured bridge window, its start
		 * overrides "min".
		 */
		if (avail.start)
			min_used = avail.start;

		max = avail.end;

		/* Don't bother if available space isn't large enough */
		if (size > max - min_used + 1)
			continue;

		/* OK, try it out. */
		ret = allocate_resource(r, res, size, min_used, max,
					align, alignf, alignf_data);
		if (ret == 0)
			return 0;
	}
	return -ENOMEM;
}

/**
 * pci_bus_alloc_resource - allocate a resource from a parent bus
 * @bus: PCI bus
 * @res: resource to allocate
 * @size: size of resource to allocate
 * @align: alignment of resource to allocate
 * @min: minimum /proc/iomem address to allocate
 * @type_mask: IORESOURCE_* type flags
 * @alignf: resource alignment function
 * @alignf_data: data argument for resource alignment function
 *
 * Given the PCI bus a device resides on, the size, minimum address,
 * alignment and type, try to find an acceptable resource allocation
 * for a specific device resource.
 */
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_size_t (*alignf)(void *,
					  const struct resource *,
					  resource_size_t,
					  resource_size_t),
		void *alignf_data)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	int rc;

	if (res->flags & IORESOURCE_MEM_64) {
		rc = pci_bus_alloc_from_region(bus, res, size, align, min,
					       type_mask, alignf, alignf_data,
					       &pci_high);
		if (rc == 0)
			return 0;

		return pci_bus_alloc_from_region(bus, res, size, align, min,
						 type_mask, alignf, alignf_data,
						 &pci_64_bit);
	}
#endif

	return pci_bus_alloc_from_region(bus, res, size, align, min,
					 type_mask, alignf, alignf_data,
					 &pci_32_bit);
}
EXPORT_SYMBOL(pci_bus_alloc_resource);
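
/*
 * Illustrative sketch of a hypothetical caller (the variables "resno",
 * "size", "align", "min" and "ret" are assumed): allocating bus space for
 * a BAR of device "dev", reusing the prefetch/64-bit bits of the resource
 * itself as the type mask and letting the arch align the result:
 *
 *	struct resource *res = &dev->resource[resno];
 *	int ret;
 *
 *	ret = pci_bus_alloc_resource(dev->bus, res, size, align, min,
 *				     IORESOURCE_PREFETCH | IORESOURCE_MEM_64,
 *				     pcibios_align_resource, dev);
 */
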
/*
 * The @idx resource of @dev should be a PCI-PCI bridge window.  If this
 * resource fits inside a window of an upstream bridge, do nothing.  If it
 * overlaps an upstream window but extends outside it, clip the resource so
 * it fits completely inside.
 */
bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
{
	struct pci_bus *bus = dev->bus;
	struct resource *res = &dev->resource[idx];
	struct resource orig_res = *res;
	struct resource *r;

	pci_bus_for_each_resource(bus, r) {
		resource_size_t start, end;

		if (!r)
			continue;

		if (resource_type(res) != resource_type(r))
			continue;

		start = max(r->start, res->start);
		end = min(r->end, res->end);

		if (start > end)
			continue;	/* no overlap */

		if (res->start == start && res->end == end)
			return false;	/* no change */

		res->start = start;
		res->end = end;
		res->flags &= ~IORESOURCE_UNSET;
		orig_res.flags &= ~IORESOURCE_UNSET;
		pci_info(dev, "%pR clipped to %pR\n", &orig_res, res);

		return true;
	}

	return false;
}

void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }

void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }

/**
 * pci_bus_add_device - start driver for a single device
 * @dev: device to add
 *
 * This adds sysfs entries and starts the device driver.
 */
void pci_bus_add_device(struct pci_dev *dev)
{
	struct device_node *dn = dev->dev.of_node;
	int retval;

	/*
	 * Cannot be done in pci_device_add() yet because resources
	 * are not assigned yet for some devices.
	 */
	pcibios_bus_add_device(dev);
	pci_fixup_device(pci_fixup_final, dev);
	if (pci_is_bridge(dev))
		of_pci_make_dev_node(dev);
	pci_create_sysfs_dev_files(dev);
	pci_proc_attach_device(dev);
	pci_bridge_d3_update(dev);

	dev->match_driver = !dn || of_device_is_available(dn);
	retval = device_attach(&dev->dev);
	if (retval < 0 && retval != -EPROBE_DEFER)
		pci_warn(dev, "device attach failed (%d)\n", retval);

	pci_dev_assign_added(dev, true);

	if (dev_of_node(&dev->dev) && pci_is_bridge(dev)) {
		retval = of_platform_populate(dev_of_node(&dev->dev), NULL, NULL,
					      &dev->dev);
		if (retval)
			pci_err(dev, "failed to populate child OF nodes (%d)\n",
				retval);
	}
}
EXPORT_SYMBOL_GPL(pci_bus_add_device);

/**
 * pci_bus_add_devices - start driver for PCI devices
 * @bus: bus to check for new devices
 *
 * Start driver for PCI devices and add some sysfs entries.
 */
void pci_bus_add_devices(const struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip already-added devices */
		if (pci_dev_is_added(dev))
			continue;
		pci_bus_add_device(dev);
	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip if device attach failed */
		if (!pci_dev_is_added(dev))
			continue;
		child = dev->subordinate;
		if (child)
			pci_bus_add_devices(child);
	}
}
EXPORT_SYMBOL(pci_bus_add_devices);

static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
			   void *userdata, bool locked)
{
	struct pci_dev *dev;
	struct pci_bus *bus;
	struct list_head *next;
	int retval;

	bus = top;
	if (!locked)
		down_read(&pci_bus_sem);
	next = top->devices.next;
	for (;;) {
		if (next == &bus->devices) {
			/* end of this bus, go up or finish */
			if (bus == top)
				break;
			next = bus->self->bus_list.next;
			bus = bus->self->bus;
			continue;
		}
		dev = list_entry(next, struct pci_dev, bus_list);
		if (dev->subordinate) {
			/* this is a PCI-PCI bridge, do its devices next */
			next = dev->subordinate->devices.next;
			bus = dev->subordinate;
		} else
			next = dev->bus_list.next;

		retval = cb(dev, userdata);
		if (retval)
			break;
	}
	if (!locked)
		up_read(&pci_bus_sem);
}

/**
 * pci_walk_bus - walk devices on/under bus, calling callback.
 * @top: bus whose devices should be walked
 * @cb: callback to be called for each device found
 * @userdata: arbitrary pointer to be passed to callback
 *
 * Walk the given bus, including any bridged devices
 * on buses under this bus.  Call the provided callback
 * on each device found.
 *
 * We check the return of @cb each time.  If it returns anything
 * other than 0, we break out.
 */
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
	__pci_walk_bus(top, cb, userdata, false);
}
EXPORT_SYMBOL_GPL(pci_walk_bus);

void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
	lockdep_assert_held(&pci_bus_sem);

	__pci_walk_bus(top, cb, userdata, true);
}
EXPORT_SYMBOL_GPL(pci_walk_bus_locked);

struct pci_bus *pci_bus_get(struct pci_bus *bus)
{
	if (bus)
		get_device(&bus->dev);
	return bus;
}

void pci_bus_put(struct pci_bus *bus)
{
	if (bus)
		put_device(&bus->dev);
}
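
/*
 * Illustrative sketch (hypothetical helper, not part of this file): counting
 * every device at or below "bus" with pci_walk_bus().  The callback returns
 * 0 to keep walking and non-zero to stop early:
 *
 *	static int count_one(struct pci_dev *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	pci_walk_bus(bus, count_one, &count);
 */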