xref: /linux/drivers/pci/bus.c (revision 249872f53d64441690927853e9d3af36394802d5)
// SPDX-License-Identifier: GPL-2.0
/*
 * From setup-res.c, by:
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *	Ivan Kokshaysky (ink@jurassic.park.msu.ru)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cleanup.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>

#include "pci.h"

/*
 * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
 * to P2P or CardBus bridge windows) go in a table.  Additional ones (for
 * buses below host bridges or subtractive decode bridges) go in the list.
 * Use pci_bus_for_each_resource() to iterate through all the resources.
 */

struct pci_bus_resource {
	struct list_head	list;
	struct resource		*res;
};
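
/*
 * Example (illustrative sketch, not part of the original file): iterating
 * over all of a bus's resources, both the fixed bridge-window table and the
 * additional list entries, with pci_bus_for_each_resource().  The helper
 * name example_dump_bus_windows() is hypothetical.
 */
static void example_dump_bus_windows(struct pci_bus *bus)
{
	struct resource *res;

	pci_bus_for_each_resource(bus, res) {
		/* Slots in the fixed table may be empty */
		if (!res)
			continue;
		dev_info(&bus->dev, "bus window %pR\n", res);
	}
}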
34 
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
			     resource_size_t offset)
{
	struct resource_entry *entry;

	entry = resource_list_create_entry(res, 0);
	if (!entry) {
		pr_err("PCI: can't add host bridge window %pR\n", res);
		return;
	}

	entry->offset = offset;
	resource_list_add_tail(entry, resources);
}
EXPORT_SYMBOL(pci_add_resource_offset);

void pci_add_resource(struct list_head *resources, struct resource *res)
{
	pci_add_resource_offset(resources, res, 0);
}
EXPORT_SYMBOL(pci_add_resource);

void pci_free_resource_list(struct list_head *resources)
{
	resource_list_free(resources);
}
EXPORT_SYMBOL(pci_free_resource_list);
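
/*
 * Example (illustrative sketch, not part of the original file): a host
 * bridge driver populating and tearing down a resource list.  The window
 * contents and the CPU-to-bus offset are placeholders; real drivers take
 * them from DT, ACPI or hardware registers, and the helper name
 * example_build_windows() is hypothetical.
 */
static void example_build_windows(struct resource *mem_win,
				  struct resource *io_win,
				  resource_size_t bus_offset)
{
	LIST_HEAD(resources);

	/* MMIO window whose bus addresses differ from the CPU addresses */
	pci_add_resource_offset(&resources, mem_win, bus_offset);

	/* I/O window with a 1:1 CPU-to-bus mapping */
	pci_add_resource(&resources, io_win);

	/*
	 * ... hand the list to the PCI core, typically by splicing it
	 * into struct pci_host_bridge's windows list ...
	 */

	pci_free_resource_list(&resources);
}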
62 
void pci_bus_add_resource(struct pci_bus *bus, struct resource *res)
{
	struct pci_bus_resource *bus_res;

	bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
	if (!bus_res) {
		dev_err(&bus->dev, "can't add %pR resource\n", res);
		return;
	}

	bus_res->res = res;
	list_add_tail(&bus_res->list, &bus->resources);
}

struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
{
	struct pci_bus_resource *bus_res;

	if (n < PCI_BRIDGE_RESOURCE_NUM)
		return bus->resource[n];

	n -= PCI_BRIDGE_RESOURCE_NUM;
	list_for_each_entry(bus_res, &bus->resources, list) {
		if (n-- == 0)
			return bus_res->res;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_bus_resource_n);

void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res)
{
	struct pci_bus_resource *bus_res, *tmp;
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		if (bus->resource[i] == res) {
			bus->resource[i] = NULL;
			return;
		}
	}

	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
		if (bus_res->res == res) {
			list_del(&bus_res->list);
			kfree(bus_res);
			return;
		}
	}
}

void pci_bus_remove_resources(struct pci_bus *bus)
{
	int i;
	struct pci_bus_resource *bus_res, *tmp;

	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		bus->resource[i] = NULL;

	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
		list_del(&bus_res->list);
		kfree(bus_res);
	}
}

int devm_request_pci_bus_resources(struct device *dev,
				   struct list_head *resources)
{
	struct resource_entry *win;
	struct resource *parent, *res;
	int err;

	resource_list_for_each_entry(win, resources) {
		res = win->res;
		switch (resource_type(res)) {
		case IORESOURCE_IO:
			parent = &ioport_resource;
			break;
		case IORESOURCE_MEM:
			parent = &iomem_resource;
			break;
		default:
			continue;
		}

		err = devm_request_resource(dev, parent, res);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources);
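
/*
 * Example (illustrative sketch, not part of the original file): claiming a
 * host bridge's windows against the global ioport/iomem trees from a driver
 * probe path.  "bridge" is assumed to be a struct pci_host_bridge whose
 * windows list has already been filled in; example_claim_windows() is a
 * hypothetical helper name.
 */
static int example_claim_windows(struct device *dev,
				 struct pci_host_bridge *bridge)
{
	int err;

	/* Fails if any window conflicts with an already-claimed resource */
	err = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (err)
		return err;

	return 0;
}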
156 
static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct pci_bus_region pci_64_bit = {0,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
#endif

/*
 * @res contains CPU addresses.  Clip it so the corresponding bus addresses
 * on @bus are entirely within @region.  This is used to control the bus
 * addresses of resources we allocate, e.g., we may need a resource that
 * can be mapped by a 32-bit BAR.
 */
static void pci_clip_resource_to_region(struct pci_bus *bus,
					struct resource *res,
					struct pci_bus_region *region)
{
	struct pci_bus_region r;

	pcibios_resource_to_bus(bus, &r, res);
	if (r.start < region->start)
		r.start = region->start;
	if (r.end > region->end)
		r.end = region->end;

	if (r.end < r.start)
		res->end = res->start - 1;
	else
		pcibios_bus_to_resource(bus, res, &r);
}

static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_alignf alignf,
		void *alignf_data,
		struct pci_bus_region *region)
{
	struct resource *r, avail;
	resource_size_t max;
	int ret;

	type_mask |= IORESOURCE_TYPE_BITS;

	pci_bus_for_each_resource(bus, r) {
		resource_size_t min_used = min;

		if (!r)
			continue;

		if (r->flags & (IORESOURCE_UNSET|IORESOURCE_DISABLED))
			continue;

		/* type_mask must match */
		if ((res->flags ^ r->flags) & type_mask)
			continue;

		/*
		 * We cannot allocate a non-prefetchable resource from a
		 * prefetchable bridge window.
		 */
		if ((r->flags & IORESOURCE_PREFETCH) &&
		    !(res->flags & IORESOURCE_PREFETCH))
			continue;

		avail = *r;
		pci_clip_resource_to_region(bus, &avail, region);

		/*
		 * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
		 * protect badly documented motherboard resources, but if
		 * this is an already-configured bridge window, its start
		 * overrides "min".
		 */
		if (avail.start)
			min_used = avail.start;

		max = avail.end;

		/* Don't bother if the available space isn't large enough */
		if (size > max - min_used + 1)
			continue;

		/* OK, try it out */
		ret = allocate_resource(r, res, size, min_used, max,
					align, alignf, alignf_data);
		if (ret == 0)
			return 0;
	}
	return -ENOMEM;
}

/**
 * pci_bus_alloc_resource - allocate a resource from a parent bus
 * @bus: PCI bus
 * @res: resource to allocate
 * @size: size of resource to allocate
 * @align: alignment of resource to allocate
 * @min: minimum address to allocate (e.g. PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM)
 * @type_mask: IORESOURCE_* type flags that must match between @res and the window
 * @alignf: resource alignment function
 * @alignf_data: data argument for resource alignment function
 *
 * Given the PCI bus a device resides on, the size, minimum address,
 * alignment and type, try to find an acceptable resource allocation
 * for a specific device resource.
 */
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_alignf alignf,
		void *alignf_data)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	int rc;

	if (res->flags & IORESOURCE_MEM_64) {
		rc = pci_bus_alloc_from_region(bus, res, size, align, min,
					       type_mask, alignf, alignf_data,
					       &pci_high);
		if (rc == 0)
			return 0;

		return pci_bus_alloc_from_region(bus, res, size, align, min,
						 type_mask, alignf, alignf_data,
						 &pci_64_bit);
	}
#endif

	return pci_bus_alloc_from_region(bus, res, size, align, min,
					 type_mask, alignf, alignf_data,
					 &pci_32_bit);
}
EXPORT_SYMBOL(pci_bus_alloc_resource);
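
/*
 * Example (illustrative sketch, not part of the original file): asking the
 * parent bus for 1 MB of non-prefetchable, 1 MB-aligned memory space for a
 * device resource.  PCIBIOS_MIN_MEM follows the convention mentioned in the
 * comment in pci_bus_alloc_from_region(); pcibios_align_resource() is
 * assumed here as the alignment callback.  example_alloc_bar() and the use
 * of resource index 0 are hypothetical.
 */
static int example_alloc_bar(struct pci_dev *dev)
{
	struct resource *res = &dev->resource[0];

	res->flags = IORESOURCE_MEM;

	return pci_bus_alloc_resource(dev->bus, res, 0x100000, 0x100000,
				      PCIBIOS_MIN_MEM, IORESOURCE_PREFETCH,
				      pcibios_align_resource, dev);
}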
290 
/*
 * The @idx resource of @dev should be a PCI-PCI bridge window.  If this
 * resource fits inside a window of an upstream bridge, do nothing.  If it
 * overlaps an upstream window but extends outside it, clip the resource so
 * it fits completely inside.
 */
bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
{
	struct pci_bus *bus = dev->bus;
	struct resource *res = &dev->resource[idx];
	struct resource orig_res = *res;
	struct resource *r;

	pci_bus_for_each_resource(bus, r) {
		resource_size_t start, end;

		if (!r)
			continue;

		if (resource_type(res) != resource_type(r))
			continue;

		start = max(r->start, res->start);
		end = min(r->end, res->end);

		if (start > end)
			continue;	/* no overlap */

		if (res->start == start && res->end == end)
			return false;	/* no change */

		res->start = start;
		res->end = end;
		res->flags &= ~IORESOURCE_UNSET;
		orig_res.flags &= ~IORESOURCE_UNSET;
		pci_info(dev, "%pR clipped to %pR\n", &orig_res, res);

		return true;
	}

	return false;
}

void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }

void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }

/**
 * pci_bus_add_device - start driver for a single device
 * @dev: device to add
 *
 * This adds sysfs entries and starts the device driver.
 */
void pci_bus_add_device(struct pci_dev *dev)
{
	struct device_node *dn = dev->dev.of_node;
	struct platform_device *pdev;

	/*
	 * This cannot be done in pci_device_add() yet because resources
	 * are not assigned yet for some devices.
	 */
	pcibios_bus_add_device(dev);
	pci_fixup_device(pci_fixup_final, dev);
	if (pci_is_bridge(dev))
		of_pci_make_dev_node(dev);
	pci_create_sysfs_dev_files(dev);
	pci_proc_attach_device(dev);
	pci_bridge_d3_update(dev);

	/* Save config space for error recovery */
	pci_save_state(dev);

	/*
	 * If the PCI device is associated with a pwrctrl device with a
	 * power supply, create a device link between the PCI device and
	 * the pwrctrl device.  This ensures that pwrctrl drivers are
	 * probed before PCI client drivers.
	 */
	pdev = of_find_device_by_node(dn);
	if (pdev) {
		if (of_pci_supply_present(dn)) {
			if (!device_link_add(&dev->dev, &pdev->dev,
					     DL_FLAG_AUTOREMOVE_CONSUMER)) {
				pci_err(dev, "failed to add device link to power control device %s\n",
					pdev->name);
			}
		}
		put_device(&pdev->dev);
	}

	if (!dn || of_device_is_available(dn))
		pci_dev_allow_binding(dev);

	device_initial_probe(&dev->dev);

	pci_dev_assign_added(dev);
}
EXPORT_SYMBOL_GPL(pci_bus_add_device);

/**
 * pci_bus_add_devices - start drivers for PCI devices
 * @bus: bus to check for new devices
 *
 * Start drivers for PCI devices and add some sysfs entries.
 */
void pci_bus_add_devices(const struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip already-added devices */
		if (pci_dev_is_added(dev))
			continue;
		pci_bus_add_device(dev);
	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip if device attach failed */
		if (!pci_dev_is_added(dev))
			continue;
		child = dev->subordinate;
		if (child)
			pci_bus_add_devices(child);
	}
}
EXPORT_SYMBOL(pci_bus_add_devices);
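
/*
 * Example (illustrative sketch, not part of the original file): the usual
 * host controller sequence that ends with pci_bus_add_devices().  Many
 * drivers get this for free from pci_host_probe(); the explicit form is
 * shown here for clarity, with error handling omitted, and
 * example_enumerate() is a hypothetical helper name.
 */
static void example_enumerate(struct pci_host_bridge *bridge)
{
	struct pci_bus *bus;

	pci_scan_root_bus_bridge(bridge);	/* discover devices */
	bus = bridge->bus;

	pci_bus_size_bridges(bus);		/* size bridge windows */
	pci_bus_assign_resources(bus);		/* assign BARs and windows */
	pci_bus_add_devices(bus);		/* bind drivers (this file) */
}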
419 
static int __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
			  void *userdata)
{
	struct pci_dev *dev;
	int ret = 0;

	list_for_each_entry(dev, &top->devices, bus_list) {
		ret = cb(dev, userdata);
		if (ret)
			break;
		if (dev->subordinate) {
			ret = __pci_walk_bus(dev->subordinate, cb, userdata);
			if (ret)
				break;
		}
	}
	return ret;
}

static int __pci_walk_bus_reverse(struct pci_bus *top,
				  int (*cb)(struct pci_dev *, void *),
				  void *userdata)
{
	struct pci_dev *dev;
	int ret = 0;

	list_for_each_entry_reverse(dev, &top->devices, bus_list) {
		if (dev->subordinate) {
			ret = __pci_walk_bus_reverse(dev->subordinate, cb,
						     userdata);
			if (ret)
				break;
		}
		ret = cb(dev, userdata);
		if (ret)
			break;
	}
	return ret;
}

/**
 * pci_walk_bus - walk devices on/under bus, calling callback
 * @top: bus whose devices should be walked
 * @cb: callback to be called for each device found
 * @userdata: arbitrary pointer to be passed to callback
 *
 * Walk the given bus, including any bridged devices on buses under this
 * bus, and call the provided callback on each device found.
 *
 * The return value of @cb is checked after each call; if it returns
 * anything other than 0, the walk stops.
 */
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
	down_read(&pci_bus_sem);
	__pci_walk_bus(top, cb, userdata);
	up_read(&pci_bus_sem);
}
EXPORT_SYMBOL_GPL(pci_walk_bus);
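
/*
 * Example (illustrative sketch, not part of the original file): counting the
 * devices below a bus with pci_walk_bus().  Returning 0 from the callback
 * keeps the walk going; any non-zero value would stop it early.  The
 * example_* names are hypothetical.
 */
static int example_count_one(struct pci_dev *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;
}

static unsigned int example_count_devices(struct pci_bus *bus)
{
	unsigned int count = 0;

	pci_walk_bus(bus, example_count_one, &count);
	return count;
}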
480 
/**
 * pci_walk_bus_reverse - walk devices on/under bus, calling callback
 * @top: bus whose devices should be walked
 * @cb: callback to be called for each device found
 * @userdata: arbitrary pointer to be passed to callback
 *
 * Same semantics as pci_walk_bus(), but walks the bus in reverse order.
 */
void pci_walk_bus_reverse(struct pci_bus *top,
			  int (*cb)(struct pci_dev *, void *), void *userdata)
{
	down_read(&pci_bus_sem);
	__pci_walk_bus_reverse(top, cb, userdata);
	up_read(&pci_bus_sem);
}
EXPORT_SYMBOL_GPL(pci_walk_bus_reverse);

void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
	lockdep_assert_held(&pci_bus_sem);

	__pci_walk_bus(top, cb, userdata);
}

struct pci_bus *pci_bus_get(struct pci_bus *bus)
{
	if (bus)
		get_device(&bus->dev);
	return bus;
}

void pci_bus_put(struct pci_bus *bus)
{
	if (bus)
		put_device(&bus->dev);
}