// SPDX-License-Identifier: GPL-2.0
/*
 * From setup-res.c, by:
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *	Ivan Kokshaysky (ink@jurassic.park.msu.ru)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cleanup.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>

#include "pci.h"

/*
 * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
 * to P2P or CardBus bridge windows) go in a table.  Additional ones (for
 * buses below host bridges or subtractive decode bridges) go in the list.
 * Use pci_bus_for_each_resource() to iterate through all the resources.
 */

struct pci_bus_resource {
	struct list_head	list;
	struct resource		*res;
};
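
/*
 * Illustrative sketch (not part of this file's logic): pci_bus_for_each_resource()
 * hides the table/list split described above, so a caller that already holds
 * some struct pci_bus "bus" can walk every window with:
 *
 *	struct resource *r;
 *
 *	pci_bus_for_each_resource(bus, r) {
 *		if (!r)
 *			continue;	// unused table slots are NULL
 *		dev_dbg(&bus->dev, "bus window %pR\n", r);
 *	}
 */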

void pci_add_resource_offset(struct list_head *resources, struct resource *res,
			     resource_size_t offset)
{
	struct resource_entry *entry;

	entry = resource_list_create_entry(res, 0);
	if (!entry) {
		pr_err("PCI: can't add host bridge window %pR\n", res);
		return;
	}

	entry->offset = offset;
	resource_list_add_tail(entry, resources);
}
EXPORT_SYMBOL(pci_add_resource_offset);

void pci_add_resource(struct list_head *resources, struct resource *res)
{
	pci_add_resource_offset(resources, res, 0);
}
EXPORT_SYMBOL(pci_add_resource);

void pci_free_resource_list(struct list_head *resources)
{
	resource_list_free(resources);
}
EXPORT_SYMBOL(pci_free_resource_list);
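
/*
 * Illustrative sketch (hypothetical host bridge code, not from this file):
 * a driver typically collects its apertures into a resource list before
 * scanning and frees the list when it is done with it.  Here "mmio" is a
 * made-up CPU-addressed window and "offset" its CPU-to-bus address offset:
 *
 *	LIST_HEAD(resources);
 *
 *	pci_add_resource(&resources, &ioport_resource);
 *	pci_add_resource_offset(&resources, &mmio, offset);
 *	...
 *	pci_free_resource_list(&resources);
 */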

void pci_bus_add_resource(struct pci_bus *bus, struct resource *res)
{
	struct pci_bus_resource *bus_res;

	bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
	if (!bus_res) {
		dev_err(&bus->dev, "can't add %pR resource\n", res);
		return;
	}

	bus_res->res = res;
	list_add_tail(&bus_res->list, &bus->resources);
}

struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
{
	struct pci_bus_resource *bus_res;

	if (n < PCI_BRIDGE_RESOURCE_NUM)
		return bus->resource[n];

	n -= PCI_BRIDGE_RESOURCE_NUM;
	list_for_each_entry(bus_res, &bus->resources, list) {
		if (n-- == 0)
			return bus_res->res;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_bus_resource_n);

void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res)
{
	struct pci_bus_resource *bus_res, *tmp;
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		if (bus->resource[i] == res) {
			bus->resource[i] = NULL;
			return;
		}
	}

	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
		if (bus_res->res == res) {
			list_del(&bus_res->list);
			kfree(bus_res);
			return;
		}
	}
}

void pci_bus_remove_resources(struct pci_bus *bus)
{
	int i;
	struct pci_bus_resource *bus_res, *tmp;

	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		bus->resource[i] = NULL;

	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
		list_del(&bus_res->list);
		kfree(bus_res);
	}
}

int devm_request_pci_bus_resources(struct device *dev,
				   struct list_head *resources)
{
	struct resource_entry *win;
	struct resource *parent, *res;
	int err;

	resource_list_for_each_entry(win, resources) {
		res = win->res;
		switch (resource_type(res)) {
		case IORESOURCE_IO:
			parent = &ioport_resource;
			break;
		case IORESOURCE_MEM:
			parent = &iomem_resource;
			break;
		default:
			continue;
		}

		err = devm_request_resource(dev, parent, res);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources);
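
/*
 * Illustrative sketch (hypothetical probe code, not from this file): a host
 * bridge driver would normally hand its window list to the helper above so
 * that the I/O and memory apertures are claimed against ioport_resource and
 * iomem_resource for the lifetime of the device, e.g.:
 *
 *	err = devm_request_pci_bus_resources(&pdev->dev, &bridge->windows);
 *	if (err)
 *		return err;
 *
 * where "pdev" is the platform device and "bridge" its struct pci_host_bridge.
 */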

static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct pci_bus_region pci_64_bit = {0,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
#endif

/*
 * @res contains CPU addresses.  Clip it so the corresponding bus addresses
 * on @bus are entirely within @region.  This is used to control the bus
 * addresses of resources we allocate, e.g., we may need a resource that
 * can be mapped by a 32-bit BAR.
 */
static void pci_clip_resource_to_region(struct pci_bus *bus,
					struct resource *res,
					struct pci_bus_region *region)
{
	struct pci_bus_region r;

	pcibios_resource_to_bus(bus, &r, res);
	if (r.start < region->start)
		r.start = region->start;
	if (r.end > region->end)
		r.end = region->end;

	if (r.end < r.start)
		res->end = res->start - 1;
	else
		pcibios_bus_to_resource(bus, res, &r);
}
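
/*
 * Worked example (illustrative numbers, assuming a 1:1 CPU/bus mapping):
 * clipping the CPU range [0xc0000000, 0x17fffffff] to pci_32_bit yields
 * [0xc0000000, 0xffffffff]; a range that lies entirely above 4GB ends up
 * with end < start, i.e. an empty resource that the caller below skips.
 */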

static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_alignf alignf,
		void *alignf_data,
		struct pci_bus_region *region)
{
	struct resource *r, avail;
	resource_size_t max;
	int ret;

	type_mask |= IORESOURCE_TYPE_BITS;

	pci_bus_for_each_resource(bus, r) {
		resource_size_t min_used = min;

		if (!r)
			continue;

		if (r->flags & (IORESOURCE_UNSET|IORESOURCE_DISABLED))
			continue;

		/* type_mask must match */
		if ((res->flags ^ r->flags) & type_mask)
			continue;

		/*
		 * We cannot allocate a non-prefetching resource
		 * from a pre-fetching area.
		 */
		if ((r->flags & IORESOURCE_PREFETCH) &&
		    !(res->flags & IORESOURCE_PREFETCH))
			continue;

		avail = *r;
		pci_clip_resource_to_region(bus, &avail, region);

		/*
		 * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
		 * protect badly documented motherboard resources, but if
		 * this is an already-configured bridge window, its start
		 * overrides "min".
		 */
		if (avail.start)
			min_used = avail.start;

		max = avail.end;

		/* Don't bother if available space isn't large enough */
		if (size > max - min_used + 1)
			continue;

		/* Ok, try it out.. */
		ret = allocate_resource(r, res, size, min_used, max,
					align, alignf, alignf_data);
		if (ret == 0)
			return 0;
	}
	return -ENOMEM;
}

/**
 * pci_bus_alloc_resource - allocate a resource from a parent bus
 * @bus: PCI bus
 * @res: resource to allocate
 * @size: size of resource to allocate
 * @align: alignment of resource to allocate
 * @min: minimum /proc/iomem address to allocate
 * @type_mask: IORESOURCE_* type flags
 * @alignf: resource alignment function
 * @alignf_data: data argument for resource alignment function
 *
 * Given the PCI bus a device resides on, the size, minimum address,
 * alignment and type, try to find an acceptable resource allocation
 * for a specific device resource.
 */
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_alignf alignf,
		void *alignf_data)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	int rc;

	if (res->flags & IORESOURCE_MEM_64) {
		rc = pci_bus_alloc_from_region(bus, res, size, align, min,
					       type_mask, alignf, alignf_data,
					       &pci_high);
		if (rc == 0)
			return 0;

		return pci_bus_alloc_from_region(bus, res, size, align, min,
						 type_mask, alignf, alignf_data,
						 &pci_64_bit);
	}
#endif

	return pci_bus_alloc_from_region(bus, res, size, align, min,
					 type_mask, alignf, alignf_data,
					 &pci_32_bit);
}
EXPORT_SYMBOL(pci_bus_alloc_resource);
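
/*
 * Illustrative sketch (simplified from the way BAR assignment uses the helper
 * above; details of the real call sites may differ): placing a 1 MB,
 * 1 MB-aligned non-prefetchable memory BAR somewhere under its bus could look
 * like:
 *
 *	ret = pci_bus_alloc_resource(bus, res, SZ_1M, SZ_1M, PCIBIOS_MIN_MEM,
 *				     IORESOURCE_PREFETCH,
 *				     pcibios_align_resource, dev);
 *
 * where "res" is the device resource to fill in; a zero return means "res"
 * now lies inside one of the windows of "bus".
 */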

/*
 * The @idx resource of @dev should be a PCI-PCI bridge window.  If this
 * resource fits inside a window of an upstream bridge, do nothing.  If it
 * overlaps an upstream window but extends outside it, clip the resource so
 * it fits completely inside.
 */
bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
{
	struct pci_bus *bus = dev->bus;
	struct resource *res = &dev->resource[idx];
	struct resource orig_res = *res;
	struct resource *r;

	pci_bus_for_each_resource(bus, r) {
		resource_size_t start, end;

		if (!r)
			continue;

		if (resource_type(res) != resource_type(r))
			continue;

		start = max(r->start, res->start);
		end = min(r->end, res->end);

		if (start > end)
			continue;	/* no overlap */

		if (res->start == start && res->end == end)
			return false;	/* no change */

		res->start = start;
		res->end = end;
		res->flags &= ~IORESOURCE_UNSET;
		orig_res.flags &= ~IORESOURCE_UNSET;
		pci_info(dev, "%pR clipped to %pR\n", &orig_res, res);

		return true;
	}

	return false;
}
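
/*
 * Worked example (illustrative numbers): if a bridge window was left
 * programmed as [mem 0xe0000000-0xefffffff] but the upstream bridge only
 * routes [mem 0xe0000000-0xe7ffffff], the function above shrinks the window
 * to the overlap and logs it as "clipped to"; with no overlap at all the
 * resource is left untouched and false is returned.
 */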

void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }

void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }

/**
 * pci_bus_add_device - start driver for a single device
 * @dev: device to add
 *
 * This adds sysfs entries and starts the device driver.
 */
void pci_bus_add_device(struct pci_dev *dev)
{
	struct device_node *dn = dev->dev.of_node;

	/*
	 * Cannot be done in pci_device_add() yet because resources
	 * are not assigned yet for some devices.
	 */
	pcibios_bus_add_device(dev);
	pci_fixup_device(pci_fixup_final, dev);
	if (pci_is_bridge(dev))
		of_pci_make_dev_node(dev);
	pci_create_sysfs_dev_files(dev);
	pci_proc_attach_device(dev);
	pci_bridge_d3_update(dev);

	/* Save config space for error recoverability */
	pci_save_state(dev);

	/*
	 * Enable runtime PM, which potentially allows the device to
	 * suspend immediately, only after the PCI state has been
	 * configured completely.
	 */
	pm_runtime_enable(&dev->dev);

	if (!dn || of_device_is_available(dn))
		pci_dev_allow_binding(dev);

	device_initial_probe(&dev->dev);

	pci_dev_assign_added(dev);
}
EXPORT_SYMBOL_GPL(pci_bus_add_device);

/**
 * pci_bus_add_devices - start drivers for PCI devices
 * @bus: bus to check for new devices
 *
 * Start drivers for PCI devices and add sysfs entries.
 */
void pci_bus_add_devices(const struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip already-added devices */
		if (pci_dev_is_added(dev))
			continue;
		pci_bus_add_device(dev);
	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip if device attach failed */
		if (!pci_dev_is_added(dev))
			continue;
		child = dev->subordinate;
		if (child)
			pci_bus_add_devices(child);
	}
}
EXPORT_SYMBOL(pci_bus_add_devices);
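
/*
 * Illustrative sketch (typical host bridge probe flow, simplified): after a
 * successful scan, the driver exposes everything that was found:
 *
 *	ret = pci_scan_root_bus_bridge(bridge);
 *	if (ret)
 *		return ret;
 *	pci_bus_add_devices(bridge->bus);
 *
 * Many drivers get the same effect through pci_host_probe(), which wraps the
 * scan, resource assignment and this call.
 */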

static int __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
			  void *userdata)
{
	struct pci_dev *dev;
	int ret = 0;

	list_for_each_entry(dev, &top->devices, bus_list) {
		ret = cb(dev, userdata);
		if (ret)
			break;
		if (dev->subordinate) {
			ret = __pci_walk_bus(dev->subordinate, cb, userdata);
			if (ret)
				break;
		}
	}
	return ret;
}

static int __pci_walk_bus_reverse(struct pci_bus *top,
				  int (*cb)(struct pci_dev *, void *),
				  void *userdata)
{
	struct pci_dev *dev;
	int ret = 0;

	list_for_each_entry_reverse(dev, &top->devices, bus_list) {
		if (dev->subordinate) {
			ret = __pci_walk_bus_reverse(dev->subordinate, cb,
						     userdata);
			if (ret)
				break;
		}
		ret = cb(dev, userdata);
		if (ret)
			break;
	}
	return ret;
}

/**
 *  pci_walk_bus - walk devices on/under bus, calling callback.
 *  @top: bus whose devices should be walked
 *  @cb: callback to be called for each device found
 *  @userdata: arbitrary pointer to be passed to callback
 *
 *  Walk the given bus, including any bridged devices
 *  on buses under this bus.  Call the provided callback
 *  on each device found.
 *
 *  We check the return of @cb each time. If it returns anything
 *  other than 0, we break out.
 */
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
	down_read(&pci_bus_sem);
	__pci_walk_bus(top, cb, userdata);
	up_read(&pci_bus_sem);
}
EXPORT_SYMBOL_GPL(pci_walk_bus);
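
/*
 * Illustrative sketch (hypothetical callback, not from this file): counting
 * every device at or below a bus; returning non-zero from the callback would
 * stop the walk early:
 *
 *	static int count_dev(struct pci_dev *pdev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return 0;	// keep walking
 *	}
 *
 *	unsigned int count = 0;
 *	pci_walk_bus(bus, count_dev, &count);
 */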

/**
 * pci_walk_bus_reverse - walk devices on/under bus, calling callback.
 * @top: bus whose devices should be walked
 * @cb: callback to be called for each device found
 * @userdata: arbitrary pointer to be passed to callback
 *
 * Same semantics as pci_walk_bus(), but walks the bus in reverse order.
 */
void pci_walk_bus_reverse(struct pci_bus *top,
			  int (*cb)(struct pci_dev *, void *), void *userdata)
{
	down_read(&pci_bus_sem);
	__pci_walk_bus_reverse(top, cb, userdata);
	up_read(&pci_bus_sem);
}
EXPORT_SYMBOL_GPL(pci_walk_bus_reverse);

void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
	lockdep_assert_held(&pci_bus_sem);

	__pci_walk_bus(top, cb, userdata);
}

struct pci_bus *pci_bus_get(struct pci_bus *bus)
{
	if (bus)
		get_device(&bus->dev);
	return bus;
}

void pci_bus_put(struct pci_bus *bus)
{
	if (bus)
		put_device(&bus->dev);
}
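
/*
 * Illustrative note: a caller that stores a bus pointer beyond the current
 * context pairs these helpers, e.g.:
 *
 *	struct pci_bus *bus = pci_bus_get(pdev->bus);
 *	...
 *	pci_bus_put(bus);
 *
 * Both helpers accept a NULL bus, as the checks above show.
 */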