// SPDX-License-Identifier: GPL-2.0
/*
 * From setup-res.c, by:
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *	Ivan Kokshaysky (ink@jurassic.park.msu.ru)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>

#include "pci.h"

/*
 * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
 * to P2P or CardBus bridge windows) go in a table.  Additional ones (for
 * buses below host bridges or subtractive decode bridges) go in the list.
 * Use pci_bus_for_each_resource() to iterate through all the resources.
 */

struct pci_bus_resource {
	struct list_head	list;
	struct resource		*res;
};
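
/*
 * Illustrative sketch (not part of the original file): as the comment above
 * notes, pci_bus_for_each_resource() walks both the fixed resource[] table
 * and the extra list entries.  A hypothetical helper that logs every window
 * of a bus might look like this.
 */
#if 0	/* example only */
static void pci_bus_dump_resources(struct pci_bus *bus)
{
	struct resource *r;

	pci_bus_for_each_resource(bus, r) {
		if (!r)		/* unused table slots are NULL */
			continue;
		dev_info(&bus->dev, "bus resource %pR\n", r);
	}
}
#endif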

void pci_add_resource_offset(struct list_head *resources, struct resource *res,
			     resource_size_t offset)
{
	struct resource_entry *entry;

	entry = resource_list_create_entry(res, 0);
	if (!entry) {
		pr_err("PCI: can't add host bridge window %pR\n", res);
		return;
	}

	entry->offset = offset;
	resource_list_add_tail(entry, resources);
}
EXPORT_SYMBOL(pci_add_resource_offset);

void pci_add_resource(struct list_head *resources, struct resource *res)
{
	pci_add_resource_offset(resources, res, 0);
}
EXPORT_SYMBOL(pci_add_resource);

void pci_free_resource_list(struct list_head *resources)
{
	resource_list_free(resources);
}
EXPORT_SYMBOL(pci_free_resource_list);

void pci_bus_add_resource(struct pci_bus *bus, struct resource *res)
{
	struct pci_bus_resource *bus_res;

	bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
	if (!bus_res) {
		dev_err(&bus->dev, "can't add %pR resource\n", res);
		return;
	}

	bus_res->res = res;
	list_add_tail(&bus_res->list, &bus->resources);
}

struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
{
	struct pci_bus_resource *bus_res;

	if (n < PCI_BRIDGE_RESOURCE_NUM)
		return bus->resource[n];

	n -= PCI_BRIDGE_RESOURCE_NUM;
	list_for_each_entry(bus_res, &bus->resources, list) {
		if (n-- == 0)
			return bus_res->res;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_bus_resource_n);

void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res)
{
	struct pci_bus_resource *bus_res, *tmp;
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		if (bus->resource[i] == res) {
			bus->resource[i] = NULL;
			return;
		}
	}

	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
		if (bus_res->res == res) {
			list_del(&bus_res->list);
			kfree(bus_res);
			return;
		}
	}
}

void pci_bus_remove_resources(struct pci_bus *bus)
{
	int i;
	struct pci_bus_resource *bus_res, *tmp;

	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		bus->resource[i] = NULL;

	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
		list_del(&bus_res->list);
		kfree(bus_res);
	}
}

int devm_request_pci_bus_resources(struct device *dev,
				   struct list_head *resources)
{
	struct resource_entry *win;
	struct resource *parent, *res;
	int err;

	resource_list_for_each_entry(win, resources) {
		res = win->res;
		switch (resource_type(res)) {
		case IORESOURCE_IO:
			parent = &ioport_resource;
			break;
		case IORESOURCE_MEM:
			parent = &iomem_resource;
			break;
		default:
			continue;
		}

		err = devm_request_resource(dev, parent, res);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources);

static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct pci_bus_region pci_64_bit = {0,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
#endif

/*
 * @res contains CPU addresses.  Clip it so the corresponding bus addresses
 * on @bus are entirely within @region.  This is used to control the bus
 * addresses of resources we allocate, e.g., we may need a resource that
 * can be mapped by a 32-bit BAR.
 */
static void pci_clip_resource_to_region(struct pci_bus *bus,
					struct resource *res,
					struct pci_bus_region *region)
{
	struct pci_bus_region r;

	pcibios_resource_to_bus(bus, &r, res);
	if (r.start < region->start)
		r.start = region->start;
	if (r.end > region->end)
		r.end = region->end;

	if (r.end < r.start)
		res->end = res->start - 1;
	else
		pcibios_bus_to_resource(bus, res, &r);
}
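
/*
 * Illustrative worked example (not part of the original file), assuming a
 * 1:1 CPU/bus address mapping: clipping a resource spanning
 * [0x0-0x17fffffff] against pci_32_bit ([0x0-0xffffffff]) shrinks it to
 * [0x0-0xffffffff], while clipping it against pci_high (which starts at
 * 0x100000000) shrinks it to [0x100000000-0x17fffffff].  If the resource
 * and the region do not overlap at all, the resource is left empty
 * (end < start).
 */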

static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_alignf alignf,
		void *alignf_data,
		struct pci_bus_region *region)
{
	struct resource *r, avail;
	resource_size_t max;
	int ret;

	type_mask |= IORESOURCE_TYPE_BITS;

	pci_bus_for_each_resource(bus, r) {
		resource_size_t min_used = min;

		if (!r)
			continue;

		if (r->flags & (IORESOURCE_UNSET|IORESOURCE_DISABLED))
			continue;

		/* type_mask must match */
		if ((res->flags ^ r->flags) & type_mask)
			continue;

		/*
		 * We cannot allocate a non-prefetching resource from a
		 * pre-fetching area.
		 */
		if ((r->flags & IORESOURCE_PREFETCH) &&
		    !(res->flags & IORESOURCE_PREFETCH))
			continue;

		avail = *r;
		pci_clip_resource_to_region(bus, &avail, region);

		/*
		 * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
		 * protect badly documented motherboard resources, but if
		 * this is an already-configured bridge window, its start
		 * overrides "min".
		 */
		if (avail.start)
			min_used = avail.start;

		max = avail.end;

		/* Don't bother if available space isn't large enough */
		if (size > max - min_used + 1)
			continue;

		/* Ok, try it out.. */
		ret = allocate_resource(r, res, size, min_used, max,
					align, alignf, alignf_data);
		if (ret == 0)
			return 0;
	}
	return -ENOMEM;
}

/**
 * pci_bus_alloc_resource - allocate a resource from a parent bus
 * @bus: PCI bus
 * @res: resource to allocate
 * @size: size of resource to allocate
 * @align: alignment of resource to allocate
 * @min: minimum /proc/iomem address to allocate
 * @type_mask: IORESOURCE_* type flags
 * @alignf: resource alignment function
 * @alignf_data: data argument for resource alignment function
 *
 * Given the PCI bus a device resides on, the size, minimum address,
 * alignment and type, try to find an acceptable resource allocation
 * for a specific device resource.
 */
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_alignf alignf,
		void *alignf_data)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	int rc;

	if (res->flags & IORESOURCE_MEM_64) {
		rc = pci_bus_alloc_from_region(bus, res, size, align, min,
					       type_mask, alignf, alignf_data,
					       &pci_high);
		if (rc == 0)
			return 0;

		return pci_bus_alloc_from_region(bus, res, size, align, min,
						 type_mask, alignf, alignf_data,
						 &pci_64_bit);
	}
#endif

	return pci_bus_alloc_from_region(bus, res, size, align, min,
					 type_mask, alignf, alignf_data,
					 &pci_32_bit);
}
EXPORT_SYMBOL(pci_bus_alloc_resource);
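
/*
 * Illustrative sketch (not part of the original file): a caller pre-sets the
 * type bits in res->flags and then asks for a range from one of the bus's
 * windows.  The helper name below is hypothetical; real callers (e.g. the
 * resource assignment code) usually pass PCIBIOS_MIN_MEM/PCIBIOS_MIN_IO for
 * @min and an arch alignment callback for @alignf.
 */
#if 0	/* example only */
static int demo_alloc_mem_window(struct pci_bus *bus, struct resource *res)
{
	/*
	 * Request 4K of 4K-aligned, non-prefetchable memory space.
	 * Passing IORESOURCE_PREFETCH in type_mask requires the chosen
	 * bus resource to match res->flags in that bit as well.
	 */
	res->flags = IORESOURCE_MEM;
	return pci_bus_alloc_resource(bus, res, 0x1000, 0x1000, 0,
				      IORESOURCE_PREFETCH, NULL, NULL);
}
#endif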

/*
 * The @idx resource of @dev should be a PCI-PCI bridge window.  If this
 * resource fits inside a window of an upstream bridge, do nothing.  If it
 * overlaps an upstream window but extends outside it, clip the resource so
 * it fits completely inside.
 */
bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
{
	struct pci_bus *bus = dev->bus;
	struct resource *res = &dev->resource[idx];
	struct resource orig_res = *res;
	struct resource *r;

	pci_bus_for_each_resource(bus, r) {
		resource_size_t start, end;

		if (!r)
			continue;

		if (resource_type(res) != resource_type(r))
			continue;

		start = max(r->start, res->start);
		end = min(r->end, res->end);

		if (start > end)
			continue;	/* no overlap */

		if (res->start == start && res->end == end)
			return false;	/* no change */

		res->start = start;
		res->end = end;
		res->flags &= ~IORESOURCE_UNSET;
		orig_res.flags &= ~IORESOURCE_UNSET;
		pci_info(dev, "%pR clipped to %pR\n", &orig_res, res);

		return true;
	}

	return false;
}

void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }

void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }

/**
 * pci_bus_add_device - start driver for a single device
 * @dev: device to add
 *
 * This adds sysfs entries and starts device drivers.
 */
void pci_bus_add_device(struct pci_dev *dev)
{
	struct device_node *dn = dev->dev.of_node;
	struct platform_device *pdev;

	/*
	 * Cannot be done in pci_device_add() yet because resources are not
	 * assigned yet for some devices.
	 */
	pcibios_bus_add_device(dev);
	pci_fixup_device(pci_fixup_final, dev);
	if (pci_is_bridge(dev))
		of_pci_make_dev_node(dev);
	pci_create_sysfs_dev_files(dev);
	pci_proc_attach_device(dev);
	pci_bridge_d3_update(dev);

	/*
	 * If the PCI device is associated with a pwrctrl device with a
	 * power supply, create a device link between the PCI device and
	 * pwrctrl device.  This ensures that pwrctrl drivers are probed
	 * before PCI client drivers.
	 */
	pdev = of_find_device_by_node(dn);
	if (pdev) {
		if (of_pci_supply_present(dn)) {
			if (!device_link_add(&dev->dev, &pdev->dev,
					     DL_FLAG_AUTOREMOVE_CONSUMER)) {
				pci_err(dev, "failed to add device link to power control device %s\n",
					pdev->name);
			}
		}
		put_device(&pdev->dev);
	}

	if (!dn || of_device_is_available(dn))
		pci_dev_allow_binding(dev);

	device_initial_probe(&dev->dev);

	pci_dev_assign_added(dev);
}
EXPORT_SYMBOL_GPL(pci_bus_add_device);

/**
 * pci_bus_add_devices - start drivers for PCI devices
 * @bus: bus to check for new devices
 *
 * Start drivers for PCI devices and add some sysfs entries.
 */
void pci_bus_add_devices(const struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip already-added devices */
		if (pci_dev_is_added(dev))
			continue;
		pci_bus_add_device(dev);
	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip if device attach failed */
		if (!pci_dev_is_added(dev))
			continue;
		child = dev->subordinate;
		if (child)
			pci_bus_add_devices(child);
	}
}
EXPORT_SYMBOL(pci_bus_add_devices);

static int __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
			  void *userdata)
{
	struct pci_dev *dev;
	int ret = 0;

	list_for_each_entry(dev, &top->devices, bus_list) {
		ret = cb(dev, userdata);
		if (ret)
			break;
		if (dev->subordinate) {
			ret = __pci_walk_bus(dev->subordinate, cb, userdata);
			if (ret)
				break;
		}
	}
	return ret;
}

/**
 *  pci_walk_bus - walk devices on/under bus, calling callback.
 *  @top: bus whose devices should be walked
 *  @cb: callback to be called for each device found
 *  @userdata: arbitrary pointer to be passed to callback
 *
 *  Walk the given bus, including any bridged devices
 *  on buses under this bus.  Call the provided callback
 *  on each device found.
 *
 *  We check the return of @cb each time. If it returns anything
 *  other than 0, we break out.
 */
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
	down_read(&pci_bus_sem);
	__pci_walk_bus(top, cb, userdata);
	up_read(&pci_bus_sem);
}
EXPORT_SYMBOL_GPL(pci_walk_bus);
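
/*
 * Illustrative sketch (not part of the original file): the callback returns
 * 0 to keep walking or nonzero to stop early, as documented above.  The
 * names below are hypothetical.
 */
#if 0	/* example only */
static int count_one_dev(struct pci_dev *pdev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;	/* a nonzero return would end the walk here */
}

static unsigned int count_devs_below(struct pci_bus *bus)
{
	unsigned int count = 0;

	pci_walk_bus(bus, count_one_dev, &count);
	return count;
}
#endif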

void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
	lockdep_assert_held(&pci_bus_sem);

	__pci_walk_bus(top, cb, userdata);
}

struct pci_bus *pci_bus_get(struct pci_bus *bus)
{
	if (bus)
		get_device(&bus->dev);
	return bus;
}

void pci_bus_put(struct pci_bus *bus)
{
	if (bus)
		put_device(&bus->dev);
}