drivers/pci/bus.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
// SPDX-License-Identifier: GPL-2.0
/*
 * From setup-res.c, by:
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *	Ivan Kokshaysky (ink@jurassic.park.msu.ru)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>

#include "pci.h"

void pci_add_resource_offset(struct list_head *resources, struct resource *res,
			     resource_size_t offset)
{
	struct resource_entry *entry;

	entry = resource_list_create_entry(res, 0);
	if (!entry) {
		pr_err("PCI: can't add host bridge window %pR\n", res);
		return;
	}

	entry->offset = offset;
	resource_list_add_tail(entry, resources);
}
EXPORT_SYMBOL(pci_add_resource_offset);

void pci_add_resource(struct list_head *resources, struct resource *res)
{
	pci_add_resource_offset(resources, res, 0);
}
EXPORT_SYMBOL(pci_add_resource);

void pci_free_resource_list(struct list_head *resources)
{
	resource_list_free(resources);
}
EXPORT_SYMBOL(pci_free_resource_list);
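
/*
 * Usage sketch (illustrative only, not part of this file): a host bridge
 * driver typically collects its apertures into a resource list before
 * registering the bridge.  The "foo" window and the addresses below are
 * hypothetical; @offset is the CPU address minus the bus address of the
 * window.
 *
 *	static struct resource foo_mmio = {
 *		.name	= "foo PCIe MEM",
 *		.start	= 0xc0000000,		// CPU address
 *		.end	= 0xcfffffff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	LIST_HEAD(resources);
 *
 *	// Bus address 0x0 maps to CPU address 0xc0000000 in this example.
 *	pci_add_resource_offset(&resources, &foo_mmio, 0xc0000000);
 *	...
 *	pci_free_resource_list(&resources);	// frees the list entries only
 */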

void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
			  unsigned int flags)
{
	struct pci_bus_resource *bus_res;

	bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
	if (!bus_res) {
		dev_err(&bus->dev, "can't add %pR resource\n", res);
		return;
	}

	bus_res->res = res;
	bus_res->flags = flags;
	list_add_tail(&bus_res->list, &bus->resources);
}

struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
{
	struct pci_bus_resource *bus_res;

	if (n < PCI_BRIDGE_RESOURCE_NUM)
		return bus->resource[n];

	n -= PCI_BRIDGE_RESOURCE_NUM;
	list_for_each_entry(bus_res, &bus->resources, list) {
		if (n-- == 0)
			return bus_res->res;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_bus_resource_n);
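
/*
 * Iteration sketch (illustrative only): pci_bus_resource_n() indexes the
 * fixed bus->resource[] slots first and then the extra entries on
 * bus->resources.  Callers normally use the pci_bus_for_each_resource()
 * helper from <linux/pci.h> rather than open-coding the index:
 *
 *	struct resource *r;
 *
 *	pci_bus_for_each_resource(bus, r) {
 *		if (!r)			// unused slots may be NULL
 *			continue;
 *		dev_info(&bus->dev, "bus window %pR\n", r);
 *	}
 */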

void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res)
{
	struct pci_bus_resource *bus_res, *tmp;
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		if (bus->resource[i] == res) {
			bus->resource[i] = NULL;
			return;
		}
	}

	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
		if (bus_res->res == res) {
			list_del(&bus_res->list);
			kfree(bus_res);
			return;
		}
	}
}

void pci_bus_remove_resources(struct pci_bus *bus)
{
	int i;
	struct pci_bus_resource *bus_res, *tmp;

	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		bus->resource[i] = NULL;

	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
		list_del(&bus_res->list);
		kfree(bus_res);
	}
}

int devm_request_pci_bus_resources(struct device *dev,
				   struct list_head *resources)
{
	struct resource_entry *win;
	struct resource *parent, *res;
	int err;

	resource_list_for_each_entry(win, resources) {
		res = win->res;
		switch (resource_type(res)) {
		case IORESOURCE_IO:
			parent = &ioport_resource;
			break;
		case IORESOURCE_MEM:
			parent = &iomem_resource;
			break;
		default:
			continue;
		}

		err = devm_request_resource(dev, parent, res);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources);
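
/*
 * Usage sketch (illustrative only): a host controller driver can claim its
 * windows against ioport_resource/iomem_resource once the resource list has
 * been built.  "foo_pcie_probe" and the surrounding flow are hypothetical;
 * only devm_request_pci_bus_resources() is taken from this file.
 *
 *	static int foo_pcie_probe(struct platform_device *pdev)
 *	{
 *		struct device *dev = &pdev->dev;
 *		LIST_HEAD(resources);
 *		int err;
 *
 *		// ... parse DT ranges / ACPI and fill "resources" ...
 *
 *		err = devm_request_pci_bus_resources(dev, &resources);
 *		if (err)
 *			return err;
 *
 *		// ... create and scan the root bus ...
 *		return 0;
 *	}
 */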

static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct pci_bus_region pci_64_bit = {0,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
#endif

/*
 * @res contains CPU addresses.  Clip it so the corresponding bus addresses
 * on @bus are entirely within @region.  This is used to control the bus
 * addresses of resources we allocate, e.g., we may need a resource that
 * can be mapped by a 32-bit BAR.
 */
static void pci_clip_resource_to_region(struct pci_bus *bus,
					struct resource *res,
					struct pci_bus_region *region)
{
	struct pci_bus_region r;

	pcibios_resource_to_bus(bus, &r, res);
	if (r.start < region->start)
		r.start = region->start;
	if (r.end > region->end)
		r.end = region->end;

	if (r.end < r.start)
		res->end = res->start - 1;
	else
		pcibios_bus_to_resource(bus, res, &r);
}
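
/*
 * Worked example (hypothetical numbers): with a zero CPU-to-bus offset, a
 * window of [0xf0000000, 0x17fffffff] clipped to pci_32_bit becomes
 * [0xf0000000, 0xffffffff].  A window entirely outside the region, e.g.
 * [0x100000000, 0x17fffffff] clipped to pci_32_bit, ends up with
 * res->end < res->start, which marks it as empty for the caller.
 */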

static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_alignf alignf,
		void *alignf_data,
		struct pci_bus_region *region)
{
	struct resource *r, avail;
	resource_size_t max;
	int ret;

	type_mask |= IORESOURCE_TYPE_BITS;

	pci_bus_for_each_resource(bus, r) {
		resource_size_t min_used = min;

		if (!r)
			continue;

		/* type_mask must match */
		if ((res->flags ^ r->flags) & type_mask)
			continue;

		/*
		 * We cannot allocate a non-prefetching resource
		 * from a pre-fetching area.
		 */
		if ((r->flags & IORESOURCE_PREFETCH) &&
		    !(res->flags & IORESOURCE_PREFETCH))
			continue;

		avail = *r;
		pci_clip_resource_to_region(bus, &avail, region);

		/*
		 * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
		 * protect badly documented motherboard resources, but if
		 * this is an already-configured bridge window, its start
		 * overrides "min".
		 */
		if (avail.start)
			min_used = avail.start;

		max = avail.end;

		/* Don't bother if available space isn't large enough */
		if (size > max - min_used + 1)
			continue;

		/* Ok, try it out.. */
		ret = allocate_resource(r, res, size, min_used, max,
					align, alignf, alignf_data);
		if (ret == 0)
			return 0;
	}
	return -ENOMEM;
}

/**
 * pci_bus_alloc_resource - allocate a resource from a parent bus
 * @bus: PCI bus
 * @res: resource to allocate
 * @size: size of resource to allocate
 * @align: alignment of resource to allocate
 * @min: minimum /proc/iomem address to allocate
 * @type_mask: IORESOURCE_* type flags
 * @alignf: resource alignment function
 * @alignf_data: data argument for resource alignment function
 *
 * Given the PCI bus a device resides on, the size, minimum address,
 * alignment and type, try to find an acceptable resource allocation
 * for a specific device resource.
 */
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_alignf alignf,
		void *alignf_data)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	int rc;

	if (res->flags & IORESOURCE_MEM_64) {
		rc = pci_bus_alloc_from_region(bus, res, size, align, min,
					       type_mask, alignf, alignf_data,
					       &pci_high);
		if (rc == 0)
			return 0;

		return pci_bus_alloc_from_region(bus, res, size, align, min,
						 type_mask, alignf, alignf_data,
						 &pci_64_bit);
	}
#endif

	return pci_bus_alloc_from_region(bus, res, size, align, min,
					 type_mask, alignf, alignf_data,
					 &pci_32_bit);
}
EXPORT_SYMBOL(pci_bus_alloc_resource);
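
/*
 * Usage sketch (illustrative only): this mirrors how the PCI core's BAR
 * assignment code calls the allocator; the resource, size and alignment
 * values here are hypothetical.  For a 64-bit prefetchable BAR the
 * allocator above first tries the region above 4GB and then falls back to
 * the full 64-bit range.
 *
 *	struct resource *res = &dev->resource[0];	// example BAR
 *	resource_size_t size = SZ_1M, align = SZ_1M;
 *	int ret;
 *
 *	ret = pci_bus_alloc_resource(dev->bus, res, size, align,
 *				     PCIBIOS_MIN_MEM,
 *				     IORESOURCE_PREFETCH | IORESOURCE_MEM_64,
 *				     pcibios_align_resource, dev);
 *	if (ret)
 *		return ret;	// no window could satisfy the request
 */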

/*
 * The @idx resource of @dev should be a PCI-PCI bridge window.  If this
 * resource fits inside a window of an upstream bridge, do nothing.  If it
 * overlaps an upstream window but extends outside it, clip the resource so
 * it fits completely inside.
 */
bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
{
	struct pci_bus *bus = dev->bus;
	struct resource *res = &dev->resource[idx];
	struct resource orig_res = *res;
	struct resource *r;

	pci_bus_for_each_resource(bus, r) {
		resource_size_t start, end;

		if (!r)
			continue;

		if (resource_type(res) != resource_type(r))
			continue;

		start = max(r->start, res->start);
		end = min(r->end, res->end);

		if (start > end)
			continue;	/* no overlap */

		if (res->start == start && res->end == end)
			return false;	/* no change */

		res->start = start;
		res->end = end;
		res->flags &= ~IORESOURCE_UNSET;
		orig_res.flags &= ~IORESOURCE_UNSET;
		pci_info(dev, "%pR clipped to %pR\n", &orig_res, res);

		return true;
	}

	return false;
}

void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }

void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }

/**
 * pci_bus_add_device - start driver for a single device
 * @dev: device to add
 *
 * This adds sysfs entries for the device and starts its driver.
 */
void pci_bus_add_device(struct pci_dev *dev)
{
	struct device_node *dn = dev->dev.of_node;
	int retval;

	/*
	 * This cannot be done in pci_device_add() yet because, for some
	 * devices, resources have not been assigned at that point.
	 */
	pcibios_bus_add_device(dev);
	pci_fixup_device(pci_fixup_final, dev);
	if (pci_is_bridge(dev))
		of_pci_make_dev_node(dev);
	pci_create_sysfs_dev_files(dev);
	pci_proc_attach_device(dev);
	pci_bridge_d3_update(dev);

	dev->match_driver = !dn || of_device_is_available(dn);
	retval = device_attach(&dev->dev);
	if (retval < 0 && retval != -EPROBE_DEFER)
		pci_warn(dev, "device attach failed (%d)\n", retval);

	pci_dev_assign_added(dev, true);

	if (dev_of_node(&dev->dev) && pci_is_bridge(dev)) {
		retval = of_platform_populate(dev_of_node(&dev->dev), NULL, NULL,
					      &dev->dev);
		if (retval)
			pci_err(dev, "failed to populate child OF nodes (%d)\n",
				retval);
	}
}
EXPORT_SYMBOL_GPL(pci_bus_add_device);

/**
 * pci_bus_add_devices - start driver for PCI devices
 * @bus: bus to check for new devices
 *
 * Start drivers for PCI devices and add sysfs entries for them.
 */
void pci_bus_add_devices(const struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip already-added devices */
		if (pci_dev_is_added(dev))
			continue;
		pci_bus_add_device(dev);
	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip if device attach failed */
		if (!pci_dev_is_added(dev))
			continue;
		child = dev->subordinate;
		if (child)
			pci_bus_add_devices(child);
	}
}
EXPORT_SYMBOL(pci_bus_add_devices);
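
/*
 * Usage sketch (illustrative only): pci_bus_add_devices() is the last step
 * after a bus has been scanned and its resources assigned.  The sequence
 * below is roughly what a rescan path does; error handling is omitted.
 *
 *	pci_scan_child_bus(bus);
 *	pci_assign_unassigned_bus_resources(bus);
 *	pci_bus_add_devices(bus);
 */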

static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
			   void *userdata, bool locked)
{
	struct pci_dev *dev;
	struct pci_bus *bus;
	struct list_head *next;
	int retval;

	bus = top;
	if (!locked)
		down_read(&pci_bus_sem);
	next = top->devices.next;
	for (;;) {
		if (next == &bus->devices) {
			/* end of this bus, go up or finish */
			if (bus == top)
				break;
			next = bus->self->bus_list.next;
			bus = bus->self->bus;
			continue;
		}
		dev = list_entry(next, struct pci_dev, bus_list);
		if (dev->subordinate) {
			/* this is a pci-pci bridge, do its devices next */
			next = dev->subordinate->devices.next;
			bus = dev->subordinate;
		} else
			next = dev->bus_list.next;

		retval = cb(dev, userdata);
		if (retval)
			break;
	}
	if (!locked)
		up_read(&pci_bus_sem);
}

/**
 *  pci_walk_bus - walk devices on/under bus, calling callback.
 *  @top: bus whose devices should be walked
 *  @cb: callback to be called for each device found
 *  @userdata: arbitrary pointer to be passed to callback
 *
 *  Walk the given bus, including any bridged devices
 *  on buses under this bus.  Call the provided callback
 *  on each device found.
 *
 *  We check the return of @cb each time. If it returns anything
 *  other than 0, we break out.
 */
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
	__pci_walk_bus(top, cb, userdata, false);
}
EXPORT_SYMBOL_GPL(pci_walk_bus);
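
/*
 * Usage sketch (illustrative only): a callback returns 0 to keep walking
 * and non-zero to stop early.  The callback name and counting logic are
 * hypothetical.
 *
 *	static int foo_count_one(struct pci_dev *pdev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return 0;		// continue the walk
 *	}
 *
 *	unsigned int count = 0;
 *
 *	pci_walk_bus(bus, foo_count_one, &count);
 */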

void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
	lockdep_assert_held(&pci_bus_sem);

	__pci_walk_bus(top, cb, userdata, true);
}
EXPORT_SYMBOL_GPL(pci_walk_bus_locked);

struct pci_bus *pci_bus_get(struct pci_bus *bus)
{
	if (bus)
		get_device(&bus->dev);
	return bus;
}

void pci_bus_put(struct pci_bus *bus)
{
	if (bus)
		put_device(&bus->dev);
}

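/*
 * Reference-count sketch (illustrative only): code that stores a pci_bus
 * pointer beyond the scope where it was obtained takes a reference with
 * pci_bus_get() and drops it with pci_bus_put(); both tolerate NULL.
 *
 *	struct pci_bus *bus = pci_bus_get(pdev->bus);
 *
 *	// ... "bus" may safely be dereferenced while the reference is held ...
 *
 *	pci_bus_put(bus);
 */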