xref: /linux/drivers/pci/probe.c (revision 3719a04a80caf660f899a462cd8f3973bcfa676e)
// SPDX-License-Identifier: GPL-2.0
/*
 * PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/hypervisor.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/bitfield.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}

/*
 * Some device drivers need to know whether PCI has been initialized.
 * Basically, we consider PCI uninitialized when there is no device to
 * be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_next_device(&pci_bus_type, NULL);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static const struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */

	if (!size)
		return 0;

	/*
	 * Get the lowest of them to find the decode size, and from that
	 * the extent.
	 */
	size = size & ~(size - 1);

	/*
	 * base == maxbase can be valid only if the BAR has already been
	 * programmed with all 1s.
	 */
	if (base == maxbase && ((base | (size - 1)) & mask) != mask)
		return 0;

	return size;
}
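
/*
 * Worked example: writing all 1s to a 32-bit memory BAR and reading back
 * 0xfff00000 in the address bits leaves size = 0xfff00000 after masking;
 * the lowest set bit is 0x00100000, so the BAR decodes a 1 MB window.
 */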

static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}
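
/*
 * For example, a raw BAR value of 0x0000000c has the I/O space bit clear,
 * memory type 10b (64-bit) and the prefetchable bit set, so decode_bar()
 * returns IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_MEM_64 plus
 * the low flag bits themselves.
 */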

#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * __pci_size_bars - Read the raw BAR mask for a range of PCI BARs
 * @dev: the PCI device
 * @count: number of BARs to size
 * @pos: starting config space position
 * @sizes: array to store mask values
 * @rom: indicate whether to use ROM mask, which avoids enabling ROM BARs
 *
 * Provided @sizes array must be sufficiently sized to store results for
 * @count u32 BARs.  Caller is responsible for disabling decode to specified
 * BAR range around calling this function.  This function is intended to avoid
 * disabling decode around sizing each BAR individually, which can result in
 * non-trivial overhead in virtualized environments with very large PCI BARs.
 */
static void __pci_size_bars(struct pci_dev *dev, int count,
			    unsigned int pos, u32 *sizes, bool rom)
{
	u32 orig, mask = rom ? PCI_ROM_ADDRESS_MASK : ~0;
	int i;

	for (i = 0; i < count; i++, pos += 4, sizes++) {
		pci_read_config_dword(dev, pos, &orig);
		pci_write_config_dword(dev, pos, mask);
		pci_read_config_dword(dev, pos, sizes);
		pci_write_config_dword(dev, pos, orig);
	}
}

void __pci_size_stdbars(struct pci_dev *dev, int count,
			unsigned int pos, u32 *sizes)
{
	__pci_size_bars(dev, count, pos, sizes, false);
}

static void __pci_size_rom(struct pci_dev *dev, unsigned int pos, u32 *sizes)
{
	__pci_size_bars(dev, 1, pos, sizes, true);
}
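
/*
 * This is the standard BAR sizing protocol: save the original value, write
 * all 1s, read back the mask of writable address bits, then restore the
 * original.  For the expansion ROM BAR, PCI_ROM_ADDRESS_MASK covers only
 * the address bits (31:11), so the enable bit is never written and sizing
 * cannot accidentally turn on ROM decode.
 */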

/**
 * __pci_read_base - Read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 * @sizes: array of one or more pre-read BAR masks
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos, u32 *sizes)
{
	u32 l = 0, sz;
	u64 l64, sz64, mask64;
	struct pci_bus_region region, inverted_region;
	const char *res_name = pci_resource_name(dev, res - dev->resource);

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	sz = sizes[0];

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an I/O BAR,
	 * bit 1 must be clear.
	 */
	if (PCI_POSSIBLE_ERROR(sz))
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (PCI_POSSIBLE_ERROR(l))
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		if (l & PCI_ROM_ADDRESS_ENABLE)
			res->flags |= IORESOURCE_ROM_ENABLE;
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		sz = sizes[1];

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		pci_info(dev, FW_BUG "%s: invalid; can't size\n", res_name);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			pci_err(dev, "%s: can't handle BAR larger than 4GB (size %#010llx)\n",
				res_name, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64 - 1;
			pci_info(dev, "%s: can't handle BAR above 4GB (bus address %#010llx)\n",
				 res_name, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64 - 1;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		pci_info(dev, "%s: initial BAR value %#010llx invalid\n",
			 res_name, (unsigned long long)region.start);
	}

	goto out;

fail:
	res->flags = 0;
out:
	if (res->flags)
		pci_info(dev, "%s %pR\n", res_name, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
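
/*
 * Worked example of a 64-bit BAR pair: a lower dword of 0xe000000c with an
 * upper dword of 0x00000020 gives l64 = 0x20e0000000.  If sizing returned
 * 0xe0000000/0xffffffff for the pair, sz64 = 0xffffffffe0000000 and
 * pci_size() yields 0x20000000, i.e. a 512 MB window at bus address
 * 0x20e0000000; the return value 1 tells the caller that the next BAR slot
 * holds the upper half and must be skipped.
 */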

static __always_inline void pci_read_bases(struct pci_dev *dev,
					   unsigned int howmany, int rom)
{
	u32 rombar, stdbars[PCI_STD_NUM_BARS];
	unsigned int pos, reg;
	u16 orig_cmd;

	BUILD_BUG_ON(statically_true(howmany > PCI_STD_NUM_BARS));

	if (dev->non_compliant_bars)
		return;

	/* Per PCIe r4.0, sec 9.3.4.1.11, the VF BARs are all RO Zero */
	if (dev->is_virtfn)
		return;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	__pci_size_stdbars(dev, howmany, PCI_BASE_ADDRESS_0, stdbars);
	if (rom)
		__pci_size_rom(dev, rom, &rombar);

	if (!dev->mmio_always_on &&
	    (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];

		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown,
				       res, reg, &stdbars[pos]);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];

		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom, &rombar);
	}
}
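
/*
 * For a type 0 header, callers pass howmany = PCI_STD_NUM_BARS (6) and
 * rom = PCI_ROM_ADDRESS, so all six BARs plus the ROM are sized inside a
 * single decode-disabled window instead of toggling decode once per BAR.
 */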

static void pci_read_bridge_io(struct pci_dev *dev, struct resource *res,
			       bool log)
{
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		if (log)
			pci_info(dev, "  bridge window %pR\n", res);
	}
}
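
/*
 * Example: with 4K granularity, io_base_lo = 0x20 and io_limit_lo = 0x30
 * decode to base 0x2000 and limit 0x3000, giving the bridge window
 * [io 0x2000-0x3fff] once the granularity is added to the limit.
 */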

static void pci_read_bridge_mmio(struct pci_dev *dev, struct resource *res,
				 bool log)
{
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;

	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		if (log)
			pci_info(dev, "  bridge window %pR\n", res);
	}
}
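
/*
 * Example: mem_base_lo = 0xa000 and mem_limit_lo = 0xa7f0 decode to base
 * 0xa0000000 and limit 0xa7f00000, so the non-prefetchable window is
 * [mem 0xa0000000-0xa7ffffff]; the low 20 bits of the limit are implied 1s.
 */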

static void pci_read_bridge_mmio_pref(struct pci_dev *dev, struct resource *res,
				      bool log)
{
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;

	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	if (base != base64) {
		pci_err(dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		if (log)
			pci_info(dev, "  bridge window %pR\n", res);
	}
}
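
/*
 * Example: a 64-bit prefetchable window with mem_base_lo = 0x0001 (type
 * bits 01b) and PCI_PREF_BASE_UPPER32 = 0x4 decodes to a window starting
 * at bus address 0x400000000; the upper dwords supply bits 63:32 of the
 * base and limit.
 */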

static void pci_read_bridge_windows(struct pci_dev *bridge)
{
	u32 buses;
	u16 io;
	u32 pmem, tmp;
	struct resource res;

	pci_read_config_dword(bridge, PCI_PRIMARY_BUS, &buses);
	res.flags = IORESOURCE_BUS;
	res.start = (buses >> 8) & 0xff;
	res.end = (buses >> 16) & 0xff;
	pci_info(bridge, "PCI bridge to %pR%s\n", &res,
		 bridge->transparent ? " (subtractive decode)" : "");

	pci_read_config_word(bridge, PCI_IO_BASE, &io);
	if (!io) {
		pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
		pci_read_config_word(bridge, PCI_IO_BASE, &io);
		pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
	}
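	/*
	 * The 0xe0f0 probe above is a base > limit pattern (I/O base 0xf000,
	 * limit 0xe000), so the window stays disabled while we test whether
	 * the registers are writable at all; the prefetchable probe below
	 * uses 0xffe0fff0 the same way.
	 */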
	if (io) {
		bridge->io_window = 1;
		pci_read_bridge_io(bridge, &res, true);
	}

	pci_read_bridge_mmio(bridge, &res, true);

	/*
	 * DECchip 21050 pass 2 errata: the bridge may miss an address
	 * disconnect boundary by one PCI data phase.  Workaround: do not
	 * use prefetching on this device.
	 */
	if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
		return;

	pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
	if (!pmem) {
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
					       0xffe0fff0);
		pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
	}
	if (!pmem)
		return;

	bridge->pref_window = 1;

	if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {

		/*
		 * Bridge claims to have a 64-bit prefetchable memory
		 * window; verify that the upper bits are actually
		 * writable.
		 */
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem);
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
				       0xffffffff);
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem);
		if (tmp)
			bridge->pref_64_window = 1;
	}

	pci_read_bridge_mmio_pref(bridge, &res, true);
}

void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	pci_info(dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child->self, child->resource[0], false);
	pci_read_bridge_mmio(child->self, child->resource[1], false);
	pci_read_bridge_mmio_pref(child->self, child->resource[2], false);

	if (!dev->transparent)
		return;

	pci_bus_for_each_resource(child->parent, res) {
		if (!res || !res->flags)
			continue;

		pci_bus_add_resource(child, res);
		pci_info(dev, "  bridge window %pR (subtractive decode)\n", res);
	}
}

static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	if (parent)
		b->domain_nr = parent->domain_nr;
#endif
	return b;
}

static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);
	pci_free_resource_list(&bridge->dma_ranges);
	kfree(bridge);
}

static void pci_init_host_bridge(struct pci_host_bridge *bridge)
{
	INIT_LIST_HEAD(&bridge->windows);
	INIT_LIST_HEAD(&bridge->dma_ranges);

	/*
	 * We assume we can manage these PCIe features.  Some systems may
	 * reserve these for use by the platform itself, e.g., an ACPI BIOS
	 * may implement its own AER handling and use _OSC to prevent the
	 * OS from interfering.
	 */
	bridge->native_aer = 1;
	bridge->native_pcie_hotplug = 1;
	bridge->native_shpc_hotplug = 1;
	bridge->native_pme = 1;
	bridge->native_ltr = 1;
	bridge->native_dpc = 1;
	bridge->domain_nr = PCI_DOMAIN_NR_NOT_SET;
	bridge->native_cxl_error = 1;

	device_initialize(&bridge->dev);
}

struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
	if (!bridge)
		return NULL;

	pci_init_host_bridge(bridge);
	bridge->dev.release = pci_release_host_bridge_dev;

	return bridge;
}
EXPORT_SYMBOL(pci_alloc_host_bridge);

static void devm_pci_alloc_host_bridge_release(void *data)
{
	pci_free_host_bridge(data);
}

struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
						   size_t priv)
{
	int ret;
	struct pci_host_bridge *bridge;

	bridge = pci_alloc_host_bridge(priv);
	if (!bridge)
		return NULL;

	bridge->dev.parent = dev;

	ret = devm_add_action_or_reset(dev, devm_pci_alloc_host_bridge_release,
				       bridge);
	if (ret)
		return NULL;

	ret = devm_of_pci_bridge_init(dev, bridge);
	if (ret)
		return NULL;

	return bridge;
}
EXPORT_SYMBOL(devm_pci_alloc_host_bridge);

void pci_free_host_bridge(struct pci_host_bridge *bridge)
{
	put_device(&bridge->dev);
}
EXPORT_SYMBOL(pci_free_host_bridge);
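
/*
 * Minimal host controller probe sketch using the allocator above (a
 * hypothetical driver: names such as my_pcie and my_pcie_ops are
 * illustrative, and the bridge windows are assumed to have been parsed
 * from the DT by devm_of_pci_bridge_init()):
 *
 *	static int my_pcie_probe(struct platform_device *pdev)
 *	{
 *		struct pci_host_bridge *bridge;
 *
 *		bridge = devm_pci_alloc_host_bridge(&pdev->dev,
 *						    sizeof(struct my_pcie));
 *		if (!bridge)
 *			return -ENOMEM;
 *
 *		bridge->ops = &my_pcie_ops;
 *		bridge->sysdata = pci_host_bridge_priv(bridge);
 *		return pci_host_probe(bridge);
 *	}
 */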

/* Indexed by PCI_X_SSTATUS_FREQ (secondary bus mode and frequency) */
static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

/* Indexed by PCI_EXP_LNKCAP_SLS, PCI_EXP_LNKSTA_CLS */
const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCIE_SPEED_16_0GT,		/* 4 */
	PCIE_SPEED_32_0GT,		/* 5 */
	PCIE_SPEED_64_0GT,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};
EXPORT_SYMBOL_GPL(pcie_link_speed);

const char *pci_speed_string(enum pci_bus_speed speed)
{
	/* Indexed by the pci_bus_speed enum */
	static const char *speed_strings[] = {
	    "33 MHz PCI",		/* 0x00 */
	    "66 MHz PCI",		/* 0x01 */
	    "66 MHz PCI-X",		/* 0x02 */
	    "100 MHz PCI-X",		/* 0x03 */
	    "133 MHz PCI-X",		/* 0x04 */
	    NULL,			/* 0x05 */
	    NULL,			/* 0x06 */
	    NULL,			/* 0x07 */
	    NULL,			/* 0x08 */
	    "66 MHz PCI-X 266",		/* 0x09 */
	    "100 MHz PCI-X 266",	/* 0x0a */
	    "133 MHz PCI-X 266",	/* 0x0b */
	    "Unknown AGP",		/* 0x0c */
	    "1x AGP",			/* 0x0d */
	    "2x AGP",			/* 0x0e */
	    "4x AGP",			/* 0x0f */
	    "8x AGP",			/* 0x10 */
	    "66 MHz PCI-X 533",		/* 0x11 */
	    "100 MHz PCI-X 533",	/* 0x12 */
	    "133 MHz PCI-X 533",	/* 0x13 */
	    "2.5 GT/s PCIe",		/* 0x14 */
	    "5.0 GT/s PCIe",		/* 0x15 */
	    "8.0 GT/s PCIe",		/* 0x16 */
	    "16.0 GT/s PCIe",		/* 0x17 */
	    "32.0 GT/s PCIe",		/* 0x18 */
	    "64.0 GT/s PCIe",		/* 0x19 */
	};

	if (speed < ARRAY_SIZE(speed_strings))
		return speed_strings[speed];
	return "Unknown";
}
EXPORT_SYMBOL_GPL(pci_speed_string);

void pcie_update_link_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	u16 linksta, linksta2;

	pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
	pcie_capability_read_word(bridge, PCI_EXP_LNKSTA2, &linksta2);
	__pcie_update_link_speed(bus, linksta, linksta2);
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

 out:
	return agp_speeds[index];
}
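
/*
 * In AGP 2.0, status bits 0/1/2 select 1x/2x/4x; in AGP 3.0 mode the same
 * bits mean 4x/8x/reserved, which is why agp3 shifts the index up by two
 * and maps the reserved combination (index 5) back to AGP_UNKNOWN.
 */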

static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed =
			pcix_bus_speed[FIELD_GET(PCI_X_SSTATUS_FREQ, status)];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_update_link_speed(bus);
	}
}

static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/* If the host bridge driver set an MSI domain for the bridge, use it */
	d = dev_get_msi_domain(bus->bridge);

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	if (!d)
		d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}

	return d;
}

static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}

static bool pci_preserve_config(struct pci_host_bridge *host_bridge)
{
	if (pci_acpi_preserve_config(host_bridge))
		return true;

	if (host_bridge->dev.parent && host_bridge->dev.parent->of_node)
		return of_pci_preserve_config(host_bridge->dev.parent->of_node);

	return false;
}

static int pci_register_host_bridge(struct pci_host_bridge *bridge)
{
	struct device *parent = bridge->dev.parent;
	struct resource_entry *window, *next, *n;
	struct pci_bus *bus, *b;
	resource_size_t offset, next_offset;
	LIST_HEAD(resources);
	struct resource *res, *next_res;
	bool bus_registered = false;
	char addr[64], *fmt;
	const char *name;
	int err;

	bus = pci_alloc_bus(NULL);
	if (!bus)
		return -ENOMEM;

	bridge->bus = bus;

	bus->sysdata = bridge->sysdata;
	bus->ops = bridge->ops;
	bus->number = bus->busn_res.start = bridge->busnr;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	if (bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET)
		bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
	else
		bus->domain_nr = bridge->domain_nr;
	if (bus->domain_nr < 0) {
		err = bus->domain_nr;
		goto free;
	}
#endif

	b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
	if (b) {
		/* Ignore it if we already got here via a different bridge */
		dev_dbg(&b->dev, "bus already known\n");
		err = -EEXIST;
		goto free;
	}

	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
		     bridge->busnr);

	err = pcibios_root_bridge_prepare(bridge);
	if (err)
		goto free;

	/* Temporarily move resources off the list */
	list_splice_init(&bridge->windows, &resources);
	err = device_add(&bridge->dev);
	if (err)
		goto free;

	bus->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(bus->bridge);
	pci_set_bus_of_node(bus);
	pci_set_bus_msi_domain(bus);
	if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev) &&
	    !pci_host_of_has_msi_map(parent))
		bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI;

	if (!parent)
		set_dev_node(bus->bridge, pcibus_to_node(bus));

	bus->dev.class = &pcibus_class;
	bus->dev.parent = bus->bridge;

	dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
	name = dev_name(&bus->dev);

	err = device_register(&bus->dev);
	bus_registered = true;
	if (err)
		goto unregister;

	pcibios_add_bus(bus);

	if (bus->ops->add_bus) {
		err = bus->ops->add_bus(bus);
		if (WARN_ON(err < 0))
			dev_err(&bus->dev, "failed to add bus: %d\n", err);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(bus);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", name);
	else
		pr_info("PCI host bridge to bus %s\n", name);

	if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
		dev_warn(&bus->dev, "Unknown NUMA node; performance will be reduced\n");

	/* Check if the boot configuration by FW needs to be preserved */
	bridge->preserve_config = pci_preserve_config(bridge);

	/* Coalesce contiguous windows */
	resource_list_for_each_entry_safe(window, n, &resources) {
		if (list_is_last(&window->node, &resources))
			break;

		next = list_next_entry(window, node);
		offset = window->offset;
		res = window->res;
		next_offset = next->offset;
		next_res = next->res;

		if (res->flags != next_res->flags || offset != next_offset)
			continue;

		if (res->end + 1 == next_res->start) {
			next_res->start = res->start;
			res->flags = res->start = res->end = 0;
		}
	}

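	/*
	 * E.g., adjacent windows [mem 0x1000-0x1fff] and [mem 0x2000-0x2fff]
	 * with the same CPU-to-bus offset are merged above into
	 * [mem 0x1000-0x2fff]; the zeroed-out entry is released below.
	 */
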
	/* Add initial resources to the bus */
	resource_list_for_each_entry_safe(window, n, &resources) {
		offset = window->offset;
		res = window->res;
		if (!res->flags && !res->start && !res->end) {
			release_resource(res);
			resource_list_destroy_entry(window);
			continue;
		}

		list_move_tail(&window->node, &bridge->windows);

		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(bus, bus->number, res->end);
		else
			pci_bus_add_resource(bus, res);

		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";

			snprintf(addr, sizeof(addr), fmt,
				 (unsigned long long)(res->start - offset),
				 (unsigned long long)(res->end - offset));
		} else
			addr[0] = '\0';

		dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
	}

	of_pci_make_host_bridge_node(bridge);

	down_write(&pci_bus_sem);
	list_add_tail(&bus->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return 0;

unregister:
	put_device(&bridge->dev);
	device_del(&bridge->dev);
free:
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	pci_bus_release_domain_nr(parent, bus->domain_nr);
#endif
	if (bus_registered)
		put_device(&bus->dev);
	else
		kfree(bus);

	return err;
}

static bool pci_bridge_child_ext_cfg_accessible(struct pci_dev *bridge)
{
	int pos;
	u32 status;

	/*
	 * If extended config space isn't accessible on a bridge's primary
	 * bus, we certainly can't access it on the secondary bus.
	 */
	if (bridge->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
		return false;

	/*
	 * PCIe Root Ports and switch ports are PCIe on both sides, so if
	 * extended config space is accessible on the primary, it's also
	 * accessible on the secondary.
	 */
	if (pci_is_pcie(bridge) &&
	    (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT ||
	     pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM ||
	     pci_pcie_type(bridge) == PCI_EXP_TYPE_DOWNSTREAM))
		return true;

	/*
	 * For the other bridge types:
	 *   - PCI-to-PCI bridges
	 *   - PCIe-to-PCI/PCI-X forward bridges
	 *   - PCI/PCI-X-to-PCIe reverse bridges
	 * extended config space on the secondary side is only accessible
	 * if the bridge supports PCI-X Mode 2.
	 */
	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (!pos)
		return false;

	pci_read_config_dword(bridge, pos + PCI_X_STATUS, &status);
	return status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ);
}

static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	struct pci_host_bridge *host;
	int i;
	int ret;

	/* Allocate a new bus and inherit stuff from the parent */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	host = pci_find_host_bridge(parent);
	if (host->child_ops)
		child->ops = host->child_ops;
	else
		child->ops = parent->ops;

	/*
	 * Initialize some portions of the bus device, but don't register
	 * it now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/* Set up the primary, secondary and subordinate bus numbers */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/*
	 * Check whether extended config space is accessible on the child
	 * bus.  Note that we currently assume it is always accessible on
	 * the root bus.
	 */
	if (!pci_bridge_child_ext_cfg_accessible(bridge)) {
		child->bus_flags |= PCI_BUS_FLAGS_NO_EXTCFG;
		pci_info(child, "extended config space not accessible\n");
	}

	/* Set up default resource pointers and names */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	if (WARN_ON(ret < 0)) {
		put_device(&child->dev);
		return NULL;
	}

	pcibios_add_bus(child);

	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}
EXPORT_SYMBOL(pci_add_new_bus);

static void pci_enable_rrs_sv(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable Configuration RRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_RRS_SV) {
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_RRS_SVE);
		pdev->config_rrs_sv = 1;
	}
}

static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
					      unsigned int available_buses);

/**
 * pci_ea_fixed_busnrs() - Read fixed Secondary and Subordinate bus
 * numbers from EA capability.
 * @dev: Bridge
 * @sec: updated with secondary bus number from EA
 * @sub: updated with subordinate bus number from EA
 *
 * If @dev is a bridge with EA capability that specifies valid secondary
 * and subordinate bus numbers, return true with the bus numbers in @sec
 * and @sub.  Otherwise return false.
 */
static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
{
	int ea, offset;
	u32 dw;
	u8 ea_sec, ea_sub;

	if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
		return false;

	/* find PCI EA capability in list */
	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
	if (!ea)
		return false;

	offset = ea + PCI_EA_FIRST_ENT;
	pci_read_config_dword(dev, offset, &dw);
	ea_sec = FIELD_GET(PCI_EA_SEC_BUS_MASK, dw);
	ea_sub = FIELD_GET(PCI_EA_SUB_BUS_MASK, dw);
	if (ea_sec == 0 || ea_sub < ea_sec)
		return false;

	*sec = ea_sec;
	*sub = ea_sub;
	return true;
}

/*
 * pci_scan_bridge_extend() - Scan buses behind a bridge
 * @bus: Parent bus the bridge is on
 * @dev: Bridge itself
 * @max: Starting subordinate number of buses behind this bridge
 * @available_buses: Total number of buses available for this bridge and
 *		     the devices below. After the minimal bus space has
 *		     been allocated the remaining buses will be
 *		     distributed equally between hotplug-capable bridges.
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 *        that need to be reconfigured).
 *
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 *
 * Return: New subordinate number covering all buses behind this bridge.
 */
static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
				  int max, unsigned int available_buses,
				  int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;
	bool fixed_buses;
	u8 fixed_sec, fixed_sub;
	int next_busnr;

	/*
	 * Make sure the bridge is powered on to be able to access config
	 * space of devices below it.
	 */
	pm_runtime_get_sync(&dev->dev);

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	pci_dbg(dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		pci_warn(dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		pci_info(dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/*
	 * Disable Master-Abort Mode during probing to avoid reporting of
	 * bus errors in some architectures.
	 */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax, buses;

		/*
		 * Bus already configured by firmware, process it in the
		 * first pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we
		 * are rescanning the bus or the bus is reachable through
		 * more than one bridge. The second case can happen with
		 * the i450NX chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		buses = subordinate - secondary;
		cmax = pci_scan_child_bus_extend(child, buses);
		if (cmax > subordinate)
			pci_warn(dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);

		/* Subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {

		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)

				/*
				 * Temporarily disable forwarding of the
				 * configuration cycles on all bridges in
				 * this bus segment to avoid possible
				 * conflicts in the second pass between two
				 * bridges programmed with overlapping bus
				 * ranges.
				 */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Read bus numbers from EA Capability (if present) */
		fixed_buses = pci_ea_fixed_busnrs(dev, &fixed_sec, &fixed_sub);
		if (fixed_buses)
			next_busnr = fixed_sec;
		else
			next_busnr = max + 1;

		/*
		 * Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in this
		 * case we only re-scan this bus.
		 */
		child = pci_find_bus(pci_domain_nr(bus), next_busnr);
		if (!child) {
			child = pci_add_new_bus(bus, dev, next_busnr);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, next_busnr,
						bus->busn_res.end);
		}
		max++;
		if (available_buses)
			available_buses--;

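		/*
		 * PCI_PRIMARY_BUS is a single dword: primary bus in bits
		 * 7:0, secondary in 15:8, subordinate in 23:16 and the
		 * secondary latency timer in 31:24.  E.g., primary 00,
		 * secondary 02, subordinate ff encodes as 0x00ff0200.
		 */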
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->busn_res.start)   <<  8)
		      | ((unsigned int)(child->busn_res.end) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/* We need to blast all three values with a single write */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus_extend(child, available_buses);
		} else {

			/*
			 * For CardBus bridges, we leave 4 bus numbers as
			 * cards with a PCI-to-PCI bridge can be inserted
			 * later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;

				if (pci_find_bus(pci_domain_nr(bus),
							max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {

					/*
					 * Often, there are two CardBus
					 * bridges -- try to leave one
					 * valid bus number for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}

		/*
		 * Set subordinate bus number to its real value.
		 * If fixed subordinate bus number exists from EA
		 * capability then use it.
		 */
		if (fixed_buses)
			max = fixed_sub;
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Check that all devices are accessible */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&dev->dev, "devices behind bridge are unusable because %pR cannot be assigned for them\n",
				 &child->busn_res);
			break;
		}
		bus = bus->parent;
	}

out:
	/* Clear errors in the Secondary Status Register */
	pci_write_config_word(dev, PCI_SEC_STATUS, 0xffff);

	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	pm_runtime_put(&dev->dev);

	return max;
}

/*
 * pci_scan_bridge() - Scan buses behind a bridge
 * @bus: Parent bus the bridge is on
 * @dev: Bridge itself
 * @max: Starting subordinate number of buses behind this bridge
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 *        that need to be reconfigured).
 *
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 *
 * Return: New subordinate number covering all buses behind this bridge.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	return pci_scan_bridge_extend(bus, dev, max, 0, pass);
}
EXPORT_SYMBOL(pci_scan_bridge);

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	/* VFs are not allowed to use INTx, so skip the config reads */
	if (dev->is_virtfn) {
		dev->pin = 0;
		dev->irq = 0;
		return;
	}

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;
	u32 reg32;
	int type;
	struct pci_dev *parent;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;

	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;

	type = pci_pcie_type(pdev);
	if (type == PCI_EXP_TYPE_ROOT_PORT)
		pci_enable_rrs_sv(pdev);

	pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &pdev->devcap);
	pdev->pcie_mpss = FIELD_GET(PCI_EXP_DEVCAP_PAYLOAD, pdev->devcap);

	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &reg32);
	if (reg32 & PCI_EXP_LNKCAP_DLLLARC)
		pdev->link_active_reporting = 1;

	parent = pci_upstream_bridge(pdev);
	if (!parent)
		return;

	/*
	 * Some systems do not identify their upstream/downstream ports
	 * correctly so detect impossible configurations here and correct
	 * the port type accordingly.
	 */
	if (type == PCI_EXP_TYPE_DOWNSTREAM) {
		/*
		 * If pdev claims to be downstream port but the parent
		 * device is also downstream port assume pdev is actually
		 * upstream port.
		 */
		if (pcie_downstream_port(parent)) {
			pci_info(pdev, "claims to be downstream port but is acting as upstream port, correcting type\n");
			pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE;
			pdev->pcie_flags_reg |= PCI_EXP_TYPE_UPSTREAM;
		}
	} else if (type == PCI_EXP_TYPE_UPSTREAM) {
		/*
		 * If pdev claims to be upstream port but the parent
		 * device is also upstream port assume pdev is actually
		 * downstream port.
		 */
		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM) {
			pci_info(pdev, "claims to be upstream port but is acting as downstream port, correcting type\n");
			pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE;
			pdev->pcie_flags_reg |= PCI_EXP_TYPE_DOWNSTREAM;
		}
	}
}

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

static void set_pcie_thunderbolt(struct pci_dev *dev)
{
	u16 vsec;

	/* Is the device part of a Thunderbolt controller? */
	vsec = pci_find_vsec_capability(dev, PCI_VENDOR_ID_INTEL, PCI_VSEC_ID_INTEL_TBT);
	if (vsec)
		dev->is_thunderbolt = 1;
}

static void set_pcie_untrusted(struct pci_dev *dev)
{
	struct pci_dev *parent = pci_upstream_bridge(dev);

	if (!parent)
		return;

	/*
	 * If the upstream bridge is untrusted we treat this device as
	 * untrusted as well.
	 */
	if (parent->untrusted) {
		dev->untrusted = true;
		return;
	}

	if (arch_pci_dev_is_removable(dev)) {
		pci_dbg(dev, "marking as untrusted\n");
		dev->untrusted = true;
	}
}

static void pci_set_removable(struct pci_dev *dev)
{
	struct pci_dev *parent = pci_upstream_bridge(dev);

	if (!parent)
		return;

	/*
	 * We (only) consider everything tunneled below an external_facing
	 * device to be removable by the user. We're mainly concerned with
	 * consumer platforms with user accessible thunderbolt ports that are
	 * vulnerable to DMA attacks, and we expect those ports to be marked by
	 * the firmware as external_facing. Devices in traditional hotplug
	 * slots can technically be removed, but the expectation is that unless
	 * the port is marked with external_facing, such devices are less
	 * accessible to user / may not be removed by end user, and thus not
	 * exposed as "removable" to userspace.
	 */
	if (dev_is_removable(&parent->dev)) {
		dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
		return;
	}

	if (arch_pci_dev_is_removable(dev)) {
		pci_dbg(dev, "marking as removable\n");
		dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
	}
}
1742 
1743 /**
1744  * pci_ext_cfg_is_aliased - Is ext config space just an alias of std config?
1745  * @dev: PCI device
1746  *
1747  * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1748  * when forwarding a type1 configuration request the bridge must check that
1749  * the extended register address field is zero.  If it is non-zero, the bridge
1750  * must not forward the transaction and must handle it as an Unsupported Request.
1751  * Some bridges do not follow this rule and simply drop the extended register
1752  * bits, resulting in the standard config space being aliased, every 256
1753  * bytes across the entire configuration space.  Test for this condition by
1754  * comparing the first dword of each potential alias to the vendor/device ID.
1755  * Known offenders:
1756  *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1757  *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1758  */
1759 static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1760 {
1761 #ifdef CONFIG_PCI_QUIRKS
1762 	int pos, ret;
1763 	u32 header, tmp;
1764 
1765 	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1766 
1767 	for (pos = PCI_CFG_SPACE_SIZE;
1768 	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1769 		ret = pci_read_config_dword(dev, pos, &tmp);
1770 		if ((ret != PCIBIOS_SUCCESSFUL) || (header != tmp))
1771 			return false;
1772 	}
1773 
1774 	return true;
1775 #else
1776 	return false;
1777 #endif
1778 }
1779 
1780 /**
1781  * pci_cfg_space_size_ext - Get the configuration space size of the PCI device
1782  * @dev: PCI device
1783  *
1784  * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1785  * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
1786  * access it.  Maybe we don't have a way to generate extended config space
1787  * accesses, or the device is behind a reverse Express bridge.  So we try
1788  * reading the dword at 0x100 which must either be 0 or a valid extended
1789  * capability header.
1790  */
1791 static int pci_cfg_space_size_ext(struct pci_dev *dev)
1792 {
1793 	u32 status;
1794 	int pos = PCI_CFG_SPACE_SIZE;
1795 
1796 	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1797 		return PCI_CFG_SPACE_SIZE;
1798 	if (PCI_POSSIBLE_ERROR(status) || pci_ext_cfg_is_aliased(dev))
1799 		return PCI_CFG_SPACE_SIZE;
1800 
1801 	return PCI_CFG_SPACE_EXP_SIZE;
1802 }
1803 
1804 int pci_cfg_space_size(struct pci_dev *dev)
1805 {
1806 	int pos;
1807 	u32 status;
1808 	u16 class;
1809 
1810 #ifdef CONFIG_PCI_IOV
1811 	/*
1812 	 * Per the SR-IOV specification (rev 1.1, sec 3.5), VFs are required to
1813 	 * implement a PCIe capability and therefore must implement extended
1814 	 * config space.  We can skip the NO_EXTCFG test below and the
1815 	 * reachability/aliasing test in pci_cfg_space_size_ext() by virtue of
1816 	 * the fact that the SR-IOV capability on the PF resides in extended
1817 	 * config space and must be accessible and non-aliased to have enabled
1818 	 * support for this VF.  This is a micro performance optimization for
1819 	 * systems supporting many VFs.
1820 	 */
1821 	if (dev->is_virtfn)
1822 		return PCI_CFG_SPACE_EXP_SIZE;
1823 #endif
1824 
1825 	if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
1826 		return PCI_CFG_SPACE_SIZE;
1827 
1828 	class = dev->class >> 8;
1829 	if (class == PCI_CLASS_BRIDGE_HOST)
1830 		return pci_cfg_space_size_ext(dev);
1831 
1832 	if (pci_is_pcie(dev))
1833 		return pci_cfg_space_size_ext(dev);
1834 
1835 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1836 	if (!pos)
1837 		return PCI_CFG_SPACE_SIZE;
1838 
1839 	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1840 	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
1841 		return pci_cfg_space_size_ext(dev);
1842 
1843 	return PCI_CFG_SPACE_SIZE;
1844 }
1845 
1846 static u32 pci_class(struct pci_dev *dev)
1847 {
1848 	u32 class;
1849 
1850 #ifdef CONFIG_PCI_IOV
1851 	if (dev->is_virtfn)
1852 		return dev->physfn->sriov->class;
1853 #endif
1854 	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1855 	return class;
1856 }
1857 
1858 static void pci_subsystem_ids(struct pci_dev *dev, u16 *vendor, u16 *device)
1859 {
1860 #ifdef CONFIG_PCI_IOV
1861 	if (dev->is_virtfn) {
1862 		*vendor = dev->physfn->sriov->subsystem_vendor;
1863 		*device = dev->physfn->sriov->subsystem_device;
1864 		return;
1865 	}
1866 #endif
1867 	pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, vendor);
1868 	pci_read_config_word(dev, PCI_SUBSYSTEM_ID, device);
1869 }
1870 
1871 static u8 pci_hdr_type(struct pci_dev *dev)
1872 {
1873 	u8 hdr_type;
1874 
1875 #ifdef CONFIG_PCI_IOV
1876 	if (dev->is_virtfn)
1877 		return dev->physfn->sriov->hdr_type;
1878 #endif
1879 	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
1880 	return hdr_type;
1881 }
1882 
1883 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1884 
1885 /**
1886  * pci_intx_mask_broken - Test PCI_COMMAND_INTX_DISABLE writability
1887  * @dev: PCI device
1888  *
1889  * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev.  Check this
1890  * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
1891  */
1892 static int pci_intx_mask_broken(struct pci_dev *dev)
1893 {
1894 	u16 orig, toggle, new;
1895 
1896 	pci_read_config_word(dev, PCI_COMMAND, &orig);
1897 	toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
1898 	pci_write_config_word(dev, PCI_COMMAND, toggle);
1899 	pci_read_config_word(dev, PCI_COMMAND, &new);
1900 
1901 	pci_write_config_word(dev, PCI_COMMAND, orig);
1902 
1903 	/*
1904 	 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
1905 	 * r2.3, so strictly speaking, a device is not *broken* if it's not
1906 	 * writable.  But we'll live with the misnomer for now.
1907 	 */
1908 	if (new != toggle)
1909 		return 1;
1910 	return 0;
1911 }
1912 
1913 static void early_dump_pci_device(struct pci_dev *pdev)
1914 {
1915 	u32 value[256 / 4];
1916 	int i;
1917 
1918 	pci_info(pdev, "config space:\n");
1919 
1920 	for (i = 0; i < 256; i += 4)
1921 		pci_read_config_dword(pdev, i, &value[i / 4]);
1922 
1923 	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
1924 		       value, 256, false);
1925 }
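
/*
 * Illustrative only: with DUMP_PREFIX_OFFSET, 16 bytes per line and a
 * groupsize of 1, the dump above comes out along these lines (the byte
 * values here are hypothetical):
 *
 *	pci 0000:00:01.0: config space:
 *	00000000: 86 80 10 9d 06 04 10 00 02 00 04 06 10 00 81 00
 *	00000010: 00 00 00 00 00 00 00 00 00 01 01 00 20 20 00 20
 *	...
 */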
1926 
1927 static const char *pci_type_str(struct pci_dev *dev)
1928 {
1929 	static const char * const str[] = {
1930 		"PCIe Endpoint",
1931 		"PCIe Legacy Endpoint",
1932 		"PCIe unknown",
1933 		"PCIe unknown",
1934 		"PCIe Root Port",
1935 		"PCIe Switch Upstream Port",
1936 		"PCIe Switch Downstream Port",
1937 		"PCIe to PCI/PCI-X bridge",
1938 		"PCI/PCI-X to PCIe bridge",
1939 		"PCIe Root Complex Integrated Endpoint",
1940 		"PCIe Root Complex Event Collector",
1941 	};
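	/*
	 * str[] is indexed directly by the PCI_EXP_FLAGS_TYPE value from the
	 * PCIe Capabilities register (PCI_EXP_TYPE_ENDPOINT == 0x0 up to
	 * PCI_EXP_TYPE_RC_EC == 0xa), which is why the reserved encodings
	 * 0x2 and 0x3 read "PCIe unknown".
	 */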
1942 	int type;
1943 
1944 	if (pci_is_pcie(dev)) {
1945 		type = pci_pcie_type(dev);
1946 		if (type < ARRAY_SIZE(str))
1947 			return str[type];
1948 
1949 		return "PCIe unknown";
1950 	}
1951 
1952 	switch (dev->hdr_type) {
1953 	case PCI_HEADER_TYPE_NORMAL:
1954 		return "conventional PCI endpoint";
1955 	case PCI_HEADER_TYPE_BRIDGE:
1956 		return "conventional PCI bridge";
1957 	case PCI_HEADER_TYPE_CARDBUS:
1958 		return "CardBus bridge";
1959 	default:
1960 		return "conventional PCI";
1961 	}
1962 }
1963 
1964 /**
1965  * pci_setup_device - Fill in class and map information of a device
1966  * @dev: the device structure to fill
1967  *
1968  * Initialize the device structure with information about the device's
1969  * vendor, class, memory and IO-space addresses, IRQ lines, etc.
1970  * Called at initialisation of the PCI subsystem and by CardBus services.
1971  * Returns 0 on success and a negative error code if the device is of an
1972  * unknown type (not normal, bridge or CardBus).
1973  */
1974 int pci_setup_device(struct pci_dev *dev)
1975 {
1976 	u32 class;
1977 	u16 cmd;
1978 	u8 hdr_type;
1979 	int err, pos = 0;
1980 	struct pci_bus_region region;
1981 	struct resource *res;
1982 
1983 	hdr_type = pci_hdr_type(dev);
1984 
1985 	dev->sysdata = dev->bus->sysdata;
1986 	dev->dev.parent = dev->bus->bridge;
1987 	dev->dev.bus = &pci_bus_type;
1988 	dev->hdr_type = hdr_type & 0x7f;
1989 	dev->multifunction = !!(hdr_type & 0x80);
1990 	dev->error_state = pci_channel_io_normal;
1991 	set_pcie_port_type(dev);
1992 
1993 	err = pci_set_of_node(dev);
1994 	if (err)
1995 		return err;
1996 	pci_set_acpi_fwnode(dev);
1997 
1998 	pci_dev_assign_slot(dev);
1999 
2000 	/*
2001 	 * Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
2002 	 * set this higher, assuming the system even supports it.
2003 	 */
2004 	dev->dma_mask = 0xffffffff;
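
	/*
	 * For illustration only (nothing here executes it): a driver whose
	 * hardware can address 64 bits would later raise this limit itself,
	 * typically with something like:
	 *
	 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
	 *		dev_warn(&pdev->dev, "falling back to 32-bit DMA\n");
	 */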
2005 
2006 	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
2007 		     dev->bus->number, PCI_SLOT(dev->devfn),
2008 		     PCI_FUNC(dev->devfn));
2009 
2010 	class = pci_class(dev);
2011 
2012 	dev->revision = class & 0xff;
2013 	dev->class = class >> 8;		    /* upper 3 bytes */
2014 
2015 	if (pci_early_dump)
2016 		early_dump_pci_device(dev);
2017 
2018 	/* Need to have dev->class ready */
2019 	dev->cfg_size = pci_cfg_space_size(dev);
2020 
2021 	/* Need to have dev->cfg_size ready */
2022 	set_pcie_thunderbolt(dev);
2023 
2024 	set_pcie_untrusted(dev);
2025 
2026 	if (pci_is_pcie(dev))
2027 		dev->supported_speeds = pcie_get_supported_speeds(dev);
2028 
2029 	/* "Unknown power state" */
2030 	dev->current_state = PCI_UNKNOWN;
2031 
2032 	/* Early fixups, before probing the BARs */
2033 	pci_fixup_device(pci_fixup_early, dev);
2034 
2035 	pci_set_removable(dev);
2036 
2037 	pci_info(dev, "[%04x:%04x] type %02x class %#08x %s\n",
2038 		 dev->vendor, dev->device, dev->hdr_type, dev->class,
2039 		 pci_type_str(dev));
2040 
2041 	/* Device class may be changed after fixup */
2042 	class = dev->class >> 8;
2043 
2044 	if (dev->non_compliant_bars && !dev->mmio_always_on) {
2045 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
2046 		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
2047 			pci_info(dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
2048 			cmd &= ~PCI_COMMAND_IO;
2049 			cmd &= ~PCI_COMMAND_MEMORY;
2050 			pci_write_config_word(dev, PCI_COMMAND, cmd);
2051 		}
2052 	}
2053 
2054 	dev->broken_intx_masking = pci_intx_mask_broken(dev);
2055 
2056 	switch (dev->hdr_type) {		    /* header type */
2057 	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
2058 		if (class == PCI_CLASS_BRIDGE_PCI)
2059 			goto bad;
2060 		pci_read_irq(dev);
2061 		pci_read_bases(dev, PCI_STD_NUM_BARS, PCI_ROM_ADDRESS);
2062 
2063 		pci_subsystem_ids(dev, &dev->subsystem_vendor, &dev->subsystem_device);
2064 
2065 		/*
2066 		 * Do the ugly legacy mode stuff here rather than in broken
2067 		 * chip quirk code. Legacy mode ATA controllers have fixed
2068 		 * addresses. These are not always echoed in BAR0-3, and
2069 		 * BAR0-3 in a few cases contain junk!
2070 		 */
2071 		if (class == PCI_CLASS_STORAGE_IDE) {
2072 			u8 progif;
2073 			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
2074 			if ((progif & 1) == 0) {
2075 				region.start = 0x1F0;
2076 				region.end = 0x1F7;
2077 				res = &dev->resource[0];
2078 				res->flags = LEGACY_IO_RESOURCE;
2079 				pcibios_bus_to_resource(dev->bus, res, &region);
2080 				pci_info(dev, "BAR 0 %pR: legacy IDE quirk\n",
2081 					 res);
2082 				region.start = 0x3F6;
2083 				region.end = 0x3F6;
2084 				res = &dev->resource[1];
2085 				res->flags = LEGACY_IO_RESOURCE;
2086 				pcibios_bus_to_resource(dev->bus, res, &region);
2087 				pci_info(dev, "BAR 1 %pR: legacy IDE quirk\n",
2088 					 res);
2089 			}
2090 			if ((progif & 4) == 0) {
2091 				region.start = 0x170;
2092 				region.end = 0x177;
2093 				res = &dev->resource[2];
2094 				res->flags = LEGACY_IO_RESOURCE;
2095 				pcibios_bus_to_resource(dev->bus, res, &region);
2096 				pci_info(dev, "BAR 2 %pR: legacy IDE quirk\n",
2097 					 res);
2098 				region.start = 0x376;
2099 				region.end = 0x376;
2100 				res = &dev->resource[3];
2101 				res->flags = LEGACY_IO_RESOURCE;
2102 				pcibios_bus_to_resource(dev->bus, res, &region);
2103 				pci_info(dev, "BAR 3 %pR: legacy IDE quirk\n",
2104 					 res);
2105 			}
2106 		}
2107 		break;
2108 
2109 	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
2110 		/*
2111 		 * The PCI-to-PCI bridge spec requires that subtractive
2112 		 * decoding (i.e. transparent) bridge must have programming
2113 		 * interface code of 0x01.
2114 		 */
2115 		pci_read_irq(dev);
2116 		dev->transparent = ((dev->class & 0xff) == 1);
2117 		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
2118 		pci_read_bridge_windows(dev);
2119 		set_pcie_hotplug_bridge(dev);
2120 		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
2121 		if (pos) {
2122 			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
2123 			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
2124 		}
2125 		break;
2126 
2127 	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
2128 		if (class != PCI_CLASS_BRIDGE_CARDBUS)
2129 			goto bad;
2130 		pci_read_irq(dev);
2131 		pci_read_bases(dev, 1, 0);
2132 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
2133 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
2134 		break;
2135 
2136 	default:				    /* unknown header */
2137 		pci_err(dev, "unknown header type %02x, ignoring device\n",
2138 			dev->hdr_type);
2139 		pci_release_of_node(dev);
2140 		return -EIO;
2141 
2142 	bad:
2143 		pci_err(dev, "ignoring class %#08x (doesn't match header type %02x)\n",
2144 			dev->class, dev->hdr_type);
2145 		dev->class = PCI_CLASS_NOT_DEFINED << 8;
2146 	}
2147 
2148 	/* We found a fine healthy device, go go go... */
2149 	return 0;
2150 }
2151 
2152 static void pci_configure_mps(struct pci_dev *dev)
2153 {
2154 	struct pci_dev *bridge = pci_upstream_bridge(dev);
2155 	int mps, mpss, p_mps, rc;
2156 
2157 	if (!pci_is_pcie(dev))
2158 		return;
2159 
2160 	/* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
2161 	if (dev->is_virtfn)
2162 		return;
2163 
2164 	/*
2165 	 * For Root Complex Integrated Endpoints, program the maximum
2166 	 * supported value unless limited by the PCIE_BUS_PEER2PEER case.
2167 	 */
2168 	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
2169 		if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2170 			mps = 128;
2171 		else
2172 			mps = 128 << dev->pcie_mpss;
2173 		rc = pcie_set_mps(dev, mps);
2174 		if (rc) {
2175 			pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
2176 				 mps);
2177 		}
2178 		return;
2179 	}
2180 
2181 	if (!bridge || !pci_is_pcie(bridge))
2182 		return;
2183 
2184 	mps = pcie_get_mps(dev);
2185 	p_mps = pcie_get_mps(bridge);
2186 
2187 	if (mps == p_mps)
2188 		return;
2189 
2190 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
2191 		pci_warn(dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
2192 			 mps, pci_name(bridge), p_mps);
2193 		return;
2194 	}
2195 
2196 	/*
2197 	 * Fancier MPS configuration is done later by
2198 	 * pcie_bus_configure_settings()
2199 	 */
2200 	if (pcie_bus_config != PCIE_BUS_DEFAULT)
2201 		return;
2202 
2203 	mpss = 128 << dev->pcie_mpss;
2204 	if (mpss < p_mps && pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
2205 		pcie_set_mps(bridge, mpss);
2206 		pci_info(dev, "Upstream bridge's Max Payload Size set to %d (was %d, max %d)\n",
2207 			 mpss, p_mps, 128 << bridge->pcie_mpss);
2208 		p_mps = pcie_get_mps(bridge);
2209 	}
2210 
2211 	rc = pcie_set_mps(dev, p_mps);
2212 	if (rc) {
2213 		pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
2214 			 p_mps);
2215 		return;
2216 	}
2217 
2218 	pci_info(dev, "Max Payload Size set to %d (was %d, max %d)\n",
2219 		 p_mps, mps, mpss);
2220 }
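
/*
 * Worked example of the MPS arithmetic used above: both the MPSS field in
 * Device Capabilities and the MPS field in Device Control encode a power
 * of two in 128-byte units, i.e. payload = 128 << encoding, so encodings
 * 0 through 5 select 128, 256, 512, 1024, 2048 and 4096 bytes.  A device
 * with pcie_mpss == 2 below a bridge already running at 512 bytes is thus
 * programmed to 512, the largest size both ends support.
 */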
2221 
2222 int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
2223 {
2224 	struct pci_host_bridge *host;
2225 	u32 cap;
2226 	u16 ctl;
2227 	int ret;
2228 
2229 	if (!pci_is_pcie(dev))
2230 		return 0;
2231 
2232 	ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
2233 	if (ret)
2234 		return 0;
2235 
2236 	if (!(cap & PCI_EXP_DEVCAP_EXT_TAG))
2237 		return 0;
2238 
2239 	ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
2240 	if (ret)
2241 		return 0;
2242 
2243 	host = pci_find_host_bridge(dev->bus);
2244 	if (!host)
2245 		return 0;
2246 
2247 	/*
2248 	 * If some device in the hierarchy doesn't handle Extended Tags
2249 	 * correctly, make sure they're disabled.
2250 	 */
2251 	if (host->no_ext_tags) {
2252 		if (ctl & PCI_EXP_DEVCTL_EXT_TAG) {
2253 			pci_info(dev, "disabling Extended Tags\n");
2254 			pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
2255 						   PCI_EXP_DEVCTL_EXT_TAG);
2256 		}
2257 		return 0;
2258 	}
2259 
2260 	if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) {
2261 		pci_info(dev, "enabling Extended Tags\n");
2262 		pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
2263 					 PCI_EXP_DEVCTL_EXT_TAG);
2264 	}
2265 	return 0;
2266 }
2267 
2268 /**
2269  * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
2270  * @dev: PCI device to query
2271  *
2272  * Returns true if the device has enabled relaxed ordering attribute.
2273  */
2274 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
2275 {
2276 	u16 v;
2277 
2278 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
2279 
2280 	return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
2281 }
2282 EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
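
/*
 * A minimal usage sketch (hypothetical driver code): an endpoint driver
 * that can tag its DMA writes with the Relaxed Ordering attribute may use
 * the helper above to decide whether doing so is safe:
 *
 *	if (pcie_relaxed_ordering_enabled(pdev))
 *		my_hw_enable_ro_writes(priv);
 *
 * where my_hw_enable_ro_writes() is a made-up device-specific operation.
 */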
2283 
2284 static void pci_configure_relaxed_ordering(struct pci_dev *dev)
2285 {
2286 	struct pci_dev *root;
2287 
2288 	/* PCI_EXP_DEVCTL_RELAX_EN is RsvdP in VFs */
2289 	if (dev->is_virtfn)
2290 		return;
2291 
2292 	if (!pcie_relaxed_ordering_enabled(dev))
2293 		return;
2294 
2295 	/*
2296 	 * For now, we only deal with Relaxed Ordering issues with Root
2297 	 * Ports. Peer-to-Peer DMA is another can of worms.
2298 	 */
2299 	root = pcie_find_root_port(dev);
2300 	if (!root)
2301 		return;
2302 
2303 	if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
2304 		pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
2305 					   PCI_EXP_DEVCTL_RELAX_EN);
2306 		pci_info(dev, "Relaxed Ordering disabled because the Root Port didn't support it\n");
2307 	}
2308 }
2309 
2310 static void pci_configure_eetlp_prefix(struct pci_dev *dev)
2311 {
2312 	struct pci_dev *bridge;
2313 	unsigned int eetlp_max;
2314 	int pcie_type;
2315 	u32 cap;
2316 
2317 	if (!pci_is_pcie(dev))
2318 		return;
2319 
2320 	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
2321 	if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX))
2322 		return;
2323 
2324 	pcie_type = pci_pcie_type(dev);
2325 
2326 	eetlp_max = FIELD_GET(PCI_EXP_DEVCAP2_EE_PREFIX_MAX, cap);
2327 	/* 00b means 4 */
2328 	eetlp_max = eetlp_max ?: 4;
2329 
2330 	if (pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
2331 	    pcie_type == PCI_EXP_TYPE_RC_END)
2332 		dev->eetlp_prefix_max = eetlp_max;
2333 	else {
2334 		bridge = pci_upstream_bridge(dev);
2335 		if (bridge && bridge->eetlp_prefix_max)
2336 			dev->eetlp_prefix_max = eetlp_max;
2337 	}
2338 }
2339 
2340 static void pci_configure_serr(struct pci_dev *dev)
2341 {
2342 	u16 control;
2343 
2344 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
2345 
2346 		/*
2347 		 * A bridge will not forward ERR_ messages coming from an
2348 		 * endpoint unless SERR# forwarding is enabled.
2349 		 */
2350 		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &control);
2351 		if (!(control & PCI_BRIDGE_CTL_SERR)) {
2352 			control |= PCI_BRIDGE_CTL_SERR;
2353 			pci_write_config_word(dev, PCI_BRIDGE_CONTROL, control);
2354 		}
2355 	}
2356 }
2357 
2358 static void pci_configure_device(struct pci_dev *dev)
2359 {
2360 	pci_configure_mps(dev);
2361 	pci_configure_extended_tags(dev, NULL);
2362 	pci_configure_relaxed_ordering(dev);
2363 	pci_configure_ltr(dev);
2364 	pci_configure_aspm_l1ss(dev);
2365 	pci_configure_eetlp_prefix(dev);
2366 	pci_configure_serr(dev);
2367 
2368 	pci_acpi_program_hp_params(dev);
2369 }
2370 
2371 static void pci_release_capabilities(struct pci_dev *dev)
2372 {
2373 	pci_aer_exit(dev);
2374 	pci_rcec_exit(dev);
2375 	pci_iov_release(dev);
2376 	pci_free_cap_save_buffers(dev);
2377 }
2378 
2379 /**
2380  * pci_release_dev - Free a PCI device structure when all users of it are
2381  *		     finished
2382  * @dev: device that's been disconnected
2383  *
2384  * Will be called only by the device core when all users of this PCI device are
2385  * done.
2386  */
2387 static void pci_release_dev(struct device *dev)
2388 {
2389 	struct pci_dev *pci_dev;
2390 
2391 	pci_dev = to_pci_dev(dev);
2392 	pci_release_capabilities(pci_dev);
2393 	pci_release_of_node(pci_dev);
2394 	pcibios_release_device(pci_dev);
2395 	pci_bus_put(pci_dev->bus);
2396 	kfree(pci_dev->driver_override);
2397 	bitmap_free(pci_dev->dma_alias_mask);
2398 	dev_dbg(dev, "device released\n");
2399 	kfree(pci_dev);
2400 }
2401 
2402 static const struct device_type pci_dev_type = {
2403 	.groups = pci_dev_attr_groups,
2404 };
2405 
2406 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
2407 {
2408 	struct pci_dev *dev;
2409 
2410 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
2411 	if (!dev)
2412 		return NULL;
2413 
2414 	INIT_LIST_HEAD(&dev->bus_list);
2415 	dev->dev.type = &pci_dev_type;
2416 	dev->bus = pci_bus_get(bus);
2417 	dev->driver_exclusive_resource = (struct resource) {
2418 		.name = "PCI Exclusive",
2419 		.start = 0,
2420 		.end = -1,
2421 	};
2422 
2423 	spin_lock_init(&dev->pcie_cap_lock);
2424 #ifdef CONFIG_PCI_MSI
2425 	raw_spin_lock_init(&dev->msi_lock);
2426 #endif
2427 	return dev;
2428 }
2429 EXPORT_SYMBOL(pci_alloc_dev);
2430 
2431 static bool pci_bus_wait_rrs(struct pci_bus *bus, int devfn, u32 *l,
2432 			     int timeout)
2433 {
2434 	int delay = 1;
2435 
2436 	if (!pci_bus_rrs_vendor_id(*l))
2437 		return true;	/* not a Configuration RRS completion */
2438 
2439 	if (!timeout)
2440 		return false;	/* RRS, but caller doesn't want to wait */
2441 
2442 	/*
2443 	 * We got the reserved Vendor ID that indicates a completion with
2444 	 * Configuration Request Retry Status (RRS).  Retry until we get a
2445 	 * valid Vendor ID or we time out.
2446 	 */
2447 	while (pci_bus_rrs_vendor_id(*l)) {
2448 		if (delay > timeout) {
2449 			pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
2450 				pci_domain_nr(bus), bus->number,
2451 				PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2452 
2453 			return false;
2454 		}
2455 		if (delay >= 1000)
2456 			pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n",
2457 				pci_domain_nr(bus), bus->number,
2458 				PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2459 
2460 		msleep(delay);
2461 		delay *= 2;
2462 
2463 		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
2464 			return false;
2465 	}
2466 
2467 	if (delay >= 1000)
2468 		pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n",
2469 			pci_domain_nr(bus), bus->number,
2470 			PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2471 
2472 	return true;
2473 }
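
/*
 * Worked example of the backoff above: the delay doubles from 1 ms (1, 2,
 * 4, 8, ... ms), so the accumulated sleep stays within roughly twice the
 * caller's timeout.  With the 60000 ms timeout used by pci_scan_device()
 * below, the last retry sleeps 32768 ms, about 65.5 s in total, before
 * the loop gives up.
 */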
2474 
2475 bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
2476 					int timeout)
2477 {
2478 	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
2479 		return false;
2480 
2481 	/* Some broken boards return 0 or ~0 (PCI_ERROR_RESPONSE) if a slot is empty: */
2482 	if (PCI_POSSIBLE_ERROR(*l) || *l == 0x00000000 ||
2483 	    *l == 0x0000ffff || *l == 0xffff0000)
2484 		return false;
2485 
2486 	if (pci_bus_rrs_vendor_id(*l))
2487 		return pci_bus_wait_rrs(bus, devfn, l, timeout);
2488 
2489 	return true;
2490 }
2491 
2492 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
2493 				int timeout)
2494 {
2495 #ifdef CONFIG_PCI_QUIRKS
2496 	struct pci_dev *bridge = bus->self;
2497 
2498 	/*
2499 	 * Certain IDT switches have an issue where they improperly trigger
2500 	 * ACS Source Validation errors on completions for config reads.
2501 	 */
2502 	if (bridge && bridge->vendor == PCI_VENDOR_ID_IDT &&
2503 	    bridge->device == 0x80b5)
2504 		return pci_idt_bus_quirk(bus, devfn, l, timeout);
2505 #endif
2506 
2507 	return pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout);
2508 }
2509 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
2510 
2511 static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn)
2512 {
2513 	struct pci_host_bridge *host = pci_find_host_bridge(bus);
2514 	struct platform_device *pdev;
2515 	struct device_node *np;
2516 
2517 	np = of_pci_find_child_device(dev_of_node(&bus->dev), devfn);
2518 	if (!np || of_find_device_by_node(np))
2519 		return NULL;
2520 
2521 	/*
2522 	 * First check whether the pwrctrl device really needs to be created.
2523 	 * It is needed only when at least one power supply is defined in the
2524 	 * devicetree node of the device.
2525 	 */
2526 	if (!of_pci_supply_present(np)) {
2527 		pr_debug("PCI/pwrctrl: Skipping OF node: %s\n", np->name);
2528 		return NULL;
2529 	}
2530 
2531 	/* Now create the pwrctrl device */
2532 	pdev = of_platform_device_create(np, NULL, &host->dev);
2533 	if (!pdev) {
2534 		pr_err("PCI/pwrctrl: Failed to create pwrctrl device for node: %s\n", np->name);
2535 		return NULL;
2536 	}
2537 
2538 	return pdev;
2539 }
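
/*
 * Illustrative devicetree fragment (hypothetical): a child node such as
 *
 *	pcie@0 {
 *		reg = <0x0 0x0 0x0 0x0 0x0>;
 *		vpcie3v3-supply = <&pcie_3v3_regulator>;
 *	};
 *
 * carries a "*-supply" property, so of_pci_supply_present() is expected to
 * return true and a pwrctrl platform device is created before the PCI
 * function itself is scanned.
 */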
2540 
2541 /*
2542  * Read the config data for a PCI device, sanity-check it,
2543  * and fill in the dev structure.
2544  */
2545 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
2546 {
2547 	struct pci_dev *dev;
2548 	u32 l;
2549 
2550 	/*
2551 	 * Create pwrctrl device (if required) for the PCI device to handle the
2552 	 * power state. If the pwrctrl device is created, then skip scanning
2553 	 * further as the pwrctrl core will rescan the bus after powering on
2554 	 * the device.
2555 	 */
2556 	if (pci_pwrctrl_create_device(bus, devfn))
2557 		return NULL;
2558 
2559 	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
2560 		return NULL;
2561 
2562 	dev = pci_alloc_dev(bus);
2563 	if (!dev)
2564 		return NULL;
2565 
2566 	dev->devfn = devfn;
2567 	dev->vendor = l & 0xffff;
2568 	dev->device = (l >> 16) & 0xffff;
2569 
2570 	if (pci_setup_device(dev)) {
2571 		pci_bus_put(dev->bus);
2572 		kfree(dev);
2573 		return NULL;
2574 	}
2575 
2576 	return dev;
2577 }
2578 
2579 void pcie_report_downtraining(struct pci_dev *dev)
2580 {
2581 	if (!pci_is_pcie(dev))
2582 		return;
2583 
2584 	/* Look from the device up to avoid downstream ports with no devices */
2585 	if ((pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT) &&
2586 	    (pci_pcie_type(dev) != PCI_EXP_TYPE_LEG_END) &&
2587 	    (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM))
2588 		return;
2589 
2590 	/* Multi-function PCIe devices share the same link/status */
2591 	if (PCI_FUNC(dev->devfn) != 0 || dev->is_virtfn)
2592 		return;
2593 
2594 	/* Print link status only if the device is constrained by the fabric */
2595 	__pcie_print_link_status(dev, false);
2596 }
2597 
2598 static void pci_init_capabilities(struct pci_dev *dev)
2599 {
2600 	pci_ea_init(dev);		/* Enhanced Allocation */
2601 	pci_msi_init(dev);		/* Disable MSI */
2602 	pci_msix_init(dev);		/* Disable MSI-X */
2603 
2604 	/* Buffers for saving PCIe and PCI-X capabilities */
2605 	pci_allocate_cap_save_buffers(dev);
2606 
2607 	pci_pm_init(dev);		/* Power Management */
2608 	pci_vpd_init(dev);		/* Vital Product Data */
2609 	pci_configure_ari(dev);		/* Alternative Routing-ID Forwarding */
2610 	pci_iov_init(dev);		/* Single Root I/O Virtualization */
2611 	pci_ats_init(dev);		/* Address Translation Services */
2612 	pci_pri_init(dev);		/* Page Request Interface */
2613 	pci_pasid_init(dev);		/* Process Address Space ID */
2614 	pci_acs_init(dev);		/* Access Control Services */
2615 	pci_ptm_init(dev);		/* Precision Time Measurement */
2616 	pci_aer_init(dev);		/* Advanced Error Reporting */
2617 	pci_dpc_init(dev);		/* Downstream Port Containment */
2618 	pci_rcec_init(dev);		/* Root Complex Event Collector */
2619 	pci_doe_init(dev);		/* Data Object Exchange */
2620 	pci_tph_init(dev);		/* TLP Processing Hints */
2621 	pci_rebar_init(dev);		/* Resizable BAR */
2622 
2623 	pcie_report_downtraining(dev);
2624 	pci_init_reset_methods(dev);
2625 }
2626 
2627 /*
2628  * This is the equivalent of pci_host_bridge_msi_domain() that acts on
2629  * devices. Firmware interfaces that can select the MSI domain on a
2630  * per-device basis should be called from here.
2631  */
2632 static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
2633 {
2634 	struct irq_domain *d;
2635 
2636 	/*
2637 	 * If a domain has been set through the pcibios_device_add()
2638 	 * callback, then this is the one (platform code knows best).
2639 	 */
2640 	d = dev_get_msi_domain(&dev->dev);
2641 	if (d)
2642 		return d;
2643 
2644 	/*
2645 	 * Let's see if we have a firmware interface able to provide
2646 	 * the domain.
2647 	 */
2648 	d = pci_msi_get_device_domain(dev);
2649 	if (d)
2650 		return d;
2651 
2652 	return NULL;
2653 }
2654 
2655 static void pci_set_msi_domain(struct pci_dev *dev)
2656 {
2657 	struct irq_domain *d;
2658 
2659 	/*
2660 	 * If the platform or firmware interfaces cannot supply a
2661 	 * device-specific MSI domain, then inherit the default domain
2662 	 * from the host bridge itself.
2663 	 */
2664 	d = pci_dev_msi_domain(dev);
2665 	if (!d)
2666 		d = dev_get_msi_domain(&dev->bus->dev);
2667 
2668 	dev_set_msi_domain(&dev->dev, d);
2669 }
2670 
2671 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
2672 {
2673 	int ret;
2674 
2675 	pci_configure_device(dev);
2676 
2677 	device_initialize(&dev->dev);
2678 	dev->dev.release = pci_release_dev;
2679 
2680 	set_dev_node(&dev->dev, pcibus_to_node(bus));
2681 	dev->dev.dma_mask = &dev->dma_mask;
2682 	dev->dev.dma_parms = &dev->dma_parms;
2683 	dev->dev.coherent_dma_mask = 0xffffffffull;
2684 
2685 	dma_set_max_seg_size(&dev->dev, 65536);
2686 	dma_set_seg_boundary(&dev->dev, 0xffffffff);
2687 
2688 	pcie_failed_link_retrain(dev);
2689 
2690 	/* Fix up broken headers */
2691 	pci_fixup_device(pci_fixup_header, dev);
2692 
2693 	pci_reassigndev_resource_alignment(dev);
2694 
2695 	dev->state_saved = false;
2696 
2697 	pci_init_capabilities(dev);
2698 
2699 	/*
2700 	 * Add the device to our list of discovered devices
2701 	 * and the bus list for fixup functions, etc.
2702 	 */
2703 	down_write(&pci_bus_sem);
2704 	list_add_tail(&dev->bus_list, &bus->devices);
2705 	up_write(&pci_bus_sem);
2706 
2707 	ret = pcibios_device_add(dev);
2708 	WARN_ON(ret < 0);
2709 
2710 	/* Set up MSI IRQ domain */
2711 	pci_set_msi_domain(dev);
2712 
2713 	/* Notifier could use PCI capabilities */
2714 	ret = device_add(&dev->dev);
2715 	WARN_ON(ret < 0);
2716 
2717 	pci_npem_create(dev);
2718 
2719 	pci_doe_sysfs_init(dev);
2720 }
2721 
2722 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
2723 {
2724 	struct pci_dev *dev;
2725 
2726 	dev = pci_get_slot(bus, devfn);
2727 	if (dev) {
2728 		pci_dev_put(dev);
2729 		return dev;
2730 	}
2731 
2732 	dev = pci_scan_device(bus, devfn);
2733 	if (!dev)
2734 		return NULL;
2735 
2736 	pci_device_add(dev, bus);
2737 
2738 	return dev;
2739 }
2740 EXPORT_SYMBOL(pci_scan_single_device);
2741 
2742 static int next_ari_fn(struct pci_bus *bus, struct pci_dev *dev, int fn)
2743 {
2744 	int pos;
2745 	u16 cap = 0;
2746 	unsigned int next_fn;
2747 
2748 	if (!dev)
2749 		return -ENODEV;
2750 
2751 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
2752 	if (!pos)
2753 		return -ENODEV;
2754 
2755 	pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
2756 	next_fn = PCI_ARI_CAP_NFN(cap);
2757 	if (next_fn <= fn)
2758 		return -ENODEV;	/* protect against malformed list */
2759 
2760 	return next_fn;
2761 }
2762 
2763 static int next_fn(struct pci_bus *bus, struct pci_dev *dev, int fn)
2764 {
2765 	if (pci_ari_enabled(bus))
2766 		return next_ari_fn(bus, dev, fn);
2767 
2768 	if (fn >= 7)
2769 		return -ENODEV;
2770 	/* only multifunction devices may have more functions */
2771 	if (dev && !dev->multifunction)
2772 		return -ENODEV;
2773 
2774 	return fn + 1;
2775 }
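
/*
 * Illustrative only: without ARI a slot spans functions 0-7, so the walk
 * is simply 0, 1, ..., 7.  With ARI enabled a device may expose up to 256
 * functions, and each function's ARI "Next Function Number" field links
 * to the next one; a hypothetical device could chain 0 -> 8 -> 131, with
 * the walk ending when next_ari_fn() finds no higher function number.
 */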
2776 
2777 static int only_one_child(struct pci_bus *bus)
2778 {
2779 	struct pci_dev *bridge = bus->self;
2780 
2781 	/*
2782 	 * Systems with unusual topologies set PCI_SCAN_ALL_PCIE_DEVS so
2783 	 * we scan for all possible devices, not just Device 0.
2784 	 */
2785 	if (pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
2786 		return 0;
2787 
2788 	/*
2789 	 * A PCIe Downstream Port normally leads to a Link with only Device
2790 	 * 0 on it (PCIe spec r3.1, sec 7.3.1).  As an optimization, scan
2791 	 * only for Device 0 in that situation.
2792 	 */
2793 	if (bridge && pci_is_pcie(bridge) && pcie_downstream_port(bridge))
2794 		return 1;
2795 
2796 	return 0;
2797 }
2798 
2799 /**
2800  * pci_scan_slot - Scan a PCI slot on a bus for devices
2801  * @bus: PCI bus to scan
2802  * @devfn: slot number to scan (must have zero function)
2803  *
2804  * Scan a PCI slot on the specified PCI bus for devices, adding
2805  * discovered devices to the @bus->devices list.  New devices
2806  * will not have is_added set.
2807  *
2808  * Returns the number of new devices found.
2809  */
2810 int pci_scan_slot(struct pci_bus *bus, int devfn)
2811 {
2812 	struct pci_dev *dev;
2813 	int fn = 0, nr = 0;
2814 
2815 	if (only_one_child(bus) && (devfn > 0))
2816 		return 0; /* Already scanned the entire slot */
2817 
2818 	do {
2819 		dev = pci_scan_single_device(bus, devfn + fn);
2820 		if (dev) {
2821 			if (!pci_dev_is_added(dev))
2822 				nr++;
2823 			if (fn > 0)
2824 				dev->multifunction = 1;
2825 		} else if (fn == 0) {
2826 			/*
2827 			 * Function 0 is required unless we are running on
2828 			 * a hypervisor that passes through individual PCI
2829 			 * functions.
2830 			 */
2831 			if (!hypervisor_isolated_pci_functions())
2832 				break;
2833 		}
2834 		fn = next_fn(bus, dev, fn);
2835 	} while (fn >= 0);
2836 
2837 	/* Only one slot has a PCIe device */
2838 	if (bus->self && nr)
2839 		pcie_aspm_init_link_state(bus->self);
2840 
2841 	return nr;
2842 }
2843 EXPORT_SYMBOL(pci_scan_slot);
2844 
2845 static int pcie_find_smpss(struct pci_dev *dev, void *data)
2846 {
2847 	u8 *smpss = data;
2848 
2849 	if (!pci_is_pcie(dev))
2850 		return 0;
2851 
2852 	/*
2853 	 * We don't have a way to change MPS settings on devices that have
2854 	 * drivers attached.  A hot-added device might support only the minimum
2855 	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
2856 	 * where devices may be hot-added, we limit the fabric MPS to 128 so
2857 	 * hot-added devices will work correctly.
2858 	 *
2859 	 * However, if we hot-add a device to a slot directly below a Root
2860 	 * Port, it's impossible for there to be other existing devices below
2861 	 * the port.  We don't limit the MPS in this case because we can
2862 	 * reconfigure MPS on both the Root Port and the hot-added device,
2863 	 * and there are no other devices involved.
2864 	 *
2865 	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
2866 	 */
2867 	if (dev->is_hotplug_bridge &&
2868 	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
2869 		*smpss = 0;
2870 
2871 	if (*smpss > dev->pcie_mpss)
2872 		*smpss = dev->pcie_mpss;
2873 
2874 	return 0;
2875 }
2876 
2877 static void pcie_write_mps(struct pci_dev *dev, int mps)
2878 {
2879 	int rc;
2880 
2881 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
2882 		mps = 128 << dev->pcie_mpss;
2883 
2884 		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
2885 		    dev->bus->self)
2886 
2887 			/*
2888 			 * For "Performance", the assumption is made that
2889 			 * downstream communication will never be larger than
2890 			 * the MRRS.  So, the MPS only needs to be configured
2891 			 * for the upstream communication.  This being the case,
2892 			 * walk from the top down and set the MPS of the child
2893 			 * to that of the parent bus.
2894 			 *
2895 			 * Configure the device MPS with the smaller of the
2896 			 * device MPSS or the bridge MPS (which is assumed to be
2897 			 * properly configured at this point to the largest
2898 			 * allowable MPS based on its parent bus).
2899 			 */
2900 			mps = min(mps, pcie_get_mps(dev->bus->self));
2901 	}
2902 
2903 	rc = pcie_set_mps(dev, mps);
2904 	if (rc)
2905 		pci_err(dev, "Failed to set the MPS\n");
2906 }
2907 
2908 static void pcie_write_mrrs(struct pci_dev *dev)
2909 {
2910 	int rc, mrrs;
2911 
2912 	/*
2913 	 * In the "safe" case, do not configure the MRRS.  There appear to be
2914 	 * issues with setting MRRS to 0 on a number of devices.
2915 	 */
2916 	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2917 		return;
2918 
2919 	/*
2920 	 * For max performance, the MRRS must be set to the largest supported
2921 	 * value.  However, it cannot be configured larger than the MPS the
2922 	 * device or the bus can support.  This should already be properly
2923 	 * configured by a prior call to pcie_write_mps().
2924 	 */
2925 	mrrs = pcie_get_mps(dev);
2926 
2927 	/*
2928 	 * MRRS is an R/W register.  Invalid values can be written, but a
2929 	 * subsequent read will verify if the value is acceptable or not.
2930 	 * If the MRRS value provided is not acceptable (e.g., too large),
2931 	 * shrink the value until it is acceptable to the HW.
2932 	 */
2933 	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
2934 		rc = pcie_set_readrq(dev, mrrs);
2935 		if (!rc)
2936 			break;
2937 
2938 		pci_warn(dev, "Failed to set the MRRS\n");
2939 		mrrs /= 2;
2940 	}
2941 
2942 	if (mrrs < 128)
2943 		pci_err(dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
2944 }
2945 
2946 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
2947 {
2948 	int mps, orig_mps;
2949 
2950 	if (!pci_is_pcie(dev))
2951 		return 0;
2952 
2953 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
2954 	    pcie_bus_config == PCIE_BUS_DEFAULT)
2955 		return 0;
2956 
2957 	mps = 128 << *(u8 *)data;
2958 	orig_mps = pcie_get_mps(dev);
2959 
2960 	pcie_write_mps(dev, mps);
2961 	pcie_write_mrrs(dev);
2962 
2963 	pci_info(dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2964 		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
2965 		 orig_mps, pcie_get_readrq(dev));
2966 
2967 	return 0;
2968 }
2969 
2970 /*
2971  * pcie_bus_configure_settings() requires that pci_walk_bus work in a top-down,
2972  * parents then children fashion.  If this changes, then this code will not
2973  * work as designed.
2974  */
2975 void pcie_bus_configure_settings(struct pci_bus *bus)
2976 {
2977 	u8 smpss = 0;
2978 
2979 	if (!bus->self)
2980 		return;
2981 
2982 	if (!pci_is_pcie(bus->self))
2983 		return;
2984 
2985 	/*
2986 	 * FIXME - Peer to peer DMA is possible, though the endpoint would need
2987 	 * to be aware of the MPS of the destination.  To work around this,
2988 	 * simply force the MPS of the entire system to the smallest possible.
2989 	 */
2990 	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2991 		smpss = 0;
2992 
2993 	if (pcie_bus_config == PCIE_BUS_SAFE) {
2994 		smpss = bus->self->pcie_mpss;
2995 
2996 		pcie_find_smpss(bus->self, &smpss);
2997 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
2998 	}
2999 
3000 	pcie_bus_configure_set(bus->self, &smpss);
3001 	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
3002 }
3003 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
3004 
3005 /*
3006  * Called after each bus is probed, but before its children are examined.  This
3007  * is marked as __weak because multiple architectures define it.
3008  */
3009 void __weak pcibios_fixup_bus(struct pci_bus *bus)
3010 {
3011        /* nothing to do, expected to be removed in the future */
3012 }
3013 
3014 /**
3015  * pci_scan_child_bus_extend() - Scan devices below a bus
3016  * @bus: Bus to scan for devices
3017  * @available_buses: Total number of buses available (%0 means do not try
3018  *		     to extend beyond the minimum)
3019  *
3020  * Scans devices below @bus including subordinate buses. Returns new
3021  * subordinate number including all the found devices. Passing
3022  * @available_buses causes the remaining bus space to be distributed
3023  * equally between hotplug-capable bridges to allow future extension of the
3024  * hierarchy.
3025  */
3026 static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
3027 					      unsigned int available_buses)
3028 {
3029 	unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
3030 	unsigned int start = bus->busn_res.start;
3031 	unsigned int devfn, cmax, max = start;
3032 	struct pci_dev *dev;
3033 
3034 	dev_dbg(&bus->dev, "scanning bus\n");
3035 
3036 	/* Go find them, Rover! */
3037 	for (devfn = 0; devfn < 256; devfn += 8)
3038 		pci_scan_slot(bus, devfn);
3039 
3040 	/* Reserve buses for SR-IOV capability */
3041 	used_buses = pci_iov_bus_range(bus);
3042 	max += used_buses;
3043 
3044 	/*
3045 	 * After performing arch-dependent fixup of the bus, look behind
3046 	 * all PCI-to-PCI bridges on this bus.
3047 	 */
3048 	if (!bus->is_added) {
3049 		dev_dbg(&bus->dev, "fixups for bus\n");
3050 		pcibios_fixup_bus(bus);
3051 		bus->is_added = 1;
3052 	}
3053 
3054 	/*
3055 	 * Calculate how many hotplug bridges and normal bridges there
3056 	 * are on this bus. We will distribute the additional available
3057 	 * buses between hotplug bridges.
3058 	 */
3059 	for_each_pci_bridge(dev, bus) {
3060 		if (dev->is_hotplug_bridge)
3061 			hotplug_bridges++;
3062 		else
3063 			normal_bridges++;
3064 	}
3065 
3066 	/*
3067 	 * Scan bridges that are already configured. We don't touch them
3068 	 * unless they are misconfigured (which will be done in the second
3069 	 * scan below).
3070 	 */
3071 	for_each_pci_bridge(dev, bus) {
3072 		cmax = max;
3073 		max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
3074 
3075 		/*
3076 		 * Reserve one bus for each bridge now to avoid extending
3077 		 * hotplug bridges too much during the second scan below.
3078 		 */
3079 		used_buses++;
3080 		if (max - cmax > 1)
3081 			used_buses += max - cmax - 1;
3082 	}
3083 
3084 	/* Scan bridges that need to be reconfigured */
3085 	for_each_pci_bridge(dev, bus) {
3086 		unsigned int buses = 0;
3087 
3088 		if (!hotplug_bridges && normal_bridges == 1) {
3089 			/*
3090 			 * There is only one bridge on the bus (upstream
3091 			 * port) so it gets all available buses which it
3092 			 * can then distribute to the possible hotplug
3093 			 * bridges below.
3094 			 */
3095 			buses = available_buses;
3096 		} else if (dev->is_hotplug_bridge) {
3097 			/*
3098 			 * Distribute the extra buses between hotplug
3099 			 * bridges if any.
3100 			 */
3101 			buses = available_buses / hotplug_bridges;
3102 			buses = min(buses, available_buses - used_buses + 1);
3103 		}
3104 
3105 		cmax = max;
3106 		max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
3107 		/* One bus is already accounted so don't add it again */
3108 		if (max - cmax > 1)
3109 			used_buses += max - cmax - 1;
3110 	}
3111 
3112 	/*
3113 	 * Make sure a hotplug bridge has at least the minimum requested
3114 	 * number of buses but allow it to grow up to the maximum available
3115 	 * bus number if there is room.
3116 	 */
3117 	if (bus->self && bus->self->is_hotplug_bridge) {
3118 		used_buses = max_t(unsigned int, available_buses,
3119 				   pci_hotplug_bus_size - 1);
3120 		if (max - start < used_buses) {
3121 			max = start + used_buses;
3122 
3123 			/* Do not allocate more buses than we have room for */
3124 			if (max > bus->busn_res.end)
3125 				max = bus->busn_res.end;
3126 
3127 			dev_dbg(&bus->dev, "%pR extended by %#02x\n",
3128 				&bus->busn_res, max - start);
3129 		}
3130 	}
3131 
3132 	/*
3133 	 * We've scanned the bus and so we know all about what's on
3134 	 * the other side of any bridges that may be on this bus plus
3135 	 * any devices.
3136 	 *
3137 	 * Return how far we've got finding sub-buses.
3138 	 */
3139 	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
3140 	return max;
3141 }
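
/*
 * Worked example of the distribution above (numbers are hypothetical):
 * with available_buses = 12 and three hotplug bridges on the bus, each
 * hotplug bridge is offered 12 / 3 = 4 bus numbers, clamped so the total
 * handed out never exceeds what is still unused.  If the bus instead has
 * a single non-hotplug bridge (the typical switch upstream port case),
 * that bridge receives all 12 so it can redistribute them one level down.
 */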
3142 
3143 /**
3144  * pci_scan_child_bus() - Scan devices below a bus
3145  * @bus: Bus to scan for devices
3146  *
3147  * Scans devices below @bus, including subordinate buses. Returns the new
3148  * subordinate number including all the found devices.
3149  */
3150 unsigned int pci_scan_child_bus(struct pci_bus *bus)
3151 {
3152 	return pci_scan_child_bus_extend(bus, 0);
3153 }
3154 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
3155 
3156 /**
3157  * pcibios_root_bridge_prepare - Platform-specific host bridge setup
3158  * @bridge: Host bridge to set up
3159  *
3160  * Default empty implementation.  Replace with an architecture-specific setup
3161  * routine, if necessary.
3162  */
3163 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
3164 {
3165 	return 0;
3166 }
3167 
3168 void __weak pcibios_add_bus(struct pci_bus *bus)
3169 {
3170 }
3171 
3172 void __weak pcibios_remove_bus(struct pci_bus *bus)
3173 {
3174 }
3175 
3176 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
3177 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
3178 {
3179 	int error;
3180 	struct pci_host_bridge *bridge;
3181 
3182 	bridge = pci_alloc_host_bridge(0);
3183 	if (!bridge)
3184 		return NULL;
3185 
3186 	bridge->dev.parent = parent;
3187 
3188 	list_splice_init(resources, &bridge->windows);
3189 	bridge->sysdata = sysdata;
3190 	bridge->busnr = bus;
3191 	bridge->ops = ops;
3192 
3193 	error = pci_register_host_bridge(bridge);
3194 	if (error < 0)
3195 		goto err_out;
3196 
3197 	return bridge->bus;
3198 
3199 err_out:
3200 	put_device(&bridge->dev);
3201 	return NULL;
3202 }
3203 EXPORT_SYMBOL_GPL(pci_create_root_bus);
3204 
3205 int pci_host_probe(struct pci_host_bridge *bridge)
3206 {
3207 	struct pci_bus *bus, *child;
3208 	int ret;
3209 
3210 	pci_lock_rescan_remove();
3211 	ret = pci_scan_root_bus_bridge(bridge);
3212 	pci_unlock_rescan_remove();
3213 	if (ret < 0) {
3214 		dev_err(bridge->dev.parent, "Scanning root bridge failed\n");
3215 		return ret;
3216 	}
3217 
3218 	bus = bridge->bus;
3219 
3220 	/* If we must preserve the resource configuration, claim now */
3221 	if (bridge->preserve_config)
3222 		pci_bus_claim_resources(bus);
3223 
3224 	/*
3225 	 * Assign whatever was left unassigned. If we didn't claim above,
3226 	 * this will reassign everything.
3227 	 */
3228 	pci_assign_unassigned_root_bus_resources(bus);
3229 
3230 	list_for_each_entry(child, &bus->children, node)
3231 		pcie_bus_configure_settings(child);
3232 
3233 	pci_lock_rescan_remove();
3234 	pci_bus_add_devices(bus);
3235 	pci_unlock_rescan_remove();
3236 
3237 	/*
3238 	 * Ensure pm_runtime_enable() is called for the controller drivers
3239 	 * before calling pci_host_probe(). The PM framework expects that
3240 	 * if the parent device supports runtime PM, it will be enabled
3241 	 * before child runtime PM is enabled.
3242 	 */
3243 	pm_runtime_set_active(&bridge->dev);
3244 	pm_runtime_no_callbacks(&bridge->dev);
3245 	devm_pm_runtime_enable(&bridge->dev);
3246 
3247 	return 0;
3248 }
3249 EXPORT_SYMBOL_GPL(pci_host_probe);
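
/*
 * A minimal sketch of how a native host controller driver is expected to
 * use pci_host_probe(); everything prefixed my_ is hypothetical:
 *
 *	static int my_pcie_probe(struct platform_device *pdev)
 *	{
 *		struct pci_host_bridge *bridge;
 *		struct my_pcie *priv;
 *
 *		bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*priv));
 *		if (!bridge)
 *			return -ENOMEM;
 *
 *		priv = pci_host_bridge_priv(bridge);
 *		... map registers, bring up the link, fill bridge->sysdata ...
 *		bridge->ops = &my_pcie_ops;
 *
 *		return pci_host_probe(bridge);
 *	}
 */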
3250 
3251 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
3252 {
3253 	struct resource *res = &b->busn_res;
3254 	struct resource *parent_res, *conflict;
3255 
3256 	res->start = bus;
3257 	res->end = bus_max;
3258 	res->flags = IORESOURCE_BUS;
3259 
3260 	if (!pci_is_root_bus(b))
3261 		parent_res = &b->parent->busn_res;
3262 	else {
3263 		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
3264 		res->flags |= IORESOURCE_PCI_FIXED;
3265 	}
3266 
3267 	conflict = request_resource_conflict(parent_res, res);
3268 
3269 	if (conflict)
3270 		dev_info(&b->dev,
3271 			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
3272 			    res, pci_is_root_bus(b) ? "domain " : "",
3273 			    parent_res, conflict->name, conflict);
3274 
3275 	return conflict == NULL;
3276 }
3277 
3278 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
3279 {
3280 	struct resource *res = &b->busn_res;
3281 	struct resource old_res = *res;
3282 	resource_size_t size;
3283 	int ret;
3284 
3285 	if (res->start > bus_max)
3286 		return -EINVAL;
3287 
3288 	size = bus_max - res->start + 1;
3289 	ret = adjust_resource(res, res->start, size);
3290 	dev_info(&b->dev, "busn_res: %pR end %s updated to %02x\n",
3291 			&old_res, ret ? "can not be" : "is", bus_max);
3292 
3293 	if (!ret && !res->parent)
3294 		pci_bus_insert_busn_res(b, res->start, res->end);
3295 
3296 	return ret;
3297 }
3298 
3299 void pci_bus_release_busn_res(struct pci_bus *b)
3300 {
3301 	struct resource *res = &b->busn_res;
3302 	int ret;
3303 
3304 	if (!res->flags || !res->parent)
3305 		return;
3306 
3307 	ret = release_resource(res);
3308 	dev_info(&b->dev, "busn_res: %pR %s released\n",
3309 			res, ret ? "can not be" : "is");
3310 }
3311 
3312 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge)
3313 {
3314 	struct resource_entry *window;
3315 	bool found = false;
3316 	struct pci_bus *b;
3317 	int max, bus, ret;
3318 
3319 	if (!bridge)
3320 		return -EINVAL;
3321 
3322 	resource_list_for_each_entry(window, &bridge->windows)
3323 		if (window->res->flags & IORESOURCE_BUS) {
3324 			bridge->busnr = window->res->start;
3325 			found = true;
3326 			break;
3327 		}
3328 
3329 	ret = pci_register_host_bridge(bridge);
3330 	if (ret < 0)
3331 		return ret;
3332 
3333 	b = bridge->bus;
3334 	bus = bridge->busnr;
3335 
3336 	if (!found) {
3337 		dev_info(&b->dev,
3338 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
3339 			bus);
3340 		pci_bus_insert_busn_res(b, bus, 255);
3341 	}
3342 
3343 	max = pci_scan_child_bus(b);
3344 
3345 	if (!found)
3346 		pci_bus_update_busn_res_end(b, max);
3347 
3348 	return 0;
3349 }
3350 EXPORT_SYMBOL(pci_scan_root_bus_bridge);
3351 
3352 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
3353 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
3354 {
3355 	struct resource_entry *window;
3356 	bool found = false;
3357 	struct pci_bus *b;
3358 	int max;
3359 
3360 	resource_list_for_each_entry(window, resources)
3361 		if (window->res->flags & IORESOURCE_BUS) {
3362 			found = true;
3363 			break;
3364 		}
3365 
3366 	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
3367 	if (!b)
3368 		return NULL;
3369 
3370 	if (!found) {
3371 		dev_info(&b->dev,
3372 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
3373 			bus);
3374 		pci_bus_insert_busn_res(b, bus, 255);
3375 	}
3376 
3377 	max = pci_scan_child_bus(b);
3378 
3379 	if (!found)
3380 		pci_bus_update_busn_res_end(b, max);
3381 
3382 	return b;
3383 }
3384 EXPORT_SYMBOL(pci_scan_root_bus);
3385 
3386 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
3387 					void *sysdata)
3388 {
3389 	LIST_HEAD(resources);
3390 	struct pci_bus *b;
3391 
3392 	pci_add_resource(&resources, &ioport_resource);
3393 	pci_add_resource(&resources, &iomem_resource);
3394 	pci_add_resource(&resources, &busn_resource);
3395 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
3396 	if (b) {
3397 		pci_scan_child_bus(b);
3398 	} else {
3399 		pci_free_resource_list(&resources);
3400 	}
3401 	return b;
3402 }
3403 EXPORT_SYMBOL(pci_scan_bus);
3404 
3405 /**
3406  * pci_rescan_bus_bridge_resize - Scan a PCI bus for devices
3407  * @bridge: PCI bridge for the bus to scan
3408  *
3409  * Scan a PCI bus and child buses for new devices, add them,
3410  * and enable them, resizing the bridge MMIO/IO resources if necessary
3411  * and possible.  The caller must ensure the child devices are already
3412  * removed for resizing to occur.
3413  *
3414  * Returns the highest subordinate bus number discovered.
3415  */
3416 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
3417 {
3418 	unsigned int max;
3419 	struct pci_bus *bus = bridge->subordinate;
3420 
3421 	max = pci_scan_child_bus(bus);
3422 
3423 	pci_assign_unassigned_bridge_resources(bridge);
3424 
3425 	pci_bus_add_devices(bus);
3426 
3427 	return max;
3428 }
3429 
3430 /**
3431  * pci_rescan_bus - Scan a PCI bus for devices
3432  * @bus: PCI bus to scan
3433  *
3434  * Scan a PCI bus and child buses for new devices, add them,
3435  * and enable them.
3436  *
3437  * Returns the highest subordinate bus number discovered.
3438  */
3439 unsigned int pci_rescan_bus(struct pci_bus *bus)
3440 {
3441 	unsigned int max;
3442 
3443 	max = pci_scan_child_bus(bus);
3444 	pci_assign_unassigned_bus_resources(bus);
3445 	pci_bus_add_devices(bus);
3446 
3447 	return max;
3448 }
3449 EXPORT_SYMBOL_GPL(pci_rescan_bus);
3450 
3451 /*
3452  * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
3453  * routines should always be executed under this mutex.
3454  */
3455 static DEFINE_MUTEX(pci_rescan_remove_lock);
3456 
3457 void pci_lock_rescan_remove(void)
3458 {
3459 	mutex_lock(&pci_rescan_remove_lock);
3460 }
3461 EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
3462 
3463 void pci_unlock_rescan_remove(void)
3464 {
3465 	mutex_unlock(&pci_rescan_remove_lock);
3466 }
3467 EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
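
/*
 * A minimal usage sketch: rescan and removal paths are expected to wrap
 * the actual work in this lock, e.g.:
 *
 *	pci_lock_rescan_remove();
 *	pci_rescan_bus(bus);
 *	pci_unlock_rescan_remove();
 */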
3468 
3469 static int __init pci_sort_bf_cmp(const struct device *d_a,
3470 				  const struct device *d_b)
3471 {
3472 	const struct pci_dev *a = to_pci_dev(d_a);
3473 	const struct pci_dev *b = to_pci_dev(d_b);
3474 
3475 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
3476 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
3477 
3478 	if      (a->bus->number < b->bus->number) return -1;
3479 	else if (a->bus->number > b->bus->number) return  1;
3480 
3481 	if      (a->devfn < b->devfn) return -1;
3482 	else if (a->devfn > b->devfn) return  1;
3483 
3484 	return 0;
3485 }
3486 
3487 void __init pci_sort_breadthfirst(void)
3488 {
3489 	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
3490 }
3491 
3492 int pci_hp_add_bridge(struct pci_dev *dev)
3493 {
3494 	struct pci_bus *parent = dev->bus;
3495 	int busnr, start = parent->busn_res.start;
3496 	unsigned int available_buses = 0;
3497 	int end = parent->busn_res.end;
3498 
3499 	for (busnr = start; busnr <= end; busnr++) {
3500 		if (!pci_find_bus(pci_domain_nr(parent), busnr))
3501 			break;
3502 	}
3503 	if (busnr-- > end) {
3504 		pci_err(dev, "No bus number available for hot-added bridge\n");
3505 		return -1;
3506 	}
3507 
3508 	/* Scan bridges that are already configured */
3509 	busnr = pci_scan_bridge(parent, dev, busnr, 0);
3510 
3511 	/*
3512 	 * Distribute the available bus numbers between hotplug-capable
3513 	 * bridges to make extending the chain later possible.
3514 	 */
3515 	available_buses = end - busnr;
3516 
3517 	/* Scan bridges that need to be reconfigured */
3518 	pci_scan_bridge_extend(parent, dev, busnr, available_buses, 1);
3519 
3520 	if (!dev->subordinate)
3521 		return -1;
3522 
3523 	return 0;
3524 }
3525 EXPORT_SYMBOL_GPL(pci_hp_add_bridge);
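
/*
 * A minimal sketch of the expected hotplug flow, simplified from what
 * drivers such as pciehp do (error handling omitted):
 *
 *	pci_lock_rescan_remove();
 *	pci_scan_slot(bus, PCI_DEVFN(0, 0));
 *	for_each_pci_bridge(dev, bus)
 *		pci_hp_add_bridge(dev);
 *	pci_assign_unassigned_bridge_resources(bus->self);
 *	pcie_bus_configure_settings(bus);
 *	pci_bus_add_devices(bus);
 *	pci_unlock_rescan_remove();
 */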
3526