/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}

static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need to know if PCI is initialized.
 * Basically, we assume PCI is not initialized when there
 * is no device to be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/*
	 * Get the lowest of them to find the decode size, and from that
	 * the extent.
	 */
	size = (size & ~(size-1)) - 1;

	/*
	 * base == maxbase can be valid only if the BAR has already been
	 * programmed with all 1s.
	 */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
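
/*
 * Worked example (illustrative, not from this file): for a 1MB 32-bit
 * memory BAR, writing all 1s and reading back might yield
 * maxbase = 0xfff00000 with mask = 0xfffffff0.  Then
 * size = mask & maxbase = 0xfff00000, whose lowest set bit is 0x00100000,
 * so (size & ~(size-1)) - 1 = 0x000fffff: the extent of a 1MB decode,
 * matching the resource start/end convention used by __pci_read_base().
 */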

static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}
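
/*
 * Worked example (illustrative): a raw BAR value of 0x0000000c has bit 0
 * clear (memory space), bits 2:1 = 10b (PCI_BASE_ADDRESS_MEM_TYPE_64) and
 * bit 3 set (PCI_BASE_ADDRESS_MEM_PREFETCH), so decode_bar() returns the
 * low flag bits plus IORESOURCE_MEM | IORESOURCE_PREFETCH |
 * IORESOURCE_MEM_64.
 */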

#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * __pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l = 0, sz = 0, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		if (l & PCI_ROM_ADDRESS_ENABLE)
			res->flags |= IORESOURCE_ROM_ENABLE;
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;

fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}

static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	if (dev->non_compliant_bars)
		return;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
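
/*
 * Worked example (illustrative): with the default 4K granularity, an I/O
 * base register of 0x20 and limit register of 0x30 decode as
 * base = (0x20 & PCI_IO_RANGE_MASK) << 8 = 0x2000 and limit = 0x3000, so
 * the bridge forwards [io 0x2000-0x3fff] (limit + io_granularity - 1).
 */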

static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	if (base != base64) {
		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}

static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	if (parent)
		b->domain_nr = parent->domain_nr;
#endif
	return b;
}

static void devm_pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);
}

static void pci_release_host_bridge_dev(struct device *dev)
{
	devm_pci_release_host_bridge_dev(dev);
	pci_free_host_bridge(to_pci_host_bridge(dev));
}

struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->dev.release = pci_release_host_bridge_dev;

	return bridge;
}
EXPORT_SYMBOL(pci_alloc_host_bridge);

struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
						   size_t priv)
{
	struct pci_host_bridge *bridge;

	bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->dev.release = devm_pci_release_host_bridge_dev;

	return bridge;
}
EXPORT_SYMBOL(devm_pci_alloc_host_bridge);
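
/*
 * Illustrative sketch of the intended usage (hypothetical host controller
 * probe; "example_pcie" and "example_pcie_ops" are made-up names, and the
 * register mapping and window parsing are elided):
 */
#if 0
static int example_pcie_probe(struct platform_device *pdev)
{
	struct pci_host_bridge *bridge;
	struct example_pcie *pcie;

	/* Allocate the bridge together with driver-private data */
	bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);	/* the priv area */

	/* ... map registers and fill bridge->windows from DT/ACPI ... */

	bridge->dev.parent = &pdev->dev;
	bridge->sysdata = pcie;
	bridge->ops = &example_pcie_ops;

	return pci_scan_root_bus_bridge(bridge);
}
#endif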

void pci_free_host_bridge(struct pci_host_bridge *bridge)
{
	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}
EXPORT_SYMBOL(pci_free_host_bridge);

static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
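
/*
 * Worked example (illustrative): a Link Status value of 0x1043 has
 * PCI_EXP_LNKSTA_CLS (bits 3:0) equal to 3, so pcie_update_link_speed()
 * records pcie_link_speed[3] == PCIE_SPEED_8_0GT as cur_bus_speed.
 */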

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

 out:
	return agp_speeds[index];
}

static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}

static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}

static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}

static int pci_register_host_bridge(struct pci_host_bridge *bridge)
{
	struct device *parent = bridge->dev.parent;
	struct resource_entry *window, *n;
	struct pci_bus *bus, *b;
	resource_size_t offset;
	LIST_HEAD(resources);
	struct resource *res;
	char addr[64], *fmt;
	const char *name;
	int err;

	bus = pci_alloc_bus(NULL);
	if (!bus)
		return -ENOMEM;

	bridge->bus = bus;

	/* temporarily move resources off the list */
	list_splice_init(&bridge->windows, &resources);
	bus->sysdata = bridge->sysdata;
	bus->msi = bridge->msi;
	bus->ops = bridge->ops;
	bus->number = bus->busn_res.start = bridge->busnr;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
#endif

	b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
	if (b) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b->dev, "bus already known\n");
		err = -EEXIST;
		goto free;
	}

	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
		     bridge->busnr);

	err = pcibios_root_bridge_prepare(bridge);
	if (err)
		goto free;

	err = device_register(&bridge->dev);
	if (err)
		put_device(&bridge->dev);

	bus->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(bus->bridge);
	pci_set_bus_of_node(bus);
	pci_set_bus_msi_domain(bus);

	if (!parent)
		set_dev_node(bus->bridge, pcibus_to_node(bus));

	bus->dev.class = &pcibus_class;
	bus->dev.parent = bus->bridge;

	dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
	name = dev_name(&bus->dev);

	err = device_register(&bus->dev);
	if (err)
		goto unregister;

	pcibios_add_bus(bus);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(bus);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", name);
	else
		pr_info("PCI host bridge to bus %s\n", name);

	/* Add initial resources to the bus */
	resource_list_for_each_entry_safe(window, n, &resources) {
		list_move_tail(&window->node, &bridge->windows);
		offset = window->offset;
		res = window->res;

		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(bus, bus->number, res->end);
		else
			pci_bus_add_resource(bus, res, 0);

		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";

			snprintf(addr, sizeof(addr), fmt,
				 (unsigned long long)(res->start - offset),
				 (unsigned long long)(res->end - offset));
		} else
			addr[0] = '\0';

		dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&bus->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return 0;

unregister:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);

free:
	kfree(bus);
	return err;
}

static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent.
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/*
	 * Initialize some portions of the bus device, but don't register
	 * it now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}
EXPORT_SYMBOL(pci_add_new_bus);

static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable CRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}

static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
					      unsigned int available_buses);

/*
 * pci_scan_bridge_extend() - Scan buses behind a bridge
 * @bus: Parent bus the bridge is on
 * @dev: Bridge itself
 * @max: Starting subordinate number of buses behind this bridge
 * @available_buses: Total number of buses available for this bridge and
 *		     the devices below. After the minimal bus space has
 *		     been allocated the remaining buses will be
 *		     distributed equally between hotplug-capable bridges.
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 *        that need to be reconfigured).
 *
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
				  int max, unsigned int available_buses,
				  int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/*
	 * Make sure the bridge is powered on to be able to access config
	 * space of devices below it.
	 */
	pm_runtime_get_sync(&dev->dev);

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/*
	 * Disable MasterAbortMode during probing to avoid reporting of
	 * bus errors (in some architectures).
	 */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	pci_enable_crs(dev);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we are
		 * rescanning the bus or the bus is reachable through more than
		 * one bridge. The second case can happen with the i450NX
		 * chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > subordinate)
			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);
		/* subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)
				/*
				 * Temporarily disable forwarding of the
				 * configuration cycles on all bridges in
				 * this bus segment to avoid possible
				 * conflicts in the second pass between two
				 * bridges programmed with overlapping bus
				 * ranges.
				 */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/*
		 * Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus.
		 */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, max+1);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max+1,
						bus->busn_res.end);
		}
		max++;
		if (available_buses)
			available_buses--;

		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->busn_res.start)   <<  8)
		      | ((unsigned int)(child->busn_res.end) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus_extend(child, available_buses);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
							max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
				&child->busn_res,
				(bus->number > child->busn_res.end &&
				 bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				bus->self->transparent ? " transparent" : "",
				dev_name(&bus->dev),
				&bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	pm_runtime_put(&dev->dev);

	return max;
}

/*
 * pci_scan_bridge() - Scan buses behind a bridge
 * @bus: Parent bus the bridge is on
 * @dev: Bridge itself
 * @max: Starting subordinate number of buses behind this bridge
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 *        that need to be reconfigured).
 *
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	return pci_scan_bridge_extend(bus, dev, max, 0, pass);
}
EXPORT_SYMBOL(pci_scan_bridge);
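
/*
 * Illustrative sketch (an assumption for illustration, not a verbatim
 * copy): a caller typically drives the two passes the way
 * pci_scan_child_bus_extend() does, walking the bus twice so that
 * firmware-configured bridges are noted before any renumbering happens:
 */
#if 0
static unsigned int example_scan_bridges(struct pci_bus *bus)
{
	struct pci_dev *dev;
	unsigned int max = bus->busn_res.start;
	int pass;

	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list)
			if (pci_is_bridge(dev))
				max = pci_scan_bridge(bus, dev, max, pass);

	return max;
}
#endif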

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;
	int type;
	struct pci_dev *parent;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;

	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;

	/*
	 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
	 * of a Link.  No PCIe component has two Links.  Two Links are
	 * connected by a Switch that has a Port on each Link and internal
	 * logic to connect the two Ports.
	 */
	type = pci_pcie_type(pdev);
	if (type == PCI_EXP_TYPE_ROOT_PORT ||
	    type == PCI_EXP_TYPE_PCIE_BRIDGE)
		pdev->has_secondary_link = 1;
	else if (type == PCI_EXP_TYPE_UPSTREAM ||
		 type == PCI_EXP_TYPE_DOWNSTREAM) {
		parent = pci_upstream_bridge(pdev);

		/*
		 * Usually there's an upstream device (Root Port or Switch
		 * Downstream Port), but we can't assume one exists.
		 */
		if (parent && !parent->has_secondary_link)
			pdev->has_secondary_link = 1;
	}
}

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

static void set_pcie_thunderbolt(struct pci_dev *dev)
{
	int vsec = 0;
	u32 header;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						    PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);

		/* Is the device part of a Thunderbolt controller? */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) {
			dev->is_thunderbolt = 1;
			return;
		}
	}
}

/**
 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
 * @dev: PCI device
 *
 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
 * when forwarding a type1 configuration request the bridge must check that
 * the extended register address field is zero.  The bridge is not permitted
 * to forward the transaction and must handle it as an Unsupported Request.
 * Some bridges do not follow this rule and simply drop the extended register
 * bits, resulting in the standard config space being aliased, every 256
 * bytes across the entire configuration space.  Test for this condition by
 * comparing the first dword of each potential alias to the vendor/device ID.
 * Known offenders:
 *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
 *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
 */
static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_QUIRKS
	int pos;
	u32 header, tmp;

	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);

	for (pos = PCI_CFG_SPACE_SIZE;
	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
		    || header != tmp)
			return false;
	}

	return true;
#else
	return false;
#endif
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
static int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		return PCI_CFG_SPACE_SIZE;
	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
		return PCI_CFG_SPACE_SIZE;

	return PCI_CFG_SPACE_EXP_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (pci_is_pcie(dev))
		return pci_cfg_space_size_ext(dev);

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return PCI_CFG_SPACE_SIZE;

	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
		return pci_cfg_space_size_ext(dev);

	return PCI_CFG_SPACE_SIZE;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

static void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
	/*
	 * Disable the MSI hardware to avoid screaming interrupts
	 * during boot.  This is the power-on reset default, so
	 * usually this should be a no-op.
	 */
	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (dev->msi_cap)
		pci_msi_set_enable(dev, 0);

	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (dev->msix_cap)
		pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
}

/**
 * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
 * @dev: PCI device
 *
 * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev.  Check this
 * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
 */
static int pci_intx_mask_broken(struct pci_dev *dev)
{
	u16 orig, toggle, new;

	pci_read_config_word(dev, PCI_COMMAND, &orig);
	toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(dev, PCI_COMMAND, toggle);
	pci_read_config_word(dev, PCI_COMMAND, &new);

	pci_write_config_word(dev, PCI_COMMAND, orig);

	/*
	 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
	 * r2.3, so strictly speaking, a device is not *broken* if it's not
	 * writable.  But we'll live with the misnomer for now.
	 */
	if (new != toggle)
		return 1;
	return 0;
}

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u16 cmd;
	u8 hdr_type;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);
	/*
	 * Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	 * set this higher, assuming the system even supports it.
	 */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* need to have dev->cfg_size ready */
	set_pcie_thunderbolt(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	if (dev->non_compliant_bars) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
			cmd &= ~PCI_COMMAND_IO;
			cmd &= ~PCI_COMMAND_MEMORY;
			pci_write_config_word(dev, PCI_COMMAND, cmd);
		}
	}

	dev->broken_intx_masking = pci_intx_mask_broken(dev);

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/*
		 * The PCI-to-PCI bridge spec requires that subtractive
		 * decoding (i.e. transparent) bridges have a programming
		 * interface code of 0x01.
		 */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED << 8;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

static void pci_configure_mps(struct pci_dev *dev)
{
	struct pci_dev *bridge = pci_upstream_bridge(dev);
	int mps, p_mps, rc;

	if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
		return;

	mps = pcie_get_mps(dev);
	p_mps = pcie_get_mps(bridge);

	if (mps == p_mps)
		return;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 mps, pci_name(bridge), p_mps);
		return;
	}

	/*
	 * Fancier MPS configuration is done later by
	 * pcie_bus_configure_settings()
	 */
	if (pcie_bus_config != PCIE_BUS_DEFAULT)
		return;

	rc = pcie_set_mps(dev, p_mps);
	if (rc) {
		dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 p_mps);
		return;
	}

	dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
		 p_mps, mps, 128 << dev->pcie_mpss);
}

static struct hpp_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};

static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
{
	u16 pci_cmd, pci_bctl;

	if (!hpp)
		hpp = &pci_default_type0;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev,
			 "PCI settings rev %d not supported; using defaults\n",
			 hpp->revision);
		hpp = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpp->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpp->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpp->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpp->enable_serr)
			pci_bctl |= PCI_BRIDGE_CTL_SERR;
		if (hpp->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}

static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
{
	int pos;

	if (!hpp)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return;

	dev_warn(&dev->dev, "PCI-X settings not supported\n");
}

static bool pcie_root_rcb_set(struct pci_dev *dev)
{
	struct pci_dev *rp = pcie_find_root_port(dev);
	u16 lnkctl;

	if (!rp)
		return false;

	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
	if (lnkctl & PCI_EXP_LNKCTL_RCB)
		return true;

	return false;
}

static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u32 reg32;

	if (!hpp)
		return;

	if (!pci_is_pcie(dev))
		return;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ;
	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
	/* Don't enable ECRC generation or checking if unsupported */
	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}
1789 
1790 int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
1791 {
1792 	struct pci_host_bridge *host;
1793 	u32 cap;
1794 	u16 ctl;
1795 	int ret;
1796 
1797 	if (!pci_is_pcie(dev))
1798 		return 0;
1799 
1800 	ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
1801 	if (ret)
1802 		return 0;
1803 
1804 	if (!(cap & PCI_EXP_DEVCAP_EXT_TAG))
1805 		return 0;
1806 
1807 	ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
1808 	if (ret)
1809 		return 0;
1810 
1811 	host = pci_find_host_bridge(dev->bus);
1812 	if (!host)
1813 		return 0;
1814 
1815 	/*
1816 	 * If some device in the hierarchy doesn't handle Extended Tags
1817 	 * correctly, make sure they're disabled.
1818 	 */
1819 	if (host->no_ext_tags) {
1820 		if (ctl & PCI_EXP_DEVCTL_EXT_TAG) {
1821 			dev_info(&dev->dev, "disabling Extended Tags\n");
1822 			pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1823 						   PCI_EXP_DEVCTL_EXT_TAG);
1824 		}
1825 		return 0;
1826 	}
1827 
1828 	if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) {
1829 		dev_info(&dev->dev, "enabling Extended Tags\n");
1830 		pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
1831 					 PCI_EXP_DEVCTL_EXT_TAG);
1832 	}
1833 	return 0;
1834 }
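
/*
 * pci_configure_extended_tags() deliberately takes an unused void *
 * argument so it also fits the pci_walk_bus() callback signature.  A
 * minimal usage sketch (assuming "bus" is an already-scanned
 * struct pci_bus *):
 *
 *	pci_walk_bus(bus, pci_configure_extended_tags, NULL);
 */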
1835 
1836 /**
1837  * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
1838  * @dev: PCI device to query
1839  *
1840  * Returns true if the device has enabled relaxed ordering attribute.
1841  */
1842 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
1843 {
1844 	u16 v;
1845 
1846 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
1847 
1848 	return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
1849 }
1850 EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
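
/*
 * A minimal driver-side sketch for the helper above, assuming "pdev" is
 * the driver's struct pci_dev and enable_ro_dma() is a hypothetical
 * device-specific helper:
 *
 *	if (pcie_relaxed_ordering_enabled(pdev))
 *		enable_ro_dma(pdev);
 */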
1851 
1852 static void pci_configure_relaxed_ordering(struct pci_dev *dev)
1853 {
1854 	struct pci_dev *root;
1855 
1856 	/* PCI_EXP_DEVCTL_RELAX_EN is RsvdP in VFs */
1857 	if (dev->is_virtfn)
1858 		return;
1859 
1860 	if (!pcie_relaxed_ordering_enabled(dev))
1861 		return;
1862 
1863 	/*
1864 	 * For now, we only deal with Relaxed Ordering issues with Root
1865 	 * Ports. Peer-to-Peer DMA is another can of worms.
1866 	 */
1867 	root = pci_find_pcie_root_port(dev);
1868 	if (!root)
1869 		return;
1870 
1871 	if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
1872 		pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1873 					   PCI_EXP_DEVCTL_RELAX_EN);
1874 		dev_info(&dev->dev, "Disabling Relaxed Ordering because the Root Port does not support it\n");
1875 	}
1876 }
1877 
1878 static void pci_configure_device(struct pci_dev *dev)
1879 {
1880 	struct hotplug_params hpp;
1881 	int ret;
1882 
1883 	pci_configure_mps(dev);
1884 	pci_configure_extended_tags(dev, NULL);
1885 	pci_configure_relaxed_ordering(dev);
1886 
1887 	memset(&hpp, 0, sizeof(hpp));
1888 	ret = pci_get_hp_params(dev, &hpp);
1889 	if (ret)
1890 		return;
1891 
1892 	program_hpp_type2(dev, hpp.t2);
1893 	program_hpp_type1(dev, hpp.t1);
1894 	program_hpp_type0(dev, hpp.t0);
1895 }
1896 
1897 static void pci_release_capabilities(struct pci_dev *dev)
1898 {
1899 	pci_vpd_release(dev);
1900 	pci_iov_release(dev);
1901 	pci_free_cap_save_buffers(dev);
1902 }
1903 
1904 /**
1905  * pci_release_dev - free a PCI device structure when all users of it are finished.
1906  * @dev: device that's been disconnected
1907  *
1908  * Will be called only by the device core when all users of this PCI device are
1909  * done.
1910  */
1911 static void pci_release_dev(struct device *dev)
1912 {
1913 	struct pci_dev *pci_dev;
1914 
1915 	pci_dev = to_pci_dev(dev);
1916 	pci_release_capabilities(pci_dev);
1917 	pci_release_of_node(pci_dev);
1918 	pcibios_release_device(pci_dev);
1919 	pci_bus_put(pci_dev->bus);
1920 	kfree(pci_dev->driver_override);
1921 	kfree(pci_dev->dma_alias_mask);
1922 	kfree(pci_dev);
1923 }
1924 
1925 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1926 {
1927 	struct pci_dev *dev;
1928 
1929 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1930 	if (!dev)
1931 		return NULL;
1932 
1933 	INIT_LIST_HEAD(&dev->bus_list);
1934 	dev->dev.type = &pci_dev_type;
1935 	dev->bus = pci_bus_get(bus);
1936 
1937 	return dev;
1938 }
1939 EXPORT_SYMBOL(pci_alloc_dev);
1940 
1941 static bool pci_bus_crs_vendor_id(u32 l)
1942 {
1943 	return (l & 0xffff) == 0x0001;
1944 }
1945 
1946 static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
1947 			     int timeout)
1948 {
1949 	int delay = 1;
1950 
1951 	if (!pci_bus_crs_vendor_id(*l))
1952 		return true;	/* not a CRS completion */
1953 
1954 	if (!timeout)
1955 		return false;	/* CRS, but caller doesn't want to wait */
1956 
1957 	/*
1958 	 * We got the reserved Vendor ID that indicates a completion with
1959 	 * Configuration Request Retry Status (CRS).  Retry until we get a
1960 	 * valid Vendor ID or we time out.
1961 	 */
1962 	while (pci_bus_crs_vendor_id(*l)) {
1963 		if (delay > timeout) {
1964 			pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
1965 				pci_domain_nr(bus), bus->number,
1966 				PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
1967 
1968 			return false;
1969 		}
1970 		if (delay >= 1000)
1971 			pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n",
1972 				pci_domain_nr(bus), bus->number,
1973 				PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
1974 
1975 		msleep(delay);
1976 		delay *= 2;
1977 
1978 		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1979 			return false;
1980 	}
1981 
1982 	if (delay >= 1000)
1983 		pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n",
1984 			pci_domain_nr(bus), bus->number,
1985 			PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
1986 
1987 	return true;
1988 }
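
/*
 * Worked example of the backoff above: the loop sleeps 1, 2, 4, ... ms,
 * doubling each time, and gives up once the next delay would exceed
 * @timeout.  With timeout = 60000 (the 60 s pci_scan_device() passes
 * below), the last sleep is 32768 ms, for a total of 65535 ms slept
 * before the device is reported as not ready.
 */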
1989 
1990 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1991 				int timeout)
1992 {
1993 	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1994 		return false;
1995 
1996 	/* some broken boards return 0 or ~0 if a slot is empty: */
1997 	if (*l == 0xffffffff || *l == 0x00000000 ||
1998 	    *l == 0x0000ffff || *l == 0xffff0000)
1999 		return false;
2000 
2001 	if (pci_bus_crs_vendor_id(*l))
2002 		return pci_bus_wait_crs(bus, devfn, l, timeout);
2003 
2004 	return true;
2005 }
2006 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
2007 
2008 /*
2009  * Read the config data for a PCI device, sanity-check it
2010  * and fill in the dev structure...
2011  */
2012 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
2013 {
2014 	struct pci_dev *dev;
2015 	u32 l;
2016 
2017 	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
2018 		return NULL;
2019 
2020 	dev = pci_alloc_dev(bus);
2021 	if (!dev)
2022 		return NULL;
2023 
2024 	dev->devfn = devfn;
2025 	dev->vendor = l & 0xffff;
2026 	dev->device = (l >> 16) & 0xffff;
2027 
2028 	pci_set_of_node(dev);
2029 
2030 	if (pci_setup_device(dev)) {
2031 		pci_bus_put(dev->bus);
2032 		kfree(dev);
2033 		return NULL;
2034 	}
2035 
2036 	return dev;
2037 }
2038 
2039 static void pci_init_capabilities(struct pci_dev *dev)
2040 {
2041 	/* Enhanced Allocation */
2042 	pci_ea_init(dev);
2043 
2044 	/* Set up MSI capabilities & disable MSI/MSI-X interrupts */
2045 	pci_msi_setup_pci_dev(dev);
2046 
2047 	/* Buffers for saving PCIe and PCI-X capabilities */
2048 	pci_allocate_cap_save_buffers(dev);
2049 
2050 	/* Power Management */
2051 	pci_pm_init(dev);
2052 
2053 	/* Vital Product Data */
2054 	pci_vpd_init(dev);
2055 
2056 	/* Alternative Routing-ID Forwarding */
2057 	pci_configure_ari(dev);
2058 
2059 	/* Single Root I/O Virtualization */
2060 	pci_iov_init(dev);
2061 
2062 	/* Address Translation Services */
2063 	pci_ats_init(dev);
2064 
2065 	/* Enable ACS P2P upstream forwarding */
2066 	pci_enable_acs(dev);
2067 
2068 	/* Precision Time Measurement */
2069 	pci_ptm_init(dev);
2070 
2071 	/* Advanced Error Reporting */
2072 	pci_aer_init(dev);
2073 }
2074 
2075 /*
2076  * This is the equivalent of pci_host_bridge_msi_domain that acts on
2077  * devices. Firmware interfaces that can select the MSI domain on a
2078  * per-device basis should be called from here.
2079  */
2080 static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
2081 {
2082 	struct irq_domain *d;
2083 
2084 	/*
2085 	 * If a domain has been set through the pcibios_add_device
2086 	 * callback, then this is the one (platform code knows best).
2087 	 */
2088 	d = dev_get_msi_domain(&dev->dev);
2089 	if (d)
2090 		return d;
2091 
2092 	/*
2093 	 * Let's see if we have a firmware interface able to provide
2094 	 * the domain.
2095 	 */
2096 	d = pci_msi_get_device_domain(dev);
2097 	if (d)
2098 		return d;
2099 
2100 	return NULL;
2101 }
2102 
2103 static void pci_set_msi_domain(struct pci_dev *dev)
2104 {
2105 	struct irq_domain *d;
2106 
2107 	/*
2108 	 * If the platform or firmware interfaces cannot supply a
2109 	 * device-specific MSI domain, then inherit the default domain
2110 	 * from the host bridge itself.
2111 	 */
2112 	d = pci_dev_msi_domain(dev);
2113 	if (!d)
2114 		d = dev_get_msi_domain(&dev->bus->dev);
2115 
2116 	dev_set_msi_domain(&dev->dev, d);
2117 }
2118 
2119 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
2120 {
2121 	int ret;
2122 
2123 	pci_configure_device(dev);
2124 
2125 	device_initialize(&dev->dev);
2126 	dev->dev.release = pci_release_dev;
2127 
2128 	set_dev_node(&dev->dev, pcibus_to_node(bus));
2129 	dev->dev.dma_mask = &dev->dma_mask;
2130 	dev->dev.dma_parms = &dev->dma_parms;
2131 	dev->dev.coherent_dma_mask = 0xffffffffull;
2132 
2133 	pci_set_dma_max_seg_size(dev, 65536);
2134 	pci_set_dma_seg_boundary(dev, 0xffffffff);
2135 
2136 	/* Fix up broken headers */
2137 	pci_fixup_device(pci_fixup_header, dev);
2138 
2139 	/* moved out from quirk header fixup code */
2140 	pci_reassigndev_resource_alignment(dev);
2141 
2142 	/* Clear the state_saved flag. */
2143 	dev->state_saved = false;
2144 
2145 	/* Initialize various capabilities */
2146 	pci_init_capabilities(dev);
2147 
2148 	/*
2149 	 * Add the device to our list of discovered devices
2150 	 * and the bus list for fixup functions, etc.
2151 	 */
2152 	down_write(&pci_bus_sem);
2153 	list_add_tail(&dev->bus_list, &bus->devices);
2154 	up_write(&pci_bus_sem);
2155 
2156 	ret = pcibios_add_device(dev);
2157 	WARN_ON(ret < 0);
2158 
2159 	/* Set up the MSI IRQ domain */
2160 	pci_set_msi_domain(dev);
2161 
2162 	/* Notifier could use PCI capabilities */
2163 	dev->match_driver = false;
2164 	ret = device_add(&dev->dev);
2165 	WARN_ON(ret < 0);
2166 }
2167 
2168 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
2169 {
2170 	struct pci_dev *dev;
2171 
2172 	dev = pci_get_slot(bus, devfn);
2173 	if (dev) {
2174 		pci_dev_put(dev);
2175 		return dev;
2176 	}
2177 
2178 	dev = pci_scan_device(bus, devfn);
2179 	if (!dev)
2180 		return NULL;
2181 
2182 	pci_device_add(dev, bus);
2183 
2184 	return dev;
2185 }
2186 EXPORT_SYMBOL(pci_scan_single_device);
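
/*
 * A minimal hotplug-style usage sketch for pci_scan_single_device()
 * (device number 5 and "bus" are hypothetical): scan function 0 of the
 * slot, then make the device visible to drivers:
 *
 *	struct pci_dev *dev;
 *
 *	dev = pci_scan_single_device(bus, PCI_DEVFN(5, 0));
 *	if (dev)
 *		pci_bus_add_device(dev);
 */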
2187 
2188 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
2189 {
2190 	int pos;
2191 	u16 cap = 0;
2192 	unsigned next_fn;
2193 
2194 	if (pci_ari_enabled(bus)) {
2195 		if (!dev)
2196 			return 0;
2197 		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
2198 		if (!pos)
2199 			return 0;
2200 
2201 		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
2202 		next_fn = PCI_ARI_CAP_NFN(cap);
2203 		if (next_fn <= fn)
2204 			return 0;	/* protect against malformed list */
2205 
2206 		return next_fn;
2207 	}
2208 
2209 	/* dev may be NULL for non-contiguous multifunction devices */
2210 	if (!dev || dev->multifunction)
2211 		return (fn + 1) % 8;
2212 
2213 	return 0;
2214 }
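
/*
 * Examples of the traversal above: without ARI, functions are visited as
 * 0, 1, ..., 7, and the (fn + 1) % 8 wrap to 0 terminates the caller's
 * loop.  With ARI enabled, the Next Function Number field of each
 * function's ARI capability forms a chain, e.g. 0 -> 3 -> 10 -> 0, so up
 * to 256 sparsely-numbered functions can be reached on device 0.
 */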
2215 
2216 static int only_one_child(struct pci_bus *bus)
2217 {
2218 	struct pci_dev *parent = bus->self;
2219 
2220 	if (!parent || !pci_is_pcie(parent))
2221 		return 0;
2222 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
2223 		return 1;
2224 
2225 	/*
2226 	 * PCIe downstream ports are bridges that normally lead only to
2227 	 * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
2228 	 * possible devices, not just device 0.  See PCIe spec r3.0,
2229 	 * sec 7.3.1.
2230 	 */
2231 	if (parent->has_secondary_link &&
2232 	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
2233 		return 1;
2234 	return 0;
2235 }
2236 
2237 /**
2238  * pci_scan_slot - scan a PCI slot on a bus for devices.
2239  * @bus: PCI bus to scan
2240  * @devfn: slot number to scan (function number must be 0)
2241  *
2242  * Scan a PCI slot on the specified PCI bus for devices, adding
2243  * discovered devices to the @bus->devices list.  New devices
2244  * will not have is_added set.
2245  *
2246  * Returns the number of new devices found.
2247  */
2248 int pci_scan_slot(struct pci_bus *bus, int devfn)
2249 {
2250 	unsigned fn, nr = 0;
2251 	struct pci_dev *dev;
2252 
2253 	if (only_one_child(bus) && (devfn > 0))
2254 		return 0; /* Already scanned the entire slot */
2255 
2256 	dev = pci_scan_single_device(bus, devfn);
2257 	if (!dev)
2258 		return 0;
2259 	if (!dev->is_added)
2260 		nr++;
2261 
2262 	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
2263 		dev = pci_scan_single_device(bus, devfn + fn);
2264 		if (dev) {
2265 			if (!dev->is_added)
2266 				nr++;
2267 			dev->multifunction = 1;
2268 		}
2269 	}
2270 
2271 	/* Init ASPM on the bridge now that the slot below it is populated */
2272 	if (bus->self && nr)
2273 		pcie_aspm_init_link_state(bus->self);
2274 
2275 	return nr;
2276 }
2277 EXPORT_SYMBOL(pci_scan_slot);
2278 
2279 static int pcie_find_smpss(struct pci_dev *dev, void *data)
2280 {
2281 	u8 *smpss = data;
2282 
2283 	if (!pci_is_pcie(dev))
2284 		return 0;
2285 
2286 	/*
2287 	 * We don't have a way to change MPS settings on devices that have
2288 	 * drivers attached.  A hot-added device might support only the minimum
2289 	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
2290 	 * where devices may be hot-added, we limit the fabric MPS to 128 so
2291 	 * hot-added devices will work correctly.
2292 	 *
2293 	 * However, if we hot-add a device to a slot directly below a Root
2294 	 * Port, it's impossible for there to be other existing devices below
2295 	 * the port.  We don't limit the MPS in this case because we can
2296 	 * reconfigure MPS on both the Root Port and the hot-added device,
2297 	 * and there are no other devices involved.
2298 	 *
2299 	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
2300 	 */
2301 	if (dev->is_hotplug_bridge &&
2302 	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
2303 		*smpss = 0;
2304 
2305 	if (*smpss > dev->pcie_mpss)
2306 		*smpss = dev->pcie_mpss;
2307 
2308 	return 0;
2309 }
2310 
2311 static void pcie_write_mps(struct pci_dev *dev, int mps)
2312 {
2313 	int rc;
2314 
2315 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
2316 		mps = 128 << dev->pcie_mpss;
2317 
2318 		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
2319 		    dev->bus->self)
2320 			/* For "Performance", the assumption is made that
2321 			 * downstream communication will never be larger than
2322 			 * the MRRS.  So, the MPS only needs to be configured
2323 			 * for the upstream communication.  This being the case,
2324 			 * walk from the top down and set the MPS of the child
2325 			 * to that of the parent bus.
2326 			 *
2327 			 * Configure the device MPS with the smaller of the
2328 			 * device MPSS or the bridge MPS (which is assumed to be
2329 			 * properly configured at this point to the largest
2330 			 * allowable MPS based on its parent bus).
2331 			 */
2332 			mps = min(mps, pcie_get_mps(dev->bus->self));
2333 	}
2334 
2335 	rc = pcie_set_mps(dev, mps);
2336 	if (rc)
2337 		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
2338 }
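
/*
 * MPS encoding used above: pcie_mpss is the power-of-two exponent from
 * the Device Capabilities register, so the byte size is 128 << pcie_mpss.
 * For example, pcie_mpss == 2 means the device supports payloads up to
 * 128 << 2 = 512 bytes.
 */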
2339 
2340 static void pcie_write_mrrs(struct pci_dev *dev)
2341 {
2342 	int rc, mrrs;
2343 
2344 	/* In the "safe" case, do not configure the MRRS.  There appear to be
2345 	 * issues with setting MRRS to 0 on a number of devices.
2346 	 */
2347 	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2348 		return;
2349 
2350 	/* For Max performance, the MRRS must be set to the largest supported
2351 	 * value.  However, it cannot be configured larger than the MPS the
2352 	 * device or the bus can support.  This should already be properly
2353 	 * configured by a prior call to pcie_write_mps.
2354 	 */
2355 	mrrs = pcie_get_mps(dev);
2356 
2357 	/* MRRS is an R/W register.  Invalid values can be written, but a
2358 	 * subsequent read will verify if the value is acceptable or not.
2359 	 * If the MRRS value provided is not acceptable (e.g., too large),
2360 	 * shrink the value until it is acceptable to the HW.
2361 	 */
2362 	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
2363 		rc = pcie_set_readrq(dev, mrrs);
2364 		if (!rc)
2365 			break;
2366 
2367 		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
2368 		mrrs /= 2;
2369 	}
2370 
2371 	if (mrrs < 128)
2372 		dev_err(&dev->dev, "MRRS could not be set to a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
2373 }
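
/*
 * Example of the shrink loop above: if MPS was set to 512 but the
 * hardware rejects an MRRS of 512, pcie_set_readrq(dev, 512) fails, a
 * warning is printed, and the loop retries with 256, stopping as soon as
 * a value is accepted or the 128-byte floor is crossed.
 */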
2374 
2375 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
2376 {
2377 	int mps, orig_mps;
2378 
2379 	if (!pci_is_pcie(dev))
2380 		return 0;
2381 
2382 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
2383 	    pcie_bus_config == PCIE_BUS_DEFAULT)
2384 		return 0;
2385 
2386 	mps = 128 << *(u8 *)data;
2387 	orig_mps = pcie_get_mps(dev);
2388 
2389 	pcie_write_mps(dev, mps);
2390 	pcie_write_mrrs(dev);
2391 
2392 	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2393 		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
2394 		 orig_mps, pcie_get_readrq(dev));
2395 
2396 	return 0;
2397 }
2398 
2399 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
2400  * parents then children fashion.  If this changes, then this code will not
2401  * work as designed.
2402  */
2403 void pcie_bus_configure_settings(struct pci_bus *bus)
2404 {
2405 	u8 smpss = 0;
2406 
2407 	if (!bus->self)
2408 		return;
2409 
2410 	if (!pci_is_pcie(bus->self))
2411 		return;
2412 
2413 	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
2414 	 * to be aware of the MPS of the destination.  To work around this,
2415 	 * simply force the MPS of the entire system to the smallest possible.
2416 	 */
2417 	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2418 		smpss = 0;
2419 
2420 	if (pcie_bus_config == PCIE_BUS_SAFE) {
2421 		smpss = bus->self->pcie_mpss;
2422 
2423 		pcie_find_smpss(bus->self, &smpss);
2424 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
2425 	}
2426 
2427 	pcie_bus_configure_set(bus->self, &smpss);
2428 	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
2429 }
2430 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
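
/*
 * A minimal sketch of how callers apply this after scanning, mirroring
 * the pci_assign_unassigned_*() paths ("bus" is a hypothetical scanned
 * root bus):
 *
 *	struct pci_bus *child;
 *
 *	list_for_each_entry(child, &bus->children, node)
 *		pcie_bus_configure_settings(child);
 */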
2431 
2432 /*
2433  * Called after each bus is probed, but before its children are examined.  This
2434  * is marked as __weak because multiple architectures define it.
2435  */
2436 void __weak pcibios_fixup_bus(struct pci_bus *bus)
2437 {
2438 	/* nothing to do, expected to be removed in the future */
2439 }
2440 
2441 /**
2442  * pci_scan_child_bus_extend() - Scan devices below a bus
2443  * @bus: Bus to scan for devices
2444  * @available_buses: Total number of buses available (%0 does not try to
2445  *		     extend beyond the minimum needed)
2446  *
2447  * Scans devices below @bus including subordinate buses. Returns the
2448  * highest subordinate bus number found. Passing
2449  * @available_buses causes the remaining bus space to be distributed
2450  * equally between hotplug-capable bridges to allow future extension of the
2451  * hierarchy.
2452  */
2453 static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
2454 					      unsigned int available_buses)
2455 {
2456 	unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
2457 	unsigned int start = bus->busn_res.start;
2458 	unsigned int devfn, cmax, max = start;
2459 	struct pci_dev *dev;
2460 
2461 	dev_dbg(&bus->dev, "scanning bus\n");
2462 
2463 	/* Go find them, Rover! */
2464 	for (devfn = 0; devfn < 0x100; devfn += 8)
2465 		pci_scan_slot(bus, devfn);
2466 
2467 	/* Reserve buses for SR-IOV capability. */
2468 	used_buses = pci_iov_bus_range(bus);
2469 	max += used_buses;
2470 
2471 	/*
2472 	 * After performing arch-dependent fixup of the bus, look behind
2473 	 * all PCI-to-PCI bridges on this bus.
2474 	 */
2475 	if (!bus->is_added) {
2476 		dev_dbg(&bus->dev, "fixups for bus\n");
2477 		pcibios_fixup_bus(bus);
2478 		bus->is_added = 1;
2479 	}
2480 
2481 	/*
2482 	 * Calculate how many hotplug bridges and normal bridges there
2483 	 * are on this bus. We will distribute the additional available
2484 	 * buses between hotplug bridges.
2485 	 */
2486 	for_each_pci_bridge(dev, bus) {
2487 		if (dev->is_hotplug_bridge)
2488 			hotplug_bridges++;
2489 		else
2490 			normal_bridges++;
2491 	}
2492 
2493 	/*
2494 	 * Scan bridges that are already configured. We don't touch them
2495 	 * unless they are misconfigured (which will be done in the second
2496 	 * scan below).
2497 	 */
2498 	for_each_pci_bridge(dev, bus) {
2499 		cmax = max;
2500 		max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
2501 		used_buses += cmax - max;
2502 	}
2503 
2504 	/* Scan bridges that need to be reconfigured */
2505 	for_each_pci_bridge(dev, bus) {
2506 		unsigned int buses = 0;
2507 
2508 		if (!hotplug_bridges && normal_bridges == 1) {
2509 			/*
2510 			 * There is only one bridge on the bus (upstream
2511 			 * port) so it gets all available buses, which it
2512 			 * can then distribute to the possible hotplug
2513 			 * bridges below.
2514 			 */
2515 			buses = available_buses;
2516 		} else if (dev->is_hotplug_bridge) {
2517 			/*
2518 			 * Distribute the extra buses between hotplug
2519 			 * bridges if any.
2520 			 */
2521 			buses = available_buses / hotplug_bridges;
2522 			buses = min(buses, available_buses - used_buses);
2523 		}
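		/*
		 * Worked example (illustrative): with available_buses == 12
		 * and hotplug_bridges == 3, each hotplug bridge is offered
		 * 12 / 3 = 4 extra buses, clamped by the min() above so the
		 * total never exceeds what actually remains.
		 */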
2524 
2525 		cmax = max;
2526 		max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
2527 		used_buses += max - cmax;
2528 	}
2529 
2530 	/*
2531 	 * Make sure a hotplug bridge has at least the minimum requested
2532 	 * number of buses but allow it to grow up to the maximum available
2533 	 * bus number if there is room.
2534 	 */
2535 	if (bus->self && bus->self->is_hotplug_bridge) {
2536 		used_buses = max_t(unsigned int, available_buses,
2537 				   pci_hotplug_bus_size - 1);
2538 		if (max - start < used_buses) {
2539 			max = start + used_buses;
2540 
2541 			/* Do not allocate more buses than we have room left */
2542 			if (max > bus->busn_res.end)
2543 				max = bus->busn_res.end;
2544 
2545 			dev_dbg(&bus->dev, "%pR extended by %#02x\n",
2546 				&bus->busn_res, max - start);
2547 		}
2548 	}
2549 
2550 	/*
2551 	 * We've scanned the bus and so we know all about what's on
2552 	 * the other side of any bridges that may be on this bus plus
2553 	 * any devices.
2554 	 *
2555 	 * Return how far we've got finding sub-buses.
2556 	 */
2557 	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
2558 	return max;
2559 }
2560 
2561 /**
2562  * pci_scan_child_bus() - Scan devices below a bus
2563  * @bus: Bus to scan for devices
2564  *
2565  * Scans devices below @bus including subordinate buses. Returns the
2566  * highest subordinate bus number found.
2567  */
2568 unsigned int pci_scan_child_bus(struct pci_bus *bus)
2569 {
2570 	return pci_scan_child_bus_extend(bus, 0);
2571 }
2572 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2573 
2574 /**
2575  * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
2576  * @bridge: Host bridge to set up.
2577  *
2578  * Default empty implementation.  Replace with an architecture-specific setup
2579  * routine, if necessary.
2580  */
2581 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
2582 {
2583 	return 0;
2584 }
2585 
2586 void __weak pcibios_add_bus(struct pci_bus *bus)
2587 {
2588 }
2589 
2590 void __weak pcibios_remove_bus(struct pci_bus *bus)
2591 {
2592 }
2593 
2594 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
2595 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
2596 {
2597 	int error;
2598 	struct pci_host_bridge *bridge;
2599 
2600 	bridge = pci_alloc_host_bridge(0);
2601 	if (!bridge)
2602 		return NULL;
2603 
2604 	bridge->dev.parent = parent;
2605 
2606 	list_splice_init(resources, &bridge->windows);
2607 	bridge->sysdata = sysdata;
2608 	bridge->busnr = bus;
2609 	bridge->ops = ops;
2610 
2611 	error = pci_register_host_bridge(bridge);
2612 	if (error < 0)
2613 		goto err_out;
2614 
2615 	return bridge->bus;
2616 
2617 err_out:
2618 	kfree(bridge);
2619 	return NULL;
2620 }
2621 EXPORT_SYMBOL_GPL(pci_create_root_bus);
2622 
2623 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2624 {
2625 	struct resource *res = &b->busn_res;
2626 	struct resource *parent_res, *conflict;
2627 
2628 	res->start = bus;
2629 	res->end = bus_max;
2630 	res->flags = IORESOURCE_BUS;
2631 
2632 	if (!pci_is_root_bus(b))
2633 		parent_res = &b->parent->busn_res;
2634 	else {
2635 		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2636 		res->flags |= IORESOURCE_PCI_FIXED;
2637 	}
2638 
2639 	conflict = request_resource_conflict(parent_res, res);
2640 
2641 	if (conflict)
2642 		dev_printk(KERN_DEBUG, &b->dev,
2643 			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
2644 			    res, pci_is_root_bus(b) ? "domain " : "",
2645 			    parent_res, conflict->name, conflict);
2646 
2647 	return conflict == NULL;
2648 }
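
/*
 * Illustrative example of the resulting busn resource tree for a root
 * bus 00 in domain 0000 with one child bridge (numbers hypothetical):
 *
 *	[bus 00-ff]    domain resource from get_pci_domain_busn_res()
 *	  [bus 00-3f]  root bus busn_res, IORESOURCE_PCI_FIXED
 *	    [bus 01]   child bus busn_res, inserted under its parent bus
 */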
2649 
2650 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2651 {
2652 	struct resource *res = &b->busn_res;
2653 	struct resource old_res = *res;
2654 	resource_size_t size;
2655 	int ret;
2656 
2657 	if (res->start > bus_max)
2658 		return -EINVAL;
2659 
2660 	size = bus_max - res->start + 1;
2661 	ret = adjust_resource(res, res->start, size);
2662 	dev_printk(KERN_DEBUG, &b->dev,
2663 			"busn_res: %pR end %s updated to %02x\n",
2664 			&old_res, ret ? "cannot be" : "is", bus_max);
2665 
2666 	if (!ret && !res->parent)
2667 		pci_bus_insert_busn_res(b, res->start, res->end);
2668 
2669 	return ret;
2670 }
2671 
2672 void pci_bus_release_busn_res(struct pci_bus *b)
2673 {
2674 	struct resource *res = &b->busn_res;
2675 	int ret;
2676 
2677 	if (!res->flags || !res->parent)
2678 		return;
2679 
2680 	ret = release_resource(res);
2681 	dev_printk(KERN_DEBUG, &b->dev,
2682 			"busn_res: %pR %s released\n",
2683 			res, ret ? "cannot be" : "is");
2684 }
2685 
2686 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge)
2687 {
2688 	struct resource_entry *window;
2689 	bool found = false;
2690 	struct pci_bus *b;
2691 	int max, bus, ret;
2692 
2693 	if (!bridge)
2694 		return -EINVAL;
2695 
2696 	resource_list_for_each_entry(window, &bridge->windows)
2697 		if (window->res->flags & IORESOURCE_BUS) {
2698 			found = true;
2699 			break;
2700 		}
2701 
2702 	ret = pci_register_host_bridge(bridge);
2703 	if (ret < 0)
2704 		return ret;
2705 
2706 	b = bridge->bus;
2707 	bus = bridge->busnr;
2708 
2709 	if (!found) {
2710 		dev_info(&b->dev,
2711 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2712 			bus);
2713 		pci_bus_insert_busn_res(b, bus, 255);
2714 	}
2715 
2716 	max = pci_scan_child_bus(b);
2717 
2718 	if (!found)
2719 		pci_bus_update_busn_res_end(b, max);
2720 
2721 	return 0;
2722 }
2723 EXPORT_SYMBOL(pci_scan_root_bus_bridge);
2724 
2725 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2726 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
2727 {
2728 	struct resource_entry *window;
2729 	bool found = false;
2730 	struct pci_bus *b;
2731 	int max;
2732 
2733 	resource_list_for_each_entry(window, resources)
2734 		if (window->res->flags & IORESOURCE_BUS) {
2735 			found = true;
2736 			break;
2737 		}
2738 
2739 	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
2740 	if (!b)
2741 		return NULL;
2742 
2743 	if (!found) {
2744 		dev_info(&b->dev,
2745 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2746 			bus);
2747 		pci_bus_insert_busn_res(b, bus, 255);
2748 	}
2749 
2750 	max = pci_scan_child_bus(b);
2751 
2752 	if (!found)
2753 		pci_bus_update_busn_res_end(b, max);
2754 
2755 	return b;
2756 }
2757 EXPORT_SYMBOL(pci_scan_root_bus);
2758 
2759 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2760 					void *sysdata)
2761 {
2762 	LIST_HEAD(resources);
2763 	struct pci_bus *b;
2764 
2765 	pci_add_resource(&resources, &ioport_resource);
2766 	pci_add_resource(&resources, &iomem_resource);
2767 	pci_add_resource(&resources, &busn_resource);
2768 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2769 	if (b) {
2770 		pci_scan_child_bus(b);
2771 	} else {
2772 		pci_free_resource_list(&resources);
2773 	}
2774 	return b;
2775 }
2776 EXPORT_SYMBOL(pci_scan_bus);
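
/*
 * A legacy-style usage sketch for pci_scan_bus() ("my_pci_ops" is a
 * hypothetical set of config accessors):
 *
 *	struct pci_bus *b = pci_scan_bus(0, &my_pci_ops, NULL);
 *
 *	if (b)
 *		pci_bus_add_devices(b);
 */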
2777 
2778 /**
2779  * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2780  * @bridge: PCI bridge for the bus to scan
2781  *
2782  * Scan a PCI bus and child buses for new devices, add them,
2783  * and enable them, resizing bridge mmio/io resource if necessary
2784  * and possible.  The caller must ensure the child devices are already
2785  * removed for resizing to occur.
2786  *
2787  * Returns the highest subordinate bus number discovered.
2788  */
2789 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2790 {
2791 	unsigned int max;
2792 	struct pci_bus *bus = bridge->subordinate;
2793 
2794 	max = pci_scan_child_bus(bus);
2795 
2796 	pci_assign_unassigned_bridge_resources(bridge);
2797 
2798 	pci_bus_add_devices(bus);
2799 
2800 	return max;
2801 }
2802 
2803 /**
2804  * pci_rescan_bus - scan a PCI bus for devices.
2805  * @bus: PCI bus to scan
2806  *
2807  * Scan a PCI bus and child buses for new devices, add them,
2808  * and enable them.
2809  *
2810  * Returns the highest subordinate bus number discovered.
2811  */
2812 unsigned int pci_rescan_bus(struct pci_bus *bus)
2813 {
2814 	unsigned int max;
2815 
2816 	max = pci_scan_child_bus(bus);
2817 	pci_assign_unassigned_bus_resources(bus);
2818 	pci_bus_add_devices(bus);
2819 
2820 	return max;
2821 }
2822 EXPORT_SYMBOL_GPL(pci_rescan_bus);
2823 
2824 /*
2825  * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2826  * routines should always be executed under this mutex.
2827  */
2828 static DEFINE_MUTEX(pci_rescan_remove_lock);
2829 
2830 void pci_lock_rescan_remove(void)
2831 {
2832 	mutex_lock(&pci_rescan_remove_lock);
2833 }
2834 EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2835 
2836 void pci_unlock_rescan_remove(void)
2837 {
2838 	mutex_unlock(&pci_rescan_remove_lock);
2839 }
2840 EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
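
/*
 * A minimal sketch of the locking contract above, mirroring the sysfs
 * "rescan" attribute ("bus" is a hypothetical struct pci_bus *):
 *
 *	pci_lock_rescan_remove();
 *	pci_rescan_bus(bus);
 *	pci_unlock_rescan_remove();
 */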
2841 
2842 static int __init pci_sort_bf_cmp(const struct device *d_a,
2843 				  const struct device *d_b)
2844 {
2845 	const struct pci_dev *a = to_pci_dev(d_a);
2846 	const struct pci_dev *b = to_pci_dev(d_b);
2847 
2848 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2849 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
2850 
2851 	if      (a->bus->number < b->bus->number) return -1;
2852 	else if (a->bus->number > b->bus->number) return  1;
2853 
2854 	if      (a->devfn < b->devfn) return -1;
2855 	else if (a->devfn > b->devfn) return  1;
2856 
2857 	return 0;
2858 }
2859 
2860 void __init pci_sort_breadthfirst(void)
2861 {
2862 	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
2863 }
2864 
2865 int pci_hp_add_bridge(struct pci_dev *dev)
2866 {
2867 	struct pci_bus *parent = dev->bus;
2868 	int busnr, start = parent->busn_res.start;
2869 	unsigned int available_buses = 0;
2870 	int end = parent->busn_res.end;
2871 
2872 	for (busnr = start; busnr <= end; busnr++) {
2873 		if (!pci_find_bus(pci_domain_nr(parent), busnr))
2874 			break;
2875 	}
2876 	if (busnr-- > end) {
2877 		dev_err(&dev->dev, "No bus number available for hot-added bridge\n");
2878 		return -1;
2879 	}
2880 
2881 	/* Scan bridges that are already configured */
2882 	busnr = pci_scan_bridge(parent, dev, busnr, 0);
2883 
2884 	/*
2885 	 * Distribute the available bus numbers between hotplug-capable
2886 	 * bridges to make extending the chain later possible.
2887 	 */
2888 	available_buses = end - busnr;
2889 
2890 	/* Scan bridges that need to be reconfigured */
2891 	pci_scan_bridge_extend(parent, dev, busnr, available_buses, 1);
2892 
2893 	if (!dev->subordinate)
2894 		return -1;
2895 
2896 	return 0;
2897 }
2898 EXPORT_SYMBOL_GPL(pci_hp_add_bridge);
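
/*
 * A minimal hotplug-driver usage sketch for pci_hp_add_bridge(),
 * assuming "dev" is a freshly scanned device below a hotplug slot:
 *
 *	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
 *		pci_hp_add_bridge(dev);
 */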
2899