xref: /linux/drivers/pci/probe.c (revision f884ab15afdc5514e88105c92a4e2e1e6539869a)
1 /*
2  * probe.c - PCI detection and setup code
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include <linux/pci-aspm.h>
13 #include <asm-generic/pci-bridge.h>
14 #include "pci.h"
15 
16 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
17 #define CARDBUS_RESERVE_BUSNR	3
18 
19 struct resource busn_resource = {
20 	.name	= "PCI busn",
21 	.start	= 0,
22 	.end	= 255,
23 	.flags	= IORESOURCE_BUS,
24 };
25 
26 /* Ugh.  Need to stop exporting this to modules. */
27 LIST_HEAD(pci_root_buses);
28 EXPORT_SYMBOL(pci_root_buses);
29 
30 static LIST_HEAD(pci_domain_busn_res_list);
31 
32 struct pci_domain_busn_res {
33 	struct list_head list;
34 	struct resource res;
35 	int domain_nr;
36 };
37 
38 static struct resource *get_pci_domain_busn_res(int domain_nr)
39 {
40 	struct pci_domain_busn_res *r;
41 
42 	list_for_each_entry(r, &pci_domain_busn_res_list, list)
43 		if (r->domain_nr == domain_nr)
44 			return &r->res;
45 
46 	r = kzalloc(sizeof(*r), GFP_KERNEL);
47 	if (!r)
48 		return NULL;
49 
50 	r->domain_nr = domain_nr;
51 	r->res.start = 0;
52 	r->res.end = 0xff;
53 	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
54 
55 	list_add_tail(&r->list, &pci_domain_busn_res_list);
56 
57 	return &r->res;
58 }
59 
60 static int find_anything(struct device *dev, void *data)
61 {
62 	return 1;
63 }
64 
65 /*
66  * Some device drivers need to know if PCI has been initialized.
67  * Basically, we consider PCI uninitialized when there is no
68  * device to be found on the pci_bus_type bus.
69  */
70 int no_pci_devices(void)
71 {
72 	struct device *dev;
73 	int no_devices;
74 
75 	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
76 	no_devices = (dev == NULL);
77 	put_device(dev);
78 	return no_devices;
79 }
80 EXPORT_SYMBOL(no_pci_devices);
81 
82 /*
83  * PCI Bus Class
84  */
85 static void release_pcibus_dev(struct device *dev)
86 {
87 	struct pci_bus *pci_bus = to_pci_bus(dev);
88 
89 	if (pci_bus->bridge)
90 		put_device(pci_bus->bridge);
91 	pci_bus_remove_resources(pci_bus);
92 	pci_release_bus_of_node(pci_bus);
93 	kfree(pci_bus);
94 }
95 
96 static struct class pcibus_class = {
97 	.name		= "pci_bus",
98 	.dev_release	= &release_pcibus_dev,
99 	.dev_attrs	= pcibus_dev_attrs,
100 };
101 
102 static int __init pcibus_class_init(void)
103 {
104 	return class_register(&pcibus_class);
105 }
106 postcore_initcall(pcibus_class_init);
107 
108 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
109 {
110 	u64 size = mask & maxbase;	/* Find the significant bits */
111 	if (!size)
112 		return 0;
113 
114 	/* Get the lowest of them to find the decode size, and
115 	   from that the extent.  */
116 	size = (size & ~(size-1)) - 1;
117 
118 	/* base == maxbase can be valid only if the BAR has
119 	   already been programmed with all 1s.  */
120 	if (base == maxbase && ((base | size) & mask) != mask)
121 		return 0;
122 
123 	return size;
124 }
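
/*
 * Worked example (illustrative values only): for a 32-bit memory BAR,
 * writing all 1s and reading back maxbase = 0xfffff000 with
 * mask = 0xfffffff0 gives size = 0xfffff000; isolating the lowest set
 * bit yields 0x1000, so pci_size() returns 0xfff and the BAR decodes a
 * 4K window [base, base + 0xfff].
 */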
125 
126 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
127 {
128 	u32 mem_type;
129 	unsigned long flags;
130 
131 	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
132 		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
133 		flags |= IORESOURCE_IO;
134 		return flags;
135 	}
136 
137 	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
138 	flags |= IORESOURCE_MEM;
139 	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
140 		flags |= IORESOURCE_PREFETCH;
141 
142 	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
143 	switch (mem_type) {
144 	case PCI_BASE_ADDRESS_MEM_TYPE_32:
145 		break;
146 	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
147 		/* 1M mem BAR treated as 32-bit BAR */
148 		break;
149 	case PCI_BASE_ADDRESS_MEM_TYPE_64:
150 		flags |= IORESOURCE_MEM_64;
151 		break;
152 	default:
153 		/* mem unknown type treated as 32-bit BAR */
154 		break;
155 	}
156 	return flags;
157 }
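
/*
 * Example of the decoding above (hypothetical BAR value): a raw BAR of
 * 0xfebf000c has bit 0 clear (memory space), type bits 10b (64-bit) and
 * the prefetch bit set, so decode_bar() returns IORESOURCE_MEM |
 * IORESOURCE_PREFETCH | IORESOURCE_MEM_64 together with the raw low
 * flag bits.
 */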
158 
159 /**
160  * __pci_read_base - read a PCI BAR
161  * @dev: the PCI device
162  * @type: type of the BAR
163  * @res: resource buffer to be filled in
164  * @pos: BAR position in the config space
165  *
166  * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
167  */
168 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
169 			struct resource *res, unsigned int pos)
170 {
171 	u32 l, sz, mask;
172 	u16 orig_cmd;
173 	struct pci_bus_region region;
174 	bool bar_too_big = false, bar_disabled = false;
175 
176 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
177 
178 	/* No printks while decoding is disabled! */
179 	if (!dev->mmio_always_on) {
180 		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
181 		pci_write_config_word(dev, PCI_COMMAND,
182 			orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
183 	}
184 
185 	res->name = pci_name(dev);
186 
187 	pci_read_config_dword(dev, pos, &l);
188 	pci_write_config_dword(dev, pos, l | mask);
189 	pci_read_config_dword(dev, pos, &sz);
190 	pci_write_config_dword(dev, pos, l);
191 
192 	/*
193 	 * All bits set in sz means the device isn't working properly.
194 	 * If the BAR isn't implemented, all bits must be 0.  If it's a
195 	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
196 	 * 1 must be clear.
197 	 */
198 	if (!sz || sz == 0xffffffff)
199 		goto fail;
200 
201 	/*
202 	 * I don't know how l can have all bits set.  Copied from old code.
203 	 * Maybe it fixes a bug on some ancient platform.
204 	 */
205 	if (l == 0xffffffff)
206 		l = 0;
207 
208 	if (type == pci_bar_unknown) {
209 		res->flags = decode_bar(dev, l);
210 		res->flags |= IORESOURCE_SIZEALIGN;
211 		if (res->flags & IORESOURCE_IO) {
212 			l &= PCI_BASE_ADDRESS_IO_MASK;
213 			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
214 		} else {
215 			l &= PCI_BASE_ADDRESS_MEM_MASK;
216 			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
217 		}
218 	} else {
219 		res->flags |= (l & IORESOURCE_ROM_ENABLE);
220 		l &= PCI_ROM_ADDRESS_MASK;
221 		mask = (u32)PCI_ROM_ADDRESS_MASK;
222 	}
223 
224 	if (res->flags & IORESOURCE_MEM_64) {
225 		u64 l64 = l;
226 		u64 sz64 = sz;
227 		u64 mask64 = mask | (u64)~0 << 32;
228 
229 		pci_read_config_dword(dev, pos + 4, &l);
230 		pci_write_config_dword(dev, pos + 4, ~0);
231 		pci_read_config_dword(dev, pos + 4, &sz);
232 		pci_write_config_dword(dev, pos + 4, l);
233 
234 		l64 |= ((u64)l << 32);
235 		sz64 |= ((u64)sz << 32);
236 
237 		sz64 = pci_size(l64, sz64, mask64);
238 
239 		if (!sz64)
240 			goto fail;
241 
242 		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
243 			bar_too_big = true;
244 			goto fail;
245 		}
246 
247 		if ((sizeof(resource_size_t) < 8) && l) {
248 			/* Address above 32-bit boundary; disable the BAR */
249 			pci_write_config_dword(dev, pos, 0);
250 			pci_write_config_dword(dev, pos + 4, 0);
251 			region.start = 0;
252 			region.end = sz64;
253 			pcibios_bus_to_resource(dev, res, &region);
254 			bar_disabled = true;
255 		} else {
256 			region.start = l64;
257 			region.end = l64 + sz64;
258 			pcibios_bus_to_resource(dev, res, &region);
259 		}
260 	} else {
261 		sz = pci_size(l, sz, mask);
262 
263 		if (!sz)
264 			goto fail;
265 
266 		region.start = l;
267 		region.end = l + sz;
268 		pcibios_bus_to_resource(dev, res, &region);
269 	}
270 
271 	goto out;
272 
273 
274 fail:
275 	res->flags = 0;
276 out:
277 	if (!dev->mmio_always_on)
278 		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
279 
280 	if (bar_too_big)
281 		dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos);
282 	if (res->flags && !bar_disabled)
283 		dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
284 
285 	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
286 }
287 
288 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
289 {
290 	unsigned int pos, reg;
291 
292 	for (pos = 0; pos < howmany; pos++) {
293 		struct resource *res = &dev->resource[pos];
294 		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
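		/*
		 * __pci_read_base() returns 1 for a 64-bit BAR, so the
		 * extra increment below skips the register that holds
		 * the upper 32 bits of the address.
		 */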
295 		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
296 	}
297 
298 	if (rom) {
299 		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
300 		dev->rom_base_reg = rom;
301 		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
302 				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
303 				IORESOURCE_SIZEALIGN;
304 		__pci_read_base(dev, pci_bar_mem32, res, rom);
305 	}
306 }
307 
308 static void pci_read_bridge_io(struct pci_bus *child)
309 {
310 	struct pci_dev *dev = child->self;
311 	u8 io_base_lo, io_limit_lo;
312 	unsigned long io_mask, io_granularity, base, limit;
313 	struct pci_bus_region region;
314 	struct resource *res;
315 
316 	io_mask = PCI_IO_RANGE_MASK;
317 	io_granularity = 0x1000;
318 	if (dev->io_window_1k) {
319 		/* Support 1K I/O space granularity */
320 		io_mask = PCI_IO_1K_RANGE_MASK;
321 		io_granularity = 0x400;
322 	}
323 
324 	res = child->resource[0];
325 	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
326 	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
327 	base = (io_base_lo & io_mask) << 8;
328 	limit = (io_limit_lo & io_mask) << 8;
329 
330 	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
331 		u16 io_base_hi, io_limit_hi;
332 
333 		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
334 		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
335 		base |= ((unsigned long) io_base_hi << 16);
336 		limit |= ((unsigned long) io_limit_hi << 16);
337 	}
338 
339 	if (base <= limit) {
340 		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
341 		region.start = base;
342 		region.end = limit + io_granularity - 1;
343 		pcibios_bus_to_resource(dev, res, &region);
344 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
345 	}
346 }
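
/*
 * Example (hypothetical register values): with 4K granularity,
 * io_base_lo = 0x20 and io_limit_lo = 0x30 decode to base = 0x2000 and
 * limit = 0x3000, so the bridge forwards the I/O window
 * [0x2000, 0x3fff] (limit + granularity - 1).
 */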
347 
348 static void pci_read_bridge_mmio(struct pci_bus *child)
349 {
350 	struct pci_dev *dev = child->self;
351 	u16 mem_base_lo, mem_limit_lo;
352 	unsigned long base, limit;
353 	struct pci_bus_region region;
354 	struct resource *res;
355 
356 	res = child->resource[1];
357 	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
358 	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
359 	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
360 	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
361 	if (base <= limit) {
362 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
363 		region.start = base;
364 		region.end = limit + 0xfffff;
365 		pcibios_bus_to_resource(dev, res, &region);
366 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
367 	}
368 }
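
/*
 * Example (hypothetical register values): mem_base_lo = 0xe000 and
 * mem_limit_lo = 0xe1f0 decode to base = 0xe0000000 and
 * limit = 0xe1f00000, giving the non-prefetchable window
 * [0xe0000000, 0xe1ffffff] (limit + 0xfffff).
 */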
369 
370 static void pci_read_bridge_mmio_pref(struct pci_bus *child)
371 {
372 	struct pci_dev *dev = child->self;
373 	u16 mem_base_lo, mem_limit_lo;
374 	unsigned long base, limit;
375 	struct pci_bus_region region;
376 	struct resource *res;
377 
378 	res = child->resource[2];
379 	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
380 	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
381 	base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
382 	limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
383 
384 	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
385 		u32 mem_base_hi, mem_limit_hi;
386 
387 		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
388 		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
389 
390 		/*
391 		 * Some bridges set the base > limit by default, and some
392 		 * (broken) BIOSes do not initialize them.  If we find
393 		 * this, just assume they are not being used.
394 		 */
395 		if (mem_base_hi <= mem_limit_hi) {
396 #if BITS_PER_LONG == 64
397 			base |= ((unsigned long) mem_base_hi) << 32;
398 			limit |= ((unsigned long) mem_limit_hi) << 32;
399 #else
400 			if (mem_base_hi || mem_limit_hi) {
401 				dev_err(&dev->dev,
402 					"can't handle 64-bit address space for bridge\n");
403 				return;
404 			}
405 #endif
406 		}
407 	}
408 	if (base <= limit) {
409 		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
410 					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
411 		if (res->flags & PCI_PREF_RANGE_TYPE_64)
412 			res->flags |= IORESOURCE_MEM_64;
413 		region.start = base;
414 		region.end = limit + 0xfffff;
415 		pcibios_bus_to_resource(dev, res, &region);
416 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
417 	}
418 }
419 
420 void pci_read_bridge_bases(struct pci_bus *child)
421 {
422 	struct pci_dev *dev = child->self;
423 	struct resource *res;
424 	int i;
425 
426 	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
427 		return;
428 
429 	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
430 		 &child->busn_res,
431 		 dev->transparent ? " (subtractive decode)" : "");
432 
433 	pci_bus_remove_resources(child);
434 	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
435 		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
436 
437 	pci_read_bridge_io(child);
438 	pci_read_bridge_mmio(child);
439 	pci_read_bridge_mmio_pref(child);
440 
441 	if (dev->transparent) {
442 		pci_bus_for_each_resource(child->parent, res, i) {
443 			if (res) {
444 				pci_bus_add_resource(child, res,
445 						     PCI_SUBTRACTIVE_DECODE);
446 				dev_printk(KERN_DEBUG, &dev->dev,
447 					   "  bridge window %pR (subtractive decode)\n",
448 					   res);
449 			}
450 		}
451 	}
452 }
453 
454 static struct pci_bus *pci_alloc_bus(void)
455 {
456 	struct pci_bus *b;
457 
458 	b = kzalloc(sizeof(*b), GFP_KERNEL);
459 	if (b) {
460 		INIT_LIST_HEAD(&b->node);
461 		INIT_LIST_HEAD(&b->children);
462 		INIT_LIST_HEAD(&b->devices);
463 		INIT_LIST_HEAD(&b->slots);
464 		INIT_LIST_HEAD(&b->resources);
465 		b->max_bus_speed = PCI_SPEED_UNKNOWN;
466 		b->cur_bus_speed = PCI_SPEED_UNKNOWN;
467 	}
468 	return b;
469 }
470 
471 static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
472 {
473 	struct pci_host_bridge *bridge;
474 
475 	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
476 	if (bridge) {
477 		INIT_LIST_HEAD(&bridge->windows);
478 		bridge->bus = b;
479 	}
480 
481 	return bridge;
482 }
483 
484 static unsigned char pcix_bus_speed[] = {
485 	PCI_SPEED_UNKNOWN,		/* 0 */
486 	PCI_SPEED_66MHz_PCIX,		/* 1 */
487 	PCI_SPEED_100MHz_PCIX,		/* 2 */
488 	PCI_SPEED_133MHz_PCIX,		/* 3 */
489 	PCI_SPEED_UNKNOWN,		/* 4 */
490 	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
491 	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
492 	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
493 	PCI_SPEED_UNKNOWN,		/* 8 */
494 	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
495 	PCI_SPEED_100MHz_PCIX_266,	/* A */
496 	PCI_SPEED_133MHz_PCIX_266,	/* B */
497 	PCI_SPEED_UNKNOWN,		/* C */
498 	PCI_SPEED_66MHz_PCIX_533,	/* D */
499 	PCI_SPEED_100MHz_PCIX_533,	/* E */
500 	PCI_SPEED_133MHz_PCIX_533	/* F */
501 };
502 
503 static unsigned char pcie_link_speed[] = {
504 	PCI_SPEED_UNKNOWN,		/* 0 */
505 	PCIE_SPEED_2_5GT,		/* 1 */
506 	PCIE_SPEED_5_0GT,		/* 2 */
507 	PCIE_SPEED_8_0GT,		/* 3 */
508 	PCI_SPEED_UNKNOWN,		/* 4 */
509 	PCI_SPEED_UNKNOWN,		/* 5 */
510 	PCI_SPEED_UNKNOWN,		/* 6 */
511 	PCI_SPEED_UNKNOWN,		/* 7 */
512 	PCI_SPEED_UNKNOWN,		/* 8 */
513 	PCI_SPEED_UNKNOWN,		/* 9 */
514 	PCI_SPEED_UNKNOWN,		/* A */
515 	PCI_SPEED_UNKNOWN,		/* B */
516 	PCI_SPEED_UNKNOWN,		/* C */
517 	PCI_SPEED_UNKNOWN,		/* D */
518 	PCI_SPEED_UNKNOWN,		/* E */
519 	PCI_SPEED_UNKNOWN		/* F */
520 };
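
/*
 * The pcie_link_speed[] table above is indexed by the 4-bit speed field
 * of the Link Capabilities/Link Status registers; e.g. a Link Status
 * with CLS = 2 maps to PCIE_SPEED_5_0GT (5.0 GT/s).
 */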
521 
522 void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
523 {
524 	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
525 }
526 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
527 
528 static unsigned char agp_speeds[] = {
529 	AGP_UNKNOWN,
530 	AGP_1X,
531 	AGP_2X,
532 	AGP_4X,
533 	AGP_8X
534 };
535 
536 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
537 {
538 	int index = 0;
539 
540 	if (agpstat & 4)
541 		index = 3;
542 	else if (agpstat & 2)
543 		index = 2;
544 	else if (agpstat & 1)
545 		index = 1;
546 	else
547 		goto out;
548 
549 	if (agp3) {
550 		index += 2;
551 		if (index == 5)
552 			index = 0;
553 	}
554 
555  out:
556 	return agp_speeds[index];
557 }
558 
559 
560 static void pci_set_bus_speed(struct pci_bus *bus)
561 {
562 	struct pci_dev *bridge = bus->self;
563 	int pos;
564 
565 	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
566 	if (!pos)
567 		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
568 	if (pos) {
569 		u32 agpstat, agpcmd;
570 
571 		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
572 		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
573 
574 		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
575 		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
576 	}
577 
578 	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
579 	if (pos) {
580 		u16 status;
581 		enum pci_bus_speed max;
582 
583 		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
584 				     &status);
585 
586 		if (status & PCI_X_SSTATUS_533MHZ) {
587 			max = PCI_SPEED_133MHz_PCIX_533;
588 		} else if (status & PCI_X_SSTATUS_266MHZ) {
589 			max = PCI_SPEED_133MHz_PCIX_266;
590 		} else if (status & PCI_X_SSTATUS_133MHZ) {
591 			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) {
592 				max = PCI_SPEED_133MHz_PCIX_ECC;
593 			} else {
594 				max = PCI_SPEED_133MHz_PCIX;
595 			}
596 		} else {
597 			max = PCI_SPEED_66MHz_PCIX;
598 		}
599 
600 		bus->max_bus_speed = max;
601 		bus->cur_bus_speed = pcix_bus_speed[
602 			(status & PCI_X_SSTATUS_FREQ) >> 6];
603 
604 		return;
605 	}
606 
607 	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
608 	if (pos) {
609 		u32 linkcap;
610 		u16 linksta;
611 
612 		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
613 		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
614 
615 		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
616 		pcie_update_link_speed(bus, linksta);
617 	}
618 }
619 
620 
621 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
622 					   struct pci_dev *bridge, int busnr)
623 {
624 	struct pci_bus *child;
625 	int i;
626 	int ret;
627 
628 	/*
629 	 * Allocate a new bus, and inherit stuff from the parent..
630 	 */
631 	child = pci_alloc_bus();
632 	if (!child)
633 		return NULL;
634 
635 	child->parent = parent;
636 	child->ops = parent->ops;
637 	child->sysdata = parent->sysdata;
638 	child->bus_flags = parent->bus_flags;
639 
640 	/* initialize some portions of the bus device, but don't register it
641 	 * now as the parent is not properly set up yet.
642 	 */
643 	child->dev.class = &pcibus_class;
644 	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
645 
646 	/*
647 	 * Set up the primary, secondary and subordinate
648 	 * bus numbers.
649 	 */
650 	child->number = child->busn_res.start = busnr;
651 	child->primary = parent->busn_res.start;
652 	child->busn_res.end = 0xff;
653 
654 	if (!bridge) {
655 		child->dev.parent = parent->bridge;
656 		goto add_dev;
657 	}
658 
659 	child->self = bridge;
660 	child->bridge = get_device(&bridge->dev);
661 	child->dev.parent = child->bridge;
662 	pci_set_bus_of_node(child);
663 	pci_set_bus_speed(child);
664 
665 	/* Set up default resource pointers and names.. */
666 	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
667 		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
668 		child->resource[i]->name = child->name;
669 	}
670 	bridge->subordinate = child;
671 
672 add_dev:
673 	ret = device_register(&child->dev);
674 	WARN_ON(ret < 0);
675 
676 	pcibios_add_bus(child);
677 
678 	/* Create legacy_io and legacy_mem files for this bus */
679 	pci_create_legacy_files(child);
680 
681 	return child;
682 }
683 
684 struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
685 {
686 	struct pci_bus *child;
687 
688 	child = pci_alloc_child_bus(parent, dev, busnr);
689 	if (child) {
690 		down_write(&pci_bus_sem);
691 		list_add_tail(&child->node, &parent->children);
692 		up_write(&pci_bus_sem);
693 	}
694 	return child;
695 }
696 
697 static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
698 {
699 	struct pci_bus *parent = child->parent;
700 
701 	/* Attempts to fix that up are really dangerous unless
702 	   we're going to re-assign all bus numbers. */
703 	if (!pcibios_assign_all_busses())
704 		return;
705 
706 	while (parent->parent && parent->busn_res.end < max) {
707 		parent->busn_res.end = max;
708 		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
709 		parent = parent->parent;
710 	}
711 }
712 
713 /*
714  * If it's a bridge, configure it and scan the bus behind it.
715  * For CardBus bridges, we don't scan behind as the devices will
716  * be handled by the bridge driver itself.
717  *
718  * We need to process bridges in two passes -- first we scan those
719  * already configured by the BIOS and after we are done with all of
720  * them, we proceed to assigning numbers to the remaining buses in
721  * order to avoid overlaps between old and new bus numbers.
722  */
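/*
 * Illustrative flow of the two passes described above (hypothetical
 * topology): a bridge whose secondary/subordinate numbers were already
 * programmed by the firmware is descended during pass 0 and its
 * configuration is kept; any remaining bridge is handled in pass 1,
 * where it receives the next free bus number (max + 1) and its
 * subordinate number is fixed up once its children have been scanned.
 */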
723 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
724 {
725 	struct pci_bus *child;
726 	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
727 	u32 buses, i, j = 0;
728 	u16 bctl;
729 	u8 primary, secondary, subordinate;
730 	int broken = 0;
731 
732 	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
733 	primary = buses & 0xFF;
734 	secondary = (buses >> 8) & 0xFF;
735 	subordinate = (buses >> 16) & 0xFF;
736 
737 	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
738 		secondary, subordinate, pass);
739 
740 	if (!primary && (primary != bus->number) && secondary && subordinate) {
741 		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
742 		primary = bus->number;
743 	}
744 
745 	/* Check if setup is sensible at all */
746 	if (!pass &&
747 	    (primary != bus->number || secondary <= bus->number ||
748 	     secondary > subordinate)) {
749 		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
750 			 secondary, subordinate);
751 		broken = 1;
752 	}
753 
754 	/* Disable MasterAbortMode during probing to avoid reporting
755 	   of bus errors (in some architectures) */
756 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
757 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
758 			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
759 
760 	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
761 	    !is_cardbus && !broken) {
762 		unsigned int cmax;
763 		/*
764 		 * Bus already configured by firmware, process it in the first
765 		 * pass and just note the configuration.
766 		 */
767 		if (pass)
768 			goto out;
769 
770 		/*
771 		 * If we already got to this bus through a different bridge,
772 		 * don't re-add it. This can happen with the i450NX chipset.
773 		 *
774 		 * However, we continue to descend down the hierarchy and
775 		 * scan remaining child buses.
776 		 */
777 		child = pci_find_bus(pci_domain_nr(bus), secondary);
778 		if (!child) {
779 			child = pci_add_new_bus(bus, dev, secondary);
780 			if (!child)
781 				goto out;
782 			child->primary = primary;
783 			pci_bus_insert_busn_res(child, secondary, subordinate);
784 			child->bridge_ctl = bctl;
785 		}
786 
787 		cmax = pci_scan_child_bus(child);
788 		if (cmax > max)
789 			max = cmax;
790 		if (child->busn_res.end > max)
791 			max = child->busn_res.end;
792 	} else {
793 		/*
794 		 * We need to assign a number to this bus which we always
795 		 * do in the second pass.
796 		 */
797 		if (!pass) {
798 			if (pcibios_assign_all_busses() || broken)
799 				/* Temporarily disable forwarding of the
800 				   configuration cycles on all bridges in
801 				   this bus segment to avoid possible
802 				   conflicts in the second pass between two
803 				   bridges programmed with overlapping
804 				   bus ranges. */
805 				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
806 						       buses & ~0xffffff);
807 			goto out;
808 		}
809 
810 		/* Clear errors */
811 		pci_write_config_word(dev, PCI_STATUS, 0xffff);
812 
813 		/* Prevent assigning a bus number that already exists.
814 		 * This can happen when a bridge is hot-plugged, so in
815 		 * this case we only re-scan this bus. */
816 		child = pci_find_bus(pci_domain_nr(bus), max+1);
817 		if (!child) {
818 			child = pci_add_new_bus(bus, dev, ++max);
819 			if (!child)
820 				goto out;
821 			pci_bus_insert_busn_res(child, max, 0xff);
822 		}
823 		buses = (buses & 0xff000000)
824 		      | ((unsigned int)(child->primary)     <<  0)
825 		      | ((unsigned int)(child->busn_res.start)   <<  8)
826 		      | ((unsigned int)(child->busn_res.end) << 16);
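		/*
		 * Layout of the PCI_PRIMARY_BUS dword composed above:
		 * bits 7:0 primary, 15:8 secondary, 23:16 subordinate,
		 * 31:24 secondary latency timer.
		 */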
827 
828 		/*
829 		 * yenta.c forces a secondary latency timer of 176.
830 		 * Copy that behaviour here.
831 		 */
832 		if (is_cardbus) {
833 			buses &= ~0xff000000;
834 			buses |= CARDBUS_LATENCY_TIMER << 24;
835 		}
836 
837 		/*
838 		 * We need to blast all three values with a single write.
839 		 */
840 		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
841 
842 		if (!is_cardbus) {
843 			child->bridge_ctl = bctl;
844 			/*
845 			 * Adjust subordinate busnr in parent buses.
846 			 * We do this before scanning for children because
847 			 * some devices may not be detected if the bios
848 			 * was lazy.
849 			 */
850 			pci_fixup_parent_subordinate_busnr(child, max);
851 			/* Now we can scan all subordinate buses... */
852 			max = pci_scan_child_bus(child);
853 			/*
854 			 * now fix it up again since we have found
855 			 * the real value of max.
856 			 */
857 			pci_fixup_parent_subordinate_busnr(child, max);
858 		} else {
859 			/*
860 			 * For CardBus bridges, we leave 4 bus numbers
861 			 * as cards with a PCI-to-PCI bridge can be
862 			 * inserted later.
863 			 */
864 			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
865 				struct pci_bus *parent = bus;
866 				if (pci_find_bus(pci_domain_nr(bus),
867 							max+i+1))
868 					break;
869 				while (parent->parent) {
870 					if ((!pcibios_assign_all_busses()) &&
871 					    (parent->busn_res.end > max) &&
872 					    (parent->busn_res.end <= max+i)) {
873 						j = 1;
874 					}
875 					parent = parent->parent;
876 				}
877 				if (j) {
878 					/*
879 					 * Often, there are two cardbus bridges
880 					 * -- try to leave one valid bus number
881 					 * for each one.
882 					 */
883 					i /= 2;
884 					break;
885 				}
886 			}
887 			max += i;
888 			pci_fixup_parent_subordinate_busnr(child, max);
889 		}
890 		/*
891 		 * Set the subordinate bus number to its real value.
892 		 */
893 		pci_bus_update_busn_res_end(child, max);
894 		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
895 	}
896 
897 	sprintf(child->name,
898 		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
899 		pci_domain_nr(bus), child->number);
900 
901 	/* Has only triggered on CardBus, fixup is in yenta_socket */
902 	while (bus->parent) {
903 		if ((child->busn_res.end > bus->busn_res.end) ||
904 		    (child->number > bus->busn_res.end) ||
905 		    (child->number < bus->number) ||
906 		    (child->busn_res.end < bus->number)) {
907 			dev_info(&child->dev,
908 				"%pR %s hidden behind%s bridge %s %pR\n",
909 				&child->busn_res,
910 				(bus->number > child->busn_res.end &&
911 				 bus->busn_res.end < child->number) ?
912 					"wholly" : "partially",
913 				bus->self->transparent ? " transparent" : "",
914 				dev_name(&bus->dev),
915 				&bus->busn_res);
916 		}
917 		bus = bus->parent;
918 	}
919 
920 out:
921 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
922 
923 	return max;
924 }
925 
926 /*
927  * Read interrupt line and base address registers.
928  * The architecture-dependent code can tweak these, of course.
929  */
930 static void pci_read_irq(struct pci_dev *dev)
931 {
932 	unsigned char irq;
933 
934 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
935 	dev->pin = irq;
936 	if (irq)
937 		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
938 	dev->irq = irq;
939 }
940 
941 void set_pcie_port_type(struct pci_dev *pdev)
942 {
943 	int pos;
944 	u16 reg16;
945 
946 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
947 	if (!pos)
948 		return;
949 	pdev->is_pcie = 1;
950 	pdev->pcie_cap = pos;
951 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
952 	pdev->pcie_flags_reg = reg16;
953 	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
954 	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
955 }
956 
957 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
958 {
959 	u32 reg32;
960 
961 	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
962 	if (reg32 & PCI_EXP_SLTCAP_HPC)
963 		pdev->is_hotplug_bridge = 1;
964 }
965 
966 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
967 
968 /**
969  * pci_setup_device - fill in class and map information of a device
970  * @dev: the device structure to fill
971  *
972  * Initialize the device structure with information about the device's
973  * vendor, class, memory and I/O-space addresses, IRQ lines etc.
974  * Called at initialisation of the PCI subsystem and by CardBus services.
975  * Returns 0 on success and negative if unknown type of device (not normal,
976  * bridge or CardBus).
977  */
978 int pci_setup_device(struct pci_dev *dev)
979 {
980 	u32 class;
981 	u8 hdr_type;
982 	struct pci_slot *slot;
983 	int pos = 0;
984 	struct pci_bus_region region;
985 	struct resource *res;
986 
987 	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
988 		return -EIO;
989 
990 	dev->sysdata = dev->bus->sysdata;
991 	dev->dev.parent = dev->bus->bridge;
992 	dev->dev.bus = &pci_bus_type;
993 	dev->hdr_type = hdr_type & 0x7f;
994 	dev->multifunction = !!(hdr_type & 0x80);
995 	dev->error_state = pci_channel_io_normal;
996 	set_pcie_port_type(dev);
997 
998 	list_for_each_entry(slot, &dev->bus->slots, list)
999 		if (PCI_SLOT(dev->devfn) == slot->number)
1000 			dev->slot = slot;
1001 
1002 	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1003 	   set this higher, assuming the system even supports it.  */
1004 	dev->dma_mask = 0xffffffff;
1005 
1006 	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1007 		     dev->bus->number, PCI_SLOT(dev->devfn),
1008 		     PCI_FUNC(dev->devfn));
1009 
1010 	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1011 	dev->revision = class & 0xff;
1012 	dev->class = class >> 8;		    /* upper 3 bytes */
1013 
1014 	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1015 		   dev->vendor, dev->device, dev->hdr_type, dev->class);
1016 
1017 	/* need to have dev->class ready */
1018 	dev->cfg_size = pci_cfg_space_size(dev);
1019 
1020 	/* "Unknown power state" */
1021 	dev->current_state = PCI_UNKNOWN;
1022 
1023 	/* Early fixups, before probing the BARs */
1024 	pci_fixup_device(pci_fixup_early, dev);
1025 	/* device class may be changed after fixup */
1026 	class = dev->class >> 8;
1027 
1028 	switch (dev->hdr_type) {		    /* header type */
1029 	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
1030 		if (class == PCI_CLASS_BRIDGE_PCI)
1031 			goto bad;
1032 		pci_read_irq(dev);
1033 		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1034 		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1035 		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1036 
1037 		/*
1038 		 *	Do the ugly legacy mode stuff here rather than broken chip
1039 		 *	quirk code. Legacy mode ATA controllers have fixed
1040 		 *	addresses. These are not always echoed in BAR0-3, and
1041 		 *	BAR0-3 in a few cases contain junk!
1042 		 */
1043 		if (class == PCI_CLASS_STORAGE_IDE) {
1044 			u8 progif;
1045 			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1046 			if ((progif & 1) == 0) {
1047 				region.start = 0x1F0;
1048 				region.end = 0x1F7;
1049 				res = &dev->resource[0];
1050 				res->flags = LEGACY_IO_RESOURCE;
1051 				pcibios_bus_to_resource(dev, res, &region);
1052 				region.start = 0x3F6;
1053 				region.end = 0x3F6;
1054 				res = &dev->resource[1];
1055 				res->flags = LEGACY_IO_RESOURCE;
1056 				pcibios_bus_to_resource(dev, res, &region);
1057 			}
1058 			if ((progif & 4) == 0) {
1059 				region.start = 0x170;
1060 				region.end = 0x177;
1061 				res = &dev->resource[2];
1062 				res->flags = LEGACY_IO_RESOURCE;
1063 				pcibios_bus_to_resource(dev, res, &region);
1064 				region.start = 0x376;
1065 				region.end = 0x376;
1066 				res = &dev->resource[3];
1067 				res->flags = LEGACY_IO_RESOURCE;
1068 				pcibios_bus_to_resource(dev, res, &region);
1069 			}
1070 		}
1071 		break;
1072 
1073 	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
1074 		if (class != PCI_CLASS_BRIDGE_PCI)
1075 			goto bad;
1076 		/* The PCI-to-PCI bridge spec requires that a subtractive
1077 		   decode (i.e. transparent) bridge must have a programming
1078 		   interface code of 0x01. */
1079 		pci_read_irq(dev);
1080 		dev->transparent = ((dev->class & 0xff) == 1);
1081 		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1082 		set_pcie_hotplug_bridge(dev);
1083 		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1084 		if (pos) {
1085 			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1086 			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1087 		}
1088 		break;
1089 
1090 	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
1091 		if (class != PCI_CLASS_BRIDGE_CARDBUS)
1092 			goto bad;
1093 		pci_read_irq(dev);
1094 		pci_read_bases(dev, 1, 0);
1095 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1096 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1097 		break;
1098 
1099 	default:				    /* unknown header */
1100 		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
1101 			dev->hdr_type);
1102 		return -EIO;
1103 
1104 	bad:
1105 		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
1106 			dev->class, dev->hdr_type);
1107 		dev->class = PCI_CLASS_NOT_DEFINED;
1108 	}
1109 
1110 	/* We found a fine healthy device, go go go... */
1111 	return 0;
1112 }
1113 
1114 static void pci_release_capabilities(struct pci_dev *dev)
1115 {
1116 	pci_vpd_release(dev);
1117 	pci_iov_release(dev);
1118 	pci_free_cap_save_buffers(dev);
1119 }
1120 
1121 /**
1122  * pci_release_dev - free a pci device structure when all users of it are finished.
1123  * @dev: device that's been disconnected
1124  *
1125  * Will be called only by the device core when all users of this pci device are
1126  * done.
1127  */
1128 static void pci_release_dev(struct device *dev)
1129 {
1130 	struct pci_dev *pci_dev;
1131 
1132 	pci_dev = to_pci_dev(dev);
1133 	pci_release_capabilities(pci_dev);
1134 	pci_release_of_node(pci_dev);
1135 	kfree(pci_dev);
1136 }
1137 
1138 /**
1139  * pci_cfg_space_size - get the configuration space size of the PCI device.
1140  * @dev: PCI device
1141  *
1142  * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1143  * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
1144  * access it.  Maybe we don't have a way to generate extended config space
1145  * accesses, or the device is behind a reverse Express bridge.  So we try
1146  * reading the dword at 0x100 which must either be 0 or a valid extended
1147  * capability header.
1148  */
1149 int pci_cfg_space_size_ext(struct pci_dev *dev)
1150 {
1151 	u32 status;
1152 	int pos = PCI_CFG_SPACE_SIZE;
1153 
1154 	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1155 		goto fail;
1156 	if (status == 0xffffffff)
1157 		goto fail;
1158 
1159 	return PCI_CFG_SPACE_EXP_SIZE;
1160 
1161  fail:
1162 	return PCI_CFG_SPACE_SIZE;
1163 }
1164 
1165 int pci_cfg_space_size(struct pci_dev *dev)
1166 {
1167 	int pos;
1168 	u32 status;
1169 	u16 class;
1170 
1171 	class = dev->class >> 8;
1172 	if (class == PCI_CLASS_BRIDGE_HOST)
1173 		return pci_cfg_space_size_ext(dev);
1174 
1175 	if (!pci_is_pcie(dev)) {
1176 		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1177 		if (!pos)
1178 			goto fail;
1179 
1180 		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1181 		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1182 			goto fail;
1183 	}
1184 
1185 	return pci_cfg_space_size_ext(dev);
1186 
1187  fail:
1188 	return PCI_CFG_SPACE_SIZE;
1189 }
1190 
1191 static void pci_release_bus_bridge_dev(struct device *dev)
1192 {
1193 	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
1194 
1195 	if (bridge->release_fn)
1196 		bridge->release_fn(bridge);
1197 
1198 	pci_free_resource_list(&bridge->windows);
1199 
1200 	kfree(bridge);
1201 }
1202 
1203 struct pci_dev *alloc_pci_dev(void)
1204 {
1205 	struct pci_dev *dev;
1206 
1207 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1208 	if (!dev)
1209 		return NULL;
1210 
1211 	INIT_LIST_HEAD(&dev->bus_list);
1212 	dev->dev.type = &pci_dev_type;
1213 
1214 	return dev;
1215 }
1216 EXPORT_SYMBOL(alloc_pci_dev);
1217 
1218 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1219 				 int crs_timeout)
1220 {
1221 	int delay = 1;
1222 
1223 	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1224 		return false;
1225 
1226 	/* some broken boards return 0 or ~0 if a slot is empty: */
1227 	if (*l == 0xffffffff || *l == 0x00000000 ||
1228 	    *l == 0x0000ffff || *l == 0xffff0000)
1229 		return false;
1230 
1231 	/* Configuration Request Retry Status (CRS): Vendor ID reads as 0x0001 */
1232 	while (*l == 0xffff0001) {
1233 		if (!crs_timeout)
1234 			return false;
1235 
1236 		msleep(delay);
1237 		delay *= 2;
1238 		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1239 			return false;
1240 		/* Card hasn't responded in 60 seconds?  Must be stuck. */
1241 		if (delay > crs_timeout) {
1242 			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
1243 					pci_domain_nr(bus), bus->number,
1244 					PCI_SLOT(devfn),
1245 					PCI_FUNC(devfn));
1246 			return false;
1247 		}
1248 	}
1249 
1250 	return true;
1251 }
1252 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1253 
1254 /*
1255  * Read the config data for a PCI device, sanity-check it
1256  * and fill in the dev structure...
1257  */
1258 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1259 {
1260 	struct pci_dev *dev;
1261 	u32 l;
1262 
1263 	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1264 		return NULL;
1265 
1266 	dev = alloc_pci_dev();
1267 	if (!dev)
1268 		return NULL;
1269 
1270 	dev->bus = bus;
1271 	dev->devfn = devfn;
1272 	dev->vendor = l & 0xffff;
1273 	dev->device = (l >> 16) & 0xffff;
1274 
1275 	pci_set_of_node(dev);
1276 
1277 	if (pci_setup_device(dev)) {
1278 		kfree(dev);
1279 		return NULL;
1280 	}
1281 
1282 	return dev;
1283 }
1284 
1285 static void pci_init_capabilities(struct pci_dev *dev)
1286 {
1287 	/* MSI/MSI-X list */
1288 	pci_msi_init_pci_dev(dev);
1289 
1290 	/* Buffers for saving PCIe and PCI-X capabilities */
1291 	pci_allocate_cap_save_buffers(dev);
1292 
1293 	/* Power Management */
1294 	pci_pm_init(dev);
1295 
1296 	/* Vital Product Data */
1297 	pci_vpd_pci22_init(dev);
1298 
1299 	/* Alternative Routing-ID Forwarding */
1300 	pci_configure_ari(dev);
1301 
1302 	/* Single Root I/O Virtualization */
1303 	pci_iov_init(dev);
1304 
1305 	/* Enable ACS P2P upstream forwarding */
1306 	pci_enable_acs(dev);
1307 }
1308 
1309 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1310 {
1311 	int ret;
1312 
1313 	device_initialize(&dev->dev);
1314 	dev->dev.release = pci_release_dev;
1315 
1316 	set_dev_node(&dev->dev, pcibus_to_node(bus));
1317 	dev->dev.dma_mask = &dev->dma_mask;
1318 	dev->dev.dma_parms = &dev->dma_parms;
1319 	dev->dev.coherent_dma_mask = 0xffffffffull;
1320 
1321 	pci_set_dma_max_seg_size(dev, 65536);
1322 	pci_set_dma_seg_boundary(dev, 0xffffffff);
1323 
1324 	/* Fix up broken headers */
1325 	pci_fixup_device(pci_fixup_header, dev);
1326 
1327 	/* moved out from quirk header fixup code */
1328 	pci_reassigndev_resource_alignment(dev);
1329 
1330 	/* Clear the state_saved flag. */
1331 	dev->state_saved = false;
1332 
1333 	/* Initialize various capabilities */
1334 	pci_init_capabilities(dev);
1335 
1336 	/*
1337 	 * Add the device to our list of discovered devices
1338 	 * and the bus list for fixup functions, etc.
1339 	 */
1340 	down_write(&pci_bus_sem);
1341 	list_add_tail(&dev->bus_list, &bus->devices);
1342 	up_write(&pci_bus_sem);
1343 
1344 	ret = pcibios_add_device(dev);
1345 	WARN_ON(ret < 0);
1346 
1347 	/* Notifier could use PCI capabilities */
1348 	dev->match_driver = false;
1349 	ret = device_add(&dev->dev);
1350 	WARN_ON(ret < 0);
1351 
1352 	pci_proc_attach_device(dev);
1353 }
1354 
1355 struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1356 {
1357 	struct pci_dev *dev;
1358 
1359 	dev = pci_get_slot(bus, devfn);
1360 	if (dev) {
1361 		pci_dev_put(dev);
1362 		return dev;
1363 	}
1364 
1365 	dev = pci_scan_device(bus, devfn);
1366 	if (!dev)
1367 		return NULL;
1368 
1369 	pci_device_add(dev, bus);
1370 
1371 	return dev;
1372 }
1373 EXPORT_SYMBOL(pci_scan_single_device);
1374 
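/*
 * next_fn() returns the next function number to probe in a slot.  For a
 * conventional multifunction device this simply walks functions 0..7;
 * with ARI enabled, the next function number comes from the device's
 * ARI capability, allowing up to 256 functions per device.
 */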
1375 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1376 {
1377 	int pos;
1378 	u16 cap = 0;
1379 	unsigned next_fn;
1380 
1381 	if (pci_ari_enabled(bus)) {
1382 		if (!dev)
1383 			return 0;
1384 		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1385 		if (!pos)
1386 			return 0;
1387 
1388 		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1389 		next_fn = PCI_ARI_CAP_NFN(cap);
1390 		if (next_fn <= fn)
1391 			return 0;	/* protect against malformed list */
1392 
1393 		return next_fn;
1394 	}
1395 
1396 	/* dev may be NULL for non-contiguous multifunction devices */
1397 	if (!dev || dev->multifunction)
1398 		return (fn + 1) % 8;
1399 
1400 	return 0;
1401 }
1402 
1403 static int only_one_child(struct pci_bus *bus)
1404 {
1405 	struct pci_dev *parent = bus->self;
1406 
1407 	if (!parent || !pci_is_pcie(parent))
1408 		return 0;
1409 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1410 		return 1;
1411 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
1412 	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1413 		return 1;
1414 	return 0;
1415 }
1416 
1417 /**
1418  * pci_scan_slot - scan a PCI slot on a bus for devices.
1419  * @bus: PCI bus to scan
1420  * @devfn: slot number to scan (must have zero function).
1421  *
1422  * Scan a PCI slot on the specified PCI bus for devices, adding
1423  * discovered devices to the @bus->devices list.  New devices
1424  * will not have is_added set.
1425  *
1426  * Returns the number of new devices found.
1427  */
1428 int pci_scan_slot(struct pci_bus *bus, int devfn)
1429 {
1430 	unsigned fn, nr = 0;
1431 	struct pci_dev *dev;
1432 
1433 	if (only_one_child(bus) && (devfn > 0))
1434 		return 0; /* Already scanned the entire slot */
1435 
1436 	dev = pci_scan_single_device(bus, devfn);
1437 	if (!dev)
1438 		return 0;
1439 	if (!dev->is_added)
1440 		nr++;
1441 
1442 	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
1443 		dev = pci_scan_single_device(bus, devfn + fn);
1444 		if (dev) {
1445 			if (!dev->is_added)
1446 				nr++;
1447 			dev->multifunction = 1;
1448 		}
1449 	}
1450 
1451 	/* only one slot has pcie device */
1452 	if (bus->self && nr)
1453 		pcie_aspm_init_link_state(bus->self);
1454 
1455 	return nr;
1456 }
1457 
1458 static int pcie_find_smpss(struct pci_dev *dev, void *data)
1459 {
1460 	u8 *smpss = data;
1461 
1462 	if (!pci_is_pcie(dev))
1463 		return 0;
1464 
1465 	/* For PCIe hotplug-enabled slots not connected directly to a
1466 	 * PCIe root port, there can be problems when hotplugging
1467 	 * devices.  This is due to the possibility of hotplugging a
1468 	 * device into the fabric with a smaller MPS than the devices
1469 	 * currently running have configured.  Modifying the MPS on the
1470 	 * running devices could cause a fatal bus error due to an
1471 	 * incoming frame being larger than the newly configured MPS.
1472 	 * To work around this, the MPS for the entire fabric must be
1473 	 * set to the minimum size.  Any devices hotplugged into this
1474 	 * fabric will have the minimum MPS set.  If the PCI hotplug
1475 	 * slot is directly connected to the root port and there are no
1476 	 * other devices on the fabric (which seems to be the most
1477 	 * common case), then this is not an issue and MPS discovery
1478 	 * will occur as normal.
1479 	 */
1480 	if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
1481 	     (dev->bus->self &&
1482 	      pci_pcie_type(dev->bus->self) != PCI_EXP_TYPE_ROOT_PORT)))
1483 		*smpss = 0;
1484 
1485 	if (*smpss > dev->pcie_mpss)
1486 		*smpss = dev->pcie_mpss;
1487 
1488 	return 0;
1489 }
1490 
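/*
 * MPS/MPSS encoding used below: the capability fields hold
 * log2(size / 128), so the payload size in bytes is recovered as
 * 128 << field.  For example, a pcie_mpss of 2 corresponds to a
 * 512-byte maximum payload.
 */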
1491 static void pcie_write_mps(struct pci_dev *dev, int mps)
1492 {
1493 	int rc;
1494 
1495 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1496 		mps = 128 << dev->pcie_mpss;
1497 
1498 		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
1499 		    dev->bus->self)
1500 			/* For "Performance", the assumption is made that
1501 			 * downstream communication will never be larger than
1502 			 * the MRRS.  So, the MPS only needs to be configured
1503 			 * for the upstream communication.  This being the case,
1504 			 * walk from the top down and set the MPS of the child
1505 			 * to that of the parent bus.
1506 			 *
1507 			 * Configure the device MPS with the smaller of the
1508 			 * device MPSS or the bridge MPS (which is assumed to be
1509 			 * properly configured at this point to the largest
1510 			 * allowable MPS based on its parent bus).
1511 			 */
1512 			mps = min(mps, pcie_get_mps(dev->bus->self));
1513 	}
1514 
1515 	rc = pcie_set_mps(dev, mps);
1516 	if (rc)
1517 		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1518 }
1519 
1520 static void pcie_write_mrrs(struct pci_dev *dev)
1521 {
1522 	int rc, mrrs;
1523 
1524 	/* In the "safe" case, do not configure the MRRS.  There appear to be
1525 	 * issues with setting MRRS to 0 on a number of devices.
1526 	 */
1527 	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1528 		return;
1529 
1530 	/* For Max performance, the MRRS must be set to the largest supported
1531 	 * value.  However, it cannot be configured larger than the MPS the
1532 	 * device or the bus can support.  This should already be properly
1533 	 * configured by a prior call to pcie_write_mps.
1534 	 */
1535 	mrrs = pcie_get_mps(dev);
1536 
1537 	/* MRRS is an R/W register.  Invalid values can be written, but a
1538 	 * subsequent read will verify if the value is acceptable or not.
1539 	 * If the MRRS value provided is not acceptable (e.g., too large),
1540 	 * shrink the value until it is acceptable to the HW.
1541 	 */
1542 	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1543 		rc = pcie_set_readrq(dev, mrrs);
1544 		if (!rc)
1545 			break;
1546 
1547 		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
1548 		mrrs /= 2;
1549 	}
1550 
1551 	if (mrrs < 128)
1552 		dev_err(&dev->dev,
1553 			"MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe.\n");
1555 }
1556 
1557 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1558 {
1559 	int mps, orig_mps;
1560 
1561 	if (!pci_is_pcie(dev))
1562 		return 0;
1563 
1564 	mps = 128 << *(u8 *)data;
1565 	orig_mps = pcie_get_mps(dev);
1566 
1567 	pcie_write_mps(dev, mps);
1568 	pcie_write_mrrs(dev);
1569 
1570 	dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
1571 		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
1572 		 orig_mps, pcie_get_readrq(dev));
1573 
1574 	return 0;
1575 }
1576 
1577 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1578  * parents then children fashion.  If this changes, then this code will not
1579  * work as designed.
1580  */
1581 void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
1582 {
1583 	u8 smpss;
1584 
1585 	if (!pci_is_pcie(bus->self))
1586 		return;
1587 
1588 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
1589 		return;
1590 
1591 	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
1592 	 * to be aware of the MPS of the destination.  To work around this,
1593 	 * simply force the MPS of the entire system to the smallest possible.
1594 	 */
1595 	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1596 		smpss = 0;
1597 
1598 	if (pcie_bus_config == PCIE_BUS_SAFE) {
1599 		smpss = mpss;
1600 
1601 		pcie_find_smpss(bus->self, &smpss);
1602 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
1603 	}
1604 
1605 	pcie_bus_configure_set(bus->self, &smpss);
1606 	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1607 }
1608 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1609 
1610 unsigned int pci_scan_child_bus(struct pci_bus *bus)
1611 {
1612 	unsigned int devfn, pass, max = bus->busn_res.start;
1613 	struct pci_dev *dev;
1614 
1615 	dev_dbg(&bus->dev, "scanning bus\n");
1616 
1617 	/* Go find them, Rover! */
1618 	for (devfn = 0; devfn < 0x100; devfn += 8)
1619 		pci_scan_slot(bus, devfn);
1620 
1621 	/* Reserve buses for SR-IOV capability. */
1622 	max += pci_iov_bus_range(bus);
1623 
1624 	/*
1625 	 * After performing arch-dependent fixup of the bus, look behind
1626 	 * all PCI-to-PCI bridges on this bus.
1627 	 */
1628 	if (!bus->is_added) {
1629 		dev_dbg(&bus->dev, "fixups for bus\n");
1630 		pcibios_fixup_bus(bus);
1631 		bus->is_added = 1;
1632 	}
1633 
1634 	for (pass = 0; pass < 2; pass++)
1635 		list_for_each_entry(dev, &bus->devices, bus_list) {
1636 			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1637 			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1638 				max = pci_scan_bridge(bus, dev, max, pass);
1639 		}
1640 
1641 	/*
1642 	 * We've scanned the bus and so we know all about what's on
1643 	 * the other side of any bridges that may be on this bus plus
1644 	 * any devices.
1645 	 *
1646 	 * Return how far we've got finding sub-buses.
1647 	 */
1648 	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1649 	return max;
1650 }
1651 
1652 /**
1653  * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
1654  * @bridge: Host bridge to set up.
1655  *
1656  * Default empty implementation.  Replace with an architecture-specific setup
1657  * routine, if necessary.
1658  */
1659 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
1660 {
1661 	return 0;
1662 }
1663 
1664 void __weak pcibios_add_bus(struct pci_bus *bus)
1665 {
1666 }
1667 
1668 void __weak pcibios_remove_bus(struct pci_bus *bus)
1669 {
1670 }
1671 
1672 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1673 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
1674 {
1675 	int error;
1676 	struct pci_host_bridge *bridge;
1677 	struct pci_bus *b, *b2;
1678 	struct pci_host_bridge_window *window, *n;
1679 	struct resource *res;
1680 	resource_size_t offset;
1681 	char bus_addr[64];
1682 	char *fmt;
1683 
1684 	b = pci_alloc_bus();
1685 	if (!b)
1686 		return NULL;
1687 
1688 	b->sysdata = sysdata;
1689 	b->ops = ops;
1690 	b->number = b->busn_res.start = bus;
1691 	b2 = pci_find_bus(pci_domain_nr(b), bus);
1692 	if (b2) {
1693 		/* If we already got to this bus through a different bridge, ignore it */
1694 		dev_dbg(&b2->dev, "bus already known\n");
1695 		goto err_out;
1696 	}
1697 
1698 	bridge = pci_alloc_host_bridge(b);
1699 	if (!bridge)
1700 		goto err_out;
1701 
1702 	bridge->dev.parent = parent;
1703 	bridge->dev.release = pci_release_bus_bridge_dev;
1704 	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
1705 	error = pcibios_root_bridge_prepare(bridge);
1706 	if (error)
1707 		goto bridge_dev_reg_err;
1708 
1709 	error = device_register(&bridge->dev);
1710 	if (error)
1711 		goto bridge_dev_reg_err;
1712 	b->bridge = get_device(&bridge->dev);
1713 	device_enable_async_suspend(b->bridge);
1714 	pci_set_bus_of_node(b);
1715 
1716 	if (!parent)
1717 		set_dev_node(b->bridge, pcibus_to_node(b));
1718 
1719 	b->dev.class = &pcibus_class;
1720 	b->dev.parent = b->bridge;
1721 	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
1722 	error = device_register(&b->dev);
1723 	if (error)
1724 		goto class_dev_reg_err;
1725 
1726 	pcibios_add_bus(b);
1727 
1728 	/* Create legacy_io and legacy_mem files for this bus */
1729 	pci_create_legacy_files(b);
1730 
1731 	if (parent)
1732 		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
1733 	else
1734 		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
1735 
1736 	/* Add initial resources to the bus */
1737 	list_for_each_entry_safe(window, n, resources, list) {
1738 		list_move_tail(&window->list, &bridge->windows);
1739 		res = window->res;
1740 		offset = window->offset;
1741 		if (res->flags & IORESOURCE_BUS)
1742 			pci_bus_insert_busn_res(b, bus, res->end);
1743 		else
1744 			pci_bus_add_resource(b, res, 0);
1745 		if (offset) {
1746 			if (resource_type(res) == IORESOURCE_IO)
1747 				fmt = " (bus address [%#06llx-%#06llx])";
1748 			else
1749 				fmt = " (bus address [%#010llx-%#010llx])";
1750 			snprintf(bus_addr, sizeof(bus_addr), fmt,
1751 				 (unsigned long long) (res->start - offset),
1752 				 (unsigned long long) (res->end - offset));
1753 		} else
1754 			bus_addr[0] = '\0';
1755 		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
1756 	}
1757 
1758 	down_write(&pci_bus_sem);
1759 	list_add_tail(&b->node, &pci_root_buses);
1760 	up_write(&pci_bus_sem);
1761 
1762 	return b;
1763 
1764 class_dev_reg_err:
1765 	put_device(&bridge->dev);
1766 	device_unregister(&bridge->dev);
1767 bridge_dev_reg_err:
1768 	kfree(bridge);
1769 err_out:
1770 	kfree(b);
1771 	return NULL;
1772 }
1773 
1774 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
1775 {
1776 	struct resource *res = &b->busn_res;
1777 	struct resource *parent_res, *conflict;
1778 
1779 	res->start = bus;
1780 	res->end = bus_max;
1781 	res->flags = IORESOURCE_BUS;
1782 
1783 	if (!pci_is_root_bus(b))
1784 		parent_res = &b->parent->busn_res;
1785 	else {
1786 		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
1787 		res->flags |= IORESOURCE_PCI_FIXED;
1788 	}
1789 
1790 	conflict = insert_resource_conflict(parent_res, res);
1791 
1792 	if (conflict)
1793 		dev_printk(KERN_DEBUG, &b->dev,
1794 			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
1795 			    res, pci_is_root_bus(b) ? "domain " : "",
1796 			    parent_res, conflict->name, conflict);
1797 
1798 	return conflict == NULL;
1799 }
1800 
1801 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
1802 {
1803 	struct resource *res = &b->busn_res;
1804 	struct resource old_res = *res;
1805 	resource_size_t size;
1806 	int ret;
1807 
1808 	if (res->start > bus_max)
1809 		return -EINVAL;
1810 
1811 	size = bus_max - res->start + 1;
1812 	ret = adjust_resource(res, res->start, size);
1813 	dev_printk(KERN_DEBUG, &b->dev,
1814 			"busn_res: %pR end %s updated to %02x\n",
1815 			&old_res, ret ? "can not be" : "is", bus_max);
1816 
1817 	if (!ret && !res->parent)
1818 		pci_bus_insert_busn_res(b, res->start, res->end);
1819 
1820 	return ret;
1821 }
1822 
1823 void pci_bus_release_busn_res(struct pci_bus *b)
1824 {
1825 	struct resource *res = &b->busn_res;
1826 	int ret;
1827 
1828 	if (!res->flags || !res->parent)
1829 		return;
1830 
1831 	ret = release_resource(res);
1832 	dev_printk(KERN_DEBUG, &b->dev,
1833 			"busn_res: %pR %s released\n",
1834 			res, ret ? "can not be" : "is");
1835 }
1836 
1837 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1838 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
1839 {
1840 	struct pci_host_bridge_window *window;
1841 	bool found = false;
1842 	struct pci_bus *b;
1843 	int max;
1844 
1845 	list_for_each_entry(window, resources, list)
1846 		if (window->res->flags & IORESOURCE_BUS) {
1847 			found = true;
1848 			break;
1849 		}
1850 
1851 	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
1852 	if (!b)
1853 		return NULL;
1854 
1855 	if (!found) {
1856 		dev_info(&b->dev,
1857 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
1858 			bus);
1859 		pci_bus_insert_busn_res(b, bus, 255);
1860 	}
1861 
1862 	max = pci_scan_child_bus(b);
1863 
1864 	if (!found)
1865 		pci_bus_update_busn_res_end(b, max);
1866 
1867 	pci_bus_add_devices(b);
1868 	return b;
1869 }
1870 EXPORT_SYMBOL(pci_scan_root_bus);
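
/*
 * Typical use of pci_scan_root_bus() from a host-controller driver
 * (sketch only; "parent_dev", "my_pci_ops" and "sysdata" are
 * placeholders, and the resource list mirrors the pattern used by
 * pci_scan_bus() below):
 *
 *	LIST_HEAD(res);
 *
 *	pci_add_resource(&res, &ioport_resource);
 *	pci_add_resource(&res, &iomem_resource);
 *	pci_add_resource(&res, &busn_resource);
 *	bus = pci_scan_root_bus(parent_dev, 0, &my_pci_ops, sysdata, &res);
 *	if (!bus)
 *		pci_free_resource_list(&res);
 */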
1871 
1872 /* Deprecated; use pci_scan_root_bus() instead */
1873 struct pci_bus *pci_scan_bus_parented(struct device *parent,
1874 		int bus, struct pci_ops *ops, void *sysdata)
1875 {
1876 	LIST_HEAD(resources);
1877 	struct pci_bus *b;
1878 
1879 	pci_add_resource(&resources, &ioport_resource);
1880 	pci_add_resource(&resources, &iomem_resource);
1881 	pci_add_resource(&resources, &busn_resource);
1882 	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
1883 	if (b)
1884 		pci_scan_child_bus(b);
1885 	else
1886 		pci_free_resource_list(&resources);
1887 	return b;
1888 }
1889 EXPORT_SYMBOL(pci_scan_bus_parented);
1890 
1891 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
1892 					void *sysdata)
1893 {
1894 	LIST_HEAD(resources);
1895 	struct pci_bus *b;
1896 
1897 	pci_add_resource(&resources, &ioport_resource);
1898 	pci_add_resource(&resources, &iomem_resource);
1899 	pci_add_resource(&resources, &busn_resource);
1900 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
1901 	if (b) {
1902 		pci_scan_child_bus(b);
1903 		pci_bus_add_devices(b);
1904 	} else {
1905 		pci_free_resource_list(&resources);
1906 	}
1907 	return b;
1908 }
1909 EXPORT_SYMBOL(pci_scan_bus);
1910 
1911 /**
1912  * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
1913  * @bridge: PCI bridge for the bus to scan
1914  *
1915  * Scan a PCI bus and child buses for new devices, add them,
1916  * and enable them, resizing bridge mmio/io resource if necessary
1917  * and possible.  The caller must ensure the child devices are already
1918  * removed for resizing to occur.
1919  *
1920  * Returns the maximum subordinate bus number discovered.
1921  */
1922 unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
1923 {
1924 	unsigned int max;
1925 	struct pci_bus *bus = bridge->subordinate;
1926 
1927 	max = pci_scan_child_bus(bus);
1928 
1929 	pci_assign_unassigned_bridge_resources(bridge);
1930 
1931 	pci_bus_add_devices(bus);
1932 
1933 	return max;
1934 }
1935 
1936 /**
1937  * pci_rescan_bus - scan a PCI bus for devices.
1938  * @bus: PCI bus to scan
1939  *
1940  * Scan a PCI bus and child buses for new devices, add them,
1941  * and enable them.
1942  *
1943  * Returns the maximum subordinate bus number discovered.
1944  */
1945 unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
1946 {
1947 	unsigned int max;
1948 
1949 	max = pci_scan_child_bus(bus);
1950 	pci_assign_unassigned_bus_resources(bus);
1951 	pci_enable_bridges(bus);
1952 	pci_bus_add_devices(bus);
1953 
1954 	return max;
1955 }
1956 EXPORT_SYMBOL_GPL(pci_rescan_bus);
1957 
1958 EXPORT_SYMBOL(pci_add_new_bus);
1959 EXPORT_SYMBOL(pci_scan_slot);
1960 EXPORT_SYMBOL(pci_scan_bridge);
1961 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1962 
1963 static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
1964 {
1965 	const struct pci_dev *a = to_pci_dev(d_a);
1966 	const struct pci_dev *b = to_pci_dev(d_b);
1967 
1968 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
1969 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
1970 
1971 	if      (a->bus->number < b->bus->number) return -1;
1972 	else if (a->bus->number > b->bus->number) return  1;
1973 
1974 	if      (a->devfn < b->devfn) return -1;
1975 	else if (a->devfn > b->devfn) return  1;
1976 
1977 	return 0;
1978 }
1979 
1980 void __init pci_sort_breadthfirst(void)
1981 {
1982 	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
1983 }
1984