xref: /linux/drivers/pci/probe.c (revision 25aee3debe0464f6c680173041fa3de30ec9ff54)
1 /*
2  * probe.c - PCI detection and setup code
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include <linux/pci-aspm.h>
13 #include <asm-generic/pci-bridge.h>
14 #include "pci.h"
15 
16 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
17 #define CARDBUS_RESERVE_BUSNR	3
18 
19 struct resource busn_resource = {
20 	.name	= "PCI busn",
21 	.start	= 0,
22 	.end	= 255,
23 	.flags	= IORESOURCE_BUS,
24 };
25 
26 /* Ugh.  Need to stop exporting this to modules. */
27 LIST_HEAD(pci_root_buses);
28 EXPORT_SYMBOL(pci_root_buses);
29 
30 static LIST_HEAD(pci_domain_busn_res_list);
31 
32 struct pci_domain_busn_res {
33 	struct list_head list;
34 	struct resource res;
35 	int domain_nr;
36 };
37 
38 static struct resource *get_pci_domain_busn_res(int domain_nr)
39 {
40 	struct pci_domain_busn_res *r;
41 
42 	list_for_each_entry(r, &pci_domain_busn_res_list, list)
43 		if (r->domain_nr == domain_nr)
44 			return &r->res;
45 
46 	r = kzalloc(sizeof(*r), GFP_KERNEL);
47 	if (!r)
48 		return NULL;
49 
50 	r->domain_nr = domain_nr;
51 	r->res.start = 0;
52 	r->res.end = 0xff;
53 	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
54 
55 	list_add_tail(&r->list, &pci_domain_busn_res_list);
56 
57 	return &r->res;
58 }
59 
60 static int find_anything(struct device *dev, void *data)
61 {
62 	return 1;
63 }
64 
65 /*
66  * Some device drivers need to know if PCI has been initialized.
67  * Basically, we consider PCI uninitialized when there is no
68  * device to be found on the pci_bus_type.
69  */
70 int no_pci_devices(void)
71 {
72 	struct device *dev;
73 	int no_devices;
74 
75 	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
76 	no_devices = (dev == NULL);
77 	put_device(dev);
78 	return no_devices;
79 }
80 EXPORT_SYMBOL(no_pci_devices);
81 
82 /*
83  * PCI Bus Class
84  */
85 static void release_pcibus_dev(struct device *dev)
86 {
87 	struct pci_bus *pci_bus = to_pci_bus(dev);
88 
89 	if (pci_bus->bridge)
90 		put_device(pci_bus->bridge);
91 	pci_bus_remove_resources(pci_bus);
92 	pci_release_bus_of_node(pci_bus);
93 	kfree(pci_bus);
94 }
95 
96 static struct class pcibus_class = {
97 	.name		= "pci_bus",
98 	.dev_release	= &release_pcibus_dev,
99 	.dev_attrs	= pcibus_dev_attrs,
100 };
101 
102 static int __init pcibus_class_init(void)
103 {
104 	return class_register(&pcibus_class);
105 }
106 postcore_initcall(pcibus_class_init);
107 
108 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
109 {
110 	u64 size = mask & maxbase;	/* Find the significant bits */
111 	if (!size)
112 		return 0;
113 
114 	/* Get the lowest of them to find the decode size, and
115 	   from that the extent.  */
116 	size = (size & ~(size-1)) - 1;
117 
118 	/* base == maxbase can be valid only if the BAR has
119 	   already been programmed with all 1s.  */
120 	if (base == maxbase && ((base | size) & mask) != mask)
121 		return 0;
122 
123 	return size;
124 }
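/*
 * Worked example: a 4 KB 32-bit memory BAR reads back 0xfffff000 (after
 * masking) once all 1s have been written.  The lowest set bit is 0x1000,
 * so pci_size() returns 0xfff, the resource extent (end - start) of a
 * 4 KB region.
 */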
125 
126 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
127 {
128 	u32 mem_type;
129 	unsigned long flags;
130 
131 	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
132 		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
133 		flags |= IORESOURCE_IO;
134 		return flags;
135 	}
136 
137 	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
138 	flags |= IORESOURCE_MEM;
139 	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
140 		flags |= IORESOURCE_PREFETCH;
141 
142 	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
143 	switch (mem_type) {
144 	case PCI_BASE_ADDRESS_MEM_TYPE_32:
145 		break;
146 	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
147 		dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n");
148 		break;
149 	case PCI_BASE_ADDRESS_MEM_TYPE_64:
150 		flags |= IORESOURCE_MEM_64;
151 		break;
152 	default:
153 		dev_warn(&dev->dev,
154 			 "mem unknown type %x treated as 32-bit BAR\n",
155 			 mem_type);
156 		break;
157 	}
158 	return flags;
159 }
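/*
 * For example, a BAR whose low bits read 0xc (memory space, 64-bit type,
 * prefetchable) yields IORESOURCE_MEM | IORESOURCE_PREFETCH |
 * IORESOURCE_MEM_64 on top of the raw low BAR bits kept in the flags.
 */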
160 
161 /**
162  * __pci_read_base - read a PCI BAR
163  * @dev: the PCI device
164  * @type: type of the BAR
165  * @res: resource buffer to be filled in
166  * @pos: BAR position in the config space
167  *
168  * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
169  */
170 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
171 			struct resource *res, unsigned int pos)
172 {
173 	u32 l, sz, mask;
174 	u16 orig_cmd;
175 	struct pci_bus_region region;
176 
177 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
178 
179 	if (!dev->mmio_always_on) {
180 		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
181 		pci_write_config_word(dev, PCI_COMMAND,
182 			orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
183 	}
184 
185 	res->name = pci_name(dev);
186 
187 	pci_read_config_dword(dev, pos, &l);
188 	pci_write_config_dword(dev, pos, l | mask);
189 	pci_read_config_dword(dev, pos, &sz);
190 	pci_write_config_dword(dev, pos, l);
191 
192 	/*
193 	 * All bits set in sz means the device isn't working properly.
194 	 * If the BAR isn't implemented, all bits must be 0.  If it's a
195 	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
196 	 * 1 must be clear.
197 	 */
198 	if (!sz || sz == 0xffffffff)
199 		goto fail;
200 
201 	/*
202 	 * I don't know how l can have all bits set.  Copied from old code.
203 	 * Maybe it fixes a bug on some ancient platform.
204 	 */
205 	if (l == 0xffffffff)
206 		l = 0;
207 
208 	if (type == pci_bar_unknown) {
209 		res->flags = decode_bar(dev, l);
210 		res->flags |= IORESOURCE_SIZEALIGN;
211 		if (res->flags & IORESOURCE_IO) {
212 			l &= PCI_BASE_ADDRESS_IO_MASK;
213 			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
214 		} else {
215 			l &= PCI_BASE_ADDRESS_MEM_MASK;
216 			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
217 		}
218 	} else {
219 		res->flags |= (l & IORESOURCE_ROM_ENABLE);
220 		l &= PCI_ROM_ADDRESS_MASK;
221 		mask = (u32)PCI_ROM_ADDRESS_MASK;
222 	}
223 
224 	if (res->flags & IORESOURCE_MEM_64) {
225 		u64 l64 = l;
226 		u64 sz64 = sz;
227 		u64 mask64 = mask | (u64)~0 << 32;
228 
229 		pci_read_config_dword(dev, pos + 4, &l);
230 		pci_write_config_dword(dev, pos + 4, ~0);
231 		pci_read_config_dword(dev, pos + 4, &sz);
232 		pci_write_config_dword(dev, pos + 4, l);
233 
234 		l64 |= ((u64)l << 32);
235 		sz64 |= ((u64)sz << 32);
236 
237 		sz64 = pci_size(l64, sz64, mask64);
238 
239 		if (!sz64)
240 			goto fail;
241 
242 		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
243 			dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n",
244 				pos);
245 			goto fail;
246 		}
247 
248 		if ((sizeof(resource_size_t) < 8) && l) {
249 			/* Address above 32-bit boundary; disable the BAR */
250 			pci_write_config_dword(dev, pos, 0);
251 			pci_write_config_dword(dev, pos + 4, 0);
252 			region.start = 0;
253 			region.end = sz64;
254 			pcibios_bus_to_resource(dev, res, &region);
255 		} else {
256 			region.start = l64;
257 			region.end = l64 + sz64;
258 			pcibios_bus_to_resource(dev, res, &region);
259 			dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n",
260 				   pos, res);
261 		}
262 	} else {
263 		sz = pci_size(l, sz, mask);
264 
265 		if (!sz)
266 			goto fail;
267 
268 		region.start = l;
269 		region.end = l + sz;
270 		pcibios_bus_to_resource(dev, res, &region);
271 
272 		dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
273 	}
274 
275  out:
276 	if (!dev->mmio_always_on)
277 		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
278 
279 	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
280  fail:
281 	res->flags = 0;
282 	goto out;
283 }
284 
285 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
286 {
287 	unsigned int pos, reg;
288 
289 	for (pos = 0; pos < howmany; pos++) {
290 		struct resource *res = &dev->resource[pos];
291 		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
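		/*
		 * A 64-bit BAR occupies two consecutive BAR slots;
		 * __pci_read_base() returns 1 in that case, so "pos" is
		 * bumped an extra step and the upper half is skipped.
		 */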
292 		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
293 	}
294 
295 	if (rom) {
296 		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
297 		dev->rom_base_reg = rom;
298 		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
299 				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
300 				IORESOURCE_SIZEALIGN;
301 		__pci_read_base(dev, pci_bar_mem32, res, rom);
302 	}
303 }
304 
305 static void __devinit pci_read_bridge_io(struct pci_bus *child)
306 {
307 	struct pci_dev *dev = child->self;
308 	u8 io_base_lo, io_limit_lo;
309 	unsigned long io_mask, io_granularity, base, limit;
310 	struct pci_bus_region region;
311 	struct resource *res;
312 
313 	io_mask = PCI_IO_RANGE_MASK;
314 	io_granularity = 0x1000;
315 	if (dev->io_window_1k) {
316 		/* Support 1K I/O space granularity */
317 		io_mask = PCI_IO_1K_RANGE_MASK;
318 		io_granularity = 0x400;
319 	}
320 
321 	res = child->resource[0];
322 	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
323 	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
324 	base = (io_base_lo & io_mask) << 8;
325 	limit = (io_limit_lo & io_mask) << 8;
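	/*
	 * Example with the default 4K granularity: io_base_lo = 0x21 and
	 * io_limit_lo = 0x31 give base = 0x2000 and limit = 0x3000, i.e.
	 * an I/O window of [0x2000, 0x3fff] once io_granularity - 1 is
	 * added to the limit below.
	 */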
326 
327 	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
328 		u16 io_base_hi, io_limit_hi;
329 
330 		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
331 		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
332 		base |= ((unsigned long) io_base_hi << 16);
333 		limit |= ((unsigned long) io_limit_hi << 16);
334 	}
335 
336 	if (base <= limit) {
337 		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
338 		region.start = base;
339 		region.end = limit + io_granularity - 1;
340 		pcibios_bus_to_resource(dev, res, &region);
341 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
342 	}
343 }
344 
345 static void __devinit pci_read_bridge_mmio(struct pci_bus *child)
346 {
347 	struct pci_dev *dev = child->self;
348 	u16 mem_base_lo, mem_limit_lo;
349 	unsigned long base, limit;
350 	struct pci_bus_region region;
351 	struct resource *res;
352 
353 	res = child->resource[1];
354 	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
355 	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
356 	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
357 	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
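	/*
	 * The base/limit registers hold address bits 31:20, so the window
	 * has 1 MB granularity: e.g. mem_base_lo = 0xe000 and
	 * mem_limit_lo = 0xe7f0 describe [0xe0000000, 0xe7ffffff].
	 */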
358 	if (base <= limit) {
359 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
360 		region.start = base;
361 		region.end = limit + 0xfffff;
362 		pcibios_bus_to_resource(dev, res, &region);
363 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
364 	}
365 }
366 
367 static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
368 {
369 	struct pci_dev *dev = child->self;
370 	u16 mem_base_lo, mem_limit_lo;
371 	unsigned long base, limit;
372 	struct pci_bus_region region;
373 	struct resource *res;
374 
375 	res = child->resource[2];
376 	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
377 	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
378 	base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
379 	limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
380 
381 	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
382 		u32 mem_base_hi, mem_limit_hi;
383 
384 		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
385 		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
386 
387 		/*
388 		 * Some bridges set the base > limit by default, and some
389 		 * (broken) BIOSes do not initialize them.  If we find
390 		 * this, just assume they are not being used.
391 		 */
392 		if (mem_base_hi <= mem_limit_hi) {
393 #if BITS_PER_LONG == 64
394 			base |= ((unsigned long) mem_base_hi) << 32;
395 			limit |= ((unsigned long) mem_limit_hi) << 32;
396 #else
397 			if (mem_base_hi || mem_limit_hi) {
398 				dev_err(&dev->dev, "can't handle 64-bit "
399 					"address space for bridge\n");
400 				return;
401 			}
402 #endif
403 		}
404 	}
405 	if (base <= limit) {
406 		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
407 					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
408 		if (res->flags & PCI_PREF_RANGE_TYPE_64)
409 			res->flags |= IORESOURCE_MEM_64;
410 		region.start = base;
411 		region.end = limit + 0xfffff;
412 		pcibios_bus_to_resource(dev, res, &region);
413 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
414 	}
415 }
416 
417 void __devinit pci_read_bridge_bases(struct pci_bus *child)
418 {
419 	struct pci_dev *dev = child->self;
420 	struct resource *res;
421 	int i;
422 
423 	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
424 		return;
425 
426 	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
427 		 &child->busn_res,
428 		 dev->transparent ? " (subtractive decode)" : "");
429 
430 	pci_bus_remove_resources(child);
431 	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
432 		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
433 
434 	pci_read_bridge_io(child);
435 	pci_read_bridge_mmio(child);
436 	pci_read_bridge_mmio_pref(child);
437 
438 	if (dev->transparent) {
439 		pci_bus_for_each_resource(child->parent, res, i) {
440 			if (res) {
441 				pci_bus_add_resource(child, res,
442 						     PCI_SUBTRACTIVE_DECODE);
443 				dev_printk(KERN_DEBUG, &dev->dev,
444 					   "  bridge window %pR (subtractive decode)\n",
445 					   res);
446 			}
447 		}
448 	}
449 }
450 
451 static struct pci_bus *pci_alloc_bus(void)
452 {
453 	struct pci_bus *b;
454 
455 	b = kzalloc(sizeof(*b), GFP_KERNEL);
456 	if (b) {
457 		INIT_LIST_HEAD(&b->node);
458 		INIT_LIST_HEAD(&b->children);
459 		INIT_LIST_HEAD(&b->devices);
460 		INIT_LIST_HEAD(&b->slots);
461 		INIT_LIST_HEAD(&b->resources);
462 		b->max_bus_speed = PCI_SPEED_UNKNOWN;
463 		b->cur_bus_speed = PCI_SPEED_UNKNOWN;
464 	}
465 	return b;
466 }
467 
468 static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
469 {
470 	struct pci_host_bridge *bridge;
471 
472 	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
473 	if (bridge) {
474 		INIT_LIST_HEAD(&bridge->windows);
475 		bridge->bus = b;
476 	}
477 
478 	return bridge;
479 }
480 
481 static unsigned char pcix_bus_speed[] = {
482 	PCI_SPEED_UNKNOWN,		/* 0 */
483 	PCI_SPEED_66MHz_PCIX,		/* 1 */
484 	PCI_SPEED_100MHz_PCIX,		/* 2 */
485 	PCI_SPEED_133MHz_PCIX,		/* 3 */
486 	PCI_SPEED_UNKNOWN,		/* 4 */
487 	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
488 	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
489 	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
490 	PCI_SPEED_UNKNOWN,		/* 8 */
491 	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
492 	PCI_SPEED_100MHz_PCIX_266,	/* A */
493 	PCI_SPEED_133MHz_PCIX_266,	/* B */
494 	PCI_SPEED_UNKNOWN,		/* C */
495 	PCI_SPEED_66MHz_PCIX_533,	/* D */
496 	PCI_SPEED_100MHz_PCIX_533,	/* E */
497 	PCI_SPEED_133MHz_PCIX_533	/* F */
498 };
499 
500 static unsigned char pcie_link_speed[] = {
501 	PCI_SPEED_UNKNOWN,		/* 0 */
502 	PCIE_SPEED_2_5GT,		/* 1 */
503 	PCIE_SPEED_5_0GT,		/* 2 */
504 	PCIE_SPEED_8_0GT,		/* 3 */
505 	PCI_SPEED_UNKNOWN,		/* 4 */
506 	PCI_SPEED_UNKNOWN,		/* 5 */
507 	PCI_SPEED_UNKNOWN,		/* 6 */
508 	PCI_SPEED_UNKNOWN,		/* 7 */
509 	PCI_SPEED_UNKNOWN,		/* 8 */
510 	PCI_SPEED_UNKNOWN,		/* 9 */
511 	PCI_SPEED_UNKNOWN,		/* A */
512 	PCI_SPEED_UNKNOWN,		/* B */
513 	PCI_SPEED_UNKNOWN,		/* C */
514 	PCI_SPEED_UNKNOWN,		/* D */
515 	PCI_SPEED_UNKNOWN,		/* E */
516 	PCI_SPEED_UNKNOWN		/* F */
517 };
518 
519 void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
520 {
521 	bus->cur_bus_speed = pcie_link_speed[linksta & 0xf];
522 }
523 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
524 
525 static unsigned char agp_speeds[] = {
526 	AGP_UNKNOWN,
527 	AGP_1X,
528 	AGP_2X,
529 	AGP_4X,
530 	AGP_8X
531 };
532 
533 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
534 {
535 	int index = 0;
536 
537 	if (agpstat & 4)
538 		index = 3;
539 	else if (agpstat & 2)
540 		index = 2;
541 	else if (agpstat & 1)
542 		index = 1;
543 	else
544 		goto out;
545 
546 	if (agp3) {
547 		index += 2;
548 		if (index == 5)
549 			index = 0;
550 	}
551 
552  out:
553 	return agp_speeds[index];
554 }
555 
556 
557 static void pci_set_bus_speed(struct pci_bus *bus)
558 {
559 	struct pci_dev *bridge = bus->self;
560 	int pos;
561 
562 	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
563 	if (!pos)
564 		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
565 	if (pos) {
566 		u32 agpstat, agpcmd;
567 
568 		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
569 		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
570 
571 		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
572 		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
573 	}
574 
575 	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
576 	if (pos) {
577 		u16 status;
578 		enum pci_bus_speed max;
579 		pci_read_config_word(bridge, pos + 2, &status);
580 
581 		if (status & 0x8000) {
582 			max = PCI_SPEED_133MHz_PCIX_533;
583 		} else if (status & 0x4000) {
584 			max = PCI_SPEED_133MHz_PCIX_266;
585 		} else if (status & 0x0002) {
586 			if (((status >> 12) & 0x3) == 2) {
587 				max = PCI_SPEED_133MHz_PCIX_ECC;
588 			} else {
589 				max = PCI_SPEED_133MHz_PCIX;
590 			}
591 		} else {
592 			max = PCI_SPEED_66MHz_PCIX;
593 		}
594 
595 		bus->max_bus_speed = max;
596 		bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf];
597 
598 		return;
599 	}
600 
601 	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
602 	if (pos) {
603 		u32 linkcap;
604 		u16 linksta;
605 
606 		pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap);
607 		bus->max_bus_speed = pcie_link_speed[linkcap & 0xf];
608 
609 		pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta);
610 		pcie_update_link_speed(bus, linksta);
611 	}
612 }
613 
614 
615 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
616 					   struct pci_dev *bridge, int busnr)
617 {
618 	struct pci_bus *child;
619 	int i;
620 
621 	/*
622 	 * Allocate a new bus, and inherit stuff from the parent..
623 	 */
624 	child = pci_alloc_bus();
625 	if (!child)
626 		return NULL;
627 
628 	child->parent = parent;
629 	child->ops = parent->ops;
630 	child->sysdata = parent->sysdata;
631 	child->bus_flags = parent->bus_flags;
632 
633 	/* initialize some portions of the bus device, but don't register it
634 	 * now as the parent is not properly set up yet.  This device will get
635 	 * registered later in pci_bus_add_devices()
636 	 */
637 	child->dev.class = &pcibus_class;
638 	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
639 
640 	/*
641 	 * Set up the primary, secondary and subordinate
642 	 * bus numbers.
643 	 */
644 	child->number = child->busn_res.start = busnr;
645 	child->primary = parent->busn_res.start;
646 	child->busn_res.end = 0xff;
647 
648 	if (!bridge)
649 		return child;
650 
651 	child->self = bridge;
652 	child->bridge = get_device(&bridge->dev);
653 	pci_set_bus_of_node(child);
654 	pci_set_bus_speed(child);
655 
656 	/* Set up default resource pointers and names.. */
657 	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
658 		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
659 		child->resource[i]->name = child->name;
660 	}
661 	bridge->subordinate = child;
662 
663 	return child;
664 }
665 
666 struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
667 {
668 	struct pci_bus *child;
669 
670 	child = pci_alloc_child_bus(parent, dev, busnr);
671 	if (child) {
672 		down_write(&pci_bus_sem);
673 		list_add_tail(&child->node, &parent->children);
674 		up_write(&pci_bus_sem);
675 	}
676 	return child;
677 }
678 
679 static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
680 {
681 	struct pci_bus *parent = child->parent;
682 
683 	/* Attempts to fix that up are really dangerous unless
684 	   we're going to re-assign all bus numbers. */
685 	if (!pcibios_assign_all_busses())
686 		return;
687 
688 	while (parent->parent && parent->busn_res.end < max) {
689 		parent->busn_res.end = max;
690 		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
691 		parent = parent->parent;
692 	}
693 }
694 
695 /*
696  * If it's a bridge, configure it and scan the bus behind it.
697  * For CardBus bridges, we don't scan behind as the devices will
698  * be handled by the bridge driver itself.
699  *
700  * We need to process bridges in two passes -- first we scan those
701  * already configured by the BIOS and after we are done with all of
702  * them, we proceed to assigning numbers to the remaining buses in
703  * order to avoid overlaps between old and new bus numbers.
704  */
705 int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
706 {
707 	struct pci_bus *child;
708 	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
709 	u32 buses, i, j = 0;
710 	u16 bctl;
711 	u8 primary, secondary, subordinate;
712 	int broken = 0;
713 
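	/*
	 * The PCI_PRIMARY_BUS dword packs four byte-wide fields: primary
	 * bus (7:0), secondary bus (15:8), subordinate bus (23:16) and the
	 * secondary latency timer (31:24).
	 */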
714 	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
715 	primary = buses & 0xFF;
716 	secondary = (buses >> 8) & 0xFF;
717 	subordinate = (buses >> 16) & 0xFF;
718 
719 	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
720 		secondary, subordinate, pass);
721 
722 	if (!primary && (primary != bus->number) && secondary && subordinate) {
723 		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
724 		primary = bus->number;
725 	}
726 
727 	/* Check if setup is sensible at all */
728 	if (!pass &&
729 	    (primary != bus->number || secondary <= bus->number)) {
730 		dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
731 		broken = 1;
732 	}
733 
734 	/* Disable MasterAbortMode during probing to avoid reporting
735 	   of bus errors (on some architectures) */
736 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
737 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
738 			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
739 
740 	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
741 	    !is_cardbus && !broken) {
742 		unsigned int cmax;
743 		/*
744 		 * Bus already configured by firmware, process it in the first
745 		 * pass and just note the configuration.
746 		 */
747 		if (pass)
748 			goto out;
749 
750 		/*
751 		 * If we already got to this bus through a different bridge,
752 		 * don't re-add it. This can happen with the i450NX chipset.
753 		 *
754 		 * However, we continue to descend down the hierarchy and
755 		 * scan remaining child buses.
756 		 */
757 		child = pci_find_bus(pci_domain_nr(bus), secondary);
758 		if (!child) {
759 			child = pci_add_new_bus(bus, dev, secondary);
760 			if (!child)
761 				goto out;
762 			child->primary = primary;
763 			pci_bus_insert_busn_res(child, secondary, subordinate);
764 			child->bridge_ctl = bctl;
765 		}
766 
767 		cmax = pci_scan_child_bus(child);
768 		if (cmax > max)
769 			max = cmax;
770 		if (child->busn_res.end > max)
771 			max = child->busn_res.end;
772 	} else {
773 		/*
774 		 * We need to assign a number to this bus which we always
775 		 * do in the second pass.
776 		 */
777 		if (!pass) {
778 			if (pcibios_assign_all_busses() || broken)
779 				/* Temporarily disable forwarding of the
780 				   configuration cycles on all bridges in
781 				   this bus segment to avoid possible
782 				   conflicts in the second pass between two
783 				   bridges programmed with overlapping
784 				   bus ranges. */
785 				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
786 						       buses & ~0xffffff);
787 			goto out;
788 		}
789 
790 		/* Clear errors */
791 		pci_write_config_word(dev, PCI_STATUS, 0xffff);
792 
793 		/* Prevent assigning a bus number that already exists.
794 		 * This can happen when a bridge is hot-plugged, so in
795 		 * this case we only re-scan this bus. */
796 		child = pci_find_bus(pci_domain_nr(bus), max+1);
797 		if (!child) {
798 			child = pci_add_new_bus(bus, dev, ++max);
799 			if (!child)
800 				goto out;
801 			pci_bus_insert_busn_res(child, max, 0xff);
802 		}
803 		buses = (buses & 0xff000000)
804 		      | ((unsigned int)(child->primary)     <<  0)
805 		      | ((unsigned int)(child->busn_res.start)   <<  8)
806 		      | ((unsigned int)(child->busn_res.end) << 16);
807 
808 		/*
809 		 * yenta.c forces a secondary latency timer of 176.
810 		 * Copy that behaviour here.
811 		 */
812 		if (is_cardbus) {
813 			buses &= ~0xff000000;
814 			buses |= CARDBUS_LATENCY_TIMER << 24;
815 		}
816 
817 		/*
818 		 * We need to blast all three values with a single write.
819 		 */
820 		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
821 
822 		if (!is_cardbus) {
823 			child->bridge_ctl = bctl;
824 			/*
825 			 * Adjust subordinate busnr in parent buses.
826 			 * We do this before scanning for children because
827 			 * some devices may not be detected if the BIOS
828 			 * was lazy.
829 			 */
830 			pci_fixup_parent_subordinate_busnr(child, max);
831 			/* Now we can scan all subordinate buses... */
832 			max = pci_scan_child_bus(child);
833 			/*
834 			 * now fix it up again since we have found
835 			 * the real value of max.
836 			 */
837 			pci_fixup_parent_subordinate_busnr(child, max);
838 		} else {
839 			/*
840 			 * For CardBus bridges, we leave 4 bus numbers
841 			 * as cards with a PCI-to-PCI bridge can be
842 			 * inserted later.
843 			 */
844 			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
845 				struct pci_bus *parent = bus;
846 				if (pci_find_bus(pci_domain_nr(bus),
847 							max+i+1))
848 					break;
849 				while (parent->parent) {
850 					if ((!pcibios_assign_all_busses()) &&
851 					    (parent->busn_res.end > max) &&
852 					    (parent->busn_res.end <= max+i)) {
853 						j = 1;
854 					}
855 					parent = parent->parent;
856 				}
857 				if (j) {
858 					/*
859 					 * Often, there are two cardbus bridges
860 					 * -- try to leave one valid bus number
861 					 * for each one.
862 					 */
863 					i /= 2;
864 					break;
865 				}
866 			}
867 			max += i;
868 			pci_fixup_parent_subordinate_busnr(child, max);
869 		}
870 		/*
871 		 * Set the subordinate bus number to its real value.
872 		 */
873 		pci_bus_update_busn_res_end(child, max);
874 		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
875 	}
876 
877 	sprintf(child->name,
878 		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
879 		pci_domain_nr(bus), child->number);
880 
881 	/* Has only triggered on CardBus, fixup is in yenta_socket */
882 	while (bus->parent) {
883 		if ((child->busn_res.end > bus->busn_res.end) ||
884 		    (child->number > bus->busn_res.end) ||
885 		    (child->number < bus->number) ||
886 		    (child->busn_res.end < bus->number)) {
887 			dev_info(&child->dev, "%pR %s "
888 				"hidden behind%s bridge %s %pR\n",
889 				&child->busn_res,
890 				(bus->number > child->busn_res.end &&
891 				 bus->busn_res.end < child->number) ?
892 					"wholly" : "partially",
893 				bus->self->transparent ? " transparent" : "",
894 				dev_name(&bus->dev),
895 				&bus->busn_res);
896 		}
897 		bus = bus->parent;
898 	}
899 
900 out:
901 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
902 
903 	return max;
904 }
905 
906 /*
907  * Read interrupt line and base address registers.
908  * The architecture-dependent code can tweak these, of course.
909  */
910 static void pci_read_irq(struct pci_dev *dev)
911 {
912 	unsigned char irq;
913 
914 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
915 	dev->pin = irq;
916 	if (irq)
917 		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
918 	dev->irq = irq;
919 }
920 
921 void set_pcie_port_type(struct pci_dev *pdev)
922 {
923 	int pos;
924 	u16 reg16;
925 
926 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
927 	if (!pos)
928 		return;
929 	pdev->is_pcie = 1;
930 	pdev->pcie_cap = pos;
931 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
932 	pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
933 	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
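	/*
	 * pcie_mpss is the 3-bit Max_Payload_Size Supported encoding from
	 * DEVCAP: payload bytes = 128 << pcie_mpss (0 = 128B up to
	 * 5 = 4096B).
	 */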
934 	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
935 }
936 
937 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
938 {
939 	int pos;
940 	u16 reg16;
941 	u32 reg32;
942 
943 	pos = pci_pcie_cap(pdev);
944 	if (!pos)
945 		return;
946 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
947 	if (!(reg16 & PCI_EXP_FLAGS_SLOT))
948 		return;
949 	pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &reg32);
950 	if (reg32 & PCI_EXP_SLTCAP_HPC)
951 		pdev->is_hotplug_bridge = 1;
952 }
953 
954 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
955 
956 /**
957  * pci_setup_device - fill in class and map information of a device
958  * @dev: the device structure to fill
959  *
960  * Initialize the device structure with information about the device's
961  * vendor, class, memory and IO-space addresses, IRQ lines, etc.
962  * Called at initialisation of the PCI subsystem and by CardBus services.
963  * Returns 0 on success and a negative value for an unknown type of device
964  * (not normal, bridge or CardBus).
965  */
966 int pci_setup_device(struct pci_dev *dev)
967 {
968 	u32 class;
969 	u8 hdr_type;
970 	struct pci_slot *slot;
971 	int pos = 0;
972 	struct pci_bus_region region;
973 	struct resource *res;
974 
975 	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
976 		return -EIO;
977 
978 	dev->sysdata = dev->bus->sysdata;
979 	dev->dev.parent = dev->bus->bridge;
980 	dev->dev.bus = &pci_bus_type;
981 	dev->hdr_type = hdr_type & 0x7f;
982 	dev->multifunction = !!(hdr_type & 0x80);
983 	dev->error_state = pci_channel_io_normal;
984 	set_pcie_port_type(dev);
985 
986 	list_for_each_entry(slot, &dev->bus->slots, list)
987 		if (PCI_SLOT(dev->devfn) == slot->number)
988 			dev->slot = slot;
989 
990 	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
991 	   set this higher, assuming the system even supports it.  */
992 	dev->dma_mask = 0xffffffff;
993 
994 	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
995 		     dev->bus->number, PCI_SLOT(dev->devfn),
996 		     PCI_FUNC(dev->devfn));
997 
998 	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
999 	dev->revision = class & 0xff;
1000 	dev->class = class >> 8;		    /* upper 3 bytes */
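	/*
	 * For example, a PCI_CLASS_REVISION value of 0x06040001 yields
	 * revision 0x01 and class 0x060400, i.e. a PCI-to-PCI bridge
	 * (base class 0x06, subclass 0x04, prog-if 0x00).
	 */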
1001 
1002 	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1003 		   dev->vendor, dev->device, dev->hdr_type, dev->class);
1004 
1005 	/* need to have dev->class ready */
1006 	dev->cfg_size = pci_cfg_space_size(dev);
1007 
1008 	/* "Unknown power state" */
1009 	dev->current_state = PCI_UNKNOWN;
1010 
1011 	/* Early fixups, before probing the BARs */
1012 	pci_fixup_device(pci_fixup_early, dev);
1013 	/* device class may be changed after fixup */
1014 	class = dev->class >> 8;
1015 
1016 	switch (dev->hdr_type) {		    /* header type */
1017 	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
1018 		if (class == PCI_CLASS_BRIDGE_PCI)
1019 			goto bad;
1020 		pci_read_irq(dev);
1021 		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1022 		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1023 		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1024 
1025 		/*
1026 		 *	Do the ugly legacy mode stuff here rather than broken chip
1027 		 *	quirk code. Legacy mode ATA controllers have fixed
1028 		 *	addresses. These are not always echoed in BAR0-3, and
1029 		 *	BAR0-3 in a few cases contain junk!
1030 		 */
1031 		if (class == PCI_CLASS_STORAGE_IDE) {
1032 			u8 progif;
1033 			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1034 			if ((progif & 1) == 0) {
1035 				region.start = 0x1F0;
1036 				region.end = 0x1F7;
1037 				res = &dev->resource[0];
1038 				res->flags = LEGACY_IO_RESOURCE;
1039 				pcibios_bus_to_resource(dev, res, &region);
1040 				region.start = 0x3F6;
1041 				region.end = 0x3F6;
1042 				res = &dev->resource[1];
1043 				res->flags = LEGACY_IO_RESOURCE;
1044 				pcibios_bus_to_resource(dev, res, &region);
1045 			}
1046 			if ((progif & 4) == 0) {
1047 				region.start = 0x170;
1048 				region.end = 0x177;
1049 				res = &dev->resource[2];
1050 				res->flags = LEGACY_IO_RESOURCE;
1051 				pcibios_bus_to_resource(dev, res, &region);
1052 				region.start = 0x376;
1053 				region.end = 0x376;
1054 				res = &dev->resource[3];
1055 				res->flags = LEGACY_IO_RESOURCE;
1056 				pcibios_bus_to_resource(dev, res, &region);
1057 			}
1058 		}
1059 		break;
1060 
1061 	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
1062 		if (class != PCI_CLASS_BRIDGE_PCI)
1063 			goto bad;
1064 		/* The PCI-to-PCI bridge spec requires that a subtractive
1065 		   decoding (i.e. transparent) bridge have a programming
1066 		   interface code of 0x01. */
1067 		pci_read_irq(dev);
1068 		dev->transparent = ((dev->class & 0xff) == 1);
1069 		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1070 		set_pcie_hotplug_bridge(dev);
1071 		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1072 		if (pos) {
1073 			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1074 			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1075 		}
1076 		break;
1077 
1078 	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
1079 		if (class != PCI_CLASS_BRIDGE_CARDBUS)
1080 			goto bad;
1081 		pci_read_irq(dev);
1082 		pci_read_bases(dev, 1, 0);
1083 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1084 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1085 		break;
1086 
1087 	default:				    /* unknown header */
1088 		dev_err(&dev->dev, "unknown header type %02x, "
1089 			"ignoring device\n", dev->hdr_type);
1090 		return -EIO;
1091 
1092 	bad:
1093 		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
1094 			"type %02x)\n", dev->class, dev->hdr_type);
1095 		dev->class = PCI_CLASS_NOT_DEFINED;
1096 	}
1097 
1098 	/* We found a fine healthy device, go go go... */
1099 	return 0;
1100 }
1101 
1102 static void pci_release_capabilities(struct pci_dev *dev)
1103 {
1104 	pci_vpd_release(dev);
1105 	pci_iov_release(dev);
1106 	pci_free_cap_save_buffers(dev);
1107 }
1108 
1109 /**
1110  * pci_release_dev - free a pci device structure when all users of it are finished.
1111  * @dev: device that's been disconnected
1112  *
1113  * Will be called only by the device core when all users of this pci device are
1114  * done.
1115  */
1116 static void pci_release_dev(struct device *dev)
1117 {
1118 	struct pci_dev *pci_dev;
1119 
1120 	pci_dev = to_pci_dev(dev);
1121 	pci_release_capabilities(pci_dev);
1122 	pci_release_of_node(pci_dev);
1123 	kfree(pci_dev);
1124 }
1125 
1126 /**
1127  * pci_cfg_space_size_ext - get the configuration space size of the PCI device.
1128  * @dev: PCI device
1129  *
1130  * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1131  * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
1132  * access it.  Maybe we don't have a way to generate extended config space
1133  * accesses, or the device is behind a reverse Express bridge.  So we try
1134  * reading the dword at 0x100 which must either be 0 or a valid extended
1135  * capability header.
1136  */
1137 int pci_cfg_space_size_ext(struct pci_dev *dev)
1138 {
1139 	u32 status;
1140 	int pos = PCI_CFG_SPACE_SIZE;
1141 
1142 	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1143 		goto fail;
1144 	if (status == 0xffffffff)
1145 		goto fail;
1146 
1147 	return PCI_CFG_SPACE_EXP_SIZE;
1148 
1149  fail:
1150 	return PCI_CFG_SPACE_SIZE;
1151 }
1152 
1153 int pci_cfg_space_size(struct pci_dev *dev)
1154 {
1155 	int pos;
1156 	u32 status;
1157 	u16 class;
1158 
1159 	class = dev->class >> 8;
1160 	if (class == PCI_CLASS_BRIDGE_HOST)
1161 		return pci_cfg_space_size_ext(dev);
1162 
1163 	pos = pci_pcie_cap(dev);
1164 	if (!pos) {
1165 		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1166 		if (!pos)
1167 			goto fail;
1168 
1169 		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1170 		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1171 			goto fail;
1172 	}
1173 
1174 	return pci_cfg_space_size_ext(dev);
1175 
1176  fail:
1177 	return PCI_CFG_SPACE_SIZE;
1178 }
1179 
1180 static void pci_release_bus_bridge_dev(struct device *dev)
1181 {
1182 	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
1183 
1184 	if (bridge->release_fn)
1185 		bridge->release_fn(bridge);
1186 
1187 	pci_free_resource_list(&bridge->windows);
1188 
1189 	kfree(bridge);
1190 }
1191 
1192 struct pci_dev *alloc_pci_dev(void)
1193 {
1194 	struct pci_dev *dev;
1195 
1196 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1197 	if (!dev)
1198 		return NULL;
1199 
1200 	INIT_LIST_HEAD(&dev->bus_list);
1201 
1202 	return dev;
1203 }
1204 EXPORT_SYMBOL(alloc_pci_dev);
1205 
1206 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1207 				 int crs_timeout)
1208 {
1209 	int delay = 1;
1210 
1211 	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1212 		return false;
1213 
1214 	/* some broken boards return 0 or ~0 if a slot is empty: */
1215 	if (*l == 0xffffffff || *l == 0x00000000 ||
1216 	    *l == 0x0000ffff || *l == 0xffff0000)
1217 		return false;
1218 
1219 	/* Configuration request Retry Status */
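	/*
	 * With CRS Software Visibility enabled, a retried Vendor ID read
	 * returns the reserved value 0x0001, so the whole dword reads back
	 * as 0xffff0001.
	 */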
1220 	while (*l == 0xffff0001) {
1221 		if (!crs_timeout)
1222 			return false;
1223 
1224 		msleep(delay);
1225 		delay *= 2;
1226 		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1227 			return false;
1228 		/* Card hasn't responded within the timeout (60 seconds by
1229 		   default)?  Must be stuck. */
1229 		if (delay > crs_timeout) {
1230 			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
1231 					"responding\n", pci_domain_nr(bus),
1232 					bus->number, PCI_SLOT(devfn),
1233 					PCI_FUNC(devfn));
1234 			return false;
1235 		}
1236 	}
1237 
1238 	return true;
1239 }
1240 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1241 
1242 /*
1243  * Read the config data for a PCI device, sanity-check it
1244  * and fill in the dev structure...
1245  */
1246 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1247 {
1248 	struct pci_dev *dev;
1249 	u32 l;
1250 
1251 	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1252 		return NULL;
1253 
1254 	dev = alloc_pci_dev();
1255 	if (!dev)
1256 		return NULL;
1257 
1258 	dev->bus = bus;
1259 	dev->devfn = devfn;
1260 	dev->vendor = l & 0xffff;
1261 	dev->device = (l >> 16) & 0xffff;
1262 
1263 	pci_set_of_node(dev);
1264 
1265 	if (pci_setup_device(dev)) {
1266 		kfree(dev);
1267 		return NULL;
1268 	}
1269 
1270 	return dev;
1271 }
1272 
1273 static void pci_init_capabilities(struct pci_dev *dev)
1274 {
1275 	/* MSI/MSI-X list */
1276 	pci_msi_init_pci_dev(dev);
1277 
1278 	/* Buffers for saving PCIe and PCI-X capabilities */
1279 	pci_allocate_cap_save_buffers(dev);
1280 
1281 	/* Power Management */
1282 	pci_pm_init(dev);
1283 	platform_pci_wakeup_init(dev);
1284 
1285 	/* Vital Product Data */
1286 	pci_vpd_pci22_init(dev);
1287 
1288 	/* Alternative Routing-ID Forwarding */
1289 	pci_enable_ari(dev);
1290 
1291 	/* Single Root I/O Virtualization */
1292 	pci_iov_init(dev);
1293 
1294 	/* Enable ACS P2P upstream forwarding */
1295 	pci_enable_acs(dev);
1296 }
1297 
1298 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1299 {
1300 	device_initialize(&dev->dev);
1301 	dev->dev.release = pci_release_dev;
1302 	pci_dev_get(dev);
1303 
1304 	dev->dev.dma_mask = &dev->dma_mask;
1305 	dev->dev.dma_parms = &dev->dma_parms;
1306 	dev->dev.coherent_dma_mask = 0xffffffffull;
1307 
1308 	pci_set_dma_max_seg_size(dev, 65536);
1309 	pci_set_dma_seg_boundary(dev, 0xffffffff);
1310 
1311 	/* Fix up broken headers */
1312 	pci_fixup_device(pci_fixup_header, dev);
1313 
1314 	/* moved out from quirk header fixup code */
1315 	pci_reassigndev_resource_alignment(dev);
1316 
1317 	/* Clear the state_saved flag. */
1318 	dev->state_saved = false;
1319 
1320 	/* Initialize various capabilities */
1321 	pci_init_capabilities(dev);
1322 
1323 	/*
1324 	 * Add the device to our list of discovered devices
1325 	 * and the bus list for fixup functions, etc.
1326 	 */
1327 	down_write(&pci_bus_sem);
1328 	list_add_tail(&dev->bus_list, &bus->devices);
1329 	up_write(&pci_bus_sem);
1330 }
1331 
1332 struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1333 {
1334 	struct pci_dev *dev;
1335 
1336 	dev = pci_get_slot(bus, devfn);
1337 	if (dev) {
1338 		pci_dev_put(dev);
1339 		return dev;
1340 	}
1341 
1342 	dev = pci_scan_device(bus, devfn);
1343 	if (!dev)
1344 		return NULL;
1345 
1346 	pci_device_add(dev, bus);
1347 
1348 	return dev;
1349 }
1350 EXPORT_SYMBOL(pci_scan_single_device);
1351 
1352 static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn)
1353 {
1354 	u16 cap;
1355 	unsigned pos, next_fn;
1356 
1357 	if (!dev)
1358 		return 0;
1359 
1360 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1361 	if (!pos)
1362 		return 0;
1363 	pci_read_config_word(dev, pos + 4, &cap);
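	/*
	 * Bits 15:8 of the ARI Capability register hold the Next Function
	 * Number, chaining the implemented functions of an ARI device.
	 */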
1364 	next_fn = cap >> 8;
1365 	if (next_fn <= fn)
1366 		return 0;
1367 	return next_fn;
1368 }
1369 
1370 static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn)
1371 {
1372 	return (fn + 1) % 8;
1373 }
1374 
1375 static unsigned no_next_fn(struct pci_dev *dev, unsigned fn)
1376 {
1377 	return 0;
1378 }
1379 
1380 static int only_one_child(struct pci_bus *bus)
1381 {
1382 	struct pci_dev *parent = bus->self;
1383 
1384 	if (!parent || !pci_is_pcie(parent))
1385 		return 0;
1386 	if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
1387 		return 1;
1388 	if (parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM &&
1389 	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1390 		return 1;
1391 	return 0;
1392 }
1393 
1394 /**
1395  * pci_scan_slot - scan a PCI slot on a bus for devices.
1396  * @bus: PCI bus to scan
1397  * @devfn: slot number to scan (must have zero function)
1398  *
1399  * Scan a PCI slot on the specified PCI bus for devices, adding
1400  * discovered devices to the @bus->devices list.  New devices
1401  * will not have is_added set.
1402  *
1403  * Returns the number of new devices found.
1404  */
1405 int pci_scan_slot(struct pci_bus *bus, int devfn)
1406 {
1407 	unsigned fn, nr = 0;
1408 	struct pci_dev *dev;
1409 	unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn;
1410 
1411 	if (only_one_child(bus) && (devfn > 0))
1412 		return 0; /* Already scanned the entire slot */
1413 
1414 	dev = pci_scan_single_device(bus, devfn);
1415 	if (!dev)
1416 		return 0;
1417 	if (!dev->is_added)
1418 		nr++;
1419 
1420 	if (pci_ari_enabled(bus))
1421 		next_fn = next_ari_fn;
1422 	else if (dev->multifunction)
1423 		next_fn = next_trad_fn;
1424 
1425 	for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) {
1426 		dev = pci_scan_single_device(bus, devfn + fn);
1427 		if (dev) {
1428 			if (!dev->is_added)
1429 				nr++;
1430 			dev->multifunction = 1;
1431 		}
1432 	}
1433 
1434 	/* only one slot has pcie device */
1435 	if (bus->self && nr)
1436 		pcie_aspm_init_link_state(bus->self);
1437 
1438 	return nr;
1439 }
1440 
1441 static int pcie_find_smpss(struct pci_dev *dev, void *data)
1442 {
1443 	u8 *smpss = data;
1444 
1445 	if (!pci_is_pcie(dev))
1446 		return 0;
1447 
1448 	/* For PCIE hotplug enabled slots not connected directly to a
1449 	 * PCI-E root port, there can be problems when hotplugging
1450 	 * devices.  This is due to the possibility of hotplugging a
1451 	 * device into the fabric with a smaller MPS than the devices
1452 	 * currently running have configured.  Modifying the MPS on the
1453 	 * running devices could cause a fatal bus error due to an
1454 	 * incoming frame being larger than the newly configured MPS.
1455 	 * To work around this, the MPS for the entire fabric must be
1456 	 * set to the minimum size.  Any devices hotplugged into this
1457 	 * fabric will have the minimum MPS set.  If the PCI hotplug
1458 	 * slot is directly connected to the root port and there are no
1459 	 * other devices on the fabric (which seems to be the most
1460 	 * common case), then this is not an issue and MPS discovery
1461 	 * will occur as normal.
1462 	 */
1463 	if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
1464 	     (dev->bus->self &&
1465 	      dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)))
1466 		*smpss = 0;
1467 
1468 	if (*smpss > dev->pcie_mpss)
1469 		*smpss = dev->pcie_mpss;
1470 
1471 	return 0;
1472 }
1473 
1474 static void pcie_write_mps(struct pci_dev *dev, int mps)
1475 {
1476 	int rc;
1477 
1478 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1479 		mps = 128 << dev->pcie_mpss;
1480 
1481 		if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && dev->bus->self)
1482 			/* For "Performance", the assumption is made that
1483 			 * downstream communication will never be larger than
1484 			 * the MRRS.  So, the MPS only needs to be configured
1485 			 * for the upstream communication.  This being the case,
1486 			 * walk from the top down and set the MPS of the child
1487 			 * to that of the parent bus.
1488 			 *
1489 			 * Configure the device MPS with the smaller of the
1490 			 * device MPSS or the bridge MPS (which is assumed to be
1491 			 * properly configured at this point to the largest
1492 			 * allowable MPS based on its parent bus).
1493 			 */
1494 			mps = min(mps, pcie_get_mps(dev->bus->self));
1495 	}
1496 
1497 	rc = pcie_set_mps(dev, mps);
1498 	if (rc)
1499 		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1500 }
1501 
1502 static void pcie_write_mrrs(struct pci_dev *dev)
1503 {
1504 	int rc, mrrs;
1505 
1506 	/* In the "safe" case, do not configure the MRRS.  There appear to be
1507 	 * issues with setting MRRS to 0 on a number of devices.
1508 	 */
1509 	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1510 		return;
1511 
1512 	/* For Max performance, the MRRS must be set to the largest supported
1513 	 * value.  However, it cannot be configured larger than the MPS the
1514 	 * device or the bus can support.  This should already be properly
1515 	 * configured by a prior call to pcie_write_mps.
1516 	 */
1517 	mrrs = pcie_get_mps(dev);
1518 
1519 	/* MRRS is an R/W register.  Invalid values can be written, but a
1520 	 * subsequent read will verify if the value is acceptable or not.
1521 	 * If the MRRS value provided is not acceptable (e.g., too large),
1522 	 * shrink the value until it is acceptable to the HW.
1523 	 */
1524 	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1525 		rc = pcie_set_readrq(dev, mrrs);
1526 		if (!rc)
1527 			break;
1528 
1529 		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
1530 		mrrs /= 2;
1531 	}
1532 
1533 	if (mrrs < 128)
1534 		dev_err(&dev->dev, "MRRS was unable to be configured with a "
1535 			"safe value.  If problems are experienced, try running "
1536 			"with pci=pcie_bus_safe.\n");
1537 }
1538 
1539 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1540 {
1541 	int mps, orig_mps;
1542 
1543 	if (!pci_is_pcie(dev))
1544 		return 0;
1545 
1546 	mps = 128 << *(u8 *)data;
1547 	orig_mps = pcie_get_mps(dev);
1548 
1549 	pcie_write_mps(dev, mps);
1550 	pcie_write_mrrs(dev);
1551 
1552 	dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), "
1553 		 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
1554 		 orig_mps, pcie_get_readrq(dev));
1555 
1556 	return 0;
1557 }
1558 
1559 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1560  * parents then children fashion.  If this changes, then this code will not
1561  * work as designed.
1562  */
1563 void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
1564 {
1565 	u8 smpss;
1566 
1567 	if (!pci_is_pcie(bus->self))
1568 		return;
1569 
1570 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
1571 		return;
1572 
1573 	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
1574 	 * to be aware of the MPS of the destination.  To work around this,
1575 	 * simply force the MPS of the entire system to the smallest possible.
1576 	 */
1577 	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1578 		smpss = 0;
1579 
1580 	if (pcie_bus_config == PCIE_BUS_SAFE) {
1581 		smpss = mpss;
1582 
1583 		pcie_find_smpss(bus->self, &smpss);
1584 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
1585 	}
1586 
1587 	pcie_bus_configure_set(bus->self, &smpss);
1588 	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1589 }
1590 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1591 
1592 unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1593 {
1594 	unsigned int devfn, pass, max = bus->busn_res.start;
1595 	struct pci_dev *dev;
1596 
1597 	dev_dbg(&bus->dev, "scanning bus\n");
1598 
1599 	/* Go find them, Rover! */
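	/*
	 * devfn encodes device (bits 7:3) and function (bits 2:0), so
	 * stepping by 8 probes function 0 of each of the 32 possible
	 * devices; pci_scan_slot() walks the remaining functions.
	 */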
1600 	for (devfn = 0; devfn < 0x100; devfn += 8)
1601 		pci_scan_slot(bus, devfn);
1602 
1603 	/* Reserve buses for SR-IOV capability. */
1604 	max += pci_iov_bus_range(bus);
1605 
1606 	/*
1607 	 * After performing arch-dependent fixup of the bus, look behind
1608 	 * all PCI-to-PCI bridges on this bus.
1609 	 */
1610 	if (!bus->is_added) {
1611 		dev_dbg(&bus->dev, "fixups for bus\n");
1612 		pcibios_fixup_bus(bus);
1613 		if (pci_is_root_bus(bus))
1614 			bus->is_added = 1;
1615 	}
1616 
1617 	for (pass = 0; pass < 2; pass++)
1618 		list_for_each_entry(dev, &bus->devices, bus_list) {
1619 			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1620 			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1621 				max = pci_scan_bridge(bus, dev, max, pass);
1622 		}
1623 
1624 	/*
1625 	 * We've scanned the bus and so we know all about what's on
1626 	 * the other side of any bridges that may be on this bus plus
1627 	 * any devices.
1628 	 *
1629 	 * Return how far we've got finding sub-buses.
1630 	 */
1631 	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1632 	return max;
1633 }
1634 
1635 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1636 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
1637 {
1638 	int error;
1639 	struct pci_host_bridge *bridge;
1640 	struct pci_bus *b, *b2;
1641 	struct pci_host_bridge_window *window, *n;
1642 	struct resource *res;
1643 	resource_size_t offset;
1644 	char bus_addr[64];
1645 	char *fmt;
1646 
1647 
1648 	b = pci_alloc_bus();
1649 	if (!b)
1650 		return NULL;
1651 
1652 	b->sysdata = sysdata;
1653 	b->ops = ops;
1654 	b2 = pci_find_bus(pci_domain_nr(b), bus);
1655 	if (b2) {
1656 		/* If we already got to this bus through a different bridge, ignore it */
1657 		dev_dbg(&b2->dev, "bus already known\n");
1658 		goto err_out;
1659 	}
1660 
1661 	bridge = pci_alloc_host_bridge(b);
1662 	if (!bridge)
1663 		goto err_out;
1664 
1665 	bridge->dev.parent = parent;
1666 	bridge->dev.release = pci_release_bus_bridge_dev;
1667 	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
1668 	error = device_register(&bridge->dev);
1669 	if (error)
1670 		goto bridge_dev_reg_err;
1671 	b->bridge = get_device(&bridge->dev);
1672 	device_enable_async_suspend(b->bridge);
1673 	pci_set_bus_of_node(b);
1674 
1675 	if (!parent)
1676 		set_dev_node(b->bridge, pcibus_to_node(b));
1677 
1678 	b->dev.class = &pcibus_class;
1679 	b->dev.parent = b->bridge;
1680 	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
1681 	error = device_register(&b->dev);
1682 	if (error)
1683 		goto class_dev_reg_err;
1684 
1685 	/* Create legacy_io and legacy_mem files for this bus */
1686 	pci_create_legacy_files(b);
1687 
1688 	b->number = b->busn_res.start = bus;
1689 
1690 	if (parent)
1691 		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
1692 	else
1693 		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
1694 
1695 	/* Add initial resources to the bus */
1696 	list_for_each_entry_safe(window, n, resources, list) {
1697 		list_move_tail(&window->list, &bridge->windows);
1698 		res = window->res;
1699 		offset = window->offset;
1700 		if (res->flags & IORESOURCE_BUS)
1701 			pci_bus_insert_busn_res(b, bus, res->end);
1702 		else
1703 			pci_bus_add_resource(b, res, 0);
1704 		if (offset) {
1705 			if (resource_type(res) == IORESOURCE_IO)
1706 				fmt = " (bus address [%#06llx-%#06llx])";
1707 			else
1708 				fmt = " (bus address [%#010llx-%#010llx])";
1709 			snprintf(bus_addr, sizeof(bus_addr), fmt,
1710 				 (unsigned long long) (res->start - offset),
1711 				 (unsigned long long) (res->end - offset));
1712 		} else
1713 			bus_addr[0] = '\0';
1714 		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
1715 	}
1716 
1717 	down_write(&pci_bus_sem);
1718 	list_add_tail(&b->node, &pci_root_buses);
1719 	up_write(&pci_bus_sem);
1720 
1721 	return b;
1722 
1723 class_dev_reg_err:
1724 	put_device(&bridge->dev);
1725 	device_unregister(&bridge->dev);
1726 bridge_dev_reg_err:
1727 	kfree(bridge);
1728 err_out:
1729 	kfree(b);
1730 	return NULL;
1731 }
1732 
1733 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
1734 {
1735 	struct resource *res = &b->busn_res;
1736 	struct resource *parent_res, *conflict;
1737 
1738 	res->start = bus;
1739 	res->end = bus_max;
1740 	res->flags = IORESOURCE_BUS;
1741 
1742 	if (!pci_is_root_bus(b))
1743 		parent_res = &b->parent->busn_res;
1744 	else {
1745 		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
1746 		res->flags |= IORESOURCE_PCI_FIXED;
1747 	}
1748 
1749 	conflict = insert_resource_conflict(parent_res, res);
1750 
1751 	if (conflict)
1752 		dev_printk(KERN_DEBUG, &b->dev,
1753 			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
1754 			    res, pci_is_root_bus(b) ? "domain " : "",
1755 			    parent_res, conflict->name, conflict);
1756 	else
1757 		dev_printk(KERN_DEBUG, &b->dev,
1758 			   "busn_res: %pR is inserted under %s%pR\n",
1759 			   res, pci_is_root_bus(b) ? "domain " : "",
1760 			   parent_res);
1761 
1762 	return conflict == NULL;
1763 }
1764 
1765 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
1766 {
1767 	struct resource *res = &b->busn_res;
1768 	struct resource old_res = *res;
1769 	resource_size_t size;
1770 	int ret;
1771 
1772 	if (res->start > bus_max)
1773 		return -EINVAL;
1774 
1775 	size = bus_max - res->start + 1;
1776 	ret = adjust_resource(res, res->start, size);
1777 	dev_printk(KERN_DEBUG, &b->dev,
1778 			"busn_res: %pR end %s updated to %02x\n",
1779 			&old_res, ret ? "can not be" : "is", bus_max);
1780 
1781 	if (!ret && !res->parent)
1782 		pci_bus_insert_busn_res(b, res->start, res->end);
1783 
1784 	return ret;
1785 }
1786 
1787 void pci_bus_release_busn_res(struct pci_bus *b)
1788 {
1789 	struct resource *res = &b->busn_res;
1790 	int ret;
1791 
1792 	if (!res->flags || !res->parent)
1793 		return;
1794 
1795 	ret = release_resource(res);
1796 	dev_printk(KERN_DEBUG, &b->dev,
1797 			"busn_res: %pR %s released\n",
1798 			res, ret ? "can not be" : "is");
1799 }
1800 
1801 struct pci_bus * __devinit pci_scan_root_bus(struct device *parent, int bus,
1802 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
1803 {
1804 	struct pci_host_bridge_window *window;
1805 	bool found = false;
1806 	struct pci_bus *b;
1807 	int max;
1808 
1809 	list_for_each_entry(window, resources, list)
1810 		if (window->res->flags & IORESOURCE_BUS) {
1811 			found = true;
1812 			break;
1813 		}
1814 
1815 	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
1816 	if (!b)
1817 		return NULL;
1818 
1819 	if (!found) {
1820 		dev_info(&b->dev,
1821 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
1822 			bus);
1823 		pci_bus_insert_busn_res(b, bus, 255);
1824 	}
1825 
1826 	max = pci_scan_child_bus(b);
1827 
1828 	if (!found)
1829 		pci_bus_update_busn_res_end(b, max);
1830 
1831 	pci_bus_add_devices(b);
1832 	return b;
1833 }
1834 EXPORT_SYMBOL(pci_scan_root_bus);
1835 
1836 /* Deprecated; use pci_scan_root_bus() instead */
1837 struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
1838 		int bus, struct pci_ops *ops, void *sysdata)
1839 {
1840 	LIST_HEAD(resources);
1841 	struct pci_bus *b;
1842 
1843 	pci_add_resource(&resources, &ioport_resource);
1844 	pci_add_resource(&resources, &iomem_resource);
1845 	pci_add_resource(&resources, &busn_resource);
1846 	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
1847 	if (b)
1848 		pci_scan_child_bus(b);
1849 	else
1850 		pci_free_resource_list(&resources);
1851 	return b;
1852 }
1853 EXPORT_SYMBOL(pci_scan_bus_parented);
1854 
1855 struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops,
1856 					void *sysdata)
1857 {
1858 	LIST_HEAD(resources);
1859 	struct pci_bus *b;
1860 
1861 	pci_add_resource(&resources, &ioport_resource);
1862 	pci_add_resource(&resources, &iomem_resource);
1863 	pci_add_resource(&resources, &busn_resource);
1864 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
1865 	if (b) {
1866 		pci_scan_child_bus(b);
1867 		pci_bus_add_devices(b);
1868 	} else {
1869 		pci_free_resource_list(&resources);
1870 	}
1871 	return b;
1872 }
1873 EXPORT_SYMBOL(pci_scan_bus);
1874 
1875 #ifdef CONFIG_HOTPLUG
1876 /**
1877  * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
1878  * @bridge: PCI bridge for the bus to scan
1879  *
1880  * Scan a PCI bus and child buses for new devices, add them,
1881  * and enable them, resizing bridge mmio/io resource if necessary
1882  * and possible.  The caller must ensure the child devices are already
1883  * removed for resizing to occur.
1884  *
1885  * Returns the max number of subordinate buses discovered.
1886  */
1887 unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
1888 {
1889 	unsigned int max;
1890 	struct pci_bus *bus = bridge->subordinate;
1891 
1892 	max = pci_scan_child_bus(bus);
1893 
1894 	pci_assign_unassigned_bridge_resources(bridge);
1895 
1896 	pci_bus_add_devices(bus);
1897 
1898 	return max;
1899 }
1900 
1901 EXPORT_SYMBOL(pci_add_new_bus);
1902 EXPORT_SYMBOL(pci_scan_slot);
1903 EXPORT_SYMBOL(pci_scan_bridge);
1904 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1905 #endif
1906 
1907 static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
1908 {
1909 	const struct pci_dev *a = to_pci_dev(d_a);
1910 	const struct pci_dev *b = to_pci_dev(d_b);
1911 
1912 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
1913 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
1914 
1915 	if      (a->bus->number < b->bus->number) return -1;
1916 	else if (a->bus->number > b->bus->number) return  1;
1917 
1918 	if      (a->devfn < b->devfn) return -1;
1919 	else if (a->devfn > b->devfn) return  1;
1920 
1921 	return 0;
1922 }
1923 
1924 void __init pci_sort_breadthfirst(void)
1925 {
1926 	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
1927 }
1928