xref: /linux/drivers/pci/probe.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
1 /*
2  * probe.c - PCI detection and setup code
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include "pci.h"
13 
14 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
15 #define CARDBUS_RESERVE_BUSNR	3
16 #define PCI_CFG_SPACE_SIZE	256
17 #define PCI_CFG_SPACE_EXP_SIZE	4096
18 
19 /* Ugh.  Need to stop exporting this to modules. */
20 LIST_HEAD(pci_root_buses);
21 EXPORT_SYMBOL(pci_root_buses);
22 
23 LIST_HEAD(pci_devices);
24 
25 #ifdef HAVE_PCI_LEGACY
26 /**
27  * pci_create_legacy_files - create legacy I/O port and memory files
28  * @b: bus to create files under
29  *
30  * Some platforms allow access to legacy I/O port and ISA memory space on
31  * a per-bus basis.  This routine creates the files and ties them into
32  * their associated read, write and mmap functions from pci-sysfs.c.
33  */
34 static void pci_create_legacy_files(struct pci_bus *b)
35 {
36 	b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
37 			       GFP_ATOMIC);
38 	if (b->legacy_io) {
39 		b->legacy_io->attr.name = "legacy_io";
40 		b->legacy_io->size = 0xffff;
41 		b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
42 		b->legacy_io->attr.owner = THIS_MODULE;
43 		b->legacy_io->read = pci_read_legacy_io;
44 		b->legacy_io->write = pci_write_legacy_io;
45 		class_device_create_bin_file(&b->class_dev, b->legacy_io);
46 
47 		/* Allocated above after the legacy_io struct */
48 		b->legacy_mem = b->legacy_io + 1;
49 		b->legacy_mem->attr.name = "legacy_mem";
50 		b->legacy_mem->size = 1024*1024;
51 		b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
52 		b->legacy_mem->attr.owner = THIS_MODULE;
53 		b->legacy_mem->mmap = pci_mmap_legacy_mem;
54 		class_device_create_bin_file(&b->class_dev, b->legacy_mem);
55 	}
56 }
57 
58 void pci_remove_legacy_files(struct pci_bus *b)
59 {
60 	if (b->legacy_io) {
61 		class_device_remove_bin_file(&b->class_dev, b->legacy_io);
62 		class_device_remove_bin_file(&b->class_dev, b->legacy_mem);
63 		kfree(b->legacy_io); /* both are allocated here */
64 	}
65 }
66 #else /* !HAVE_PCI_LEGACY */
67 static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
68 void pci_remove_legacy_files(struct pci_bus *bus) { return; }
69 #endif /* HAVE_PCI_LEGACY */
70 
71 /*
72  * PCI Bus Class Devices
73  */
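/*
 * sysfs show routine for the per-bus "cpuaffinity" attribute: print the
 * mask of CPUs that are local to this bus.
 */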
74 static ssize_t pci_bus_show_cpuaffinity(struct class_device *class_dev,
75 					char *buf)
76 {
77 	int ret;
78 	cpumask_t cpumask;
79 
80 	cpumask = pcibus_to_cpumask(to_pci_bus(class_dev));
81 	ret = cpumask_scnprintf(buf, PAGE_SIZE, cpumask);
82 	if (ret < PAGE_SIZE)
83 		buf[ret++] = '\n';
84 	return ret;
85 }
86 CLASS_DEVICE_ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpuaffinity, NULL);
87 
88 /*
89  * PCI Bus Class
90  */
91 static void release_pcibus_dev(struct class_device *class_dev)
92 {
93 	struct pci_bus *pci_bus = to_pci_bus(class_dev);
94 
95 	if (pci_bus->bridge)
96 		put_device(pci_bus->bridge);
97 	kfree(pci_bus);
98 }
99 
100 static struct class pcibus_class = {
101 	.name		= "pci_bus",
102 	.release	= &release_pcibus_dev,
103 };
104 
105 static int __init pcibus_class_init(void)
106 {
107 	return class_register(&pcibus_class);
108 }
109 postcore_initcall(pcibus_class_init);
110 
111 /*
112  * Translate the low bits of the PCI base
113  * to the resource type
114  */
115 static inline unsigned int pci_calc_resource_flags(unsigned int flags)
116 {
117 	if (flags & PCI_BASE_ADDRESS_SPACE_IO)
118 		return IORESOURCE_IO;
119 
120 	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
121 		return IORESOURCE_MEM | IORESOURCE_PREFETCH;
122 
123 	return IORESOURCE_MEM;
124 }
125 
126 /*
127  * Find the extent of a PCI decode..
128  */
129 static u32 pci_size(u32 base, u32 maxbase, u32 mask)
130 {
131 	u32 size = mask & maxbase;	/* Find the significant bits */
132 	if (!size)
133 		return 0;
134 
135 	/* Get the lowest of them to find the decode size, and
136 	   from that the extent.  */
137 	size = (size & ~(size-1)) - 1;
138 
139 	/* base == maxbase can be valid only if the BAR has
140 	   already been programmed with all 1s.  */
141 	if (base == maxbase && ((base | size) & mask) != mask)
142 		return 0;
143 
144 	return size;
145 }
146 
147 static u64 pci_size64(u64 base, u64 maxbase, u64 mask)
148 {
149 	u64 size = mask & maxbase;	/* Find the significant bits */
150 	if (!size)
151 		return 0;
152 
153 	/* Get the lowest of them to find the decode size, and
154 	   from that the extent.  */
155 	size = (size & ~(size-1)) - 1;
156 
157 	/* base == maxbase can be valid only if the BAR has
158 	   already been programmed with all 1s.  */
159 	if (base == maxbase && ((base | size) & mask) != mask)
160 		return 0;
161 
162 	return size;
163 }
164 
165 static inline int is_64bit_memory(u32 mask)
166 {
167 	if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
168 	    (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64))
169 		return 1;
170 	return 0;
171 }
172 
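/*
 * Size each BAR using the standard probe sequence: save the original
 * value, write all 1s, read back the size mask, then restore the
 * original value.  A 64-bit memory BAR consumes two consecutive
 * registers, and the optional expansion ROM BAR is sized the same way
 * with its enable bit masked off.
 */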
173 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
174 {
175 	unsigned int pos, reg, next;
176 	u32 l, sz;
177 	struct resource *res;
178 
179 	for(pos=0; pos<howmany; pos = next) {
180 		u64 l64;
181 		u64 sz64;
182 		u32 raw_sz;
183 
184 		next = pos+1;
185 		res = &dev->resource[pos];
186 		res->name = pci_name(dev);
187 		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
188 		pci_read_config_dword(dev, reg, &l);
189 		pci_write_config_dword(dev, reg, ~0);
190 		pci_read_config_dword(dev, reg, &sz);
191 		pci_write_config_dword(dev, reg, l);
192 		if (!sz || sz == 0xffffffff)
193 			continue;
194 		if (l == 0xffffffff)
195 			l = 0;
196 		raw_sz = sz;
197 		if ((l & PCI_BASE_ADDRESS_SPACE) ==
198 				PCI_BASE_ADDRESS_SPACE_MEMORY) {
199 			sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
200 			/*
201 			 * For 64-bit prefetchable memory, sz can be 0 if the
202 			 * real size is bigger than 4GB, so we need to check
203 			 * szhi for that.
204 			 */
205 			if (!is_64bit_memory(l) && !sz)
206 				continue;
207 			res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
208 			res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
209 		} else {
210 			sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
211 			if (!sz)
212 				continue;
213 			res->start = l & PCI_BASE_ADDRESS_IO_MASK;
214 			res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
215 		}
216 		res->end = res->start + (unsigned long) sz;
217 		res->flags |= pci_calc_resource_flags(l);
218 		if (is_64bit_memory(l)) {
219 			u32 szhi, lhi;
220 
221 			pci_read_config_dword(dev, reg+4, &lhi);
222 			pci_write_config_dword(dev, reg+4, ~0);
223 			pci_read_config_dword(dev, reg+4, &szhi);
224 			pci_write_config_dword(dev, reg+4, lhi);
225 			sz64 = ((u64)szhi << 32) | raw_sz;
226 			l64 = ((u64)lhi << 32) | l;
227 			sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
228 			next++;
229 #if BITS_PER_LONG == 64
230 			if (!sz64) {
231 				res->start = 0;
232 				res->end = 0;
233 				res->flags = 0;
234 				continue;
235 			}
236 			res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK;
237 			res->end = res->start + sz64;
238 #else
239 			if (sz64 > 0x100000000ULL) {
240 				printk(KERN_ERR "PCI: Unable to handle 64-bit "
241 					"BAR for device %s\n", pci_name(dev));
242 				res->start = 0;
243 				res->flags = 0;
244 			} else if (lhi) {
245 				/* 64-bit wide address, treat as disabled */
246 				pci_write_config_dword(dev, reg,
247 					l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK);
248 				pci_write_config_dword(dev, reg+4, 0);
249 				res->start = 0;
250 				res->end = sz;
251 			}
252 #endif
253 		}
254 	}
255 	if (rom) {
256 		dev->rom_base_reg = rom;
257 		res = &dev->resource[PCI_ROM_RESOURCE];
258 		res->name = pci_name(dev);
259 		pci_read_config_dword(dev, rom, &l);
260 		pci_write_config_dword(dev, rom, ~PCI_ROM_ADDRESS_ENABLE);
261 		pci_read_config_dword(dev, rom, &sz);
262 		pci_write_config_dword(dev, rom, l);
263 		if (l == 0xffffffff)
264 			l = 0;
265 		if (sz && sz != 0xffffffff) {
266 			sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);
267 			if (sz) {
268 				res->flags = (l & IORESOURCE_ROM_ENABLE) |
269 				  IORESOURCE_MEM | IORESOURCE_PREFETCH |
270 				  IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
271 				res->start = l & PCI_ROM_ADDRESS_MASK;
272 				res->end = res->start + (unsigned long) sz;
273 			}
274 		}
275 	}
276 }
277 
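/*
 * Read the I/O, memory and prefetchable memory windows of a PCI-to-PCI
 * bridge and set up the corresponding resources on its secondary bus.
 * Transparent bridges additionally inherit their parent's resources.
 */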
278 void __devinit pci_read_bridge_bases(struct pci_bus *child)
279 {
280 	struct pci_dev *dev = child->self;
281 	u8 io_base_lo, io_limit_lo;
282 	u16 mem_base_lo, mem_limit_lo;
283 	unsigned long base, limit;
284 	struct resource *res;
285 	int i;
286 
287 	if (!dev)		/* It's a host bus, nothing to read */
288 		return;
289 
290 	if (dev->transparent) {
291 		printk(KERN_INFO "PCI: Transparent bridge - %s\n", pci_name(dev));
292 		for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
293 			child->resource[i] = child->parent->resource[i - 3];
294 	}
295 
296 	for(i=0; i<3; i++)
297 		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
298 
299 	res = child->resource[0];
300 	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
301 	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
302 	base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
303 	limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;
304 
305 	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
306 		u16 io_base_hi, io_limit_hi;
307 		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
308 		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
309 		base |= (io_base_hi << 16);
310 		limit |= (io_limit_hi << 16);
311 	}
312 
313 	if (base <= limit) {
314 		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
315 		if (!res->start)
316 			res->start = base;
317 		if (!res->end)
318 			res->end = limit + 0xfff;
319 	}
320 
321 	res = child->resource[1];
322 	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
323 	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
324 	base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
325 	limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
326 	if (base <= limit) {
327 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
328 		res->start = base;
329 		res->end = limit + 0xfffff;
330 	}
331 
332 	res = child->resource[2];
333 	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
334 	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
335 	base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
336 	limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
337 
338 	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
339 		u32 mem_base_hi, mem_limit_hi;
340 		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
341 		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
342 
343 		/*
344 		 * Some bridges set the base > limit by default, and some
345 		 * (broken) BIOSes do not initialize them.  If we find
346 		 * this, just assume they are not being used.
347 		 */
348 		if (mem_base_hi <= mem_limit_hi) {
349 #if BITS_PER_LONG == 64
350 			base |= ((long) mem_base_hi) << 32;
351 			limit |= ((long) mem_limit_hi) << 32;
352 #else
353 			if (mem_base_hi || mem_limit_hi) {
354 				printk(KERN_ERR "PCI: Unable to handle 64-bit address space for bridge %s\n", pci_name(dev));
355 				return;
356 			}
357 #endif
358 		}
359 	}
360 	if (base <= limit) {
361 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
362 		res->start = base;
363 		res->end = limit + 0xfffff;
364 	}
365 }
366 
367 static struct pci_bus * __devinit pci_alloc_bus(void)
368 {
369 	struct pci_bus *b;
370 
371 	b = kzalloc(sizeof(*b), GFP_KERNEL);
372 	if (b) {
373 		INIT_LIST_HEAD(&b->node);
374 		INIT_LIST_HEAD(&b->children);
375 		INIT_LIST_HEAD(&b->devices);
376 	}
377 	return b;
378 }
379 
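/*
 * Allocate a child bus for a bridge: inherit ops, sysdata and bus flags
 * from the parent, register the pci_bus class device (including the
 * cpuaffinity attribute) and point the bus resources at the bridge's
 * window resources.
 */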
380 static struct pci_bus * __devinit
381 pci_alloc_child_bus(struct pci_bus *parent, struct pci_dev *bridge, int busnr)
382 {
383 	struct pci_bus *child;
384 	int i;
385 	int retval;
386 
387 	/*
388 	 * Allocate a new bus, and inherit stuff from the parent..
389 	 */
390 	child = pci_alloc_bus();
391 	if (!child)
392 		return NULL;
393 
394 	child->self = bridge;
395 	child->parent = parent;
396 	child->ops = parent->ops;
397 	child->sysdata = parent->sysdata;
398 	child->bus_flags = parent->bus_flags;
399 	child->bridge = get_device(&bridge->dev);
400 
401 	child->class_dev.class = &pcibus_class;
402 	sprintf(child->class_dev.class_id, "%04x:%02x", pci_domain_nr(child), busnr);
403 	retval = class_device_register(&child->class_dev);
404 	if (retval)
405 		goto error_register;
406 	retval = class_device_create_file(&child->class_dev,
407 					  &class_device_attr_cpuaffinity);
408 	if (retval)
409 		goto error_file_create;
410 
411 	/*
412 	 * Set up the primary, secondary and subordinate
413 	 * bus numbers.
414 	 */
415 	child->number = child->secondary = busnr;
416 	child->primary = parent->secondary;
417 	child->subordinate = 0xff;
418 
419 	/* Set up default resource pointers and names.. */
420 	for (i = 0; i < 4; i++) {
421 		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
422 		child->resource[i]->name = child->name;
423 	}
424 	bridge->subordinate = child;
425 
426 	return child;
427 
428 error_file_create:
429 	class_device_unregister(&child->class_dev);
430 error_register:
431 	kfree(child);
432 	return NULL;
433 }
434 
435 struct pci_bus * __devinit pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
436 {
437 	struct pci_bus *child;
438 
439 	child = pci_alloc_child_bus(parent, dev, busnr);
440 	if (child) {
441 		down_write(&pci_bus_sem);
442 		list_add_tail(&child->node, &parent->children);
443 		up_write(&pci_bus_sem);
444 	}
445 	return child;
446 }
447 
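/*
 * If this device is a PCI Express root port, set the CRS Software
 * Visibility bit so that config reads of the Vendor ID on devices that
 * are still initializing return the special value 0x0001, which the
 * retry loop in pci_scan_device() recognizes.
 */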
448 static void pci_enable_crs(struct pci_dev *dev)
449 {
450 	u16 cap, rpctl;
451 	int rpcap = pci_find_capability(dev, PCI_CAP_ID_EXP);
452 	if (!rpcap)
453 		return;
454 
455 	pci_read_config_word(dev, rpcap + PCI_CAP_FLAGS, &cap);
456 	if (((cap & PCI_EXP_FLAGS_TYPE) >> 4) != PCI_EXP_TYPE_ROOT_PORT)
457 		return;
458 
459 	pci_read_config_word(dev, rpcap + PCI_EXP_RTCTL, &rpctl);
460 	rpctl |= PCI_EXP_RTCTL_CRSSVE;
461 	pci_write_config_word(dev, rpcap + PCI_EXP_RTCTL, rpctl);
462 }
463 
464 static void __devinit pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
465 {
466 	struct pci_bus *parent = child->parent;
467 
468 	/* Attempts to fix that up are really dangerous unless
469 	   we're going to re-assign all bus numbers. */
470 	if (!pcibios_assign_all_busses())
471 		return;
472 
473 	while (parent->parent && parent->subordinate < max) {
474 		parent->subordinate = max;
475 		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
476 		parent = parent->parent;
477 	}
478 }
479 
480 unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus);
481 
482 /*
483  * If it's a bridge, configure it and scan the bus behind it.
484  * For CardBus bridges, we don't scan behind as the devices will
485  * be handled by the bridge driver itself.
486  *
487  * We need to process bridges in two passes -- first we scan those
488  * already configured by the BIOS and after we are done with all of
489  * them, we proceed to assigning numbers to the remaining buses in
490  * order to avoid overlaps between old and new bus numbers.
491  */
492 int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass)
493 {
494 	struct pci_bus *child;
495 	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
496 	u32 buses, i, j = 0;
497 	u16 bctl;
498 
499 	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
500 
501 	pr_debug("PCI: Scanning behind PCI bridge %s, config %06x, pass %d\n",
502 		 pci_name(dev), buses & 0xffffff, pass);
503 
504 	/* Disable MasterAbortMode during probing to avoid reporting
505 	   of bus errors (on some architectures) */
506 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
507 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
508 			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
509 
510 	pci_enable_crs(dev);
511 
512 	if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus) {
513 		unsigned int cmax, busnr;
514 		/*
515 		 * Bus already configured by firmware, process it in the first
516 		 * pass and just note the configuration.
517 		 */
518 		if (pass)
519 			goto out;
520 		busnr = (buses >> 8) & 0xFF;
521 
522 		/*
523 		 * If we already got to this bus through a different bridge,
524 		 * ignore it.  This can happen with the i450NX chipset.
525 		 */
526 		if (pci_find_bus(pci_domain_nr(bus), busnr)) {
527 			printk(KERN_INFO "PCI: Bus %04x:%02x already known\n",
528 					pci_domain_nr(bus), busnr);
529 			goto out;
530 		}
531 
532 		child = pci_add_new_bus(bus, dev, busnr);
533 		if (!child)
534 			goto out;
535 		child->primary = buses & 0xFF;
536 		child->subordinate = (buses >> 16) & 0xFF;
537 		child->bridge_ctl = bctl;
538 
539 		cmax = pci_scan_child_bus(child);
540 		if (cmax > max)
541 			max = cmax;
542 		if (child->subordinate > max)
543 			max = child->subordinate;
544 	} else {
545 		/*
546 		 * We need to assign a number to this bus which we always
547 		 * do in the second pass.
548 		 */
549 		if (!pass) {
550 			if (pcibios_assign_all_busses())
551 				/* Temporarily disable forwarding of the
552 				   configuration cycles on all bridges in
553 				   this bus segment to avoid possible
554 				   conflicts in the second pass between two
555 				   bridges programmed with overlapping
556 				   bus ranges. */
557 				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
558 						       buses & ~0xffffff);
559 			goto out;
560 		}
561 
562 		/* Clear errors */
563 		pci_write_config_word(dev, PCI_STATUS, 0xffff);
564 
565 		/* Prevent assigning a bus number that already exists.
566 		 * This can happen when a bridge is hot-plugged */
567 		if (pci_find_bus(pci_domain_nr(bus), max+1))
568 			goto out;
569 		child = pci_add_new_bus(bus, dev, ++max);
		if (!child)
			goto out;
570 		buses = (buses & 0xff000000)
571 		      | ((unsigned int)(child->primary)     <<  0)
572 		      | ((unsigned int)(child->secondary)   <<  8)
573 		      | ((unsigned int)(child->subordinate) << 16);
574 
575 		/*
576 		 * yenta.c forces a secondary latency timer of 176.
577 		 * Copy that behaviour here.
578 		 */
579 		if (is_cardbus) {
580 			buses &= ~0xff000000;
581 			buses |= CARDBUS_LATENCY_TIMER << 24;
582 		}
583 
584 		/*
585 		 * We need to blast all three values with a single write.
586 		 */
587 		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
588 
589 		if (!is_cardbus) {
590 			child->bridge_ctl = bctl | PCI_BRIDGE_CTL_NO_ISA;
591 			/*
592 			 * Adjust subordinate busnr in parent buses.
593 			 * We do this before scanning for children because
594 			 * some devices may not be detected if the bios
595 			 * some devices may not be detected if the BIOS
596 			 */
597 			pci_fixup_parent_subordinate_busnr(child, max);
598 			/* Now we can scan all subordinate buses... */
599 			max = pci_scan_child_bus(child);
600 			/*
601 			 * now fix it up again since we have found
602 			 * the real value of max.
603 			 */
604 			pci_fixup_parent_subordinate_busnr(child, max);
605 		} else {
606 			/*
607 			 * For CardBus bridges, we leave 4 bus numbers
608 			 * as cards with a PCI-to-PCI bridge can be
609 			 * inserted later.
610 			 */
611 			for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
612 				struct pci_bus *parent = bus;
613 				if (pci_find_bus(pci_domain_nr(bus),
614 							max+i+1))
615 					break;
616 				while (parent->parent) {
617 					if ((!pcibios_assign_all_busses()) &&
618 					    (parent->subordinate > max) &&
619 					    (parent->subordinate <= max+i)) {
620 						j = 1;
621 					}
622 					parent = parent->parent;
623 				}
624 				if (j) {
625 					/*
626 					 * Often, there are two cardbus bridges
627 					 * -- try to leave one valid bus number
628 					 * for each one.
629 					 */
630 					i /= 2;
631 					break;
632 				}
633 			}
634 			max += i;
635 			pci_fixup_parent_subordinate_busnr(child, max);
636 		}
637 		/*
638 		 * Set the subordinate bus number to its real value.
639 		 */
640 		child->subordinate = max;
641 		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
642 	}
643 
644 	sprintf(child->name, (is_cardbus ? "PCI CardBus #%02x" : "PCI Bus #%02x"), child->number);
645 
646 	while (bus->parent) {
647 		if ((child->subordinate > bus->subordinate) ||
648 		    (child->number > bus->subordinate) ||
649 		    (child->number < bus->number) ||
650 		    (child->subordinate < bus->number)) {
651 			printk(KERN_WARNING "PCI: Bus #%02x (-#%02x) is "
652 			       "hidden behind%s bridge #%02x (-#%02x)%s\n",
653 			       child->number, child->subordinate,
654 			       bus->self->transparent ? " transparent" : " ",
655 			       bus->number, bus->subordinate,
656 			       pcibios_assign_all_busses() ? " " :
657 			       " (try 'pci=assign-busses')");
658 			printk(KERN_WARNING "Please report the result to "
659 			       "linux-kernel to fix this permanently\n");
660 		}
661 		bus = bus->parent;
662 	}
663 
664 out:
665 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
666 
667 	return max;
668 }
669 
670 /*
671  * Read interrupt line and base address registers.
672  * The architecture-dependent code can tweak these, of course.
673  */
674 static void pci_read_irq(struct pci_dev *dev)
675 {
676 	unsigned char irq;
677 
678 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
679 	dev->pin = irq;
680 	if (irq)
681 		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
682 	dev->irq = irq;
683 }
684 
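/*
 * Force a legacy-mode IDE BAR to its fixed ISA-compatible range: update
 * the resource unconditionally and, if the current BAR looks sane, also
 * write the fixed base back to config space so that config space and
 * the resource tree agree.
 */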
685 static void change_legacy_io_resource(struct pci_dev * dev, unsigned index,
686                                       unsigned start, unsigned end)
687 {
688 	unsigned base = start & PCI_BASE_ADDRESS_IO_MASK;
689 	unsigned len = (end | ~PCI_BASE_ADDRESS_IO_MASK) - base + 1;
690 
691 	/*
692 	 * Some X versions get confused when the BARs reported through
693 	 * /sys or /proc differ from those seen in config space, thus
694 	 * try to update the config space values, too.
695 	 */
696 	if (!(pci_resource_flags(dev, index) & IORESOURCE_IO))
697 		printk(KERN_WARNING "%s: cannot adjust BAR%u (not I/O)\n",
698 		       pci_name(dev), index);
699 	else if (pci_resource_len(dev, index) != len)
700 		printk(KERN_WARNING "%s: cannot adjust BAR%u (size %04X)\n",
701 		       pci_name(dev), index, (unsigned)pci_resource_len(dev, index));
702 	else {
703 		printk(KERN_INFO "%s: trying to change BAR%u from %04X to %04X\n",
704 		       pci_name(dev), index,
705 		       (unsigned)pci_resource_start(dev, index), base);
706 		pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + index * 4, base);
707 	}
708 	pci_resource_start(dev, index) = start;
709 	pci_resource_end(dev, index)   = end;
710 	pci_resource_flags(dev, index) =
711 		IORESOURCE_IO | IORESOURCE_PCI_FIXED | PCI_BASE_ADDRESS_SPACE_IO;
712 }
713 
714 /**
715  * pci_setup_device - fill in class and map information of a device
716  * @dev: the device structure to fill
717  *
718  * Initialize the device structure with information about the device's
719  * vendor, class, memory and I/O space addresses, IRQ lines, etc.
720  * Called at initialisation of the PCI subsystem and by CardBus services.
721  * Returns 0 on success and -1 if the device has an unknown header type (not
722  * normal, bridge or CardBus).
723  */
724 static int pci_setup_device(struct pci_dev * dev)
725 {
726 	u32 class;
727 
728 	sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
729 		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
730 
731 	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
732 	class >>= 8;				    /* upper 3 bytes */
733 	dev->class = class;
734 	class >>= 8;
735 
736 	pr_debug("PCI: Found %s [%04x/%04x] %06x %02x\n", pci_name(dev),
737 		 dev->vendor, dev->device, class, dev->hdr_type);
738 
739 	/* "Unknown power state" */
740 	dev->current_state = PCI_UNKNOWN;
741 
742 	/* Early fixups, before probing the BARs */
743 	pci_fixup_device(pci_fixup_early, dev);
744 	class = dev->class >> 8;
745 
746 	switch (dev->hdr_type) {		    /* header type */
747 	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
748 		if (class == PCI_CLASS_BRIDGE_PCI)
749 			goto bad;
750 		pci_read_irq(dev);
751 		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
752 		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
753 		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
754 
755 		/*
756 		 *	Do the ugly legacy mode stuff here rather than in the broken chip
757 		 *	quirk code. Legacy mode ATA controllers have fixed
758 		 *	addresses. These are not always echoed in BAR0-3, and
759 		 *	BAR0-3 in a few cases contain junk!
760 		 */
761 		if (class == PCI_CLASS_STORAGE_IDE) {
762 			u8 progif;
763 			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
764 			if ((progif & 1) == 0) {
765 				change_legacy_io_resource(dev, 0, 0x1F0, 0x1F7);
766 				change_legacy_io_resource(dev, 1, 0x3F6, 0x3F6);
767 			}
768 			if ((progif & 4) == 0) {
769 				change_legacy_io_resource(dev, 2, 0x170, 0x177);
770 				change_legacy_io_resource(dev, 3, 0x376, 0x376);
771 			}
772 		}
773 		break;
774 
775 	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
776 		if (class != PCI_CLASS_BRIDGE_PCI)
777 			goto bad;
778 		/* The PCI-to-PCI bridge spec requires that a subtractive
779 		   decoding (i.e. transparent) bridge have a programming
780 		   interface code of 0x01. */
781 		pci_read_irq(dev);
782 		dev->transparent = ((dev->class & 0xff) == 1);
783 		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
784 		break;
785 
786 	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
787 		if (class != PCI_CLASS_BRIDGE_CARDBUS)
788 			goto bad;
789 		pci_read_irq(dev);
790 		pci_read_bases(dev, 1, 0);
791 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
792 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
793 		break;
794 
795 	default:				    /* unknown header */
796 		printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
797 			pci_name(dev), dev->hdr_type);
798 		return -1;
799 
800 	bad:
801 		printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
802 		       pci_name(dev), class, dev->hdr_type);
803 		dev->class = PCI_CLASS_NOT_DEFINED;
804 	}
805 
806 	/* We found a fine healthy device, go go go... */
807 	return 0;
808 }
809 
810 /**
811  * pci_release_dev - free a pci device structure when all users of it are finished.
812  * @dev: device that's been disconnected
813  *
814  * Will be called only by the device core when all users of this pci device are
815  * done.
816  */
817 static void pci_release_dev(struct device *dev)
818 {
819 	struct pci_dev *pci_dev;
820 
821 	pci_dev = to_pci_dev(dev);
822 	kfree(pci_dev);
823 }
824 
825 /**
826  * pci_cfg_space_size - get the configuration space size of the PCI device.
827  * @dev: PCI device
828  *
829  * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
830  * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
831  * access it.  Maybe we don't have a way to generate extended config space
832  * accesses, or the device is behind a reverse Express bridge.  So we try
833  * reading the dword at 0x100 which must either be 0 or a valid extended
834  * capability header.
835  */
836 int pci_cfg_space_size(struct pci_dev *dev)
837 {
838 	int pos;
839 	u32 status;
840 
841 	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
842 	if (!pos) {
843 		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
844 		if (!pos)
845 			goto fail;
846 
847 		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
848 		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
849 			goto fail;
850 	}
851 
852 	if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL)
853 		goto fail;
854 	if (status == 0xffffffff)
855 		goto fail;
856 
857 	return PCI_CFG_SPACE_EXP_SIZE;
858 
859  fail:
860 	return PCI_CFG_SPACE_SIZE;
861 }
862 
863 static void pci_release_bus_bridge_dev(struct device *dev)
864 {
865 	kfree(dev);
866 }
867 
868 /*
869  * Read the config data for a PCI device, sanity-check it
870  * and fill in the dev structure...
871  */
872 static struct pci_dev * __devinit
873 pci_scan_device(struct pci_bus *bus, int devfn)
874 {
875 	struct pci_dev *dev;
876 	u32 l;
877 	u8 hdr_type;
878 	int delay = 1;
879 
880 	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
881 		return NULL;
882 
883 	/* some broken boards return 0 or ~0 if a slot is empty: */
884 	if (l == 0xffffffff || l == 0x00000000 ||
885 	    l == 0x0000ffff || l == 0xffff0000)
886 		return NULL;
887 
888 	/* Configuration Request Retry Status */
889 	while (l == 0xffff0001) {
890 		msleep(delay);
891 		delay *= 2;
892 		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
893 			return NULL;
894 		/* Card hasn't responded in 60 seconds?  Must be stuck. */
895 		if (delay > 60 * 1000) {
896 			printk(KERN_WARNING "Device %04x:%02x:%02x.%d not "
897 					"responding\n", pci_domain_nr(bus),
898 					bus->number, PCI_SLOT(devfn),
899 					PCI_FUNC(devfn));
900 			return NULL;
901 		}
902 	}
903 
904 	if (pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type))
905 		return NULL;
906 
907 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
908 	if (!dev)
909 		return NULL;
910 
911 	dev->bus = bus;
912 	dev->sysdata = bus->sysdata;
913 	dev->dev.parent = bus->bridge;
914 	dev->dev.bus = &pci_bus_type;
915 	dev->devfn = devfn;
916 	dev->hdr_type = hdr_type & 0x7f;
917 	dev->multifunction = !!(hdr_type & 0x80);
918 	dev->vendor = l & 0xffff;
919 	dev->device = (l >> 16) & 0xffff;
920 	dev->cfg_size = pci_cfg_space_size(dev);
921 	dev->error_state = pci_channel_io_normal;
922 
923 	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
924 	   set this higher, assuming the system even supports it.  */
925 	dev->dma_mask = 0xffffffff;
926 	if (pci_setup_device(dev) < 0) {
927 		kfree(dev);
928 		return NULL;
929 	}
930 
931 	return dev;
932 }
933 
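/*
 * Initialize the embedded struct device, apply header fixups and add
 * the device to the bus's device list.  The device is registered with
 * the driver core later, by pci_bus_add_devices().
 */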
934 void __devinit pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
935 {
936 	device_initialize(&dev->dev);
937 	dev->dev.release = pci_release_dev;
938 	pci_dev_get(dev);
939 
940 	set_dev_node(&dev->dev, pcibus_to_node(bus));
941 	dev->dev.dma_mask = &dev->dma_mask;
942 	dev->dev.coherent_dma_mask = 0xffffffffull;
943 
944 	/* Fix up broken headers */
945 	pci_fixup_device(pci_fixup_header, dev);
946 
947 	/*
948 	 * Add the device to our list of discovered devices
949 	 * and the bus list for fixup functions, etc.
950 	 */
951 	INIT_LIST_HEAD(&dev->global_list);
952 	down_write(&pci_bus_sem);
953 	list_add_tail(&dev->bus_list, &bus->devices);
954 	up_write(&pci_bus_sem);
955 }
956 
957 struct pci_dev * __devinit
958 pci_scan_single_device(struct pci_bus *bus, int devfn)
959 {
960 	struct pci_dev *dev;
961 
962 	dev = pci_scan_device(bus, devfn);
963 	if (!dev)
964 		return NULL;
965 
966 	pci_device_add(dev, bus);
967 
968 	return dev;
969 }
970 
971 /**
972  * pci_scan_slot - scan a PCI slot on a bus for devices.
973  * @bus: PCI bus to scan
974  * @devfn: slot number to scan (must have zero function.)
975  *
976  * Scan a PCI slot on the specified PCI bus for devices, adding
977  * discovered devices to the @bus->devices list.  New devices
978  * will have an empty dev->global_list head.
979  */
980 int __devinit pci_scan_slot(struct pci_bus *bus, int devfn)
981 {
982 	int func, nr = 0;
983 	int scan_all_fns;
984 
985 	scan_all_fns = pcibios_scan_all_fns(bus, devfn);
986 
987 	for (func = 0; func < 8; func++, devfn++) {
988 		struct pci_dev *dev;
989 
990 		dev = pci_scan_single_device(bus, devfn);
991 		if (dev) {
992 			nr++;
993 
994 			/*
995 			 * If this is a single function device,
996 			 * don't scan past the first function.
997 			 */
998 			if (!dev->multifunction) {
999 				if (func > 0) {
1000 					dev->multifunction = 1;
1001 				} else {
1002 					break;
1003 				}
1004 			}
1005 		} else {
1006 			if (func == 0 && !scan_all_fns)
1007 				break;
1008 		}
1009 	}
1010 	return nr;
1011 }
1012 
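/*
 * Scan every slot on the bus, run the architecture fixups and then walk
 * any bridges found in two passes (see pci_scan_bridge).  Returns the
 * highest bus number found below this bus.
 */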
1013 unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1014 {
1015 	unsigned int devfn, pass, max = bus->secondary;
1016 	struct pci_dev *dev;
1017 
1018 	pr_debug("PCI: Scanning bus %04x:%02x\n", pci_domain_nr(bus), bus->number);
1019 
1020 	/* Go find them, Rover! */
1021 	for (devfn = 0; devfn < 0x100; devfn += 8)
1022 		pci_scan_slot(bus, devfn);
1023 
1024 	/*
1025 	 * After performing arch-dependent fixup of the bus, look behind
1026 	 * all PCI-to-PCI bridges on this bus.
1027 	 */
1028 	pr_debug("PCI: Fixups for bus %04x:%02x\n", pci_domain_nr(bus), bus->number);
1029 	pcibios_fixup_bus(bus);
1030 	for (pass=0; pass < 2; pass++)
1031 		list_for_each_entry(dev, &bus->devices, bus_list) {
1032 			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1033 			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1034 				max = pci_scan_bridge(bus, dev, max, pass);
1035 		}
1036 
1037 	/*
1038 	 * We've scanned the bus and so we know all about what's on
1039 	 * the other side of any bridges that may be on this bus plus
1040 	 * any devices.
1041 	 *
1042 	 * Return how far we've got finding sub-buses.
1043 	 */
1044 	pr_debug("PCI: Bus scan for %04x:%02x returning with max=%02x\n",
1045 		pci_domain_nr(bus), bus->number, max);
1046 	return max;
1047 }
1048 
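/*
 * Scan the bus and then register the discovered devices with the driver
 * core so that drivers can bind to them.
 */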
1049 unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
1050 {
1051 	unsigned int max;
1052 
1053 	max = pci_scan_child_bus(bus);
1054 
1055 	/*
1056 	 * Make the discovered devices available.
1057 	 */
1058 	pci_bus_add_devices(bus);
1059 
1060 	return max;
1061 }
1062 
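/*
 * Create and register a root bus: allocate the pci_bus and its bridge
 * device, hook both into sysfs (class device, cpuaffinity attribute,
 * legacy files and the "bridge" symlink) and point the bus resources at
 * the global ioport/iomem resources.  Returns NULL on any failure.
 */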
1063 struct pci_bus * __devinit pci_create_bus(struct device *parent,
1064 		int bus, struct pci_ops *ops, void *sysdata)
1065 {
1066 	int error;
1067 	struct pci_bus *b;
1068 	struct device *dev;
1069 
1070 	b = pci_alloc_bus();
1071 	if (!b)
1072 		return NULL;
1073 
1074 	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
1075 	if (!dev){
1076 		kfree(b);
1077 		return NULL;
1078 	}
1079 
1080 	b->sysdata = sysdata;
1081 	b->ops = ops;
1082 
1083 	if (pci_find_bus(pci_domain_nr(b), bus)) {
1084 		/* If we already got to this bus through a different bridge, ignore it */
1085 		pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus);
1086 		goto err_out;
1087 	}
1088 
1089 	down_write(&pci_bus_sem);
1090 	list_add_tail(&b->node, &pci_root_buses);
1091 	up_write(&pci_bus_sem);
1092 
1093 	memset(dev, 0, sizeof(*dev));
1094 	dev->parent = parent;
1095 	dev->release = pci_release_bus_bridge_dev;
1096 	sprintf(dev->bus_id, "pci%04x:%02x", pci_domain_nr(b), bus);
1097 	error = device_register(dev);
1098 	if (error)
1099 		goto dev_reg_err;
1100 	b->bridge = get_device(dev);
1101 
1102 	b->class_dev.class = &pcibus_class;
1103 	sprintf(b->class_dev.class_id, "%04x:%02x", pci_domain_nr(b), bus);
1104 	error = class_device_register(&b->class_dev);
1105 	if (error)
1106 		goto class_dev_reg_err;
1107 	error = class_device_create_file(&b->class_dev, &class_device_attr_cpuaffinity);
1108 	if (error)
1109 		goto class_dev_create_file_err;
1110 
1111 	/* Create legacy_io and legacy_mem files for this bus */
1112 	pci_create_legacy_files(b);
1113 
1114 	error = sysfs_create_link(&b->class_dev.kobj, &b->bridge->kobj, "bridge");
1115 	if (error)
1116 		goto sys_create_link_err;
1117 
1118 	b->number = b->secondary = bus;
1119 	b->resource[0] = &ioport_resource;
1120 	b->resource[1] = &iomem_resource;
1121 
1122 	return b;
1123 
1124 sys_create_link_err:
1125 	class_device_remove_file(&b->class_dev, &class_device_attr_cpuaffinity);
1126 class_dev_create_file_err:
1127 	class_device_unregister(&b->class_dev);
1128 class_dev_reg_err:
1129 	device_unregister(dev);
1130 dev_reg_err:
1131 	down_write(&pci_bus_sem);
1132 	list_del(&b->node);
1133 	up_write(&pci_bus_sem);
1134 err_out:
1135 	kfree(dev);
1136 	kfree(b);
1137 	return NULL;
1138 }
1139 EXPORT_SYMBOL_GPL(pci_create_bus);
1140 
1141 struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
1142 		int bus, struct pci_ops *ops, void *sysdata)
1143 {
1144 	struct pci_bus *b;
1145 
1146 	b = pci_create_bus(parent, bus, ops, sysdata);
1147 	if (b)
1148 		b->subordinate = pci_scan_child_bus(b);
1149 	return b;
1150 }
1151 EXPORT_SYMBOL(pci_scan_bus_parented);
1152 
1153 #ifdef CONFIG_HOTPLUG
1154 EXPORT_SYMBOL(pci_add_new_bus);
1155 EXPORT_SYMBOL(pci_do_scan_bus);
1156 EXPORT_SYMBOL(pci_scan_slot);
1157 EXPORT_SYMBOL(pci_scan_bridge);
1158 EXPORT_SYMBOL(pci_scan_single_device);
1159 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1160 #endif
1161 
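/*
 * Compare two devices for breadth-first ordering: by domain, then bus
 * number, then devfn.
 */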
1162 static int __init pci_sort_bf_cmp(const struct pci_dev *a, const struct pci_dev *b)
1163 {
1164 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
1165 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
1166 
1167 	if      (a->bus->number < b->bus->number) return -1;
1168 	else if (a->bus->number > b->bus->number) return  1;
1169 
1170 	if      (a->devfn < b->devfn) return -1;
1171 	else if (a->devfn > b->devfn) return  1;
1172 
1173 	return 0;
1174 }
1175 
1176 /*
1177  * Yes, this forcibly breaks the klist abstraction temporarily.  It
1178  * just wants to sort the klist, not change reference counts and
1179  * take/drop locks rapidly in the process.  It does all this while
1180  * holding the lock for the list, so objects can't otherwise be
1181  * added/removed while we're swizzling.
1182  */
1183 static void __init pci_insertion_sort_klist(struct pci_dev *a, struct list_head *list)
1184 {
1185 	struct list_head *pos;
1186 	struct klist_node *n;
1187 	struct device *dev;
1188 	struct pci_dev *b;
1189 
1190 	list_for_each(pos, list) {
1191 		n = container_of(pos, struct klist_node, n_node);
1192 		dev = container_of(n, struct device, knode_bus);
1193 		b = to_pci_dev(dev);
1194 		if (pci_sort_bf_cmp(a, b) <= 0) {
1195 			list_move_tail(&a->dev.knode_bus.n_node, &b->dev.knode_bus.n_node);
1196 			return;
1197 		}
1198 	}
1199 	list_move_tail(&a->dev.knode_bus.n_node, list);
1200 }
1201 
1202 static void __init pci_sort_breadthfirst_klist(void)
1203 {
1204 	LIST_HEAD(sorted_devices);
1205 	struct list_head *pos, *tmp;
1206 	struct klist_node *n;
1207 	struct device *dev;
1208 	struct pci_dev *pdev;
1209 
1210 	spin_lock(&pci_bus_type.klist_devices.k_lock);
1211 	list_for_each_safe(pos, tmp, &pci_bus_type.klist_devices.k_list) {
1212 		n = container_of(pos, struct klist_node, n_node);
1213 		dev = container_of(n, struct device, knode_bus);
1214 		pdev = to_pci_dev(dev);
1215 		pci_insertion_sort_klist(pdev, &sorted_devices);
1216 	}
1217 	list_splice(&sorted_devices, &pci_bus_type.klist_devices.k_list);
1218 	spin_unlock(&pci_bus_type.klist_devices.k_lock);
1219 }
1220 
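/*
 * Insert the device into the list so that the list stays sorted in
 * breadth-first order; the same ordering as the klist sort above.
 */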
1221 static void __init pci_insertion_sort_devices(struct pci_dev *a, struct list_head *list)
1222 {
1223 	struct pci_dev *b;
1224 
1225 	list_for_each_entry(b, list, global_list) {
1226 		if (pci_sort_bf_cmp(a, b) <= 0) {
1227 			list_move_tail(&a->global_list, &b->global_list);
1228 			return;
1229 		}
1230 	}
1231 	list_move_tail(&a->global_list, list);
1232 }
1233 
1234 static void __init pci_sort_breadthfirst_devices(void)
1235 {
1236 	LIST_HEAD(sorted_devices);
1237 	struct pci_dev *dev, *tmp;
1238 
1239 	down_write(&pci_bus_sem);
1240 	list_for_each_entry_safe(dev, tmp, &pci_devices, global_list) {
1241 		pci_insertion_sort_devices(dev, &sorted_devices);
1242 	}
1243 	list_splice(&sorted_devices, &pci_devices);
1244 	up_write(&pci_bus_sem);
1245 }
1246 
1247 void __init pci_sort_breadthfirst(void)
1248 {
1249 	pci_sort_breadthfirst_devices();
1250 	pci_sort_breadthfirst_klist();
1251 }
1252 
1253