xref: /linux/drivers/pci/setup-bus.c (revision 5499b45190237ca90dd2ac86395cf464fe1f4cc7)
1 /*
2  *	drivers/pci/setup-bus.c
3  *
4  * Extruded from code written by
5  *      Dave Rusling (david.rusling@reo.mts.dec.com)
6  *      David Mosberger (davidm@cs.arizona.edu)
7  *	David Miller (davem@redhat.com)
8  *
9  * Support routines for initializing a PCI subsystem.
10  */
11 
12 /*
13  * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
14  *	     PCI-PCI bridges cleanup, sorted resource allocation.
15  * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
16  *	     Converted to allocation in 3 passes, which gives
17  *	     tighter packing. Prefetchable range support.
18  */
19 
20 #include <linux/init.h>
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/pci.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/cache.h>
27 #include <linux/slab.h>
28 #include "pci.h"
29 
/*
 * Extended resource list node: in addition to the resource pointer it
 * records the owning device and a snapshot of the resource's original
 * start/end/flags, so a failed assignment can be restored and retried
 * on a later pass (see pci_assign_unassigned_resources()).
 */
struct resource_list_x {
	struct resource_list_x *next;	/* singly linked; list head is a dummy node */
	struct resource *res;		/* resource that failed assignment */
	struct pci_dev *dev;		/* device owning @res */
	resource_size_t start;		/* saved original range start */
	resource_size_t end;		/* saved original range end */
	unsigned long flags;		/* saved original resource flags */
};
38 
39 static void add_to_failed_list(struct resource_list_x *head,
40 				 struct pci_dev *dev, struct resource *res)
41 {
42 	struct resource_list_x *list = head;
43 	struct resource_list_x *ln = list->next;
44 	struct resource_list_x *tmp;
45 
46 	tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
47 	if (!tmp) {
48 		pr_warning("add_to_failed_list: kmalloc() failed!\n");
49 		return;
50 	}
51 
52 	tmp->next = ln;
53 	tmp->res = res;
54 	tmp->dev = dev;
55 	tmp->start = res->start;
56 	tmp->end = res->end;
57 	tmp->flags = res->flags;
58 	list->next = tmp;
59 }
60 
61 static void free_failed_list(struct resource_list_x *head)
62 {
63 	struct resource_list_x *list, *tmp;
64 
65 	for (list = head->next; list;) {
66 		tmp = list;
67 		list = list->next;
68 		kfree(tmp);
69 	}
70 
71 	head->next = NULL;
72 }
73 
74 static void __dev_sort_resources(struct pci_dev *dev,
75 				 struct resource_list *head)
76 {
77 	u16 class = dev->class >> 8;
78 
79 	/* Don't touch classless devices or host bridges or ioapics.  */
80 	if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
81 		return;
82 
83 	/* Don't touch ioapic devices already enabled by firmware */
84 	if (class == PCI_CLASS_SYSTEM_PIC) {
85 		u16 command;
86 		pci_read_config_word(dev, PCI_COMMAND, &command);
87 		if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
88 			return;
89 	}
90 
91 	pdev_sort_resources(dev, head);
92 }
93 
94 static void __assign_resources_sorted(struct resource_list *head,
95 				 struct resource_list_x *fail_head)
96 {
97 	struct resource *res;
98 	struct resource_list *list, *tmp;
99 	int idx;
100 
101 	for (list = head->next; list;) {
102 		res = list->res;
103 		idx = res - &list->dev->resource[0];
104 		if (pci_assign_resource(list->dev, idx)) {
105 			if (fail_head && !pci_is_root_bus(list->dev->bus))
106 				add_to_failed_list(fail_head, list->dev, res);
107 			res->start = 0;
108 			res->end = 0;
109 			res->flags = 0;
110 		}
111 		tmp = list;
112 		list = list->next;
113 		kfree(tmp);
114 	}
115 }
116 
117 static void pdev_assign_resources_sorted(struct pci_dev *dev,
118 				 struct resource_list_x *fail_head)
119 {
120 	struct resource_list head;
121 
122 	head.next = NULL;
123 	__dev_sort_resources(dev, &head);
124 	__assign_resources_sorted(&head, fail_head);
125 
126 }
127 
128 static void pbus_assign_resources_sorted(const struct pci_bus *bus,
129 					 struct resource_list_x *fail_head)
130 {
131 	struct pci_dev *dev;
132 	struct resource_list head;
133 
134 	head.next = NULL;
135 	list_for_each_entry(dev, &bus->devices, bus_list)
136 		__dev_sort_resources(dev, &head);
137 
138 	__assign_resources_sorted(&head, fail_head);
139 }
140 
/*
 * pci_setup_cardbus - program a CardBus bridge's four windows
 * @bus: the bus behind the CardBus bridge
 *
 * Writes the two I/O and two memory base/limit register pairs from
 * bus->resource[0..3].  A window whose resource lacks the matching
 * IORESOURCE_IO/IORESOURCE_MEM flag is left untouched.
 */
void pci_setup_cardbus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;	/* resource translated to bus addresses */

	dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
		 bus->secondary, bus->subordinate);

	/* First I/O window. */
	res = bus->resource[0];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		/*
		 * The IO resource is allocated a range twice as large as it
		 * would normally need.  This allows us to set both IO regs.
		 */
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
					region.end);
	}

	/* Second I/O window. */
	res = bus->resource[1];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
					region.end);
	}

	/* First memory window. */
	res = bus->resource[2];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
					region.end);
	}

	/* Second memory window. */
	res = bus->resource[3];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
					region.end);
	}
}
EXPORT_SYMBOL(pci_setup_cardbus);
195 
/* Initialize bridges with base/limit values we have collected.
   PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
   requires that if there are no I/O ports or memory behind the
   bridge, the corresponding range must be turned off by writing a
   base value greater than the limit to the bridge's base/limit
   registers.

   Note: care must be taken when updating I/O base/limit registers
   of bridges which support 32-bit I/O. This update requires two
   config space writes, so it's quite possible that an I/O window of
   the bridge will have some undesirable address (e.g. 0) after the
   first write. Ditto 64-bit prefetchable MMIO.  */
/* Program the bridge's I/O window from bus->resource[0]. */
static void pci_setup_bridge_io(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l, io_upper16;

	/* Set up the top and bottom of the PCI I/O segment for this bus. */
	res = bus->resource[0];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		/* Keep the non-base/limit bits of the dword unchanged. */
		pci_read_config_dword(bridge, PCI_IO_BASE, &l);
		l &= 0xffff0000;
		/* Base address bits 15:12 go to dword bits 7:4,
		   limit bits 15:12 to dword bits 15:12. */
		l |= (region.start >> 8) & 0x00f0;
		l |= region.end & 0xf000;
		/* Set up upper 16 bits of I/O base/limit. */
		io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		/* Clear upper 16 bits of I/O base/limit. */
		io_upper16 = 0;
		l = 0x00f0;	/* base > limit: I/O range turned off */
		dev_info(&bridge->dev, "  bridge window [io  disabled]\n");
	}
	/* Temporarily disable the I/O range before updating PCI_IO_BASE. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
	/* Update lower 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE, l);
	/* Update upper 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
238 
/* Program the bridge's non-prefetchable memory window from bus->resource[1]. */
static void pci_setup_bridge_mmio(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l;

	/* Set up the top and bottom of the PCI Memory segment for this bus. */
	res = bus->resource[1];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		/* Base bits 31:20 in the low word, limit bits 31:20 in the high. */
		l = (region.start >> 16) & 0xfff0;
		l |= region.end & 0xfff00000;
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		l = 0x0000fff0;	/* base > limit: memory range turned off */
		dev_info(&bridge->dev, "  bridge window [mem disabled]\n");
	}
	/* One dword write covers both PCI_MEMORY_BASE and PCI_MEMORY_LIMIT. */
	pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
}
259 
/* Program the bridge's prefetchable memory window from bus->resource[2]. */
static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l, bu, lu;

	/* Clear out the upper 32 bits of PREF limit.
	   If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
	   disables PREF range, which is ok. */
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);

	/* Set up PREF base/limit. */
	bu = lu = 0;
	res = bus->resource[2];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_PREFETCH) {
		/* Base bits 31:20 in the low word, limit bits 31:20 in the high. */
		l = (region.start >> 16) & 0xfff0;
		l |= region.end & 0xfff00000;
		if (res->flags & IORESOURCE_MEM_64) {
			/* 64-bit window: the upper dwords are written below. */
			bu = upper_32_bits(region.start);
			lu = upper_32_bits(region.end);
		}
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		l = 0x0000fff0;	/* base > limit: pref range turned off */
		dev_info(&bridge->dev, "  bridge window [mem pref disabled]\n");
	}
	pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);

	/* Set the upper 32 bits of PREF base & limit. */
	pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
}
294 
/* Program the window types selected by @type for the bridge above @bus,
   then write the cached bridge control word back to the device. */
static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
	struct pci_dev *bridge = bus->self;

	dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
		 bus->secondary, bus->subordinate);

	if (type & IORESOURCE_IO)
		pci_setup_bridge_io(bus);

	if (type & IORESOURCE_MEM)
		pci_setup_bridge_mmio(bus);

	if (type & IORESOURCE_PREFETCH)
		pci_setup_bridge_mmio_pref(bus);

	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
313 
314 static void pci_setup_bridge(struct pci_bus *bus)
315 {
316 	unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
317 				  IORESOURCE_PREFETCH;
318 
319 	__pci_setup_bridge(bus, type);
320 }
321 
322 /* Check whether the bridge supports optional I/O and
323    prefetchable memory ranges. If not, the respective
324    base/limit registers must be read-only and read as 0. */
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
	u16 io;
	u32 pmem;
	struct pci_dev *bridge = bus->self;
	struct resource *b_res;

	b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	/* The non-prefetchable memory window is mandatory on every bridge. */
	b_res[1].flags |= IORESOURCE_MEM;

	/* Probe for an I/O window: if base/limit reads back zero, write a
	   pattern and re-read; read-only zero means no I/O support. */
	pci_read_config_word(bridge, PCI_IO_BASE, &io);
	if (!io) {
		pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
		pci_read_config_word(bridge, PCI_IO_BASE, &io);
		pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
	}
	if (io)
		b_res[0].flags |= IORESOURCE_IO;
	/*  DECchip 21050 pass 2 errata: the bridge may miss an address
	    disconnect boundary by one PCI data phase.
	    Workaround: do not use prefetching on this device. */
	if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
		return;
	/* Same write-and-read-back probe for the prefetchable window. */
	pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
	if (!pmem) {
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
					       0xfff0fff0);
		pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
	}
	if (pmem) {
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
		    PCI_PREF_RANGE_TYPE_64) {
			b_res[2].flags |= IORESOURCE_MEM_64;
			b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
		}
	}

	/* double check if bridge does support 64 bit pref */
	if (b_res[2].flags & IORESOURCE_MEM_64) {
		u32 mem_base_hi, tmp;
		/* If the upper-32-bits base register is read-only zero,
		   the window is really 32-bit only; restore it afterwards. */
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					 &mem_base_hi);
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					       0xffffffff);
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
		if (!tmp)
			b_res[2].flags &= ~IORESOURCE_MEM_64;
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
				       mem_base_hi);
	}
}
378 
379 /* Helper function for sizing routines: find first available
380    bus resource of a given type. Note: we intentionally skip
381    the bus resources which have already been assigned (that is,
382    have non-NULL parent resource). */
383 static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type)
384 {
385 	int i;
386 	struct resource *r;
387 	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
388 				  IORESOURCE_PREFETCH;
389 
390 	pci_bus_for_each_resource(bus, r, i) {
391 		if (r == &ioport_resource || r == &iomem_resource)
392 			continue;
393 		if (r && (r->flags & type_mask) == type && !r->parent)
394 			return r;
395 	}
396 	return NULL;
397 }
398 
399 /* Sizing the IO windows of the PCI-PCI bridge is trivial,
400    since these windows have 4K granularity and the IO ranges
401    of non-bridge PCI devices are limited to 256 bytes.
402    We must be careful with the ISA aliasing though. */
static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
{
	struct pci_dev *dev;
	struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
	/* size: requests < 1K (may need ISA re-alignment); size1: the rest */
	unsigned long size = 0, size1 = 0, old_size;

	/* No free I/O window on this bus - nothing to size. */
	if (!b_res)
		return;

	/* Sum the I/O requests of all unassigned child resources. */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];
			unsigned long r_size;

			if (r->parent || !(r->flags & IORESOURCE_IO))
				continue;
			r_size = resource_size(r);

			if (r_size < 0x400)
				/* Might be re-aligned for ISA */
				size += r_size;
			else
				size1 += r_size;
		}
	}
	if (size < min_size)
		size = min_size;
	/* Treat a start==end placeholder as "no existing size". */
	old_size = resource_size(b_res);
	if (old_size == 1)
		old_size = 0;
/* To be fixed in 2.5: we should have sort of HAVE_ISA
   flag in the struct pci_bus. */
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
	/* Spread sub-1K requests out to dodge the ISA 10-bit alias ranges. */
	size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
	size = ALIGN(size + size1, 4096);
	/* Never shrink a window that was already sized larger. */
	if (size < old_size)
		size = old_size;
	if (!size) {
		if (b_res->start || b_res->end)
			dev_info(&bus->self->dev, "disabling bridge window "
				 "%pR to [bus %02x-%02x] (unused)\n", b_res,
				 bus->secondary, bus->subordinate);
		b_res->flags = 0;
		return;
	}
	/* Alignment of the IO window is always 4K */
	b_res->start = 4096;
	b_res->end = b_res->start + size - 1;
	b_res->flags |= IORESOURCE_STARTALIGN;
}
456 
457 /* Calculate the size of the bus and minimal alignment which
458    guarantees that all child resources fit in this size. */
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
			 unsigned long type, resource_size_t min_size)
{
	struct pci_dev *dev;
	resource_size_t min_align, align, size, old_size;
	resource_size_t aligns[12];	/* Alignments from 1Mb to 2Gb */
	int order, max_order;
	struct resource *b_res = find_free_bus_resource(bus, type);
	unsigned int mem64_mask = 0;

	/* No free window of the requested type on this bus. */
	if (!b_res)
		return 0;

	memset(aligns, 0, sizeof(aligns));
	max_order = 0;
	size = 0;

	/* MEM_64 is kept only if every contributing child resource is
	   64-bit capable as well (ANDed in below). */
	mem64_mask = b_res->flags & IORESOURCE_MEM_64;
	b_res->flags &= ~IORESOURCE_MEM_64;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];
			resource_size_t r_size;

			if (r->parent || (r->flags & mask) != type)
				continue;
			r_size = resource_size(r);
			/* For bridges size != alignment */
			align = pci_resource_alignment(dev, r);
			/* order 0 corresponds to 1MB; aligns[] tops out at 2GB */
			order = __ffs(align) - 20;
			if (order > 11) {
				dev_warn(&dev->dev, "disabling BAR %d: %pR "
					 "(bad alignment %#llx)\n", i, r,
					 (unsigned long long) align);
				r->flags = 0;
				continue;
			}
			size += r_size;
			if (order < 0)
				order = 0;
			/* Exclude ranges with size > align from
			   calculation of the alignment. */
			if (r_size == align)
				aligns[order] += align;
			if (order > max_order)
				max_order = order;
			mem64_mask &= r->flags & IORESOURCE_MEM_64;
		}
	}
	if (size < min_size)
		size = min_size;
	/* Treat a start==end placeholder as "no existing size" and never
	   shrink a window that was already sized larger. */
	old_size = resource_size(b_res);
	if (old_size == 1)
		old_size = 0;
	if (size < old_size)
		size = old_size;

	/* Heuristic: derive the window's minimum alignment from the
	   per-order sums of naturally-aligned resources collected above. */
	align = 0;
	min_align = 0;
	for (order = 0; order <= max_order; order++) {
		resource_size_t align1 = 1;

		align1 <<= (order + 20);

		if (!align)
			min_align = align1;
		else if (ALIGN(align + min_align, min_align) < align1)
			min_align = align1 >> 1;
		align += aligns[order];
	}
	size = ALIGN(size, min_align);
	if (!size) {
		if (b_res->start || b_res->end)
			dev_info(&bus->self->dev, "disabling bridge window "
				 "%pR to [bus %02x-%02x] (unused)\n", b_res,
				 bus->secondary, bus->subordinate);
		b_res->flags = 0;
		return 1;
	}
	b_res->start = min_align;
	b_res->end = size + min_align - 1;
	b_res->flags |= IORESOURCE_STARTALIGN;
	b_res->flags |= mem64_mask;
	return 1;
}
547 
/* Reserve fixed-size windows for a CardBus bridge's subordinate bus. */
static void pci_bus_size_cardbus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	u16 ctrl;

	/*
	 * Reserve some resources for CardBus.  We reserve
	 * a fixed amount of bus space for CardBus bridges.
	 */
	b_res[0].start = 0;
	b_res[0].end = pci_cardbus_io_size - 1;
	b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

	b_res[1].start = 0;
	b_res[1].end = pci_cardbus_io_size - 1;
	b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

	/*
	 * Check whether prefetchable memory is supported
	 * by this bridge: try to set the bit and read it back;
	 * it may be read-only on bridges without prefetch support.
	 */
	pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
		ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
		pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
		pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	}

	/*
	 * If we have prefetchable memory support, allocate
	 * two regions.  Otherwise, allocate one region of
	 * twice the size.
	 */
	if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
		b_res[2].start = 0;
		b_res[2].end = pci_cardbus_mem_size - 1;
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;

		b_res[3].start = 0;
		b_res[3].end = pci_cardbus_mem_size - 1;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
	} else {
		b_res[3].start = 0;
		b_res[3].end = pci_cardbus_mem_size * 2 - 1;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
	}
}
596 
/*
 * pci_bus_size_bridges - compute window sizes for @bus and everything below
 * @bus: starting bus
 *
 * Recurses depth-first so subordinate buses are sized before their
 * parent's windows are computed from them.
 */
void __ref pci_bus_size_bridges(struct pci_bus *bus)
{
	struct pci_dev *dev;
	unsigned long mask, prefmask;
	resource_size_t min_mem_size = 0, min_io_size = 0;

	/* Size all subordinate buses first. */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_bus_size_cardbus(b);
			break;

		case PCI_CLASS_BRIDGE_PCI:
		default:
			pci_bus_size_bridges(b);
			break;
		}
	}

	/* The root bus? */
	if (!bus->self)
		return;

	switch (bus->self->class >> 8) {
	case PCI_CLASS_BRIDGE_CARDBUS:
		/* don't size cardbuses yet. */
		break;

	case PCI_CLASS_BRIDGE_PCI:
		pci_bridge_check_ranges(bus);
		if (bus->self->is_hotplug_bridge) {
			/* Reserve extra room for future hotplug below here. */
			min_io_size  = pci_hotplug_io_size;
			min_mem_size = pci_hotplug_mem_size;
		}
		/* fall through */
	default:
		pbus_size_io(bus, min_io_size);
		/* If the bridge supports prefetchable range, size it
		   separately. If it doesn't, or its prefetchable window
		   has already been allocated by arch code, try
		   non-prefetchable range for both types of PCI memory
		   resources. */
		mask = IORESOURCE_MEM;
		prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (pbus_size_mem(bus, prefmask, prefmask, min_mem_size))
			mask = prefmask; /* Success, size non-prefetch only. */
		else
			min_mem_size += min_mem_size; /* one window holds both */
		pbus_size_mem(bus, mask, IORESOURCE_MEM, min_mem_size);
		break;
	}
}
EXPORT_SYMBOL(pci_bus_size_bridges);
653 
/*
 * Assign resources for the devices on @bus, then recurse into every
 * subordinate bus and program its bridge windows into the hardware.
 * Failures are recorded on @fail_head when it is non-NULL.
 */
static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
					 struct resource_list_x *fail_head)
{
	struct pci_bus *b;
	struct pci_dev *dev;

	pbus_assign_resources_sorted(bus, fail_head);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		b = dev->subordinate;
		if (!b)
			continue;

		/* Children first, so their windows exist before we program
		   this bridge's registers. */
		__pci_bus_assign_resources(b, fail_head);

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_PCI:
			/* Don't reprogram a bridge that is already enabled. */
			if (!pci_is_enabled(dev))
				pci_setup_bridge(b);
			break;

		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_setup_cardbus(b);
			break;

		default:
			dev_info(&dev->dev, "not setting up bridge for bus "
				 "%04x:%02x\n", pci_domain_nr(b), b->number);
			break;
		}
	}
}
686 
/* Assign resources for @bus and everything below it, without keeping
   a failure list (no retry pass). */
void __ref pci_bus_assign_resources(const struct pci_bus *bus)
{
	__pci_bus_assign_resources(bus, NULL);
}
EXPORT_SYMBOL(pci_bus_assign_resources);
692 
693 static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
694 					 struct resource_list_x *fail_head)
695 {
696 	struct pci_bus *b;
697 
698 	pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head);
699 
700 	b = bridge->subordinate;
701 	if (!b)
702 		return;
703 
704 	__pci_bus_assign_resources(b, fail_head);
705 
706 	switch (bridge->class >> 8) {
707 	case PCI_CLASS_BRIDGE_PCI:
708 		pci_setup_bridge(b);
709 		break;
710 
711 	case PCI_CLASS_BRIDGE_CARDBUS:
712 		pci_setup_cardbus(b);
713 		break;
714 
715 	default:
716 		dev_info(&bridge->dev, "not setting up bridge for bus "
717 			 "%04x:%02x\n", pci_domain_nr(b), b->number);
718 		break;
719 	}
720 }
/*
 * Release the assigned bridge-window resources of @type on the bridge
 * above @bus, remembering their sizes, and write the now-disabled
 * windows back to the hardware.
 */
static void pci_bridge_release_resources(struct pci_bus *bus,
					  unsigned long type)
{
	int idx;
	bool changed = false;
	struct pci_dev *dev;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	dev = bus->self;
	/* Only the bridge-window slots of the bridge device itself. */
	for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
	     idx++) {
		r = &dev->resource[idx];
		if ((r->flags & type_mask) != type)
			continue;
		if (!r->parent)
			continue;	/* not currently assigned */
		/*
		 * if there are children under that, we should release them
		 *  all
		 */
		release_child_resources(r);
		if (!release_resource(r)) {
			dev_printk(KERN_DEBUG, &dev->dev,
				 "resource %d %pR released\n", idx, r);
			/* keep the old size */
			r->end = resource_size(r) - 1;
			r->start = 0;
			r->flags = 0;
			changed = true;
		}
	}

	if (changed) {
		/* avoiding touch the one without PREF */
		if (type & IORESOURCE_PREFETCH)
			type = IORESOURCE_PREFETCH;
		__pci_setup_bridge(bus, type);
	}
}
762 
/* How aggressively pci_bus_release_bridge_resources() releases windows. */
enum release_type {
	leaf_only,	/* only bridges that have no child bridges */
	whole_subtree,	/* every bridge below the starting bus */
};
/*
 * Try to release PCI bridge resources starting from the leaf bridges,
 * so that a bigger window can be allocated later.
 */
static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
						   unsigned long type,
						   enum release_type rel_type)
{
	struct pci_dev *dev;
	bool is_leaf_bridge = true;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		/* Any subordinate bus means this bridge is not a leaf. */
		is_leaf_bridge = false;

		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			continue;

		/* Recurse bottom-up when releasing the whole subtree. */
		if (rel_type == whole_subtree)
			pci_bus_release_bridge_resources(b, type,
						 whole_subtree);
	}

	/* A root bus has no bridge above it to release. */
	if (pci_is_root_bus(bus))
		return;

	if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
		return;

	if ((rel_type == whole_subtree) || is_leaf_bridge)
		pci_bridge_release_resources(bus, type);
}
802 
803 static void pci_bus_dump_res(struct pci_bus *bus)
804 {
805 	struct resource *res;
806 	int i;
807 
808 	pci_bus_for_each_resource(bus, res, i) {
809 		if (!res || !res->end || !res->flags)
810                         continue;
811 
812 		dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
813         }
814 }
815 
816 static void pci_bus_dump_resources(struct pci_bus *bus)
817 {
818 	struct pci_bus *b;
819 	struct pci_dev *dev;
820 
821 
822 	pci_bus_dump_res(bus);
823 
824 	list_for_each_entry(dev, &bus->devices, bus_list) {
825 		b = dev->subordinate;
826 		if (!b)
827 			continue;
828 
829 		pci_bus_dump_resources(b);
830 	}
831 }
832 
833 static int __init pci_bus_get_depth(struct pci_bus *bus)
834 {
835 	int depth = 0;
836 	struct pci_dev *dev;
837 
838 	list_for_each_entry(dev, &bus->devices, bus_list) {
839 		int ret;
840 		struct pci_bus *b = dev->subordinate;
841 		if (!b)
842 			continue;
843 
844 		ret = pci_bus_get_depth(b);
845 		if (ret + 1 > depth)
846 			depth = ret + 1;
847 	}
848 
849 	return depth;
850 }
851 static int __init pci_get_max_depth(void)
852 {
853 	int depth = 0;
854 	struct pci_bus *bus;
855 
856 	list_for_each_entry(bus, &pci_root_buses, node) {
857 		int ret;
858 
859 		ret = pci_bus_get_depth(bus);
860 		if (ret > depth)
861 			depth = ret;
862 	}
863 
864 	return depth;
865 }
866 
/*
 * The first try does not touch PCI bridge resources.
 * The second and later tries clear small leaf-bridge windows and retry,
 * stopping once the maximum bus depth is reached if no good assignment
 * can be found.
 */
void __init
pci_assign_unassigned_resources(void)
{
	struct pci_bus *bus;
	int tried_times = 0;
	enum release_type rel_type = leaf_only;
	struct resource_list_x head, *list;	/* head is a dummy node */
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;
	unsigned long failed_type;
	int max_depth = pci_get_max_depth();
	int pci_try_num;

	head.next = NULL;

	/* One extra try per level of bridge nesting. */
	pci_try_num = max_depth + 1;
	printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
		 max_depth, pci_try_num);

again:
	/* Depth first, calculate sizes and alignments of all
	   subordinate buses. */
	list_for_each_entry(bus, &pci_root_buses, node) {
		pci_bus_size_bridges(bus);
	}
	/* Depth last, allocate resources and update the hardware. */
	list_for_each_entry(bus, &pci_root_buses, node) {
		__pci_bus_assign_resources(bus, &head);
	}
	tried_times++;

	/* any device complain? */
	if (!head.next)
		goto enable_and_dump;
	/* OR together the types of everything that failed. */
	failed_type = 0;
	for (list = head.next; list;) {
		failed_type |= list->flags;
		list = list->next;
	}
	/*
	 * io port are tight, don't try extra
	 * or if reach the limit, don't want to try more
	 */
	failed_type &= type_mask;
	if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) {
		free_failed_list(&head);
		goto enable_and_dump;
	}

	printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
			 tried_times + 1);

	/* third times and later will not check if it is leaf */
	if ((tried_times + 1) > 2)
		rel_type = whole_subtree;

	/*
	 * Try to release leaf bridge's resources that doesn't fit resource of
	 * child device under that bridge
	 */
	for (list = head.next; list;) {
		bus = list->dev->bus;
		pci_bus_release_bridge_resources(bus, list->flags & type_mask,
						  rel_type);
		list = list->next;
	}
	/* restore size and flags */
	for (list = head.next; list;) {
		struct resource *res = list->res;

		res->start = list->start;
		res->end = list->end;
		res->flags = list->flags;
		/* bridge windows will be re-sized from scratch instead */
		if (list->dev->subordinate)
			res->flags = 0;

		list = list->next;
	}
	free_failed_list(&head);

	goto again;

enable_and_dump:
	/* Depth last, update the hardware. */
	list_for_each_entry(bus, &pci_root_buses, node)
		pci_enable_bridges(bus);

	/* dump the resource on buses */
	list_for_each_entry(bus, &pci_root_buses, node) {
		pci_bus_dump_resources(bus);
	}
}
964 
965 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
966 {
967 	struct pci_bus *parent = bridge->subordinate;
968 	int tried_times = 0;
969 	struct resource_list_x head, *list;
970 	int retval;
971 	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
972 				  IORESOURCE_PREFETCH;
973 
974 	head.next = NULL;
975 
976 again:
977 	pci_bus_size_bridges(parent);
978 	__pci_bridge_assign_resources(bridge, &head);
979 	retval = pci_reenable_device(bridge);
980 	pci_set_master(bridge);
981 	pci_enable_bridges(parent);
982 
983 	tried_times++;
984 
985 	if (!head.next)
986 		return;
987 
988 	if (tried_times >= 2) {
989 		/* still fail, don't need to try more */
990 		free_failed_list(&head);
991 		return;
992 	}
993 
994 	printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
995 			 tried_times + 1);
996 
997 	/*
998 	 * Try to release leaf bridge's resources that doesn't fit resource of
999 	 * child device under that bridge
1000 	 */
1001 	for (list = head.next; list;) {
1002 		struct pci_bus *bus = list->dev->bus;
1003 		unsigned long flags = list->flags;
1004 
1005 		pci_bus_release_bridge_resources(bus, flags & type_mask,
1006 						 whole_subtree);
1007 		list = list->next;
1008 	}
1009 	/* restore size and flags */
1010 	for (list = head.next; list;) {
1011 		struct resource *res = list->res;
1012 
1013 		res->start = list->start;
1014 		res->end = list->end;
1015 		res->flags = list->flags;
1016 		if (list->dev->subordinate)
1017 			res->flags = 0;
1018 
1019 		list = list->next;
1020 	}
1021 	free_failed_list(&head);
1022 
1023 	goto again;
1024 }
1025 EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
1026