xref: /linux/arch/powerpc/kernel/pci_32.c (revision f24e9f586b377749dff37554696cf3a105540c94)
1 /*
2  * Common pmac/prep/chrp pci routines. -- Cort
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/pci.h>
7 #include <linux/delay.h>
8 #include <linux/string.h>
9 #include <linux/init.h>
10 #include <linux/capability.h>
11 #include <linux/sched.h>
12 #include <linux/errno.h>
13 #include <linux/bootmem.h>
14 #include <linux/irq.h>
15 
16 #include <asm/processor.h>
17 #include <asm/io.h>
18 #include <asm/prom.h>
19 #include <asm/sections.h>
20 #include <asm/pci-bridge.h>
21 #include <asm/byteorder.h>
22 #include <asm/uaccess.h>
23 #include <asm/machdep.h>
24 
25 #undef DEBUG
26 
27 #ifdef DEBUG
28 #define DBG(x...) printk(x)
29 #else
30 #define DBG(x...)
31 #endif
32 
33 unsigned long isa_io_base     = 0;
34 unsigned long isa_mem_base    = 0;
35 unsigned long pci_dram_offset = 0;
36 int pcibios_assign_bus_offset = 1;
37 
38 void pcibios_make_OF_bus_map(void);
39 
40 static int pci_relocate_bridge_resource(struct pci_bus *bus, int i);
41 static int probe_resource(struct pci_bus *parent, struct resource *pr,
42 			  struct resource *res, struct resource **conflict);
43 static void update_bridge_base(struct pci_bus *bus, int i);
44 static void pcibios_fixup_resources(struct pci_dev* dev);
45 static void fixup_broken_pcnet32(struct pci_dev* dev);
46 static int reparent_resources(struct resource *parent, struct resource *res);
47 static void fixup_cpc710_pci64(struct pci_dev* dev);
48 #ifdef CONFIG_PPC_OF
49 static u8* pci_to_OF_bus_map;
50 #endif
51 
52 /* By default, we don't re-assign bus numbers. We do this only on
53  * some pmacs
54  */
55 int pci_assign_all_buses;
56 
57 struct pci_controller* hose_head;
58 struct pci_controller** hose_tail = &hose_head;
59 
60 static int pci_bus_count;
61 
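/* Some pcnet32-compatible Ethernet chips are reported with a bogus
 * Trident vendor ID. When a device carrying the Trident vendor ID
 * claims an Ethernet class code, rewrite the vendor ID to AMD (both in
 * the pci_dev and in config space) so the right driver can bind to it.
 */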
62 static void
63 fixup_broken_pcnet32(struct pci_dev* dev)
64 {
65 	if ((dev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET) {
66 		dev->vendor = PCI_VENDOR_ID_AMD;
67 		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
68 	}
69 }
70 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT,	PCI_ANY_ID,			fixup_broken_pcnet32);
71 
72 static void
73 fixup_cpc710_pci64(struct pci_dev* dev)
74 {
75 	/* Hide the PCI64 BARs from the kernel as their content doesn't
76 	 * fit well into the resource management code
77 	 */
78 	dev->resource[0].start = dev->resource[0].end = 0;
79 	dev->resource[0].flags = 0;
80 	dev->resource[1].start = dev->resource[1].end = 0;
81 	dev->resource[1].flags = 0;
82 }
83 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM,	PCI_DEVICE_ID_IBM_CPC710_PCI64,	fixup_cpc710_pci64);
84 
85 static void
86 pcibios_fixup_resources(struct pci_dev *dev)
87 {
88 	struct pci_controller* hose = (struct pci_controller *)dev->sysdata;
89 	int i;
90 	unsigned long offset;
91 
92 	if (!hose) {
93 		printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev));
94 		return;
95 	}
96 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
97 		struct resource *res = dev->resource + i;
98 		if (!res->flags)
99 			continue;
100 		if (res->end == 0xffffffff) {
101 			DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
102 			    pci_name(dev), i, res->start, res->end);
103 			res->end -= res->start;
104 			res->start = 0;
105 			res->flags |= IORESOURCE_UNSET;
106 			continue;
107 		}
108 		offset = 0;
109 		if (res->flags & IORESOURCE_MEM) {
110 			offset = hose->pci_mem_offset;
111 		} else if (res->flags & IORESOURCE_IO) {
112 			offset = (unsigned long) hose->io_base_virt
113 				- isa_io_base;
114 		}
115 		if (offset != 0) {
116 			res->start += offset;
117 			res->end += offset;
118 #ifdef DEBUG
119 			printk("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
120 			       i, res->flags, pci_name(dev),
121 			       res->start - offset, res->start);
122 #endif
123 		}
124 	}
125 
126 	/* Call machine specific resource fixup */
127 	if (ppc_md.pcibios_fixup_resources)
128 		ppc_md.pcibios_fixup_resources(dev);
129 }
130 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID,		PCI_ANY_ID,			pcibios_fixup_resources);
131 
132 void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
133 			struct resource *res)
134 {
135 	unsigned long offset = 0;
136 	struct pci_controller *hose = dev->sysdata;
137 
138 	if (hose && res->flags & IORESOURCE_IO)
139 		offset = (unsigned long)hose->io_base_virt - isa_io_base;
140 	else if (hose && res->flags & IORESOURCE_MEM)
141 		offset = hose->pci_mem_offset;
142 	region->start = res->start - offset;
143 	region->end = res->end - offset;
144 }
145 EXPORT_SYMBOL(pcibios_resource_to_bus);
146 
147 void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
148 			     struct pci_bus_region *region)
149 {
150 	unsigned long offset = 0;
151 	struct pci_controller *hose = dev->sysdata;
152 
153 	if (hose && res->flags & IORESOURCE_IO)
154 		offset = (unsigned long)hose->io_base_virt - isa_io_base;
155 	else if (hose && res->flags & IORESOURCE_MEM)
156 		offset = hose->pci_mem_offset;
157 	res->start = region->start + offset;
158 	res->end = region->end + offset;
159 }
160 EXPORT_SYMBOL(pcibios_bus_to_resource);
161 
162 /*
163  * We need to avoid collisions with `mirrored' VGA ports
164  * and other strange ISA hardware, so we always want the
165  * addresses to be allocated in the 0x000-0x0ff region
166  * modulo 0x400.
167  *
168  * Why? Because some silly external IO cards only decode
169  * the low 10 bits of the IO address. The 0x00-0xff region
170  * is reserved for motherboard devices that decode all 16
171  * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
172  * but we want to try to avoid allocating at 0x2900-0x2bff
173  * which might be mirrored at 0x0100-0x03ff.
174  */
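/* For example, an I/O window that would start at 0x2900 is pushed up by
 * the code below to (0x2900 + 0x3ff) & ~0x3ff == 0x2c00, so it can no
 * longer alias the 0x0100-0x03ff range on cards that decode only 10
 * address bits.
 */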
175 void pcibios_align_resource(void *data, struct resource *res,
176 				resource_size_t size, resource_size_t align)
177 {
178 	struct pci_dev *dev = data;
179 
180 	if (res->flags & IORESOURCE_IO) {
181 		resource_size_t start = res->start;
182 
183 		if (size > 0x100) {
184 			printk(KERN_ERR "PCI: I/O Region %s/%d too large"
185 			       " (%lld bytes)\n", pci_name(dev),
186 			       (int)(res - dev->resource), (unsigned long long)size);
187 		}
188 
189 		if (start & 0x300) {
190 			start = (start + 0x3ff) & ~0x3ff;
191 			res->start = start;
192 		}
193 	}
194 }
195 EXPORT_SYMBOL(pcibios_align_resource);
196 
197 /*
198  *  Handle resources of PCI devices.  If the world were perfect, we could
199  *  just allocate all the resource regions and do nothing more.  It isn't.
200  *  On the other hand, we cannot just re-allocate all devices, as it would
201  *  require us to know lots of host bridge internals.  So we attempt to
202  *  keep as much of the original configuration as possible, but tweak it
203  *  when it's found to be wrong.
204  *
205  *  Known BIOS problems we have to work around:
206  *	- I/O or memory regions not configured
207  *	- regions configured, but not enabled in the command register
208  *	- bogus I/O addresses above 64K used
209  *	- expansion ROMs left enabled (this may sound harmless, but given
210  *	  the fact the PCI specs explicitly allow address decoders to be
211  *	  shared between expansion ROMs and other resource regions, it's
212  *	  at least dangerous)
213  *
214  *  Our solution:
215  *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
216  *	    This gives us fixed barriers on where we can allocate.
217  *	(2) Allocate resources for all enabled devices.  If there is
218  *	    a collision, just mark the resource as unallocated. Also
219  *	    disable expansion ROMs during this step.
220  *	(3) Try to allocate resources for disabled devices.  If the
221  *	    resources were assigned correctly, everything goes well;
222  *	    if they weren't, they won't disturb the allocation of other
223  *	    resources.
224  *	(4) Assign new addresses to resources which were either
225  *	    not configured at all or misconfigured.  If explicitly
226  *	    requested by the user, configure expansion ROM address
227  *	    as well.
228  */
229 
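/* In this file, the four steps above correspond to the calls made from
 * pcibios_init() below: (1) pcibios_allocate_bus_resources(), then
 * (2) pcibios_allocate_resources(0) for enabled devices (which also
 * switches off expansion ROMs), (3) pcibios_allocate_resources(1) for
 * disabled devices, and finally (4) pcibios_assign_resources().
 */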
230 static void __init
231 pcibios_allocate_bus_resources(struct list_head *bus_list)
232 {
233 	struct pci_bus *bus;
234 	int i;
235 	struct resource *res, *pr;
236 
237 	/* Depth-First Search on bus tree */
238 	list_for_each_entry(bus, bus_list, node) {
239 		for (i = 0; i < 4; ++i) {
240 			if ((res = bus->resource[i]) == NULL || !res->flags
241 			    || res->start > res->end)
242 				continue;
243 			if (bus->parent == NULL)
244 				pr = (res->flags & IORESOURCE_IO)?
245 					&ioport_resource: &iomem_resource;
246 			else {
247 				pr = pci_find_parent_resource(bus->self, res);
248 				if (pr == res) {
249 					/* this happens when the generic PCI
250 					 * code (wrongly) decides that this
251 					 * bridge is transparent  -- paulus
252 					 */
253 					continue;
254 				}
255 			}
256 
257 			DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
258 				res->start, res->end, res->flags, pr);
259 			if (pr) {
260 				if (request_resource(pr, res) == 0)
261 					continue;
262 				/*
263 				 * Must be a conflict with an existing entry.
264 				 * Move that entry (or entries) under the
265 				 * bridge resource and try again.
266 				 */
267 				if (reparent_resources(pr, res) == 0)
268 					continue;
269 			}
270 			printk(KERN_ERR "PCI: Cannot allocate resource region "
271 			       "%d of PCI bridge %d\n", i, bus->number);
272 			if (pci_relocate_bridge_resource(bus, i))
273 				bus->resource[i] = NULL;
274 		}
275 		pcibios_allocate_bus_resources(&bus->children);
276 	}
277 }
278 
279 /*
280  * Reparent those children of 'parent' that conflict with 'res'
281  * under 'res', and make 'res' replace them in the resource tree.
282  */
283 static int __init
284 reparent_resources(struct resource *parent, struct resource *res)
285 {
286 	struct resource *p, **pp;
287 	struct resource **firstpp = NULL;
288 
289 	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
290 		if (p->end < res->start)
291 			continue;
292 		if (res->end < p->start)
293 			break;
294 		if (p->start < res->start || p->end > res->end)
295 			return -1;	/* not completely contained */
296 		if (firstpp == NULL)
297 			firstpp = pp;
298 	}
299 	if (firstpp == NULL)
300 		return -1;	/* didn't find any conflicting entries? */
301 	res->parent = parent;
302 	res->child = *firstpp;
303 	res->sibling = *pp;
304 	*firstpp = res;
305 	*pp = NULL;
306 	for (p = res->child; p != NULL; p = p->sibling) {
307 		p->parent = res;
308 		DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
309 		    p->name, p->start, p->end, res->name);
310 	}
311 	return 0;
312 }
313 
314 /*
315  * A bridge has been allocated a range which is outside the range
316  * of its parent bridge, so it needs to be moved.
317  */
318 static int __init
319 pci_relocate_bridge_resource(struct pci_bus *bus, int i)
320 {
321 	struct resource *res, *pr, *conflict;
322 	unsigned long try, size;
323 	int j;
324 	struct pci_bus *parent = bus->parent;
325 
326 	if (parent == NULL) {
327 		/* shouldn't ever happen */
328 		printk(KERN_ERR "PCI: can't move host bridge resource\n");
329 		return -1;
330 	}
331 	res = bus->resource[i];
332 	if (res == NULL)
333 		return -1;
334 	pr = NULL;
335 	for (j = 0; j < 4; j++) {
336 		struct resource *r = parent->resource[j];
337 		if (!r)
338 			continue;
339 		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
340 			continue;
341 		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) {
342 			pr = r;
343 			break;
344 		}
345 		if (res->flags & IORESOURCE_PREFETCH)
346 			pr = r;
347 	}
348 	if (pr == NULL)
349 		return -1;
350 	size = res->end - res->start;
351 	if (pr->start > pr->end || size > pr->end - pr->start)
352 		return -1;
353 	try = pr->end;
354 	for (;;) {
355 		res->start = try - size;
356 		res->end = try;
357 		if (probe_resource(bus->parent, pr, res, &conflict) == 0)
358 			break;
359 		if (conflict->start <= pr->start + size)
360 			return -1;
361 		try = conflict->start - 1;
362 	}
363 	if (request_resource(pr, res)) {
364 		DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
365 		    res->start, res->end);
366 		return -1;		/* "can't happen" */
367 	}
368 	update_bridge_base(bus, i);
369 	printk(KERN_INFO "PCI: bridge %d resource %d moved to %llx..%llx\n",
370 	       bus->number, i, (unsigned long long)res->start,
371 	       (unsigned long long)res->end);
372 	return 0;
373 }
374 
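/* Check whether 'res' would overlap anything already known under 'pr':
 * the existing children of 'pr', the windows of the parent bus's child
 * buses, and the BARs of devices sitting directly on the parent bus.
 * Returns 1 and sets *conflict if an overlap is found, 0 otherwise.
 */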
375 static int __init
376 probe_resource(struct pci_bus *parent, struct resource *pr,
377 	       struct resource *res, struct resource **conflict)
378 {
379 	struct pci_bus *bus;
380 	struct pci_dev *dev;
381 	struct resource *r;
382 	int i;
383 
384 	for (r = pr->child; r != NULL; r = r->sibling) {
385 		if (r->end >= res->start && res->end >= r->start) {
386 			*conflict = r;
387 			return 1;
388 		}
389 	}
390 	list_for_each_entry(bus, &parent->children, node) {
391 		for (i = 0; i < 4; ++i) {
392 			if ((r = bus->resource[i]) == NULL)
393 				continue;
394 			if (!r->flags || r->start > r->end || r == res)
395 				continue;
396 			if (pci_find_parent_resource(bus->self, r) != pr)
397 				continue;
398 			if (r->end >= res->start && res->end >= r->start) {
399 				*conflict = r;
400 				return 1;
401 			}
402 		}
403 	}
404 	list_for_each_entry(dev, &parent->devices, bus_list) {
405 		for (i = 0; i < 6; ++i) {
406 			r = &dev->resource[i];
407 			if (!r->flags || (r->flags & IORESOURCE_UNSET))
408 				continue;
409 			if (pci_find_parent_resource(dev, r) != pr)
410 				continue;
411 			if (r->end >= res->start && res->end >= r->start) {
412 				*conflict = r;
413 				return 1;
414 			}
415 		}
416 	}
417 	return 0;
418 }
419 
420 static void __init
421 update_bridge_base(struct pci_bus *bus, int i)
422 {
423 	struct resource *res = bus->resource[i];
424 	u8 io_base_lo, io_limit_lo;
425 	u16 mem_base, mem_limit;
426 	u16 cmd;
427 	unsigned long start, end, off;
428 	struct pci_dev *dev = bus->self;
429 	struct pci_controller *hose = dev->sysdata;
430 
431 	if (!hose) {
432 		printk("update_bridge_base: no hose?\n");
433 		return;
434 	}
435 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
436 	pci_write_config_word(dev, PCI_COMMAND,
437 			      cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY));
438 	if (res->flags & IORESOURCE_IO) {
439 		off = (unsigned long) hose->io_base_virt - isa_io_base;
440 		start = res->start - off;
441 		end = res->end - off;
442 		io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
443 		io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
444 		if (end > 0xffff) {
445 			pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
446 					      start >> 16);
447 			pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
448 					      end >> 16);
449 			io_base_lo |= PCI_IO_RANGE_TYPE_32;
450 		} else
451 			io_base_lo |= PCI_IO_RANGE_TYPE_16;
452 		pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
453 		pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);
454 
455 	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
456 		   == IORESOURCE_MEM) {
457 		off = hose->pci_mem_offset;
458 		mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK;
459 		mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK;
460 		pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base);
461 		pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit);
462 
463 	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
464 		   == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
465 		off = hose->pci_mem_offset;
466 		mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK;
467 		mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK;
468 		pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base);
469 		pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit);
470 
471 	} else {
472 		DBG(KERN_ERR "PCI: ugh, bridge %s res %d has flags=%lx\n",
473 		    pci_name(dev), i, res->flags);
474 	}
475 	pci_write_config_word(dev, PCI_COMMAND, cmd);
476 }
477 
478 static inline void alloc_resource(struct pci_dev *dev, int idx)
479 {
480 	struct resource *pr, *r = &dev->resource[idx];
481 
482 	DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
483 	    pci_name(dev), idx, r->start, r->end, r->flags);
484 	pr = pci_find_parent_resource(dev, r);
485 	if (!pr || request_resource(pr, r) < 0) {
486 		printk(KERN_ERR "PCI: Cannot allocate resource region %d"
487 		       " of device %s\n", idx, pci_name(dev));
488 		if (pr)
489 			DBG("PCI:  parent is %p: %016llx-%016llx (f=%lx)\n",
490 			    pr, pr->start, pr->end, pr->flags);
491 		/* We'll assign a new address later */
492 		r->flags |= IORESOURCE_UNSET;
493 		r->end -= r->start;
494 		r->start = 0;
495 	}
496 }
497 
498 static void __init
499 pcibios_allocate_resources(int pass)
500 {
501 	struct pci_dev *dev = NULL;
502 	int idx, disabled;
503 	u16 command;
504 	struct resource *r;
505 
506 	for_each_pci_dev(dev) {
507 		pci_read_config_word(dev, PCI_COMMAND, &command);
508 		for (idx = 0; idx < 6; idx++) {
509 			r = &dev->resource[idx];
510 			if (r->parent)		/* Already allocated */
511 				continue;
512 			if (!r->flags || (r->flags & IORESOURCE_UNSET))
513 				continue;	/* Not assigned at all */
514 			if (r->flags & IORESOURCE_IO)
515 				disabled = !(command & PCI_COMMAND_IO);
516 			else
517 				disabled = !(command & PCI_COMMAND_MEMORY);
518 			if (pass == disabled)
519 				alloc_resource(dev, idx);
520 		}
521 		if (pass)
522 			continue;
523 		r = &dev->resource[PCI_ROM_RESOURCE];
524 		if (r->flags & IORESOURCE_ROM_ENABLE) {
525 			/* Turn the ROM off, leave the resource region, but keep it unregistered. */
526 			u32 reg;
527 			DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
528 			r->flags &= ~IORESOURCE_ROM_ENABLE;
529 			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
530 			pci_write_config_dword(dev, dev->rom_base_reg,
531 					       reg & ~PCI_ROM_ADDRESS_ENABLE);
532 		}
533 	}
534 }
535 
536 static void __init
537 pcibios_assign_resources(void)
538 {
539 	struct pci_dev *dev = NULL;
540 	int idx;
541 	struct resource *r;
542 
543 	for_each_pci_dev(dev) {
544 		int class = dev->class >> 8;
545 
546 		/* Don't touch classless devices and host bridges */
547 		if (!class || class == PCI_CLASS_BRIDGE_HOST)
548 			continue;
549 
550 		for (idx = 0; idx < 6; idx++) {
551 			r = &dev->resource[idx];
552 
553 			/*
554 			 * We shall assign a new address to this resource,
555 			 * either because the BIOS (sic) forgot to do so
556 			 * or because we have decided the old address was
557 			 * unusable for some reason.
558 			 */
559 			if ((r->flags & IORESOURCE_UNSET) && r->end &&
560 			    (!ppc_md.pcibios_enable_device_hook ||
561 			     !ppc_md.pcibios_enable_device_hook(dev, 1))) {
562 				r->flags &= ~IORESOURCE_UNSET;
563 				pci_assign_resource(dev, idx);
564 			}
565 		}
566 
567 #if 0 /* don't assign ROMs */
568 		r = &dev->resource[PCI_ROM_RESOURCE];
569 		r->end -= r->start;
570 		r->start = 0;
571 		if (r->end)
572 			pci_assign_resource(dev, PCI_ROM_RESOURCE);
573 #endif
574 	}
575 }
576 
577 
578 int
579 pcibios_enable_resources(struct pci_dev *dev, int mask)
580 {
581 	u16 cmd, old_cmd;
582 	int idx;
583 	struct resource *r;
584 
585 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
586 	old_cmd = cmd;
587 	for (idx=0; idx<6; idx++) {
588 		/* Only set up the requested stuff */
589 		if (!(mask & (1<<idx)))
590 			continue;
591 
592 		r = &dev->resource[idx];
593 		if (r->flags & IORESOURCE_UNSET) {
594 			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
595 			return -EINVAL;
596 		}
597 		if (r->flags & IORESOURCE_IO)
598 			cmd |= PCI_COMMAND_IO;
599 		if (r->flags & IORESOURCE_MEM)
600 			cmd |= PCI_COMMAND_MEMORY;
601 	}
602 	if (dev->resource[PCI_ROM_RESOURCE].start)
603 		cmd |= PCI_COMMAND_MEMORY;
604 	if (cmd != old_cmd) {
605 		printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
606 		pci_write_config_word(dev, PCI_COMMAND, cmd);
607 	}
608 	return 0;
609 }
610 
611 static int next_controller_index;
612 
613 struct pci_controller * __init
614 pcibios_alloc_controller(void)
615 {
616 	struct pci_controller *hose;
617 
618 	hose = (struct pci_controller *)alloc_bootmem(sizeof(*hose));
619 	memset(hose, 0, sizeof(struct pci_controller));
620 
621 	*hose_tail = hose;
622 	hose_tail = &hose->next;
623 
624 	hose->index = next_controller_index++;
625 
626 	return hose;
627 }
628 
629 #ifdef CONFIG_PPC_OF
630 /*
631  * Functions below are used on OpenFirmware machines.
632  */
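/* Recursively record, for each kernel PCI bus number, the bus number
 * Open Firmware assigned to the same bus, as read from each bridge
 * node's "bus-range" property.
 */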
633 static void
634 make_one_node_map(struct device_node* node, u8 pci_bus)
635 {
636 	const int *bus_range;
637 	int len;
638 
639 	if (pci_bus >= pci_bus_count)
640 		return;
641 	bus_range = get_property(node, "bus-range", &len);
642 	if (bus_range == NULL || len < 2 * sizeof(int)) {
643 		printk(KERN_WARNING "Can't get bus-range for %s, "
644 		       "assuming it starts at 0\n", node->full_name);
645 		pci_to_OF_bus_map[pci_bus] = 0;
646 	} else
647 		pci_to_OF_bus_map[pci_bus] = bus_range[0];
648 
649 	for (node=node->child; node != 0;node = node->sibling) {
650 		struct pci_dev* dev;
651 		const unsigned int *class_code, *reg;
652 
653 		class_code = get_property(node, "class-code", NULL);
654 		if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
655 			(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
656 			continue;
657 		reg = get_property(node, "reg", NULL);
658 		if (!reg)
659 			continue;
660 		dev = pci_find_slot(pci_bus, ((reg[0] >> 8) & 0xff));
661 		if (!dev || !dev->subordinate)
662 			continue;
663 		make_one_node_map(node, dev->subordinate->number);
664 	}
665 }
666 
667 void
668 pcibios_make_OF_bus_map(void)
669 {
670 	int i;
671 	struct pci_controller* hose;
672 	struct property *map_prop;
673 
674 	pci_to_OF_bus_map = (u8*)kmalloc(pci_bus_count, GFP_KERNEL);
675 	if (!pci_to_OF_bus_map) {
676 		printk(KERN_ERR "Can't allocate OF bus map !\n");
677 		return;
678 	}
679 
680 	/* We fill the bus map with invalid values, that helps
681 	 * debugging.
682 	 */
683 	for (i=0; i<pci_bus_count; i++)
684 		pci_to_OF_bus_map[i] = 0xff;
685 
686 	/* For each hose, we begin searching bridges */
687 	for(hose=hose_head; hose; hose=hose->next) {
688 		struct device_node* node;
689 		node = (struct device_node *)hose->arch_data;
690 		if (!node)
691 			continue;
692 		make_one_node_map(node, hose->first_busno);
693 	}
694 	map_prop = of_find_property(find_path_device("/"),
695 			"pci-OF-bus-map", NULL);
696 	if (map_prop) {
697 		BUG_ON(pci_bus_count > map_prop->length);
698 		memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count);
699 	}
700 #ifdef DEBUG
701 	printk("PCI->OF bus map:\n");
702 	for (i=0; i<pci_bus_count; i++) {
703 		if (pci_to_OF_bus_map[i] == 0xff)
704 			continue;
705 		printk("%d -> %d\n", i, pci_to_OF_bus_map[i]);
706 	}
707 #endif
708 }
709 
710 typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
711 
712 static struct device_node*
713 scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
714 {
715 	struct device_node* sub_node;
716 
717 	for (; node != 0;node = node->sibling) {
718 		const unsigned int *class_code;
719 
720 		if (filter(node, data))
721 			return node;
722 
723 		/* For PCI<->PCI bridges or CardBus bridges, we go down.
724 		 * Note: some OFs create a parent node "multifunc-device" as
725 		 * a fake root for all functions of a multi-function device;
726 		 * we go down into those as well.
727 		 */
728 		class_code = get_property(node, "class-code", NULL);
729 		if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
730 			(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
731 			strcmp(node->name, "multifunc-device"))
732 			continue;
733 		sub_node = scan_OF_pci_childs(node->child, filter, data);
734 		if (sub_node)
735 			return sub_node;
736 	}
737 	return NULL;
738 }
739 
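/* In the OF binding for PCI, the first cell of a node's "reg" property
 * encodes the bus number in bits 16-23 and the devfn in bits 8-15; the
 * iterator below matches those fields against the bus/devfn pair that
 * scan_OF_childs_for_device() passes in via 'data'.
 */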
740 static int
741 scan_OF_pci_childs_iterator(struct device_node* node, void* data)
742 {
743 	const unsigned int *reg;
744 	u8* fdata = (u8*)data;
745 
746 	reg = get_property(node, "reg", NULL);
747 	if (reg && ((reg[0] >> 8) & 0xff) == fdata[1]
748 		&& ((reg[0] >> 16) & 0xff) == fdata[0])
749 		return 1;
750 	return 0;
751 }
752 
753 static struct device_node*
754 scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
755 {
756 	u8 filter_data[2] = {bus, dev_fn};
757 
758 	return scan_OF_pci_childs(node, scan_OF_pci_childs_iterator, filter_data);
759 }
760 
761 /*
762  * Scans the OF tree for a device node matching a PCI device
763  */
764 struct device_node *
765 pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
766 {
767 	struct pci_controller *hose;
768 	struct device_node *node;
769 	int busnr;
770 
771 	if (!have_of)
772 		return NULL;
773 
774 	/* Lookup the hose */
775 	busnr = bus->number;
776 	hose = pci_bus_to_hose(busnr);
777 	if (!hose)
778 		return NULL;
779 
780 	/* Check it has an OF node associated */
781 	node = (struct device_node *) hose->arch_data;
782 	if (!node)
783 		return NULL;
784 
785 	/* Fix up the bus number according to what OF thinks it is. */
786 #ifdef CONFIG_PPC_PMAC
787 	/* The G5 needs a special case here. Basically, we don't remap all
788 	 * busses on it, so we don't create the pci-OF map. However, we do
789 	 * remap the AGP bus and so have to deal with it. A better future
790 	 * fix would make the remapping per-host and always fill the
791 	 * pci_to_OF map. --BenH
792 	 */
793 	if (machine_is(powermac) && busnr >= 0xf0)
794 		busnr -= 0xf0;
795 	else
796 #endif
797 	if (pci_to_OF_bus_map)
798 		busnr = pci_to_OF_bus_map[busnr];
799 	if (busnr == 0xff)
800 		return NULL;
801 
802 	/* Now, look up the children of the hose */
803 	return scan_OF_childs_for_device(node->child, busnr, devfn);
804 }
805 EXPORT_SYMBOL(pci_busdev_to_OF_node);
806 
807 struct device_node*
808 pci_device_to_OF_node(struct pci_dev *dev)
809 {
810 	return pci_busdev_to_OF_node(dev->bus, dev->devfn);
811 }
812 EXPORT_SYMBOL(pci_device_to_OF_node);
813 
814 /* This routine is meant to be used early during boot, when the
815  * PCI bus numbers have not yet been assigned, and you need to
816  * issue PCI config cycles to an OF device.
817  * It could also be used to "fix" RTAS config cycles if you want
818  * to set pci_assign_all_buses to 1 and still use RTAS for PCI
819  * config cycles.
820  */
821 struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
822 {
823 	if (!have_of)
824 		return NULL;
825 	while(node) {
826 		struct pci_controller* hose;
827 		for (hose=hose_head;hose;hose=hose->next)
828 			if (hose->arch_data == node)
829 				return hose;
830 		node=node->parent;
831 	}
832 	return NULL;
833 }
834 
835 static int
836 find_OF_pci_device_filter(struct device_node* node, void* data)
837 {
838 	return ((void *)node == data);
839 }
840 
841 /*
842  * Returns the PCI device matching a given OF node
843  */
844 int
845 pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
846 {
847 	const unsigned int *reg;
848 	struct pci_controller* hose;
849 	struct pci_dev* dev = NULL;
850 
851 	if (!have_of)
852 		return -ENODEV;
853 	/* Make sure it's really a PCI device */
854 	hose = pci_find_hose_for_OF_device(node);
855 	if (!hose || !hose->arch_data)
856 		return -ENODEV;
857 	if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child,
858 			find_OF_pci_device_filter, (void *)node))
859 		return -ENODEV;
860 	reg = get_property(node, "reg", NULL);
861 	if (!reg)
862 		return -ENODEV;
863 	*bus = (reg[0] >> 16) & 0xff;
864 	*devfn = ((reg[0] >> 8) & 0xff);
865 
866 	/* OK, here we need a tweak. If we have already renumbered
867 	 * all busses, we can't rely on the OF bus number any more.
868 	 * The pci_to_OF_bus_map is not enough, as several PCI busses
869 	 * may match the same OF bus number.
870 	 */
871 	if (!pci_to_OF_bus_map)
872 		return 0;
873 
874 	for_each_pci_dev(dev)
875 		if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
876 				dev->devfn == *devfn) {
877 			*bus = dev->bus->number;
878 			pci_dev_put(dev);
879 			return 0;
880 		}
881 
882 	return -ENODEV;
883 }
884 EXPORT_SYMBOL(pci_device_from_OF_node);
885 
886 void __init
887 pci_process_bridge_OF_ranges(struct pci_controller *hose,
888 			   struct device_node *dev, int primary)
889 {
890 	static unsigned int static_lc_ranges[256] __initdata;
891 	const unsigned int *dt_ranges;
892 	unsigned int *lc_ranges, *ranges, *prev, size;
893 	int rlen = 0, orig_rlen;
894 	int memno = 0;
895 	struct resource *res;
896 	int np, na = prom_n_addr_cells(dev);
897 	np = na + 5;
898 
899 	/* First we try to merge ranges to fix a problem with some pmacs
900 	 * that can have more than 3 ranges, fortunately using contiguous
901 	 * addresses -- BenH
902 	 */
903 	dt_ranges = get_property(dev, "ranges", &rlen);
904 	if (!dt_ranges)
905 		return;
906 	/* Sanity check, though hopefully that never happens */
907 	if (rlen > sizeof(static_lc_ranges)) {
908 		printk(KERN_WARNING "OF ranges property too large !\n");
909 		rlen = sizeof(static_lc_ranges);
910 	}
911 	lc_ranges = static_lc_ranges;
912 	memcpy(lc_ranges, dt_ranges, rlen);
913 	orig_rlen = rlen;
914 
915 	/* Let's work on a copy of the "ranges" property instead of damaging
916 	 * the device-tree image in memory
917 	 */
918 	ranges = lc_ranges;
919 	prev = NULL;
920 	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
921 		if (prev) {
922 			if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
923 				(prev[2] + prev[na+4]) == ranges[2] &&
924 				(prev[na+2] + prev[na+4]) == ranges[na+2]) {
925 				prev[na+4] += ranges[na+4];
926 				ranges[0] = 0;
927 				ranges += np;
928 				continue;
929 			}
930 		}
931 		prev = ranges;
932 		ranges += np;
933 	}
934 
935 	/*
936 	 * The ranges property is laid out as an array of elements,
937 	 * each of which comprises:
938 	 *   cells 0 - 2:	a PCI address
939 	 *   cells 3 or 3+4:	a CPU physical address
940 	 *			(size depending on dev->n_addr_cells)
941 	 *   cells 4+5 or 5+6:	the size of the range
942 	 */
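	/* An illustrative entry with one CPU address cell (na == 1, so
	 * np == 6 cells per element), loosely modelled on a pmac host:
	 *   0x01000000 0x0 0x00000000  0xf2000000  0x0 0x00800000
	 * i.e. space code 1 (I/O) in bits 24-25 of cell 0, PCI address 0,
	 * CPU address 0xf2000000, size 8MB; and
	 *   0x02000000 0x0 0x80000000  0x80000000  0x0 0x10000000
	 * i.e. a 256MB 32-bit memory window mapped 1:1 at 0x80000000
	 * (bit 30 of cell 0 would additionally mark it prefetchable).
	 */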
943 	ranges = lc_ranges;
944 	rlen = orig_rlen;
945 	while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
946 		res = NULL;
947 		size = ranges[na+4];
948 		switch ((ranges[0] >> 24) & 0x3) {
949 		case 1:		/* I/O space */
950 			if (ranges[2] != 0)
951 				break;
952 			hose->io_base_phys = ranges[na+2];
953 			/* limit I/O space to 16MB */
954 			if (size > 0x01000000)
955 				size = 0x01000000;
956 			hose->io_base_virt = ioremap(ranges[na+2], size);
957 			if (primary)
958 				isa_io_base = (unsigned long) hose->io_base_virt;
959 			res = &hose->io_resource;
960 			res->flags = IORESOURCE_IO;
961 			res->start = ranges[2];
962 			DBG("PCI: IO 0x%llx -> 0x%llx\n",
963 				    res->start, res->start + size - 1);
964 			break;
965 		case 2:		/* memory space */
966 			memno = 0;
967 			if (ranges[1] == 0 && ranges[2] == 0
968 			    && ranges[na+4] <= (16 << 20)) {
969 				/* 1st 16MB, i.e. ISA memory area */
970 				if (primary)
971 					isa_mem_base = ranges[na+2];
972 				memno = 1;
973 			}
974 			while (memno < 3 && hose->mem_resources[memno].flags)
975 				++memno;
976 			if (memno == 0)
977 				hose->pci_mem_offset = ranges[na+2] - ranges[2];
978 			if (memno < 3) {
979 				res = &hose->mem_resources[memno];
980 				res->flags = IORESOURCE_MEM;
981 				if(ranges[0] & 0x40000000)
982 					res->flags |= IORESOURCE_PREFETCH;
983 				res->start = ranges[na+2];
984 				DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno,
985 					    res->start, res->start + size - 1);
986 			}
987 			break;
988 		}
989 		if (res != NULL) {
990 			res->name = dev->full_name;
991 			res->end = res->start + size - 1;
992 			res->parent = NULL;
993 			res->sibling = NULL;
994 			res->child = NULL;
995 		}
996 		ranges += np;
997 	}
998 }
999 
1000 /* We create the "pci-OF-bus-map" property now so it appears in the
1001  * /proc device tree
1002  */
1003 void __init
1004 pci_create_OF_bus_map(void)
1005 {
1006 	struct property* of_prop;
1007 
1008 	of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256);
1009 	if (of_prop && find_path_device("/")) {
1010 		memset(of_prop, -1, sizeof(struct property) + 256);
1011 		of_prop->name = "pci-OF-bus-map";
1012 		of_prop->length = 256;
1013 		of_prop->value = (unsigned char *)&of_prop[1];
1014 		prom_add_property(find_path_device("/"), of_prop);
1015 	}
1016 }
1017 
1018 static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
1019 {
1020 	struct pci_dev *pdev;
1021 	struct device_node *np;
1022 
1023 	pdev = to_pci_dev (dev);
1024 	np = pci_device_to_OF_node(pdev);
1025 	if (np == NULL || np->full_name == NULL)
1026 		return 0;
1027 	return sprintf(buf, "%s", np->full_name);
1028 }
1029 static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
1030 
1031 #else /* CONFIG_PPC_OF */
1032 void pcibios_make_OF_bus_map(void)
1033 {
1034 }
1035 #endif /* CONFIG_PPC_OF */
1036 
1037 /* Add sysfs properties */
1038 void pcibios_add_platform_entries(struct pci_dev *pdev)
1039 {
1040 #ifdef CONFIG_PPC_OF
1041 	device_create_file(&pdev->dev, &dev_attr_devspec);
1042 #endif /* CONFIG_PPC_OF */
1043 }
1044 
1045 
1046 #ifdef CONFIG_PPC_PMAC
1047 /*
1048  * This set of routines checks for PCI<->PCI bridges that have closed
1049  * IO resources and have child devices. It tries to re-open an IO
1050  * window on them.
1051  *
1052  * This is a _temporary_ fix to work around a problem with Apple's OF
1053  * closing IO windows on P2P bridges when the OF drivers of the cards
1054  * below these bridges don't claim any IO range (typically ATI or
1055  * Adaptec).
1056  *
1057  * A more complete fix would be to use drivers/pci/setup-bus.c, which
1058  * involves a working pcibios_fixup_pbus_ranges(), some more care about
1059  * ordering when creating the host bus resources, and maybe a few more
1060  * minor tweaks.
1061  */
1062 
1063 /* Initialize bridges with base/limit values we have collected */
1064 static void __init
1065 do_update_p2p_io_resource(struct pci_bus *bus, int enable_vga)
1066 {
1067 	struct pci_dev *bridge = bus->self;
1068 	struct pci_controller* hose = (struct pci_controller *)bridge->sysdata;
1069 	u32 l;
1070 	u16 w;
1071 	struct resource res;
1072 
1073 	if (bus->resource[0] == NULL)
1074 		return;
1075  	res = *(bus->resource[0]);
1076 
1077 	DBG("Remapping Bus %d, bridge: %s\n", bus->number, pci_name(bridge));
1078 	res.start -= ((unsigned long) hose->io_base_virt - isa_io_base);
1079 	res.end -= ((unsigned long) hose->io_base_virt - isa_io_base);
1080 	DBG("  IO window: %016llx-%016llx\n", res.start, res.end);
1081 
1082 	/* Set up the top and bottom of the PCI I/O segment for this bus. */
1083 	pci_read_config_dword(bridge, PCI_IO_BASE, &l);
1084 	l &= 0xffff000f;
1085 	l |= (res.start >> 8) & 0x00f0;
1086 	l |= res.end & 0xf000;
1087 	pci_write_config_dword(bridge, PCI_IO_BASE, l);
1088 
1089 	if ((l & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
1090 		l = (res.start >> 16) | (res.end & 0xffff0000);
1091 		pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, l);
1092 	}
1093 
1094 	pci_read_config_word(bridge, PCI_COMMAND, &w);
1095 	w |= PCI_COMMAND_IO;
1096 	pci_write_config_word(bridge, PCI_COMMAND, w);
1097 
1098 #if 0 /* Enabling this causes XFree 4.2.0 to hang during PCI probe */
1099 	if (enable_vga) {
1100 		pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &w);
1101 		w |= PCI_BRIDGE_CTL_VGA;
1102 		pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, w);
1103 	}
1104 #endif
1105 }
1106 
1107 /* This function is pretty basic and actually quite broken for the
1108  * general case, but it's enough for us right now. It's supposed to
1109  * tell us whether we need to open an IO range at all, and if so,
1110  * what size.
1111  */
1112 static int __init
1113 check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga)
1114 {
1115 	struct pci_dev *dev;
1116 	int	i;
1117 	int	rc = 0;
1118 
1119 #define push_end(res, mask) do {		\
1120 	BUG_ON((mask+1) & mask);		\
1121 	res->end = (res->end + mask) | mask;	\
1122 } while (0)
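/* push_end() grows res->end by roughly one (mask+1)-sized window and
 * leaves it on a (mask+1) boundary minus one; mask must be a power of
 * two minus one, hence the BUG_ON above. For instance, starting from
 * res->end == 0x17ff, push_end(res, 0xfff) yields
 * (0x17ff + 0xfff) | 0xfff == 0x2fff, i.e. room for one more 4KB
 * IO window.
 */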
1123 
1124 	list_for_each_entry(dev, &bus->devices, bus_list) {
1125 		u16 class = dev->class >> 8;
1126 
1127 		if (class == PCI_CLASS_DISPLAY_VGA ||
1128 		    class == PCI_CLASS_NOT_DEFINED_VGA)
1129 			*found_vga = 1;
1130 		if (class >> 8 == PCI_BASE_CLASS_BRIDGE && dev->subordinate)
1131 			rc |= check_for_io_childs(dev->subordinate, res, found_vga);
1132 		if (class == PCI_CLASS_BRIDGE_CARDBUS)
1133 			push_end(res, 0xfff);
1134 
1135 		for (i=0; i<PCI_NUM_RESOURCES; i++) {
1136 			struct resource *r;
1137 			unsigned long r_size;
1138 
1139 			if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI
1140 			    && i >= PCI_BRIDGE_RESOURCES)
1141 				continue;
1142 			r = &dev->resource[i];
1143 			r_size = r->end - r->start;
1144 			if (r_size < 0xfff)
1145 				r_size = 0xfff;
1146 			if (r->flags & IORESOURCE_IO && (r_size) != 0) {
1147 				rc = 1;
1148 				push_end(res, r_size);
1149 			}
1150 		}
1151 	}
1152 
1153 	return rc;
1154 }
1155 
1156 /* Here we scan all P2P bridges of a given level that have a closed
1157  * IO window. Note that the test for the presence of a VGA card should
1158  * be improved to take into account already configured P2P bridges;
1159  * currently, we don't see them and might end up configuring two
1160  * bridges with VGA pass-through enabled.
1161  */
1162 static void __init
1163 do_fixup_p2p_level(struct pci_bus *bus)
1164 {
1165 	struct pci_bus *b;
1166 	int i, parent_io;
1167 	int has_vga = 0;
1168 
1169 	for (parent_io=0; parent_io<4; parent_io++)
1170 		if (bus->resource[parent_io]
1171 		    && bus->resource[parent_io]->flags & IORESOURCE_IO)
1172 			break;
1173 	if (parent_io >= 4)
1174 		return;
1175 
1176 	list_for_each_entry(b, &bus->children, node) {
1177 		struct pci_dev *d = b->self;
1178 		struct pci_controller* hose = (struct pci_controller *)d->sysdata;
1179 		struct resource *res = b->resource[0];
1180 		struct resource tmp_res;
1181 		unsigned long max;
1182 		int found_vga = 0;
1183 
1184 		memset(&tmp_res, 0, sizeof(tmp_res));
1185 		tmp_res.start = bus->resource[parent_io]->start;
1186 
1187 		/* We don't let low addresses go through that closed P2P bridge.
1188 		 * That may not be necessary, but I feel safer that way.
1189 		 */
1190 		if (tmp_res.start == 0)
1191 			tmp_res.start = 0x1000;
1192 
1193 		if (!list_empty(&b->devices) && res && res->flags == 0 &&
1194 		    res != bus->resource[parent_io] &&
1195 		    (d->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
1196 		    check_for_io_childs(b, &tmp_res, &found_vga)) {
1197 			u8 io_base_lo;
1198 
1199 			printk(KERN_INFO "Fixing up IO bus %s\n", b->name);
1200 
1201 			if (found_vga) {
1202 				if (has_vga) {
1203 					printk(KERN_WARNING "Skipping VGA, already active"
1204 					    " on bus segment\n");
1205 					found_vga = 0;
1206 				} else
1207 					has_vga = 1;
1208 			}
1209 			pci_read_config_byte(d, PCI_IO_BASE, &io_base_lo);
1210 
1211 			if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32)
1212 				max = ((unsigned long) hose->io_base_virt
1213 					- isa_io_base) + 0xffffffff;
1214 			else
1215 				max = ((unsigned long) hose->io_base_virt
1216 					- isa_io_base) + 0xffff;
1217 
1218 			*res = tmp_res;
1219 			res->flags = IORESOURCE_IO;
1220 			res->name = b->name;
1221 
1222 			/* Find a resource in the parent where we can allocate */
1223 			for (i = 0 ; i < 4; i++) {
1224 				struct resource *r = bus->resource[i];
1225 				if (!r)
1226 					continue;
1227 				if ((r->flags & IORESOURCE_IO) == 0)
1228 					continue;
1229 				DBG("Trying to allocate from %016llx, size %016llx from parent"
1230 				    " res %d: %016llx -> %016llx\n",
1231 					res->start, res->end, i, r->start, r->end);
1232 
1233 				if (allocate_resource(r, res, res->end + 1, res->start, max,
1234 				    res->end + 1, NULL, NULL) < 0) {
1235 					DBG("Failed !\n");
1236 					continue;
1237 				}
1238 				do_update_p2p_io_resource(b, found_vga);
1239 				break;
1240 			}
1241 		}
1242 		do_fixup_p2p_level(b);
1243 	}
1244 }
1245 
1246 static void
1247 pcibios_fixup_p2p_bridges(void)
1248 {
1249 	struct pci_bus *b;
1250 
1251 	list_for_each_entry(b, &pci_root_buses, node)
1252 		do_fixup_p2p_level(b);
1253 }
1254 
1255 #endif /* CONFIG_PPC_PMAC */
1256 
1257 static int __init
1258 pcibios_init(void)
1259 {
1260 	struct pci_controller *hose;
1261 	struct pci_bus *bus;
1262 	int next_busno;
1263 
1264 	printk(KERN_INFO "PCI: Probing PCI hardware\n");
1265 
1266 	/* Scan all of the recorded PCI controllers.  */
1267 	for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
1268 		if (pci_assign_all_buses)
1269 			hose->first_busno = next_busno;
1270 		hose->last_busno = 0xff;
1271 		bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
1272 		hose->last_busno = bus->subordinate;
1273 		if (pci_assign_all_buses || next_busno <= hose->last_busno)
1274 			next_busno = hose->last_busno + pcibios_assign_bus_offset;
1275 	}
1276 	pci_bus_count = next_busno;
1277 
1278 	/* OpenFirmware based machines need a map of OF bus
1279 	 * numbers vs. kernel bus numbers since we may have to
1280 	 * remap them.
1281 	 */
1282 	if (pci_assign_all_buses && have_of)
1283 		pcibios_make_OF_bus_map();
1284 
1285 	/* Do machine dependent PCI interrupt routing */
1286 	if (ppc_md.pci_swizzle && ppc_md.pci_map_irq)
1287 		pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq);
1288 
1289 	/* Call machine dependent fixup */
1290 	if (ppc_md.pcibios_fixup)
1291 		ppc_md.pcibios_fixup();
1292 
1293 	/* Allocate and assign resources */
1294 	pcibios_allocate_bus_resources(&pci_root_buses);
1295 	pcibios_allocate_resources(0);
1296 	pcibios_allocate_resources(1);
1297 #ifdef CONFIG_PPC_PMAC
1298 	pcibios_fixup_p2p_bridges();
1299 #endif /* CONFIG_PPC_PMAC */
1300 	pcibios_assign_resources();
1301 
1302 	/* Call machine dependent post-init code */
1303 	if (ppc_md.pcibios_after_init)
1304 		ppc_md.pcibios_after_init();
1305 
1306 	return 0;
1307 }
1308 
1309 subsys_initcall(pcibios_init);
1310 
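/* Walk up from 'dev' through any intermediate PCI-to-PCI bridges,
 * swizzling the interrupt pin at each hop, and return the slot (IDSEL)
 * of the bridge that sits directly on the root bus; *pinp is updated to
 * the pin as seen at that top level.
 */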
1311 unsigned char __init
1312 common_swizzle(struct pci_dev *dev, unsigned char *pinp)
1313 {
1314 	struct pci_controller *hose = dev->sysdata;
1315 
1316 	if (dev->bus->number != hose->first_busno) {
1317 		u8 pin = *pinp;
1318 		do {
1319 			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
1320 			/* Move up the chain of bridges. */
1321 			dev = dev->bus->self;
1322 		} while (dev->bus->self);
1323 		*pinp = pin;
1324 
1325 		/* The slot is the idsel of the last bridge. */
1326 	}
1327 	return PCI_SLOT(dev->devfn);
1328 }
1329 
1330 unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
1331 			     unsigned long start, unsigned long size)
1332 {
1333 	return start;
1334 }
1335 
1336 void __init pcibios_fixup_bus(struct pci_bus *bus)
1337 {
1338 	struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
1339 	unsigned long io_offset;
1340 	struct resource *res;
1341 	int i;
1342 
1343 	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
1344 	if (bus->parent == NULL) {
1345 		/* This is a host bridge - fill in its resources */
1346 		hose->bus = bus;
1347 
1348 		bus->resource[0] = res = &hose->io_resource;
1349 		if (!res->flags) {
1350 			if (io_offset)
1351 				printk(KERN_ERR "I/O resource not set for host"
1352 				       " bridge %d\n", hose->index);
1353 			res->start = 0;
1354 			res->end = IO_SPACE_LIMIT;
1355 			res->flags = IORESOURCE_IO;
1356 		}
1357 		res->start += io_offset;
1358 		res->end += io_offset;
1359 
1360 		for (i = 0; i < 3; ++i) {
1361 			res = &hose->mem_resources[i];
1362 			if (!res->flags) {
1363 				if (i > 0)
1364 					continue;
1365 				printk(KERN_ERR "Memory resource not set for "
1366 				       "host bridge %d\n", hose->index);
1367 				res->start = hose->pci_mem_offset;
1368 				res->end = ~0U;
1369 				res->flags = IORESOURCE_MEM;
1370 			}
1371 			bus->resource[i+1] = res;
1372 		}
1373 	} else {
1374 		/* This is a subordinate bridge */
1375 		pci_read_bridge_bases(bus);
1376 
1377 		for (i = 0; i < 4; ++i) {
1378 			if ((res = bus->resource[i]) == NULL)
1379 				continue;
1380 			if (!res->flags)
1381 				continue;
1382 			if (io_offset && (res->flags & IORESOURCE_IO)) {
1383 				res->start += io_offset;
1384 				res->end += io_offset;
1385 			} else if (hose->pci_mem_offset
1386 				   && (res->flags & IORESOURCE_MEM)) {
1387 				res->start += hose->pci_mem_offset;
1388 				res->end += hose->pci_mem_offset;
1389 			}
1390 		}
1391 	}
1392 
1393 	if (ppc_md.pcibios_fixup_bus)
1394 		ppc_md.pcibios_fixup_bus(bus);
1395 }
1396 
1397 char __init *pcibios_setup(char *str)
1398 {
1399 	return str;
1400 }
1401 
1402 /* the next one is stolen from the alpha port... */
1403 void __init
1404 pcibios_update_irq(struct pci_dev *dev, int irq)
1405 {
1406 	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
1407 	/* XXX FIXME - update OF device tree node interrupt property */
1408 }
1409 
1410 #ifdef CONFIG_PPC_MERGE
1411 /* XXX This is a copy of the ppc64 version. This is temporary until we start
1412  * merging the 2 PCI layers
1413  */
1414 /*
1415  * Reads the interrupt pin to determine whether the interrupt is used
1416  * by the card. If so, gets the interrupt line from Open Firmware and
1417  * sets it in the pci_dev and in the PCI config space.
1418  */
1419 int pci_read_irq_line(struct pci_dev *pci_dev)
1420 {
1421 	struct of_irq oirq;
1422 	unsigned int virq;
1423 
1424 	DBG("Try to map irq for %s...\n", pci_name(pci_dev));
1425 
1426 	/* Try to get a mapping from the device-tree */
1427 	if (of_irq_map_pci(pci_dev, &oirq)) {
1428 		u8 line, pin;
1429 
1430 		/* If that fails, let's fall back to what is in the config
1431 		 * space and map that through the default controller. We
1432 		 * also set the type to level low since that's what PCI
1433 		 * interrupts are. If your platform behaves differently, then
1434 		 * either provide a proper interrupt tree or don't use this
1435 		 * function.
1436 		 */
1437 		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
1438 			return -1;
1439 		if (pin == 0)
1440 			return -1;
1441 		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
1442 		    line == 0xff) {
1443 			return -1;
1444 		}
1445 		DBG(" -> no map ! Using irq line %d from PCI config\n", line);
1446 
1447 		virq = irq_create_mapping(NULL, line);
1448 		if (virq != NO_IRQ)
1449 			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
1450 	} else {
1451 		DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
1452 		    oirq.size, oirq.specifier[0], oirq.controller->full_name);
1453 
1454 		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
1455 					     oirq.size);
1456 	}
1457 	if(virq == NO_IRQ) {
1458 		DBG(" -> failed to map !\n");
1459 		return -1;
1460 	}
1461 	pci_dev->irq = virq;
1462 	pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq);
1463 
1464 	return 0;
1465 }
1466 EXPORT_SYMBOL(pci_read_irq_line);
1467 #endif /* CONFIG_PPC_MERGE */
1468 
1469 int pcibios_enable_device(struct pci_dev *dev, int mask)
1470 {
1471 	u16 cmd, old_cmd;
1472 	int idx;
1473 	struct resource *r;
1474 
1475 	if (ppc_md.pcibios_enable_device_hook)
1476 		if (ppc_md.pcibios_enable_device_hook(dev, 0))
1477 			return -EINVAL;
1478 
1479 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
1480 	old_cmd = cmd;
1481 	for (idx=0; idx<6; idx++) {
1482 		r = &dev->resource[idx];
1483 		if (r->flags & IORESOURCE_UNSET) {
1484 			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
1485 			return -EINVAL;
1486 		}
1487 		if (r->flags & IORESOURCE_IO)
1488 			cmd |= PCI_COMMAND_IO;
1489 		if (r->flags & IORESOURCE_MEM)
1490 			cmd |= PCI_COMMAND_MEMORY;
1491 	}
1492 	if (cmd != old_cmd) {
1493 		printk("PCI: Enabling device %s (%04x -> %04x)\n",
1494 		       pci_name(dev), old_cmd, cmd);
1495 		pci_write_config_word(dev, PCI_COMMAND, cmd);
1496 	}
1497 	return 0;
1498 }
1499 
1500 struct pci_controller*
1501 pci_bus_to_hose(int bus)
1502 {
1503 	struct pci_controller* hose = hose_head;
1504 
1505 	for (; hose; hose = hose->next)
1506 		if (bus >= hose->first_busno && bus <= hose->last_busno)
1507 			return hose;
1508 	return NULL;
1509 }
1510 
1511 void __iomem *
1512 pci_bus_io_base(unsigned int bus)
1513 {
1514 	struct pci_controller *hose;
1515 
1516 	hose = pci_bus_to_hose(bus);
1517 	if (!hose)
1518 		return NULL;
1519 	return hose->io_base_virt;
1520 }
1521 
1522 unsigned long
1523 pci_bus_io_base_phys(unsigned int bus)
1524 {
1525 	struct pci_controller *hose;
1526 
1527 	hose = pci_bus_to_hose(bus);
1528 	if (!hose)
1529 		return 0;
1530 	return hose->io_base_phys;
1531 }
1532 
1533 unsigned long
1534 pci_bus_mem_base_phys(unsigned int bus)
1535 {
1536 	struct pci_controller *hose;
1537 
1538 	hose = pci_bus_to_hose(bus);
1539 	if (!hose)
1540 		return 0;
1541 	return hose->pci_mem_offset;
1542 }
1543 
1544 unsigned long
1545 pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
1546 {
1547 	/* Hack alert again ! See comments in chrp_pci.c
1548 	 */
1549 	struct pci_controller* hose =
1550 		(struct pci_controller *)pdev->sysdata;
1551 	if (hose && res->flags & IORESOURCE_MEM)
1552 		return res->start - hose->pci_mem_offset;
1553 	/* We may want to do something with IOs here... */
1554 	return res->start;
1555 }
1556 
1557 
1558 static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
1559 					       unsigned long *offset,
1560 					       enum pci_mmap_state mmap_state)
1561 {
1562 	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
1563 	unsigned long io_offset = 0;
1564 	int i, res_bit;
1565 
1566 	if (hose == 0)
1567 		return NULL;		/* should never happen */
1568 
1569 	/* If memory, add on the PCI bridge address offset */
1570 	if (mmap_state == pci_mmap_mem) {
1571 		*offset += hose->pci_mem_offset;
1572 		res_bit = IORESOURCE_MEM;
1573 	} else {
1574 		io_offset = hose->io_base_virt - ___IO_BASE;
1575 		*offset += io_offset;
1576 		res_bit = IORESOURCE_IO;
1577 	}
1578 
1579 	/*
1580 	 * Check that the offset requested corresponds to one of the
1581 	 * resources of the device.
1582 	 */
1583 	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
1584 		struct resource *rp = &dev->resource[i];
1585 		int flags = rp->flags;
1586 
1587 		/* treat ROM as memory (should be already) */
1588 		if (i == PCI_ROM_RESOURCE)
1589 			flags |= IORESOURCE_MEM;
1590 
1591 		/* Active and same type? */
1592 		if ((flags & res_bit) == 0)
1593 			continue;
1594 
1595 		/* In the range of this resource? */
1596 		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
1597 			continue;
1598 
1599 		/* found it! construct the final physical address */
1600 		if (mmap_state == pci_mmap_io)
1601 			*offset += hose->io_base_phys - io_offset;
1602 		return rp;
1603 	}
1604 
1605 	return NULL;
1606 }
1607 
1608 /*
1609  * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
1610  * device mapping.
1611  */
1612 static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
1613 				      pgprot_t protection,
1614 				      enum pci_mmap_state mmap_state,
1615 				      int write_combine)
1616 {
1617 	unsigned long prot = pgprot_val(protection);
1618 
1619 	/* Write combine is always 0 on non-memory space mappings. On
1620 	 * memory space, if the user didn't pass 1, we check for a
1621 	 * "prefetchable" resource. This is a bit hackish, but we use
1622 	 * this to work around the inability of /sysfs to provide a write
1623 	 * combine bit.
1624 	 */
1625 	if (mmap_state != pci_mmap_mem)
1626 		write_combine = 0;
1627 	else if (write_combine == 0) {
1628 		if (rp->flags & IORESOURCE_PREFETCH)
1629 			write_combine = 1;
1630 	}
1631 
1632 	/* XXX would be nice to have a way to ask for write-through */
1633 	prot |= _PAGE_NO_CACHE;
1634 	if (write_combine)
1635 		prot &= ~_PAGE_GUARDED;
1636 	else
1637 		prot |= _PAGE_GUARDED;
1638 
1639 	printk("PCI map for %s:%llx, prot: %lx\n", pci_name(dev),
1640 		(unsigned long long)rp->start, prot);
1641 
1642 	return __pgprot(prot);
1643 }
1644 
1645 /*
1646  * This one is used by /dev/mem and fbdev, which have no clue about
1647  * the PCI device; it tries to find the PCI device first and then
1648  * calls the routine above.
1649  */
1650 pgprot_t pci_phys_mem_access_prot(struct file *file,
1651 				  unsigned long pfn,
1652 				  unsigned long size,
1653 				  pgprot_t protection)
1654 {
1655 	struct pci_dev *pdev = NULL;
1656 	struct resource *found = NULL;
1657 	unsigned long prot = pgprot_val(protection);
1658 	unsigned long offset = pfn << PAGE_SHIFT;
1659 	int i;
1660 
1661 	if (page_is_ram(pfn))
1662 		return prot;
1663 
1664 	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
1665 
1666 	for_each_pci_dev(pdev) {
1667 		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
1668 			struct resource *rp = &pdev->resource[i];
1669 			int flags = rp->flags;
1670 
1671 			/* Active and same type? */
1672 			if ((flags & IORESOURCE_MEM) == 0)
1673 				continue;
1674 			/* In the range of this resource? */
1675 			if (offset < (rp->start & PAGE_MASK) ||
1676 			    offset > rp->end)
1677 				continue;
1678 			found = rp;
1679 			break;
1680 		}
1681 		if (found)
1682 			break;
1683 	}
1684 	if (found) {
1685 		if (found->flags & IORESOURCE_PREFETCH)
1686 			prot &= ~_PAGE_GUARDED;
1687 		pci_dev_put(pdev);
1688 	}
1689 
1690 	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
1691 
1692 	return __pgprot(prot);
1693 }
1694 
1695 
1696 /*
1697  * Perform the actual remap of the pages for a PCI device mapping, as
1698  * appropriate for this architecture.  The region in the process to map
1699  * is described by vm_start and vm_end members of VMA, the base physical
1700  * address is found in vm_pgoff.
1701  * The pci device structure is provided so that architectures may make mapping
1702  * decisions on a per-device or per-bus basis.
1703  *
1704  * Returns a negative error code on failure, zero on success.
1705  */
1706 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
1707 			enum pci_mmap_state mmap_state,
1708 			int write_combine)
1709 {
1710 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
1711 	struct resource *rp;
1712 	int ret;
1713 
1714 	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
1715 	if (rp == NULL)
1716 		return -EINVAL;
1717 
1718 	vma->vm_pgoff = offset >> PAGE_SHIFT;
1719 	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
1720 						  vma->vm_page_prot,
1721 						  mmap_state, write_combine);
1722 
1723 	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
1724 			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
1725 
1726 	return ret;
1727 }
1728 
1729 /* Obsolete functions. Should be removed once the symbios driver
1730  * is fixed
1731  */
1732 unsigned long
1733 phys_to_bus(unsigned long pa)
1734 {
1735 	struct pci_controller *hose;
1736 	int i;
1737 
1738 	for (hose = hose_head; hose; hose = hose->next) {
1739 		for (i = 0; i < 3; ++i) {
1740 			if (pa >= hose->mem_resources[i].start
1741 			    && pa <= hose->mem_resources[i].end) {
1742 				/*
1743 				 * XXX the hose->pci_mem_offset really
1744 				 * only applies to mem_resources[0].
1745 				 * We need a way to store an offset for
1746 				 * the others.  -- paulus
1747 				 */
1748 				if (i == 0)
1749 					pa -= hose->pci_mem_offset;
1750 				return pa;
1751 			}
1752 		}
1753 	}
1754 	/* hmmm, didn't find it */
1755 	return 0;
1756 }
1757 
1758 unsigned long
1759 pci_phys_to_bus(unsigned long pa, int busnr)
1760 {
1761 	struct pci_controller* hose = pci_bus_to_hose(busnr);
1762 	if (!hose)
1763 		return pa;
1764 	return pa - hose->pci_mem_offset;
1765 }
1766 
1767 unsigned long
1768 pci_bus_to_phys(unsigned int ba, int busnr)
1769 {
1770 	struct pci_controller* hose = pci_bus_to_hose(busnr);
1771 	if (!hose)
1772 		return ba;
1773 	return ba + hose->pci_mem_offset;
1774 }
1775 
1776 /* Provide information on locations of various I/O regions in physical
1777  * memory.  Do this on a per-card basis so that we choose the right
1778  * root bridge.
1779  * Note that the returned IO or memory base is a physical address
1780  */
1781 
1782 long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
1783 {
1784 	struct pci_controller* hose;
1785 	long result = -EOPNOTSUPP;
1786 
1787 	/* Argh! Please forgive me for this hack, but it's the
1788 	 * simplest way to keep existing XFree from locking up on some
1789 	 * G5 machines... So when something asks for bus 0 io base
1790 	 * (bus 0 is HT root), we return the AGP one instead.
1791 	 */
1792 #ifdef CONFIG_PPC_PMAC
1793 	if (machine_is(powermac) && machine_is_compatible("MacRISC4"))
1794 		if (bus == 0)
1795 			bus = 0xf0;
1796 #endif /* CONFIG_PPC_PMAC */
1797 
1798 	hose = pci_bus_to_hose(bus);
1799 	if (!hose)
1800 		return -ENODEV;
1801 
1802 	switch (which) {
1803 	case IOBASE_BRIDGE_NUMBER:
1804 		return (long)hose->first_busno;
1805 	case IOBASE_MEMORY:
1806 		return (long)hose->pci_mem_offset;
1807 	case IOBASE_IO:
1808 		return (long)hose->io_base_phys;
1809 	case IOBASE_ISA_IO:
1810 		return (long)isa_io_base;
1811 	case IOBASE_ISA_MEM:
1812 		return (long)isa_mem_base;
1813 	}
1814 
1815 	return result;
1816 }
1817 
1818 void pci_resource_to_user(const struct pci_dev *dev, int bar,
1819 			  const struct resource *rsrc,
1820 			  resource_size_t *start, resource_size_t *end)
1821 {
1822 	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
1823 	unsigned long offset = 0;
1824 
1825 	if (hose == NULL)
1826 		return;
1827 
1828 	if (rsrc->flags & IORESOURCE_IO)
1829 		offset = ___IO_BASE - hose->io_base_virt + hose->io_base_phys;
1830 
1831 	*start = rsrc->start + offset;
1832 	*end = rsrc->end + offset;
1833 }
1834 
1835 void __init
1836 pci_init_resource(struct resource *res, unsigned long start, unsigned long end,
1837 		  int flags, char *name)
1838 {
1839 	res->start = start;
1840 	res->end = end;
1841 	res->flags = flags;
1842 	res->name = name;
1843 	res->parent = NULL;
1844 	res->sibling = NULL;
1845 	res->child = NULL;
1846 }
1847 
1848 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
1849 {
1850 	unsigned long start = pci_resource_start(dev, bar);
1851 	unsigned long len = pci_resource_len(dev, bar);
1852 	unsigned long flags = pci_resource_flags(dev, bar);
1853 
1854 	if (!len)
1855 		return NULL;
1856 	if (max && len > max)
1857 		len = max;
1858 	if (flags & IORESOURCE_IO)
1859 		return ioport_map(start, len);
1860 	if (flags & IORESOURCE_MEM)
1861 		/* Not checking IORESOURCE_CACHEABLE because PPC does
1862 		 * not currently distinguish between ioremap and
1863 		 * ioremap_nocache.
1864 		 */
1865 		return ioremap(start, len);
1866 	/* What? */
1867 	return NULL;
1868 }
1869 
1870 void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
1871 {
1872 	/* Nothing to do */
1873 }
1874 EXPORT_SYMBOL(pci_iomap);
1875 EXPORT_SYMBOL(pci_iounmap);
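/* Typical driver-side usage of the pair above (illustrative only;
 * SOME_REG is a made-up register offset):
 *	void __iomem *regs = pci_iomap(pdev, 0, 0);
 *	if (regs) {
 *		writel(1, regs + SOME_REG);
 *		pci_iounmap(pdev, regs);
 *	}
 * pci_iounmap() is a no-op on this platform, as seen above.
 */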
1876 
1877 unsigned long pci_address_to_pio(phys_addr_t address)
1878 {
1879 	struct pci_controller* hose = hose_head;
1880 
1881 	for (; hose; hose = hose->next) {
1882 		unsigned int size = hose->io_resource.end -
1883 			hose->io_resource.start + 1;
1884 		if (address >= hose->io_base_phys &&
1885 		    address < (hose->io_base_phys + size)) {
1886 			unsigned long base =
1887 				(unsigned long)hose->io_base_virt - _IO_BASE;
1888 			return base + (address - hose->io_base_phys);
1889 		}
1890 	}
1891 	return (unsigned int)-1;
1892 }
1893 EXPORT_SYMBOL(pci_address_to_pio);
1894 
1895 /*
1896  * Null PCI config access functions, for the case when we can't
1897  * find a hose.
1898  */
1899 #define NULL_PCI_OP(rw, size, type)					\
1900 static int								\
1901 null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
1902 {									\
1903 	return PCIBIOS_DEVICE_NOT_FOUND;    				\
1904 }
1905 
1906 static int
1907 null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1908 		 int len, u32 *val)
1909 {
1910 	return PCIBIOS_DEVICE_NOT_FOUND;
1911 }
1912 
1913 static int
1914 null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1915 		  int len, u32 val)
1916 {
1917 	return PCIBIOS_DEVICE_NOT_FOUND;
1918 }
1919 
1920 static struct pci_ops null_pci_ops =
1921 {
1922 	null_read_config,
1923 	null_write_config
1924 };
1925 
1926 /*
1927  * These functions are used early on before PCI scanning is done
1928  * and all of the pci_dev and pci_bus structures have been created.
1929  */
1930 static struct pci_bus *
1931 fake_pci_bus(struct pci_controller *hose, int busnr)
1932 {
1933 	static struct pci_bus bus;
1934 
1935 	if (hose == 0) {
1936 		hose = pci_bus_to_hose(busnr);
1937 		if (hose == 0)
1938 			printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1939 	}
1940 	bus.number = busnr;
1941 	bus.sysdata = hose;
1942 	bus.ops = hose? hose->ops: &null_pci_ops;
1943 	return &bus;
1944 }
1945 
1946 #define EARLY_PCI_OP(rw, size, type)					\
1947 int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
1948 			       int devfn, int offset, type value)	\
1949 {									\
1950 	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
1951 					    devfn, offset, value);	\
1952 }
1953 
1954 EARLY_PCI_OP(read, byte, u8 *)
1955 EARLY_PCI_OP(read, word, u16 *)
1956 EARLY_PCI_OP(read, dword, u32 *)
1957 EARLY_PCI_OP(write, byte, u8)
1958 EARLY_PCI_OP(write, word, u16)
1959 EARLY_PCI_OP(write, dword, u32)
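/* Each EARLY_PCI_OP() instantiation above expands to a helper such as
 *	int early_read_config_word(struct pci_controller *hose, int bus,
 *				   int devfn, int offset, u16 *value);
 * which lets platform setup code issue config cycles before the
 * pci_dev/pci_bus structures exist, e.g. (illustrative only):
 *	u16 vendor;
 *	early_read_config_word(hose, hose->first_busno, 0,
 *			       PCI_VENDOR_ID, &vendor);
 */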
1960