/*
 *	$Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
 *
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/dma.h>	/* isa_dma_bridge_buggy */
#include "pci.h"


/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char __devinit
pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->number;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}

/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}
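
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a caller that wants the highest bus number in the whole system would use
 * pci_max_busnr(), while pci_bus_max_busnr() answers the same question for
 * a single subtree:
 *
 *	unsigned char last = pci_max_busnr();
 *
 *	printk(KERN_DEBUG "PCI: highest known bus number is %02x\n", last);
 */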

static int __pci_bus_find_cap(struct pci_bus *bus, unsigned int devfn, u8 hdr_type, int cap)
{
	u16 status;
	u8 pos, id;
	int ttl = 48;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		pci_bus_read_config_byte(bus, devfn, PCI_CAPABILITY_LIST, &pos);
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		pci_bus_read_config_byte(bus, devfn, PCI_CB_CAPABILITY_LIST, &pos);
		break;
	default:
		return 0;
	}
	while (ttl-- && pos >= 0x40) {
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID, &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_NEXT, &pos);
	}
	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	return __pci_bus_find_cap(dev->bus, dev->devfn, dev->hdr_type, cap);
}
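
/*
 * Example (editor's sketch): locating the Power Management capability of a
 * device and reading its PMC register, much as pci_set_power_state() below
 * does.  The variable names here are illustrative only.
 *
 *	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 *	u16 pmc;
 *
 *	if (pm)
 *		pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
 */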

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	return __pci_bus_find_cap(bus, devfn, hdr_type & 0x7f, cap);
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */
	int pos = 0x100;

	if (dev->cfg_size <= 256)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 0x100)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
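
/*
 * Example (editor's sketch): probing for the Advanced Error Reporting
 * extended capability of a PCI Express device.  A return value of 0 simply
 * means the capability is absent.
 *
 *	int aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 *
 *	if (aer)
 *		printk(KERN_DEBUG "PCI: %s has AER at config offset %#x\n",
 *		       pci_name(dev), aer);
 */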

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure that contains the resources to be searched
 * @res: child resource record for which parent is sought
 *
 *  For the given resource region of the given device, return the resource
 *  region of the parent bus in which the given region is contained, or
 *  from which it should be allocated.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL;

	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
			best = r;	/* Approximating prefetchable by non-prefetchable */
	}
	return best;
}
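
/*
 * Example (editor's sketch): architecture code typically claims a device
 * BAR by finding the enclosing bus resource and inserting the region into
 * the resource tree.  "resno" is an arbitrary BAR index and error handling
 * is omitted here.
 *
 *	struct resource *res = &dev->resource[resno];
 *	struct resource *root = pci_find_parent_resource(dev, res);
 *
 *	if (root)
 *		request_resource(root, res);
 */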

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
 *
 * Transition a device to a new power state, using the Power Management
 * Capabilities in the device's config space.
 *
 * RETURN VALUE:
 * -EINVAL if trying to enter a shallower sleep state than the one we're
 * already in (anything other than D0).
 * 0 if we're already in the requested state.
 * -EIO if the device does not support PCI PM or the requested state.
 * 0 if we successfully changed the power state.
 */
int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t);
int
pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int pm;
	u16 pmcsr, pmc;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;

	/* Validate current state:
	 * We can always enter D0, but otherwise we may only move to a
	 * deeper sleep state than the one we are currently in.
	 */
	if (state != PCI_D0 && dev->current_state > state)
		return -EINVAL;
	else if (dev->current_state == state)
		return 0;        /* we're already there */

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* abort if the device doesn't support PM capabilities */
	if (!pm)
		return -EIO;

	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
	if ((pmc & PCI_PM_CAP_VER_MASK) > 2) {
		printk(KERN_DEBUG
		       "PCI: %s has unsupported PM cap regs version (%u)\n",
		       pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
		return -EIO;
	}

	/* check if this device supports the desired state */
	if (state == PCI_D1 || state == PCI_D2) {
		if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
			return -EIO;
		else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
			return -EIO;
	}

	/* If we're in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	if (dev->current_state >= PCI_D3hot)
		pmcsr = 0;
	else {
		pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
	}

	/* enter specified state */
	pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		msleep(10);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(200);

	/*
	 * Give firmware a chance to be called, such as ACPI _PRx, _PSx.
	 * Should the firmware method be invoked after the native method?
	 */
	if (platform_pci_set_power_state)
		platform_pci_set_power_state(dev, state);

	dev->current_state = state;
	return 0;
}
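
/*
 * Example (editor's sketch): a driver that knows its hardware is idle could
 * park the function in D3hot and bring it back later.  Return values should
 * be checked in real code; they are ignored here for brevity.
 *
 *	pci_set_power_state(dev, PCI_D3hot);	put device into low power
 *	...
 *	pci_set_power_state(dev, PCI_D0);	fully operational again
 */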

int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to the suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	int ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	if (platform_pci_choose_state) {
		ret = platform_pci_choose_state(dev, state);
		if (ret >= 0)
			state.event = ret;
	}

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_SUSPEND:
		return PCI_D3hot;
	default:
		printk("They asked me for state %d\n", state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
int
pci_restore_state(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < 16; i++)
		pci_write_config_dword(dev, i * 4, dev->saved_config_space[i]);
	return 0;
}
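
/*
 * Example (editor's sketch): the usual pairing of these helpers in a PCI
 * driver's power management hooks.  foo_suspend()/foo_resume() are made-up
 * names and error handling is omitted for brevity.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */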

/**
 * pci_enable_device_bars - Initialize some of a device for use
 * @dev: PCI device to be initialized
 * @bars: bitmask of BARs that must be configured
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable selected I/O and memory resources. Wake up the device if it
 *  was suspended. Beware, this function can fail.
 */

int
pci_enable_device_bars(struct pci_dev *dev, int bars)
{
	int err;

	pci_set_power_state(dev, PCI_D0);
	if ((err = pcibios_enable_device(dev, bars)) < 0)
		return err;
	return 0;
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O and memory. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int
pci_enable_device(struct pci_dev *dev)
{
	int err;

	if ((err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1)))
		return err;
	pci_fixup_device(pci_fixup_enable, dev);
	dev->is_enabled = 1;
	return 0;
}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is no longer in use.
 * This only involves disabling PCI bus-mastering, if active.
 */
void
pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}
	dev->is_busmaster = 0;

	pcibios_disable_device(dev);
	dev->is_enabled = 0;
}
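
/*
 * Example (editor's sketch): pci_enable_device() and pci_disable_device()
 * are normally paired across a driver's setup and teardown paths.
 * foo_setup() and foo_init_hw() are hypothetical names.
 *
 *	static int foo_setup(struct pci_dev *pdev)
 *	{
 *		int err;
 *
 *		err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *		err = foo_init_hw(pdev);	hypothetical hardware init
 *		if (err)
 *			pci_disable_device(pdev);
 *		return err;
 *	}
 */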

/**
 * pci_enable_wake - enable device to generate PME# when suspended
 * @dev: PCI device to operate on
 * @state: Current state of device.
 * @enable: Flag to enable or disable generation
 *
 * Set the bits in the device's PM Capabilities to generate PME# when
 * the system is suspended.
 *
 * -EIO is returned if device doesn't have PM Capabilities.
 * -EINVAL is returned if device supports it, but can't generate wake events.
 * 0 if operation is successful.
 *
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
{
	int pm;
	u16 value;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* If device doesn't support PM Capabilities, but request is to disable
	 * wake events, it's a nop; otherwise fail */
	if (!pm)
		return enable ? -EIO : 0;

	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &value);

	value &= PCI_PM_CAP_PME_MASK;
	value >>= ffs(PCI_PM_CAP_PME_MASK) - 1;   /* First bit of mask */

	/* Check if it can generate PME# from requested state. */
	if (!value || !(value & (1 << state)))
		return enable ? -EINVAL : 0;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);

	/* Clear PME_Status by writing 1 to it and enable PME# */
	value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;

	if (!enable)
		value &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, pm + PCI_PM_CTRL, value);

	return 0;
}
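
/*
 * Example (editor's sketch): arming PME# from D3hot on the way down and
 * disarming it again on resume.  Whether the device can actually wake from
 * that state is exactly what the -EINVAL return reports.
 *
 *	if (pci_enable_wake(pdev, PCI_D3hot, 1))
 *		printk(KERN_DEBUG "PCI: %s cannot wake from D3hot\n",
 *		       pci_name(pdev));
 *	...
 *	pci_enable_wake(pdev, PCI_D3hot, 0);	on resume
 */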

int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (!pin)
		return -1;
	pin--;
	while (dev->bus->self) {
		pin = (pin + PCI_SLOT(dev->devfn)) % 4;
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}
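
/*
 * Example (editor's sketch): IRQ-routing code can use this to learn which
 * INTx pin a device presents at its topmost bridge after the standard
 * swizzling performed above:
 *
 *	struct pci_dev *bridge;
 *	int pin = pci_get_interrupt_pin(dev, &bridge);
 *
 * A return of 0..3 means INTA..INTD as seen at the returned bridge; -1
 * means the device does not use an interrupt pin at all.
 */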

/**
 *	pci_release_region - Release a PCI BAR
 *	@pdev: PCI device whose resources were previously reserved by pci_request_region
 *	@bar: BAR to release
 *
 *	Releases the PCI I/O and memory resources previously reserved by a
 *	successful call to pci_request_region.  Call this function only
 *	after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
}

/**
 *	pci_request_region - Reserve a PCI I/O or memory resource
 *	@pdev: PCI device whose resources are to be reserved
 *	@bar: BAR to be reserved
 *	@res_name: Name to be associated with resource.
 *
 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
 *	being reserved by owner @res_name.  Do not access any
 *	address inside the PCI regions unless this call returns
 *	successfully.
 *
 *	Returns 0 on success, or %-EBUSY on error.  A warning
 *	message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, char *res_name)
{
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!request_mem_region(pci_resource_start(pdev, bar),
				        pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}

	return 0;

err_out:
	printk(KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
		pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
		bar + 1, /* PCI BAR # */
		pci_resource_len(pdev, bar), pci_resource_start(pdev, bar),
		pci_name(pdev));
	return -EBUSY;
}
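
/*
 * Example (editor's sketch): claiming a single memory BAR and mapping it.
 * The BAR number (0) and the owner string are arbitrary, and error handling
 * of ioremap() is omitted here.
 *
 *	void __iomem *regs;
 *
 *	if (pci_request_region(pdev, 0, "foo"))
 *		return -EBUSY;
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 */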


/**
 *	pci_release_regions - Release reserved PCI I/O and memory resources
 *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 *	Releases all PCI I/O and memory resources previously reserved by a
 *	successful call to pci_request_regions.  Call this function only
 *	after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < 6; i++)
		pci_release_region(pdev, i);
}

/**
 *	pci_request_regions - Reserve PCI I/O and memory resources
 *	@pdev: PCI device whose resources are to be reserved
 *	@res_name: Name to be associated with resource.
 *
 *	Mark all PCI regions associated with PCI device @pdev as
 *	being reserved by owner @res_name.  Do not access any
 *	address inside the PCI regions unless this call returns
 *	successfully.
 *
 *	Returns 0 on success, or %-EBUSY on error.  A warning
 *	message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, char *res_name)
{
	int i;

	for (i = 0; i < 6; i++)
		if (pci_request_region(pdev, i, res_name))
			goto err_out;
	return 0;

err_out:
	while (--i >= 0)
		pci_release_region(pdev, i);

	return -EBUSY;
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void
pci_set_master(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_MASTER)) {
		pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = 1;
	pcibios_set_master(dev);
}
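
/*
 * Example (editor's sketch): the canonical start of a PCI driver probe
 * routine, combining the helpers above.  foo_probe() and the "foo" owner
 * string are made-up names.
 *
 *	static int __devinit foo_probe(struct pci_dev *pdev,
 *				       const struct pci_device_id *id)
 *	{
 *		int err;
 *
 *		err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *		err = pci_request_regions(pdev, "foo");
 *		if (err) {
 *			pci_disable_device(pdev);
 *			return err;
 *		}
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */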

#ifndef HAVE_ARCH_PCI_MWI
/* This can be overridden by arch code. */
u8 pci_cache_line_size = L1_CACHE_BYTES >> 2;

/**
 * pci_generic_prep_mwi - helper function for pci_set_mwi
 * @dev: the PCI device for which MWI is enabled
 *
 * Helper function for generic implementation of pcibios_prep_mwi
 * function.  Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_generic_prep_mwi(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;		/* The system doesn't support MWI. */

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or a multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	printk(KERN_DEBUG "PCI: cache line size of %d is not supported "
	       "by device %s\n", pci_cache_line_size << 2, pci_name(dev));

	return -EINVAL;
}
#endif /* !HAVE_ARCH_PCI_MWI */

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
 * after first calling pcibios_prep_mwi() (or the generic MWI prep
 * helper) to do the needed arch specific operations.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

#ifdef HAVE_ARCH_PCI_MWI
	rc = pcibios_prep_mwi(dev);
#else
	rc = pci_generic_prep_mwi(dev);
#endif

	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
		pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}
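
/*
 * Example (editor's sketch): MWI is an optimization, so most callers treat
 * a failure as non-fatal and simply carry on without it:
 *
 *	if (pci_set_mwi(pdev))
 *		printk(KERN_DEBUG "PCI: %s: MWI unavailable, continuing\n",
 *		       pci_name(pdev));
 */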

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}

#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
 * These can be overridden by arch-specific implementations
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}

int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dev.coherent_dma_mask = mask;

	return 0;
}
#endif
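
/*
 * Example (editor's sketch): a device limited to 32-bit DMA addresses sets
 * both masks accordingly and fails probing if even that cannot be
 * satisfied.  0xffffffffULL is written out rather than relying on any
 * particular mask macro.
 *
 *	if (pci_set_dma_mask(pdev, 0xffffffffULL) ||
 *	    pci_set_consistent_dma_mask(pdev, 0xffffffffULL)) {
 *		printk(KERN_ERR "PCI: %s: no usable DMA configuration\n",
 *		       pci_name(pdev));
 *		return -EIO;
 *	}
 */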

static int __devinit pci_init(void)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		pci_fixup_device(pci_fixup_final, dev);
	}
	return 0;
}

static int __devinit pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			/* PCI layer options should be handled here */
			printk(KERN_ERR "PCI: Unknown option `%s'\n", str);
		}
		str = k;
	}
	return 1;
}

device_initcall(pci_init);

__setup("pci=", pci_setup);

#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
/* FIXME: Some boxes have multiple ISA bridges! */
struct pci_dev *isa_bridge;
EXPORT_SYMBOL(isa_bridge);
#endif

EXPORT_SYMBOL(pci_enable_device_bars);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_max_busnr);
EXPORT_SYMBOL(pci_bus_max_busnr);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_enable_wake);

/* Quirk info */

EXPORT_SYMBOL(isa_dma_bridge_buggy);
EXPORT_SYMBOL(pci_pci_problems);