xref: /linux/drivers/pci/pci.c (revision 2624f124b3b5d550ab2fbef7ee3bc0e1fed09722)
1 /*
2  *	$Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
3  *
4  *	PCI Bus Services, see include/linux/pci.h for further explanation.
5  *
6  *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
7  *	David Mosberger-Tang
8  *
9  *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
10  */
11 
12 #include <linux/kernel.h>
13 #include <linux/delay.h>
14 #include <linux/init.h>
15 #include <linux/pci.h>
16 #include <linux/module.h>
17 #include <linux/spinlock.h>
18 #include <asm/dma.h>	/* isa_dma_bridge_buggy */
19 #include "pci.h"
20 
21 
22 /**
23  * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
24  * @bus: pointer to PCI bus structure to search
25  *
26  * Given a PCI bus, returns the highest PCI bus number present in the set
27  * including the given PCI bus and its list of child PCI buses.
28  */
29 unsigned char __devinit
30 pci_bus_max_busnr(struct pci_bus* bus)
31 {
32 	struct list_head *tmp;
33 	unsigned char max, n;
34 
35 	max = bus->number;
36 	list_for_each(tmp, &bus->children) {
37 		n = pci_bus_max_busnr(pci_bus_b(tmp));
38 		if(n > max)
39 			max = n;
40 	}
41 	return max;
42 }
43 
44 /**
45  * pci_max_busnr - returns maximum PCI bus number
46  *
47  * Returns the highest PCI bus number present in the system global list of
48  * PCI buses.
49  */
50 unsigned char __devinit
51 pci_max_busnr(void)
52 {
53 	struct pci_bus *bus = NULL;
54 	unsigned char max, n;
55 
56 	max = 0;
57 	while ((bus = pci_find_next_bus(bus)) != NULL) {
58 		n = pci_bus_max_busnr(bus);
59 		if(n > max)
60 			max = n;
61 	}
62 	return max;
63 }
64 
65 static int __pci_bus_find_cap(struct pci_bus *bus, unsigned int devfn, u8 hdr_type, int cap)
66 {
67 	u16 status;
68 	u8 pos, id;
69 	int ttl = 48;
70 
71 	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
72 	if (!(status & PCI_STATUS_CAP_LIST))
73 		return 0;
74 
75 	switch (hdr_type) {
76 	case PCI_HEADER_TYPE_NORMAL:
77 	case PCI_HEADER_TYPE_BRIDGE:
78 		pci_bus_read_config_byte(bus, devfn, PCI_CAPABILITY_LIST, &pos);
79 		break;
80 	case PCI_HEADER_TYPE_CARDBUS:
81 		pci_bus_read_config_byte(bus, devfn, PCI_CB_CAPABILITY_LIST, &pos);
82 		break;
83 	default:
84 		return 0;
85 	}
86 	while (ttl-- && pos >= 0x40) {
87 		pos &= ~3;
88 		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID, &id);
89 		if (id == 0xff)
90 			break;
91 		if (id == cap)
92 			return pos;
93 		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_NEXT, &pos);
94 	}
95 	return 0;
96 }
97 
98 /**
99  * pci_find_capability - query for a device's capabilities
100  * @dev: PCI device to query
101  * @cap: capability code
102  *
103  * Tell if a device supports a given PCI capability.
104  * Returns the address of the requested capability structure within the
105  * device's PCI configuration space or 0 in case the device does not
106  * support it.  Possible values for @cap:
107  *
108  *  %PCI_CAP_ID_PM           Power Management
109  *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
110  *  %PCI_CAP_ID_VPD          Vital Product Data
111  *  %PCI_CAP_ID_SLOTID       Slot Identification
112  *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
113  *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
114  *  %PCI_CAP_ID_PCIX         PCI-X
115  *  %PCI_CAP_ID_EXP          PCI Express
116  */
117 int pci_find_capability(struct pci_dev *dev, int cap)
118 {
119 	return __pci_bus_find_cap(dev->bus, dev->devfn, dev->hdr_type, cap);
120 }
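
/*
 * Illustrative usage (not part of the original file): a driver for a
 * hypothetical device "pdev" might locate the Power Management capability
 * and then read registers relative to the returned offset:
 *
 *	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	u16 pmc;
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_PMC, &pmc);
 *
 * A return value of 0 simply means the capability is not present.
 */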
121 
122 /**
123  * pci_bus_find_capability - query for a device's capabilities
124  * @bus:   the PCI bus to query
125  * @devfn: PCI device to query
126  * @cap:   capability code
127  *
128  * Like pci_find_capability() but works for PCI devices that do not have a
129  * pci_dev structure set up yet.
130  *
131  * Returns the address of the requested capability structure within the
132  * device's PCI configuration space or 0 in case the device does not
133  * support it.
134  */
135 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
136 {
137 	u8 hdr_type;
138 
139 	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
140 
141 	return __pci_bus_find_cap(bus, devfn, hdr_type & 0x7f, cap);
142 }
143 
144 /**
145  * pci_find_ext_capability - Find an extended capability
146  * @dev: PCI device to query
147  * @cap: capability code
148  *
149  * Returns the address of the requested extended capability structure
150  * within the device's PCI configuration space or 0 if the device does
151  * not support it.  Possible values for @cap:
152  *
153  *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
154  *  %PCI_EXT_CAP_ID_VC		Virtual Channel
155  *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
156  *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
157  */
158 int pci_find_ext_capability(struct pci_dev *dev, int cap)
159 {
160 	u32 header;
161 	int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */
162 	int pos = 0x100;
163 
164 	if (dev->cfg_size <= 256)
165 		return 0;
166 
167 	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
168 		return 0;
169 
170 	/*
171 	 * If we have no capabilities, this is indicated by cap ID,
172 	 * cap version and next pointer all being 0.
173 	 */
174 	if (header == 0)
175 		return 0;
176 
177 	while (ttl-- > 0) {
178 		if (PCI_EXT_CAP_ID(header) == cap)
179 			return pos;
180 
181 		pos = PCI_EXT_CAP_NEXT(header);
182 		if (pos < 0x100)
183 			break;
184 
185 		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
186 			break;
187 	}
188 
189 	return 0;
190 }
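
/*
 * Illustrative sketch (not part of the original file): a PCI Express driver
 * could probe for Advanced Error Reporting registers like this, where
 * "pdev" is a hypothetical, already enabled device:
 *
 *	int aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *
 * A non-zero return is an offset of at least 0x100, i.e. the capability
 * lives in PCI Express extended configuration space; zero means the
 * device has no AER block and the driver simply skips AER setup.
 */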
191 
192 /**
193  * pci_find_parent_resource - return resource region of parent bus of given region
194  * @dev: PCI device structure containing resources to be searched
195  * @res: child resource record for which parent is sought
196  *
197  *  For a given resource region of a given device, return the resource
198  *  region of the parent bus the given region is contained in, or where
199  *  it should be allocated from.
200  */
201 struct resource *
202 pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
203 {
204 	const struct pci_bus *bus = dev->bus;
205 	int i;
206 	struct resource *best = NULL;
207 
208 	for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
209 		struct resource *r = bus->resource[i];
210 		if (!r)
211 			continue;
212 		if (res->start && !(res->start >= r->start && res->end <= r->end))
213 			continue;	/* Not contained */
214 		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
215 			continue;	/* Wrong type */
216 		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
217 			return r;	/* Exact match */
218 		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
219 			best = r;	/* Approximating prefetchable by non-prefetchable */
220 	}
221 	return best;
222 }
223 
224 /**
225  * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
226  * @dev: PCI device to have its BARs restored
227  *
228  * Restore the BAR values for a given device, so as to make it
229  * accessible by its driver.
230  */
231 void
232 pci_restore_bars(struct pci_dev *dev)
233 {
234 	int i, numres;
235 
236 	switch (dev->hdr_type) {
237 	case PCI_HEADER_TYPE_NORMAL:
238 		numres = 6;
239 		break;
240 	case PCI_HEADER_TYPE_BRIDGE:
241 		numres = 2;
242 		break;
243 	case PCI_HEADER_TYPE_CARDBUS:
244 		numres = 1;
245 		break;
246 	default:
247 		/* Should never get here, but just in case... */
248 		return;
249 	}
250 
251 	for (i = 0; i < numres; i ++)
252 		pci_update_resource(dev, &dev->resource[i], i);
253 }
254 
255 /**
256  * pci_set_power_state - Set the power state of a PCI device
257  * @dev: PCI device to be suspended
258  * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
259  *
260  * Transition a device to a new power state, using the Power Management
261  * Capabilities in the device's config space.
262  *
263  * RETURN VALUE:
264  * -EINVAL if trying to enter a sleep state shallower than the one we're already in (D0 is always allowed).
265  * 0 if we're already in the requested state.
266  * -EIO if device does not support PCI PM.
267  * 0 if we can successfully change the power state.
268  */
269 int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t);
270 int
271 pci_set_power_state(struct pci_dev *dev, pci_power_t state)
272 {
273 	int pm, need_restore = 0;
274 	u16 pmcsr, pmc;
275 
276 	/* bound the state we're entering */
277 	if (state > PCI_D3hot)
278 		state = PCI_D3hot;
279 
280 	/* Validate current state:
281 	 * Can enter D0 from any state, but we can only go deeper
282 	 * to sleep if we're already in a low power state
283 	 */
284 	if (state != PCI_D0 && dev->current_state > state)
285 		return -EINVAL;
286 	else if (dev->current_state == state)
287 		return 0;        /* we're already there */
288 
289 	/* find PCI PM capability in list */
290 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
291 
292 	/* abort if the device doesn't support PM capabilities */
293 	if (!pm)
294 		return -EIO;
295 
296 	pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc);
297 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
298 		printk(KERN_DEBUG
299 		       "PCI: %s has unsupported PM cap regs version (%u)\n",
300 		       pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
301 		return -EIO;
302 	}
303 
304 	/* check if this device supports the desired state */
305 	if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
306 		return -EIO;
307 	else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
308 		return -EIO;
309 
310 	pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
311 
312 	/* If we're in D3, force entire word to 0.
313 	 * This doesn't affect PME_Status, disables PME_En, and
314 	 * sets PowerState to 0.
315 	 */
316 	if (dev->current_state >= PCI_D3hot) {
317 		if (!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
318 			need_restore = 1;
319 		pmcsr = 0;
320 	} else {
321 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
322 		pmcsr |= state;
323 	}
324 
325 	/* enter specified state */
326 	pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);
327 
328 	/* Mandatory power management transition delays */
329 	/* see PCI PM 1.1 5.6.1 table 18 */
330 	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
331 		msleep(10);
332 	else if (state == PCI_D2 || dev->current_state == PCI_D2)
333 		udelay(200);
334 
335 	/*
336 	 * Give firmware a chance to be called, such as ACPI _PRx, _PSx.
337 	 * The firmware method is invoked after the native method.
338 	 */
339 	if (platform_pci_set_power_state)
340 		platform_pci_set_power_state(dev, state);
341 
342 	dev->current_state = state;
343 
344 	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
345 	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
346 	 * from D3hot to D0 _may_ perform an internal reset, thereby
347 	 * going to "D0 Uninitialized" rather than "D0 Initialized".
348 	 * For example, at least some versions of the 3c905B and the
349 	 * 3c556B exhibit this behaviour.
350 	 *
351 	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
352 	 * devices in a D3hot state at boot.  Consequently, we need to
353 	 * restore at least the BARs so that the device will be
354 	 * accessible to its driver.
355 	 */
356 	if (need_restore)
357 		pci_restore_bars(dev);
358 
359 	return 0;
360 }
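
/*
 * Illustrative usage (assumed caller code, not from this file): most
 * callers only check the return value, since the mandatory transition
 * delays are handled here; "pdev" is a hypothetical device:
 *
 *	if (pci_set_power_state(pdev, PCI_D3hot))
 *		printk(KERN_WARNING "foo: could not enter D3hot\n");
 *
 * A return of -EIO usually just means the device has no usable PM
 * capability, which many drivers treat as harmless.
 */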
361 
362 int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);
363 
364 /**
365  * pci_choose_state - Choose the power state of a PCI device
366  * @dev: PCI device to be suspended
367  * @state: target sleep state for the whole system. This is the value
368  *	that is passed to suspend() function.
369  *
370  * Returns PCI power state suitable for given device and given system
371  * message.
372  */
373 
374 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
375 {
376 	int ret;
377 
378 	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
379 		return PCI_D0;
380 
381 	if (platform_pci_choose_state) {
382 		ret = platform_pci_choose_state(dev, state);
383 		if (ret >= 0)
384 			state.event = ret;
385 	}
386 
387 	switch (state.event) {
388 	case PM_EVENT_ON:
389 		return PCI_D0;
390 	case PM_EVENT_FREEZE:
391 	case PM_EVENT_SUSPEND:
392 		return PCI_D3hot;
393 	default:
394 		printk("They asked me for state %d\n", state.event);
395 		BUG();
396 	}
397 	return PCI_D0;
398 }
399 
400 EXPORT_SYMBOL(pci_choose_state);
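
/*
 * Illustrative sketch (hypothetical driver code): the pm_message_t passed
 * to a driver's suspend() hook can be turned into a concrete D-state and
 * applied in one step; "foo_suspend" is an invented name:
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *		return 0;
 *	}
 *
 * Real drivers quiesce the device (stop DMA, free IRQs, etc.) before the
 * power-state change.
 */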
401 
402 /**
403  * pci_save_state - save the PCI configuration space of a device before suspending
404  * @dev: PCI device that we're dealing with
405  */
406 int
407 pci_save_state(struct pci_dev *dev)
408 {
409 	int i;
410 	/* XXX: 100% dword access ok here? */
411 	for (i = 0; i < 16; i++)
412 		pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
413 	return 0;
414 }
415 
416 /**
417  * pci_restore_state - Restore the saved state of a PCI device
418  * @dev: PCI device that we're dealing with
419  */
420 int
421 pci_restore_state(struct pci_dev *dev)
422 {
423 	int i;
424 
425 	for (i = 0; i < 16; i++)
426 		pci_write_config_dword(dev,i * 4, dev->saved_config_space[i]);
427 	return 0;
428 }
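
/*
 * Illustrative sketch (hypothetical resume hook): pci_restore_state() is
 * the counterpart of pci_save_state() and is normally called once the
 * device is back in D0; "foo_resume" is an invented name:
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */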
429 
430 /**
431  * pci_enable_device_bars - Initialize some of a device for use
432  * @dev: PCI device to be initialized
433  * @bars: bitmask of BARs that must be configured
434  *
435  *  Initialize device before it's used by a driver. Ask low-level code
436  *  to enable selected I/O and memory resources. Wake up the device if it
437  *  was suspended. Beware, this function can fail.
438  */
439 
440 int
441 pci_enable_device_bars(struct pci_dev *dev, int bars)
442 {
443 	int err;
444 
445 	err = pci_set_power_state(dev, PCI_D0);
446 	if (err < 0 && err != -EIO)
447 		return err;
448 	err = pcibios_enable_device(dev, bars);
449 	if (err < 0)
450 		return err;
451 	return 0;
452 }
453 
454 /**
455  * pci_enable_device - Initialize device before it's used by a driver.
456  * @dev: PCI device to be initialized
457  *
458  *  Initialize device before it's used by a driver. Ask low-level code
459  *  to enable I/O and memory. Wake up the device if it was suspended.
460  *  Beware, this function can fail.
461  */
462 int
463 pci_enable_device(struct pci_dev *dev)
464 {
465 	int err;
466 
467 	if ((err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1)))
468 		return err;
469 	pci_fixup_device(pci_fixup_enable, dev);
470 	dev->is_enabled = 1;
471 	return 0;
472 }
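
/*
 * Illustrative sketch (hypothetical probe routine, not part of this file):
 * enabling the device is the first thing a driver's probe() does, and any
 * failure must be propagated; "foo_probe" is an invented name:
 *
 *	static int __devinit foo_probe(struct pci_dev *pdev,
 *				       const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 *
 * pci_set_master() is only needed by devices that perform bus-master DMA.
 */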
473 
474 /**
475  * pcibios_disable_device - disable arch specific PCI resources for device dev
476  * @dev: the PCI device to disable
477  *
478  * Disables architecture specific PCI resources for the device. This
479  * is the default implementation. Architecture implementations can
480  * override this.
481  */
482 void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
483 
484 /**
485  * pci_disable_device - Disable PCI device after use
486  * @dev: PCI device to be disabled
487  *
488  * Signal to the system that the PCI device is not in use by the system
489  * anymore.  This only involves disabling PCI bus-mastering, if active.
490  */
491 void
492 pci_disable_device(struct pci_dev *dev)
493 {
494 	u16 pci_command;
495 
496 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
497 	if (pci_command & PCI_COMMAND_MASTER) {
498 		pci_command &= ~PCI_COMMAND_MASTER;
499 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
500 	}
501 	dev->is_busmaster = 0;
502 
503 	pcibios_disable_device(dev);
504 	dev->is_enabled = 0;
505 }
506 
507 /**
508  * pci_enable_wake - enable device to generate PME# when suspended
509  * @dev: PCI device to operate on
510  * @state: Current state of device.
511  * @enable: Flag to enable or disable generation
512  *
513  * Set the bits in the device's PM Capabilities to generate PME# when
514  * the system is suspended.
515  *
516  * -EIO is returned if device doesn't have PM Capabilities.
517  * -EINVAL is returned if device supports it, but can't generate wake events.
518  * 0 if operation is successful.
519  *
520  */
521 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
522 {
523 	int pm;
524 	u16 value;
525 
526 	/* find PCI PM capability in list */
527 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
528 
529 	/* If device doesn't support PM Capabilities, but request is to disable
530 	 * wake events, it's a nop; otherwise fail */
531 	if (!pm)
532 		return enable ? -EIO : 0;
533 
534 	/* Check device's ability to generate PME# */
535 	pci_read_config_word(dev,pm+PCI_PM_PMC,&value);
536 
537 	value &= PCI_PM_CAP_PME_MASK;
538 	value >>= ffs(PCI_PM_CAP_PME_MASK) - 1;   /* First bit of mask */
539 
540 	/* Check if it can generate PME# from requested state. */
541 	if (!value || !(value & (1 << state)))
542 		return enable ? -EINVAL : 0;
543 
544 	pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);
545 
546 	/* Clear PME_Status by writing 1 to it and enable PME# */
547 	value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
548 
549 	if (!enable)
550 		value &= ~PCI_PM_CTRL_PME_ENABLE;
551 
552 	pci_write_config_word(dev, pm + PCI_PM_CTRL, value);
553 
554 	return 0;
555 }
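
/*
 * Illustrative usage (assumed driver code): a device with wake-up support
 * might arm PME# on the way into suspend and disarm it again on resume;
 * "pdev" is a hypothetical device:
 *
 *	pci_enable_wake(pdev, PCI_D3hot, 1);	(on the way down)
 *	...
 *	pci_enable_wake(pdev, PCI_D3hot, 0);	(on the way back up)
 *
 * The disable path always succeeds, so the return value is normally only
 * checked when enabling wake-up.
 */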
556 
557 int
558 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
559 {
560 	u8 pin;
561 
562 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
563 	if (!pin)
564 		return -1;
565 	pin--;
566 	while (dev->bus->self) {
567 		pin = (pin + PCI_SLOT(dev->devfn)) % 4;
568 		dev = dev->bus->self;
569 	}
570 	*bridge = dev;
571 	return pin;
572 }
573 
574 /**
575  *	pci_release_region - Release a PCI BAR
576  *	@pdev: PCI device whose resources were previously reserved by pci_request_region
577  *	@bar: BAR to release
578  *
579  *	Releases the PCI I/O and memory resources previously reserved by a
580  *	successful call to pci_request_region.  Call this function only
581  *	after all use of the PCI regions has ceased.
582  */
583 void pci_release_region(struct pci_dev *pdev, int bar)
584 {
585 	if (pci_resource_len(pdev, bar) == 0)
586 		return;
587 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
588 		release_region(pci_resource_start(pdev, bar),
589 				pci_resource_len(pdev, bar));
590 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
591 		release_mem_region(pci_resource_start(pdev, bar),
592 				pci_resource_len(pdev, bar));
593 }
594 
595 /**
596  *	pci_request_region - Reserve a PCI I/O and memory resource
597  *	@pdev: PCI device whose resources are to be reserved
598  *	@bar: BAR to be reserved
599  *	@res_name: Name to be associated with resource.
600  *
601  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
602  *	being reserved by owner @res_name.  Do not access any
603  *	address inside the PCI regions unless this call returns
604  *	successfully.
605  *
606  *	Returns 0 on success, or %EBUSY on error.  A warning
607  *	message is also printed on failure.
608  */
609 int pci_request_region(struct pci_dev *pdev, int bar, char *res_name)
610 {
611 	if (pci_resource_len(pdev, bar) == 0)
612 		return 0;
613 
614 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
615 		if (!request_region(pci_resource_start(pdev, bar),
616 			    pci_resource_len(pdev, bar), res_name))
617 			goto err_out;
618 	}
619 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
620 		if (!request_mem_region(pci_resource_start(pdev, bar),
621 				        pci_resource_len(pdev, bar), res_name))
622 			goto err_out;
623 	}
624 
625 	return 0;
626 
627 err_out:
628 	printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
629 		pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
630 		bar + 1, /* PCI BAR # */
631 		pci_resource_len(pdev, bar), pci_resource_start(pdev, bar),
632 		pci_name(pdev));
633 	return -EBUSY;
634 }
635 
636 
637 /**
638  *	pci_release_regions - Release reserved PCI I/O and memory resources
639  *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
640  *
641  *	Releases all PCI I/O and memory resources previously reserved by a
642  *	successful call to pci_request_regions.  Call this function only
643  *	after all use of the PCI regions has ceased.
644  */
645 
646 void pci_release_regions(struct pci_dev *pdev)
647 {
648 	int i;
649 
650 	for (i = 0; i < 6; i++)
651 		pci_release_region(pdev, i);
652 }
653 
654 /**
655  *	pci_request_regions - Reserve PCI I/O and memory resources
656  *	@pdev: PCI device whose resources are to be reserved
657  *	@res_name: Name to be associated with resource.
658  *
659  *	Mark all PCI regions associated with PCI device @pdev as
660  *	being reserved by owner @res_name.  Do not access any
661  *	address inside the PCI regions unless this call returns
662  *	successfully.
663  *
664  *	Returns 0 on success, or %EBUSY on error.  A warning
665  *	message is also printed on failure.
666  */
667 int pci_request_regions(struct pci_dev *pdev, char *res_name)
668 {
669 	int i;
670 
671 	for (i = 0; i < 6; i++)
672 		if(pci_request_region(pdev, i, res_name))
673 			goto err_out;
674 	return 0;
675 
676 err_out:
677 	while(--i >= 0)
678 		pci_release_region(pdev, i);
679 
680 	return -EBUSY;
681 }
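
/*
 * Illustrative sketch (hypothetical probe fragment): a driver typically
 * claims all BARs up front and then maps the one it needs, unwinding in
 * reverse order on error; "foo", "regs" and the labels are invented names:
 *
 *	err = pci_request_regions(pdev, "foo");
 *	if (err)
 *		goto out_disable;
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!regs) {
 *		err = -ENOMEM;
 *		goto out_release;
 *	}
 *	...
 * out_release:
 *	pci_release_regions(pdev);
 * out_disable:
 *	pci_disable_device(pdev);
 */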
682 
683 /**
684  * pci_set_master - enables bus-mastering for device dev
685  * @dev: the PCI device to enable
686  *
687  * Enables bus-mastering on the device and calls pcibios_set_master()
688  * to do the needed arch specific settings.
689  */
690 void
691 pci_set_master(struct pci_dev *dev)
692 {
693 	u16 cmd;
694 
695 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
696 	if (! (cmd & PCI_COMMAND_MASTER)) {
697 		pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev));
698 		cmd |= PCI_COMMAND_MASTER;
699 		pci_write_config_word(dev, PCI_COMMAND, cmd);
700 	}
701 	dev->is_busmaster = 1;
702 	pcibios_set_master(dev);
703 }
704 
705 #ifndef HAVE_ARCH_PCI_MWI
706 /* This can be overridden by arch code. */
707 u8 pci_cache_line_size = L1_CACHE_BYTES >> 2;
708 
709 /**
710  * pci_generic_prep_mwi - helper function for pci_set_mwi
711  * @dev: the PCI device for which MWI is enabled
712  *
713  * Helper function for generic implementation of pcibios_prep_mwi
714  * function.  Originally copied from drivers/net/acenic.c.
715  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
716  *
717  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
718  */
719 static int
720 pci_generic_prep_mwi(struct pci_dev *dev)
721 {
722 	u8 cacheline_size;
723 
724 	if (!pci_cache_line_size)
725 		return -EINVAL;		/* The system doesn't support MWI. */
726 
727 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
728 	   equal to or a multiple of the right value. */
729 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
730 	if (cacheline_size >= pci_cache_line_size &&
731 	    (cacheline_size % pci_cache_line_size) == 0)
732 		return 0;
733 
734 	/* Write the correct value. */
735 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
736 	/* Read it back. */
737 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
738 	if (cacheline_size == pci_cache_line_size)
739 		return 0;
740 
741 	printk(KERN_DEBUG "PCI: cache line size of %d is not supported "
742 	       "by device %s\n", pci_cache_line_size << 2, pci_name(dev));
743 
744 	return -EINVAL;
745 }
746 #endif /* !HAVE_ARCH_PCI_MWI */
747 
748 /**
749  * pci_set_mwi - enables memory-write-invalidate PCI transaction
750  * @dev: the PCI device for which MWI is enabled
751  *
752  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
753  * and then calls pcibios_prep_mwi() to do the needed arch specific
754  * operations or a generic mwi-prep function.
755  *
756  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
757  */
758 int
759 pci_set_mwi(struct pci_dev *dev)
760 {
761 	int rc;
762 	u16 cmd;
763 
764 #ifdef HAVE_ARCH_PCI_MWI
765 	rc = pcibios_prep_mwi(dev);
766 #else
767 	rc = pci_generic_prep_mwi(dev);
768 #endif
769 
770 	if (rc)
771 		return rc;
772 
773 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
774 	if (! (cmd & PCI_COMMAND_INVALIDATE)) {
775 		pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev));
776 		cmd |= PCI_COMMAND_INVALIDATE;
777 		pci_write_config_word(dev, PCI_COMMAND, cmd);
778 	}
779 
780 	return 0;
781 }
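
/*
 * Illustrative usage (not from the original file): MWI is an optimization,
 * so many drivers simply log and continue if it cannot be enabled:
 *
 *	if (pci_set_mwi(pdev))
 *		printk(KERN_INFO "foo: MWI not available, continuing\n");
 *
 * pci_clear_mwi() below is the corresponding teardown, typically called
 * from the driver's remove() path if MWI had been enabled.
 */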
782 
783 /**
784  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
785  * @dev: the PCI device to disable
786  *
787  * Disables PCI Memory-Write-Invalidate transaction on the device
788  */
789 void
790 pci_clear_mwi(struct pci_dev *dev)
791 {
792 	u16 cmd;
793 
794 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
795 	if (cmd & PCI_COMMAND_INVALIDATE) {
796 		cmd &= ~PCI_COMMAND_INVALIDATE;
797 		pci_write_config_word(dev, PCI_COMMAND, cmd);
798 	}
799 }
800 
801 /**
802  * pci_intx - enables/disables PCI INTx for device pdev
803  * @pdev: the PCI device to operate on
804  * @enable: boolean: whether to enable (non-zero) or disable (zero) INTx
805  *
806  * Enables/disables PCI INTx for device pdev
807  */
808 void
809 pci_intx(struct pci_dev *pdev, int enable)
810 {
811 	u16 pci_command, new;
812 
813 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
814 
815 	if (enable) {
816 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
817 	} else {
818 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
819 	}
820 
821 	if (new != pci_command) {
822 		pci_write_config_word(pdev, PCI_COMMAND, new);
823 	}
824 }
825 
826 #ifndef HAVE_ARCH_PCI_SET_DMA_MASK
827 /*
828  * These can be overridden by arch-specific implementations
829  */
830 int
831 pci_set_dma_mask(struct pci_dev *dev, u64 mask)
832 {
833 	if (!pci_dma_supported(dev, mask))
834 		return -EIO;
835 
836 	dev->dma_mask = mask;
837 
838 	return 0;
839 }
840 
841 int
842 pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
843 {
844 	if (!pci_dma_supported(dev, mask))
845 		return -EIO;
846 
847 	dev->dev.coherent_dma_mask = mask;
848 
849 	return 0;
850 }
851 #endif
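
/*
 * Illustrative sketch (hypothetical probe fragment): a DMA-capable driver
 * asks for the widest mask it can handle and falls back to 32 bits; the
 * DMA_*BIT_MASK constants come from <linux/dma-mapping.h>:
 *
 *	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
 *		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 *		if (err) {
 *			printk(KERN_ERR "foo: no usable DMA mask\n");
 *			return err;
 *		}
 *	}
 *
 * The consistent (coherent) mask is usually set to the same value right
 * afterwards with pci_set_consistent_dma_mask().
 */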
852 
853 static int __devinit pci_init(void)
854 {
855 	struct pci_dev *dev = NULL;
856 
857 	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
858 		pci_fixup_device(pci_fixup_final, dev);
859 	}
860 	return 0;
861 }
862 
863 static int __devinit pci_setup(char *str)
864 {
865 	while (str) {
866 		char *k = strchr(str, ',');
867 		if (k)
868 			*k++ = 0;
869 		if (*str && (str = pcibios_setup(str)) && *str) {
870 			/* PCI layer options should be handled here */
871 			printk(KERN_ERR "PCI: Unknown option `%s'\n", str);
872 		}
873 		str = k;
874 	}
875 	return 1;
876 }
877 
878 device_initcall(pci_init);
879 
880 __setup("pci=", pci_setup);
881 
882 #if defined(CONFIG_ISA) || defined(CONFIG_EISA)
883 /* FIXME: Some boxes have multiple ISA bridges! */
884 struct pci_dev *isa_bridge;
885 EXPORT_SYMBOL(isa_bridge);
886 #endif
887 
888 EXPORT_SYMBOL_GPL(pci_restore_bars);
889 EXPORT_SYMBOL(pci_enable_device_bars);
890 EXPORT_SYMBOL(pci_enable_device);
891 EXPORT_SYMBOL(pci_disable_device);
892 EXPORT_SYMBOL(pci_max_busnr);
893 EXPORT_SYMBOL(pci_bus_max_busnr);
894 EXPORT_SYMBOL(pci_find_capability);
895 EXPORT_SYMBOL(pci_bus_find_capability);
896 EXPORT_SYMBOL(pci_release_regions);
897 EXPORT_SYMBOL(pci_request_regions);
898 EXPORT_SYMBOL(pci_release_region);
899 EXPORT_SYMBOL(pci_request_region);
900 EXPORT_SYMBOL(pci_set_master);
901 EXPORT_SYMBOL(pci_set_mwi);
902 EXPORT_SYMBOL(pci_clear_mwi);
903 EXPORT_SYMBOL_GPL(pci_intx);
904 EXPORT_SYMBOL(pci_set_dma_mask);
905 EXPORT_SYMBOL(pci_set_consistent_dma_mask);
906 EXPORT_SYMBOL(pci_assign_resource);
907 EXPORT_SYMBOL(pci_find_parent_resource);
908 
909 EXPORT_SYMBOL(pci_set_power_state);
910 EXPORT_SYMBOL(pci_save_state);
911 EXPORT_SYMBOL(pci_restore_state);
912 EXPORT_SYMBOL(pci_enable_wake);
913 
914 /* Quirk info */
915 
916 EXPORT_SYMBOL(isa_dma_bridge_buggy);
917 EXPORT_SYMBOL(pci_pci_problems);
918