xref: /linux/drivers/pci/pci.c (revision 26b0d14106954ae46d2f4f7eec3481828a210f7d)
/*
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
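
/*
 * Editor's illustrative sketch (not part of the original file): how a
 * hypothetical driver might use pci_ioremap_bar() from its probe routine.
 * The name "foo_probe" and the choice of BAR 0 are assumptions made for
 * the example only.
 */
#if 0
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* Map BAR 0; pci_ioremap_bar() returns NULL if it isn't memory. */
	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	/* A real driver would stash "regs" in its private data here. */
	return 0;
}
#endif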

#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}

#endif  /*  0  */

#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
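
/*
 * Editor's illustrative sketch (not part of the original file): finding the
 * PM capability with pci_find_capability() and reading its PMC register,
 * much as pci_pm_init() does later in this file.  "foo_show_pm_cap" is a
 * hypothetical helper.
 */
#if 0
static void foo_show_pm_cap(struct pci_dev *pdev)
{
	int pos;
	u16 pmc;

	pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (!pos)
		return;	/* no PM capability */

	pci_read_config_word(pdev, pos + PCI_PM_PMC, &pmc);
	dev_info(&pdev->dev, "PM capability at %#x, version %u\n",
		 pos, pmc & PCI_PM_CAP_VER_MASK);
}
#endif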

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
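
/*
 * Editor's illustrative sketch (not part of the original file): locating the
 * Advanced Error Reporting extended capability with
 * pci_find_ext_capability().  "foo_dump_aer" is hypothetical;
 * PCI_ERR_UNCOR_STATUS comes from include/linux/pci_regs.h.
 */
#if 0
static void foo_dump_aer(struct pci_dev *pdev)
{
	int pos;
	u32 status;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;	/* no AER capability */

	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_STATUS, &status);
	dev_info(&pdev->dev, "AER uncorrectable status: %#010x\n", status);
}
#endif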

/**
 * pci_bus_find_ext_capability - find an extended capability
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_ext_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
				int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
		return 0;
	if (header == 0xffffffff || header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
			break;
	}

	return 0;
}

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure containing the resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For the given resource region of the given device, return the resource
 * region of the parent bus that the given region is contained in, or from
 * which it should be allocated.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * We can enter D0 from any state, but we can only go deeper
	 * into a sleep state if we're already in a low power state.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	} else {
		error = -ENODEV;
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	}

	return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0)
		pci_platform_power_transition(dev, PCI_D0);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	return state >= PCI_D0 ?
			pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;
	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
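
/*
 * Editor's illustrative sketch (not part of the original file): a legacy
 * .suspend/.resume pair combining pci_choose_state() with
 * pci_set_power_state(), pci_save_state() and pci_restore_state().  The
 * "foo_" functions are hypothetical driver code.
 */
#if 0
static int foo_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
}

static int foo_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
#endif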

#define PCI_EXP_SAVE_REGS	7

#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)

static struct pci_cap_saved_state *pci_find_saved_cap(
	struct pci_dev *pci_dev, char cap)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos;

	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset "
			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device's saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state was saved or if an error occurred.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	struct hlist_node *pos;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
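
/*
 * Editor's illustrative sketch (not part of the original file): the
 * store/load round trip.  A caller snapshots the saved state before an
 * operation that may clobber it, then reloads and restores afterwards.
 * "foo_reset_with_state" is hypothetical.
 */
#if 0
static int foo_reset_with_state(struct pci_dev *pdev)
{
	struct pci_saved_state *state;

	pci_save_state(pdev);
	state = pci_store_saved_state(pdev);	/* opaque snapshot */
	if (!state)
		return -ENOMEM;

	/* ...a reset or other config-space-destroying operation... */

	pci_load_and_free_saved_state(pdev, &state);
	pci_restore_state(pdev);
	return 0;
}
#endif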

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code; write a proper resume handler and use that
 * instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	/* Walk all resources except the SR-IOV range */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable Memory resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O and memory. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 *
 *  Note we don't actually enable the device many times if we call
 *  this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
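
/*
 * Editor's illustrative sketch (not part of the original file): a managed
 * probe.  With pcim_enable_device() the device is disabled automatically on
 * driver detach, so the error and remove paths need no explicit
 * pci_disable_device().  "foo_managed_probe" is hypothetical.
 */
#if 0
static int foo_managed_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;	/* nothing to unwind */

	pci_set_master(pdev);
	return 0;
}
#endif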

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device(struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
							enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* PCI (as opposed to PCIe) PME requires that the device have
	   its PME# line hooked up correctly. Not all hardware vendors
	   do this, so the PME never gets delivered and the device
	   remains asleep. The easiest way around this is to
	   periodically walk the list of suspended devices and check
	   whether any have their PME flag set. The assumption is that
	   we'll wake up often enough anyway that this won't be a huge
	   hit, and the power savings from the devices will still be a
	   win. */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);
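
/*
 * Editor's illustrative sketch (not part of the original file): arming
 * wake-up from a driver's suspend path via the pci_enable_wake() wrapper
 * (an inline in include/linux/pci.h that calls __pci_enable_wake() with
 * runtime == false).  "foo_suspend_wol" is hypothetical.
 */
#if 0
static int foo_suspend_wol(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_enable_wake(pdev, PCI_D3hot, device_may_wakeup(&pdev->dev));
	return pci_set_power_state(pdev, PCI_D3hot);
}
#endif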

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	/* Some devices mustn't be in D3 during system sleep */
	if (target_state == PCI_D3hot &&
			(dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
		return 0;

	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
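
/*
 * Editor's illustrative sketch (not part of the original file): pairing
 * pci_prepare_to_sleep() with pci_back_from_sleep() in a driver's system
 * sleep callbacks.  The "foo_" functions are hypothetical.
 */
#if 0
static int foo_pm_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	return pci_prepare_to_sleep(pdev);
}

static int foo_pm_resume(struct pci_dev *pdev)
{
	int err = pci_back_from_sleep(pdev);

	pci_restore_state(pdev);
	return err;
}
#endif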

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		__pci_enable_wake(dev, target_state, true, false);

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
1803  * Return true if the device itself is capable of generating wake-up events
1804  * (through the platform or using the native PCIe PME) or if the device supports
1805  * PME and one of its upstream bridges can generate wake-up events.
1806  */
1807 bool pci_dev_run_wake(struct pci_dev *dev)
1808 {
1809 	struct pci_bus *bus = dev->bus;
1810 
1811 	if (device_run_wake(&dev->dev))
1812 		return true;
1813 
1814 	if (!dev->pme_support)
1815 		return false;
1816 
1817 	while (bus->parent) {
1818 		struct pci_dev *bridge = bus->self;
1819 
1820 		if (device_run_wake(&bridge->dev))
1821 			return true;
1822 
1823 		bus = bus->parent;
1824 	}
1825 
1826 	/* We have reached the root bus. */
1827 	if (bus->bridge)
1828 		return device_run_wake(bus->bridge);
1829 
1830 	return false;
1831 }
1832 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1833 
1834 /**
1835  * pci_pm_init - Initialize PM functions of given PCI device
1836  * @dev: PCI device to handle.
1837  */
1838 void pci_pm_init(struct pci_dev *dev)
1839 {
1840 	int pm;
1841 	u16 pmc;
1842 
1843 	pm_runtime_forbid(&dev->dev);
1844 	device_enable_async_suspend(&dev->dev);
1845 	dev->wakeup_prepared = false;
1846 
1847 	dev->pm_cap = 0;
1848 
1849 	/* find PCI PM capability in list */
1850 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1851 	if (!pm)
1852 		return;
1853 	/* Check device's ability to generate PME# */
1854 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1855 
1856 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1857 		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1858 			pmc & PCI_PM_CAP_VER_MASK);
1859 		return;
1860 	}
1861 
1862 	dev->pm_cap = pm;
1863 	dev->d3_delay = PCI_PM_D3_WAIT;
1864 
1865 	dev->d1_support = false;
1866 	dev->d2_support = false;
1867 	if (!pci_no_d1d2(dev)) {
1868 		if (pmc & PCI_PM_CAP_D1)
1869 			dev->d1_support = true;
1870 		if (pmc & PCI_PM_CAP_D2)
1871 			dev->d2_support = true;
1872 
1873 		if (dev->d1_support || dev->d2_support)
1874 			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1875 				   dev->d1_support ? " D1" : "",
1876 				   dev->d2_support ? " D2" : "");
1877 	}
1878 
1879 	pmc &= PCI_PM_CAP_PME_MASK;
1880 	if (pmc) {
1881 		dev_printk(KERN_DEBUG, &dev->dev,
1882 			 "PME# supported from%s%s%s%s%s\n",
1883 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1884 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1885 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1886 			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1887 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1888 		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1889 		dev->pme_poll = true;
1890 		/*
1891 		 * Make device's PM flags reflect the wake-up capability, but
1892 		 * let user space enable it to wake up the system as needed.
1893 		 */
1894 		device_set_wakeup_capable(&dev->dev, true);
1895 		/* Disable the PME# generation functionality */
1896 		pci_pme_active(dev, false);
1897 	} else {
1898 		dev->pme_support = 0;
1899 	}
1900 }
1901 
1902 /**
1903  * platform_pci_wakeup_init - init platform wakeup if present
1904  * @dev: PCI device
1905  *
1906  * Some devices don't have PCI PM caps but can still generate wakeup
1907  * events through platform methods (like ACPI events).  If @dev supports
1908  * platform wakeup events, set the device flag to indicate as much.  This
1909  * may be redundant if the device also supports PCI PM caps, but double
1910  * initialization should be safe in that case.
1911  */
1912 void platform_pci_wakeup_init(struct pci_dev *dev)
1913 {
1914 	if (!platform_pci_can_wakeup(dev))
1915 		return;
1916 
1917 	device_set_wakeup_capable(&dev->dev, true);
1918 	platform_pci_sleep_wake(dev, false);
1919 }
1920 
1921 static void pci_add_saved_cap(struct pci_dev *pci_dev,
1922 	struct pci_cap_saved_state *new_cap)
1923 {
1924 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
1925 }
1926 
1927 /**
1928  * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1929  * @dev: the PCI device
1930  * @cap: the capability to allocate the buffer for
1931  * @size: requested size of the buffer
1932  */
1933 static int pci_add_cap_save_buffer(
1934 	struct pci_dev *dev, char cap, unsigned int size)
1935 {
1936 	int pos;
1937 	struct pci_cap_saved_state *save_state;
1938 
1939 	pos = pci_find_capability(dev, cap);
1940 	if (pos <= 0)
1941 		return 0;
1942 
1943 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1944 	if (!save_state)
1945 		return -ENOMEM;
1946 
1947 	save_state->cap.cap_nr = cap;
1948 	save_state->cap.size = size;
1949 	pci_add_saved_cap(dev, save_state);
1950 
1951 	return 0;
1952 }
1953 
1954 /**
1955  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1956  * @dev: the PCI device
1957  */
1958 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1959 {
1960 	int error;
1961 
1962 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1963 					PCI_EXP_SAVE_REGS * sizeof(u16));
1964 	if (error)
1965 		dev_err(&dev->dev,
1966 			"unable to preallocate PCI Express save buffer\n");
1967 
1968 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1969 	if (error)
1970 		dev_err(&dev->dev,
1971 			"unable to preallocate PCI-X save buffer\n");
1972 }
1973 
1974 void pci_free_cap_save_buffers(struct pci_dev *dev)
1975 {
1976 	struct pci_cap_saved_state *tmp;
1977 	struct hlist_node *pos, *n;
1978 
1979 	hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
1980 		kfree(tmp);
1981 }
1982 
1983 /**
1984  * pci_enable_ari - enable ARI forwarding if hardware supports it
1985  * @dev: the PCI device
1986  */
1987 void pci_enable_ari(struct pci_dev *dev)
1988 {
1989 	int pos;
1990 	u32 cap;
1991 	u16 flags, ctrl;
1992 	struct pci_dev *bridge;
1993 
1994 	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
1995 		return;
1996 
1997 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1998 	if (!pos)
1999 		return;
2000 
2001 	bridge = dev->bus->self;
2002 	if (!bridge || !pci_is_pcie(bridge))
2003 		return;
2004 
2005 	pos = pci_pcie_cap(bridge);
2006 	if (!pos)
2007 		return;
2008 
2009 	/* ARI is a PCIe v2 feature */
2010 	pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
2011 	if ((flags & PCI_EXP_FLAGS_VERS) < 2)
2012 		return;
2013 
2014 	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
2015 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
2016 		return;
2017 
2018 	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
2019 	ctrl |= PCI_EXP_DEVCTL2_ARI;
2020 	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
2021 
2022 	bridge->ari_enabled = 1;
2023 }
2024 
2025 /**
2026  * pci_enable_ido - enable ID-based ordering on a device
2027  * @dev: the PCI device
2028  * @type: which types of IDO to enable
2029  *
2030  * Enable ID-based ordering on @dev.  @type can contain the bits
2031  * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
2032  * which types of transactions are allowed to be re-ordered.
2033  */
2034 void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2035 {
2036 	int pos;
2037 	u16 ctrl;
2038 
2039 	pos = pci_pcie_cap(dev);
2040 	if (!pos)
2041 		return;
2042 
2043 	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2044 	if (type & PCI_EXP_IDO_REQUEST)
2045 		ctrl |= PCI_EXP_IDO_REQ_EN;
2046 	if (type & PCI_EXP_IDO_COMPLETION)
2047 		ctrl |= PCI_EXP_IDO_CMP_EN;
2048 	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2049 }
2050 EXPORT_SYMBOL(pci_enable_ido);
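
/*
 * Illustrative sketch: a hypothetical driver whose traffic has no ordering
 * dependencies between unrelated transactions could opt in to IDO for both
 * directions during probe and opt out symmetrically on remove:
 *
 *	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
 *	...
 *	pci_disable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
 */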
2051 
2052 /**
2053  * pci_disable_ido - disable ID-based ordering on a device
2054  * @dev: the PCI device
2055  * @type: which types of IDO to disable
2056  */
2057 void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2058 {
2059 	int pos;
2060 	u16 ctrl;
2061 
2062 	if (!pci_is_pcie(dev))
2063 		return;
2064 
2065 	pos = pci_pcie_cap(dev);
2066 	if (!pos)
2067 		return;
2068 
2069 	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2070 	if (type & PCI_EXP_IDO_REQUEST)
2071 		ctrl &= ~PCI_EXP_IDO_REQ_EN;
2072 	if (type & PCI_EXP_IDO_COMPLETION)
2073 		ctrl &= ~PCI_EXP_IDO_CMP_EN;
2074 	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2075 }
2076 EXPORT_SYMBOL(pci_disable_ido);
2077 
2078 /**
2079  * pci_enable_obff - enable optimized buffer flush/fill
2080  * @dev: PCI device
2081  * @type: type of signaling to use
2082  *
2083  * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
2084  * signaling if possible, falling back to message signaling only if
2085  * WAKE# isn't supported.  @type should indicate whether the PCIe link
2086  * should be brought out of L0s or L1 to send the message.  It should be
2087  * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
2088  *
2089  * If your device can benefit from receiving all messages, even at the
2090  * power cost of bringing the link back up from a low power state, use
2091  * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0
2092  * (the preferred type).
2093  *
2094  * RETURNS:
2095  * Zero on success, appropriate error number on failure.
2096  */
2097 int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2098 {
2099 	int pos;
2100 	u32 cap;
2101 	u16 ctrl;
2102 	int ret;
2103 
2104 	if (!pci_is_pcie(dev))
2105 		return -ENOTSUPP;
2106 
2107 	pos = pci_pcie_cap(dev);
2108 	if (!pos)
2109 		return -ENOTSUPP;
2110 
2111 	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2112 	if (!(cap & PCI_EXP_OBFF_MASK))
2113 		return -ENOTSUPP; /* no OBFF support at all */
2114 
2115 	/* Make sure the topology supports OBFF as well */
2116 	if (dev->bus->self) {
2117 		ret = pci_enable_obff(dev->bus->self, type);
2118 		if (ret)
2119 			return ret;
2120 	}
2121 
2122 	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2123 	if (cap & PCI_EXP_OBFF_WAKE)
2124 		ctrl |= PCI_EXP_OBFF_WAKE_EN;
2125 	else {
2126 		switch (type) {
2127 		case PCI_EXP_OBFF_SIGNAL_L0:
2128 			if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2129 				ctrl |= PCI_EXP_OBFF_MSGA_EN;
2130 			break;
2131 		case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2132 			ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2133 			ctrl |= PCI_EXP_OBFF_MSGB_EN;
2134 			break;
2135 		default:
2136 			WARN(1, "bad OBFF signal type\n");
2137 			return -ENOTSUPP;
2138 		}
2139 	}
2140 	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2141 
2142 	return 0;
2143 }
2144 EXPORT_SYMBOL(pci_enable_obff);
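
/*
 * Illustrative sketch: a hypothetical latency-tolerant driver would ask
 * for the preferred L0 signal type and treat lack of OBFF support as
 * non-fatal:
 *
 *	if (pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0))
 *		dev_dbg(&pdev->dev, "OBFF unavailable, continuing without\n");
 */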
2145 
2146 /**
2147  * pci_disable_obff - disable optimized buffer flush/fill
2148  * @dev: PCI device
2149  *
2150  * Disable OBFF on @dev.
2151  */
2152 void pci_disable_obff(struct pci_dev *dev)
2153 {
2154 	int pos;
2155 	u16 ctrl;
2156 
2157 	if (!pci_is_pcie(dev))
2158 		return;
2159 
2160 	pos = pci_pcie_cap(dev);
2161 	if (!pos)
2162 		return;
2163 
2164 	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2165 	ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2166 	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2167 }
2168 EXPORT_SYMBOL(pci_disable_obff);
2169 
2170 /**
2171  * pci_ltr_supported - check whether a device supports LTR
2172  * @dev: PCI device
2173  *
2174  * RETURNS:
2175  * True if @dev supports latency tolerance reporting, false otherwise.
2176  */
2177 bool pci_ltr_supported(struct pci_dev *dev)
2178 {
2179 	int pos;
2180 	u32 cap;
2181 
2182 	if (!pci_is_pcie(dev))
2183 		return false;
2184 
2185 	pos = pci_pcie_cap(dev);
2186 	if (!pos)
2187 		return false;
2188 
2189 	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2190 
2191 	return cap & PCI_EXP_DEVCAP2_LTR;
2192 }
2193 EXPORT_SYMBOL(pci_ltr_supported);
2194 
2195 /**
2196  * pci_enable_ltr - enable latency tolerance reporting
2197  * @dev: PCI device
2198  *
2199  * Enable LTR on @dev if possible, which means enabling it first on
2200  * upstream ports.
2201  *
2202  * RETURNS:
2203  * Zero on success, errno on failure.
2204  */
2205 int pci_enable_ltr(struct pci_dev *dev)
2206 {
2207 	int pos;
2208 	u16 ctrl;
2209 	int ret;
2210 
2211 	if (!pci_ltr_supported(dev))
2212 		return -ENOTSUPP;
2213 
2214 	pos = pci_pcie_cap(dev);
2215 	if (!pos)
2216 		return -ENOTSUPP;
2217 
2218 	/* Only primary function can enable/disable LTR */
2219 	if (PCI_FUNC(dev->devfn) != 0)
2220 		return -EINVAL;
2221 
2222 	/* Enable upstream ports first */
2223 	if (dev->bus->self) {
2224 		ret = pci_enable_ltr(dev->bus->self);
2225 		if (ret)
2226 			return ret;
2227 	}
2228 
2229 	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2230 	ctrl |= PCI_EXP_LTR_EN;
2231 	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2232 
2233 	return 0;
2234 }
2235 EXPORT_SYMBOL(pci_enable_ltr);
2236 
2237 /**
2238  * pci_disable_ltr - disable latency tolerance reporting
2239  * @dev: PCI device
2240  */
2241 void pci_disable_ltr(struct pci_dev *dev)
2242 {
2243 	int pos;
2244 	u16 ctrl;
2245 
2246 	if (!pci_ltr_supported(dev))
2247 		return;
2248 
2249 	pos = pci_pcie_cap(dev);
2250 	if (!pos)
2251 		return;
2252 
2253 	/* Only primary function can enable/disable LTR */
2254 	if (PCI_FUNC(dev->devfn) != 0)
2255 		return;
2256 
2257 	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2258 	ctrl &= ~PCI_EXP_LTR_EN;
2259 	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2260 }
2261 EXPORT_SYMBOL(pci_disable_ltr);
2262 
2263 static int __pci_ltr_scale(int *val)
2264 {
2265 	int scale = 0;
2266 
2267 	while (*val > 1023) {
2268 		*val = (*val + 31) / 32;
2269 		scale++;
2270 	}
2271 	return scale;
2272 }
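
/*
 * Worked example: the LTR latency field holds a 10-bit value scaled by
 * 32^scale ns.  For 70000 ns the loop above goes 70000 -> 2188 -> 69 and
 * returns scale = 2, so the encoded tolerance is 69 * 32^2 = 70656 ns
 * (rounding is always upward, never below the requested latency).
 */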
2273 
2274 /**
2275  * pci_set_ltr - set LTR latency values
2276  * @dev: PCI device
2277  * @snoop_lat_ns: snoop latency in nanoseconds
2278  * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2279  *
2280  * Figure out the scale and set the LTR values accordingly.
2281  */
2282 int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2283 {
2284 	int pos, ret, snoop_scale, nosnoop_scale;
2285 	u16 val;
2286 
2287 	if (!pci_ltr_supported(dev))
2288 		return -ENOTSUPP;
2289 
2290 	snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2291 	nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2292 
2293 	if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2294 	    nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2295 		return -EINVAL;
2296 
2297 	if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2298 	    (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2299 		return -EINVAL;
2300 
2301 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2302 	if (!pos)
2303 		return -ENOTSUPP;
2304 
2305 	val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2306 	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2307 	if (ret)
2308 		return -EIO;
2309 
2310 	val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2311 	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2312 	if (ret)
2313 		return -EIO;
2314 
2315 	return 0;
2316 }
2317 EXPORT_SYMBOL(pci_set_ltr);
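
/*
 * Illustrative sketch: a hypothetical driver able to tolerate about
 * 100 us of latency would enable LTR up the hierarchy first, then
 * report its tolerances (both arguments are in nanoseconds):
 *
 *	if (!pci_enable_ltr(pdev))
 *		pci_set_ltr(pdev, 100 * 1000, 100 * 1000);
 */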
2318 
2319 static int pci_acs_enable;
2320 
2321 /**
2322  * pci_request_acs - ask for ACS to be enabled if supported
2323  */
2324 void pci_request_acs(void)
2325 {
2326 	pci_acs_enable = 1;
2327 }
2328 
2329 /**
2330  * pci_enable_acs - enable ACS if hardware support it
2331  * @dev: the PCI device
2332  */
2333 void pci_enable_acs(struct pci_dev *dev)
2334 {
2335 	int pos;
2336 	u16 cap;
2337 	u16 ctrl;
2338 
2339 	if (!pci_acs_enable)
2340 		return;
2341 
2342 	if (!pci_is_pcie(dev))
2343 		return;
2344 
2345 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2346 	if (!pos)
2347 		return;
2348 
2349 	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2350 	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2351 
2352 	/* Source Validation */
2353 	ctrl |= (cap & PCI_ACS_SV);
2354 
2355 	/* P2P Request Redirect */
2356 	ctrl |= (cap & PCI_ACS_RR);
2357 
2358 	/* P2P Completion Redirect */
2359 	ctrl |= (cap & PCI_ACS_CR);
2360 
2361 	/* Upstream Forwarding */
2362 	ctrl |= (cap & PCI_ACS_UF);
2363 
2364 	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2365 }
2366 
2367 /**
2368  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2369  * @dev: the PCI device
2370  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2371  *
2372  * Perform INTx swizzling for a device behind one level of bridge.  This is
2373  * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2374  * behind bridges on add-in cards.  For devices with ARI enabled, the slot
2375  * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2376  * the PCI Express Base Specification, Revision 2.1)
2377  */
2378 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2379 {
2380 	int slot;
2381 
2382 	if (pci_ari_enabled(dev->bus))
2383 		slot = 0;
2384 	else
2385 		slot = PCI_SLOT(dev->devfn);
2386 
2387 	return (((pin - 1) + slot) % 4) + 1;
2388 }
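
/*
 * Worked example: a device in slot 3 asserting INTB (pin 2) behind one
 * bridge yields (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA on the bridge's
 * primary side.
 */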
2389 
2390 int
2391 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2392 {
2393 	u8 pin;
2394 
2395 	pin = dev->pin;
2396 	if (!pin)
2397 		return -1;
2398 
2399 	while (!pci_is_root_bus(dev->bus)) {
2400 		pin = pci_swizzle_interrupt_pin(dev, pin);
2401 		dev = dev->bus->self;
2402 	}
2403 	*bridge = dev;
2404 	return pin;
2405 }
2406 
2407 /**
2408  * pci_common_swizzle - swizzle INTx all the way to root bridge
2409  * @dev: the PCI device
2410  * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2411  *
2412  * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
2413  * bridges all the way up to a PCI root bus.
2414  */
2415 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2416 {
2417 	u8 pin = *pinp;
2418 
2419 	while (!pci_is_root_bus(dev->bus)) {
2420 		pin = pci_swizzle_interrupt_pin(dev, pin);
2421 		dev = dev->bus->self;
2422 	}
2423 	*pinp = pin;
2424 	return PCI_SLOT(dev->devfn);
2425 }
2426 
2427 /**
2428  *	pci_release_region - Release a PCI BAR
2429  *	@pdev: PCI device whose resources were previously reserved by pci_request_region
2430  *	@bar: BAR to release
2431  *
2432  *	Releases the PCI I/O and memory resources previously reserved by a
2433  *	successful call to pci_request_region.  Call this function only
2434  *	after all use of the PCI regions has ceased.
2435  */
2436 void pci_release_region(struct pci_dev *pdev, int bar)
2437 {
2438 	struct pci_devres *dr;
2439 
2440 	if (pci_resource_len(pdev, bar) == 0)
2441 		return;
2442 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2443 		release_region(pci_resource_start(pdev, bar),
2444 				pci_resource_len(pdev, bar));
2445 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2446 		release_mem_region(pci_resource_start(pdev, bar),
2447 				pci_resource_len(pdev, bar));
2448 
2449 	dr = find_pci_dr(pdev);
2450 	if (dr)
2451 		dr->region_mask &= ~(1 << bar);
2452 }
2453 
2454 /**
2455  *	__pci_request_region - Reserve PCI I/O and memory resource
2456  *	@pdev: PCI device whose resources are to be reserved
2457  *	@bar: BAR to be reserved
2458  *	@res_name: Name to be associated with resource.
2459  *	@exclusive: whether the region access is exclusive or not
2460  *
2461  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2462  *	being reserved by owner @res_name.  Do not access any
2463  *	address inside the PCI regions unless this call returns
2464  *	successfully.
2465  *
2466  *	If @exclusive is set, then the region is marked so that userspace
2467  *	is explicitly not allowed to map the resource via /dev/mem or
2468  * 	sysfs MMIO access.
2469  *
2470  *	Returns 0 on success, or %EBUSY on error.  A warning
2471  *	message is also printed on failure.
2472  */
2473 static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2474 									int exclusive)
2475 {
2476 	struct pci_devres *dr;
2477 
2478 	if (pci_resource_len(pdev, bar) == 0)
2479 		return 0;
2480 
2481 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2482 		if (!request_region(pci_resource_start(pdev, bar),
2483 			    pci_resource_len(pdev, bar), res_name))
2484 			goto err_out;
2485 	}
2486 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2487 		if (!__request_mem_region(pci_resource_start(pdev, bar),
2488 					pci_resource_len(pdev, bar), res_name,
2489 					exclusive))
2490 			goto err_out;
2491 	}
2492 
2493 	dr = find_pci_dr(pdev);
2494 	if (dr)
2495 		dr->region_mask |= 1 << bar;
2496 
2497 	return 0;
2498 
2499 err_out:
2500 	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2501 		 &pdev->resource[bar]);
2502 	return -EBUSY;
2503 }
2504 
2505 /**
2506  *	pci_request_region - Reserve PCI I/O and memory resource
2507  *	@pdev: PCI device whose resources are to be reserved
2508  *	@bar: BAR to be reserved
2509  *	@res_name: Name to be associated with resource
2510  *
2511  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2512  *	being reserved by owner @res_name.  Do not access any
2513  *	address inside the PCI regions unless this call returns
2514  *	successfully.
2515  *
2516  *	Returns 0 on success, or %EBUSY on error.  A warning
2517  *	message is also printed on failure.
2518  */
2519 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2520 {
2521 	return __pci_request_region(pdev, bar, res_name, 0);
2522 }
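
/*
 * Illustrative sketch: the usual probe-time pattern in a hypothetical
 * driver is to reserve a BAR before mapping it and to drop the
 * reservation on the error path (and again in .remove):
 *
 *	err = pci_request_region(pdev, 0, "mydrv");
 *	if (err)
 *		return err;
 *	regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs) {
 *		pci_release_region(pdev, 0);
 *		return -ENOMEM;
 *	}
 */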
2523 
2524 /**
2525  *	pci_request_region_exclusive - Reserve PCI I/O and memory resource
2526  *	@pdev: PCI device whose resources are to be reserved
2527  *	@bar: BAR to be reserved
2528  *	@res_name: Name to be associated with resource.
2529  *
2530  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2531  *	being reserved by owner @res_name.  Do not access any
2532  *	address inside the PCI regions unless this call returns
2533  *	successfully.
2534  *
2535  *	Returns 0 on success, or %EBUSY on error.  A warning
2536  *	message is also printed on failure.
2537  *
2538  *	The key difference that _exclusive makes is that userspace is
2539  *	explicitly not allowed to map the resource via /dev/mem or
2540  * 	sysfs.
2541  */
2542 int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2543 {
2544 	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2545 }
2546 /**
2547  * pci_release_selected_regions - Release selected PCI I/O and memory resources
2548  * @pdev: PCI device whose resources were previously reserved
2549  * @bars: Bitmask of BARs to be released
2550  *
2551  * Release selected PCI I/O and memory resources previously reserved.
2552  * Call this function only after all use of the PCI regions has ceased.
2553  */
2554 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2555 {
2556 	int i;
2557 
2558 	for (i = 0; i < 6; i++)
2559 		if (bars & (1 << i))
2560 			pci_release_region(pdev, i);
2561 }
2562 
2563 int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2564 				 const char *res_name, int excl)
2565 {
2566 	int i;
2567 
2568 	for (i = 0; i < 6; i++)
2569 		if (bars & (1 << i))
2570 			if (__pci_request_region(pdev, i, res_name, excl))
2571 				goto err_out;
2572 	return 0;
2573 
2574 err_out:
2575 	while (--i >= 0)
2576 		if (bars & (1 << i))
2577 			pci_release_region(pdev, i);
2578 
2579 	return -EBUSY;
2580 }
2581 
2582 
2583 /**
2584  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2585  * @pdev: PCI device whose resources are to be reserved
2586  * @bars: Bitmask of BARs to be requested
2587  * @res_name: Name to be associated with resource
2588  */
2589 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2590 				 const char *res_name)
2591 {
2592 	return __pci_request_selected_regions(pdev, bars, res_name, 0);
2593 }
2594 
2595 int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2596 				 int bars, const char *res_name)
2597 {
2598 	return __pci_request_selected_regions(pdev, bars, res_name,
2599 			IORESOURCE_EXCLUSIVE);
2600 }
2601 
2602 /**
2603  *	pci_release_regions - Release reserved PCI I/O and memory resources
2604  *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
2605  *
2606  *	Releases all PCI I/O and memory resources previously reserved by a
2607  *	successful call to pci_request_regions.  Call this function only
2608  *	after all use of the PCI regions has ceased.
2609  */
2610 
2611 void pci_release_regions(struct pci_dev *pdev)
2612 {
2613 	pci_release_selected_regions(pdev, (1 << 6) - 1);
2614 }
2615 
2616 /**
2617  *	pci_request_regions - Reserve PCI I/O and memory resources
2618  *	@pdev: PCI device whose resources are to be reserved
2619  *	@res_name: Name to be associated with resource.
2620  *
2621  *	Mark all PCI regions associated with PCI device @pdev as
2622  *	being reserved by owner @res_name.  Do not access any
2623  *	address inside the PCI regions unless this call returns
2624  *	successfully.
2625  *
2626  *	Returns 0 on success, or %EBUSY on error.  A warning
2627  *	message is also printed on failure.
2628  */
2629 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2630 {
2631 	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2632 }
2633 
2634 /**
2635  *	pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2636  *	@pdev: PCI device whose resources are to be reserved
2637  *	@res_name: Name to be associated with resource.
2638  *
2639  *	Mark all PCI regions associated with PCI device @pdev as
2640  *	being reserved by owner @res_name.  Do not access any
2641  *	address inside the PCI regions unless this call returns
2642  *	successfully.
2643  *
2644  *	pci_request_regions_exclusive() will mark the region so that
2645  * 	/dev/mem and the sysfs MMIO access will not be allowed.
2646  *
2647  *	Returns 0 on success, or %EBUSY on error.  A warning
2648  *	message is also printed on failure.
2649  */
2650 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2651 {
2652 	return pci_request_selected_regions_exclusive(pdev,
2653 					((1 << 6) - 1), res_name);
2654 }
2655 
2656 static void __pci_set_master(struct pci_dev *dev, bool enable)
2657 {
2658 	u16 old_cmd, cmd;
2659 
2660 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2661 	if (enable)
2662 		cmd = old_cmd | PCI_COMMAND_MASTER;
2663 	else
2664 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
2665 	if (cmd != old_cmd) {
2666 		dev_dbg(&dev->dev, "%s bus mastering\n",
2667 			enable ? "enabling" : "disabling");
2668 		pci_write_config_word(dev, PCI_COMMAND, cmd);
2669 	}
2670 	dev->is_busmaster = enable;
2671 }
2672 
2673 /**
2674  * pcibios_set_master - enable PCI bus-mastering for device dev
2675  * @dev: the PCI device to enable
2676  *
2677  * Enables PCI bus-mastering for the device.  This is the default
2678  * implementation.  Architecture specific implementations can override
2679  * this if necessary.
2680  */
2681 void __weak pcibios_set_master(struct pci_dev *dev)
2682 {
2683 	u8 lat;
2684 
2685 	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2686 	if (pci_is_pcie(dev))
2687 		return;
2688 
2689 	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2690 	if (lat < 16)
2691 		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2692 	else if (lat > pcibios_max_latency)
2693 		lat = pcibios_max_latency;
2694 	else
2695 		return;
2696 	dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2697 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2698 }
2699 
2700 /**
2701  * pci_set_master - enables bus-mastering for device dev
2702  * @dev: the PCI device to enable
2703  *
2704  * Enables bus-mastering on the device and calls pcibios_set_master()
2705  * to do the needed arch specific settings.
2706  */
2707 void pci_set_master(struct pci_dev *dev)
2708 {
2709 	__pci_set_master(dev, true);
2710 	pcibios_set_master(dev);
2711 }
2712 
2713 /**
2714  * pci_clear_master - disables bus-mastering for device dev
2715  * @dev: the PCI device to disable
2716  */
2717 void pci_clear_master(struct pci_dev *dev)
2718 {
2719 	__pci_set_master(dev, false);
2720 }
2721 
2722 /**
2723  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2724  * @dev: the PCI device for which MWI is to be enabled
2725  *
2726  * Helper function for pci_set_mwi.
2727  * Originally copied from drivers/net/acenic.c.
2728  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2729  *
2730  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2731  */
2732 int pci_set_cacheline_size(struct pci_dev *dev)
2733 {
2734 	u8 cacheline_size;
2735 
2736 	if (!pci_cache_line_size)
2737 		return -EINVAL;
2738 
2739 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2740 	   equal to or a multiple of the right value. */
2741 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2742 	if (cacheline_size >= pci_cache_line_size &&
2743 	    (cacheline_size % pci_cache_line_size) == 0)
2744 		return 0;
2745 
2746 	/* Write the correct value. */
2747 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2748 	/* Read it back. */
2749 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2750 	if (cacheline_size == pci_cache_line_size)
2751 		return 0;
2752 
2753 	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2754 		   "supported\n", pci_cache_line_size << 2);
2755 
2756 	return -EINVAL;
2757 }
2758 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
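
/*
 * Worked example: pci_cache_line_size is measured in 32-bit words, so a
 * CPU with 64-byte cache lines uses 64 >> 2 = 16.  A device register
 * already reading 32 would be accepted above, being a multiple of 16.
 */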
2759 
2760 #ifdef PCI_DISABLE_MWI
2761 int pci_set_mwi(struct pci_dev *dev)
2762 {
2763 	return 0;
2764 }
2765 
2766 int pci_try_set_mwi(struct pci_dev *dev)
2767 {
2768 	return 0;
2769 }
2770 
2771 void pci_clear_mwi(struct pci_dev *dev)
2772 {
2773 }
2774 
2775 #else
2776 
2777 /**
2778  * pci_set_mwi - enables memory-write-invalidate PCI transaction
2779  * @dev: the PCI device for which MWI is enabled
2780  *
2781  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2782  *
2783  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2784  */
2785 int
2786 pci_set_mwi(struct pci_dev *dev)
2787 {
2788 	int rc;
2789 	u16 cmd;
2790 
2791 	rc = pci_set_cacheline_size(dev);
2792 	if (rc)
2793 		return rc;
2794 
2795 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2796 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2797 		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2798 		cmd |= PCI_COMMAND_INVALIDATE;
2799 		pci_write_config_word(dev, PCI_COMMAND, cmd);
2800 	}
2801 
2802 	return 0;
2803 }
2804 
2805 /**
2806  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2807  * @dev: the PCI device for which MWI is enabled
2808  *
2809  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2810  * Callers are not required to check the return value.
2811  *
2812  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2813  */
2814 int pci_try_set_mwi(struct pci_dev *dev)
2815 {
2816 	int rc = pci_set_mwi(dev);
2817 	return rc;
2818 }
2819 
2820 /**
2821  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2822  * @dev: the PCI device to disable
2823  *
2824  * Disables PCI Memory-Write-Invalidate transaction on the device
2825  */
2826 void
2827 pci_clear_mwi(struct pci_dev *dev)
2828 {
2829 	u16 cmd;
2830 
2831 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2832 	if (cmd & PCI_COMMAND_INVALIDATE) {
2833 		cmd &= ~PCI_COMMAND_INVALIDATE;
2834 		pci_write_config_word(dev, PCI_COMMAND, cmd);
2835 	}
2836 }
2837 #endif /* ! PCI_DISABLE_MWI */
2838 
2839 /**
2840  * pci_intx - enables/disables PCI INTx for device dev
2841  * @pdev: the PCI device to operate on
2842  * @enable: boolean: whether to enable or disable PCI INTx
2843  *
2844  * Enables/disables PCI INTx for device dev
2845  */
2846 void
2847 pci_intx(struct pci_dev *pdev, int enable)
2848 {
2849 	u16 pci_command, new;
2850 
2851 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2852 
2853 	if (enable) {
2854 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2855 	} else {
2856 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
2857 	}
2858 
2859 	if (new != pci_command) {
2860 		struct pci_devres *dr;
2861 
2862 		pci_write_config_word(pdev, PCI_COMMAND, new);
2863 
2864 		dr = find_pci_dr(pdev);
2865 		if (dr && !dr->restore_intx) {
2866 			dr->restore_intx = 1;
2867 			dr->orig_intx = !enable;
2868 		}
2869 	}
2870 }
2871 
2872 /**
2873  * pci_intx_mask_supported - probe for INTx masking support
2874  * @dev: the PCI device to operate on
2875  *
2876  * Check if the device dev supports INTx masking via the config space
2877  * command word.
2878  */
2879 bool pci_intx_mask_supported(struct pci_dev *dev)
2880 {
2881 	bool mask_supported = false;
2882 	u16 orig, new;
2883 
2884 	pci_cfg_access_lock(dev);
2885 
2886 	pci_read_config_word(dev, PCI_COMMAND, &orig);
2887 	pci_write_config_word(dev, PCI_COMMAND,
2888 			      orig ^ PCI_COMMAND_INTX_DISABLE);
2889 	pci_read_config_word(dev, PCI_COMMAND, &new);
2890 
2891 	/*
2892 	 * There's no way to protect against hardware bugs or detect them
2893 	 * reliably, but as long as we know what the value should be, let's
2894 	 * go ahead and check it.
2895 	 */
2896 	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2897 		dev_err(&dev->dev, "Command register changed from "
2898 			"0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2899 	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2900 		mask_supported = true;
2901 		pci_write_config_word(dev, PCI_COMMAND, orig);
2902 	}
2903 
2904 	pci_cfg_access_unlock(dev);
2905 	return mask_supported;
2906 }
2907 EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2908 
2909 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2910 {
2911 	struct pci_bus *bus = dev->bus;
2912 	bool mask_updated = true;
2913 	u32 cmd_status_dword;
2914 	u16 origcmd, newcmd;
2915 	unsigned long flags;
2916 	bool irq_pending;
2917 
2918 	/*
2919 	 * We do a single dword read to retrieve both command and status.
2920 	 * Document assumptions that make this possible.
2921 	 */
2922 	BUILD_BUG_ON(PCI_COMMAND % 4);
2923 	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2924 
2925 	raw_spin_lock_irqsave(&pci_lock, flags);
2926 
2927 	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2928 
2929 	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2930 
2931 	/*
2932 	 * Check interrupt status register to see whether our device
2933 	 * triggered the interrupt (when masking) or the next IRQ is
2934 	 * already pending (when unmasking).
2935 	 */
2936 	if (mask != irq_pending) {
2937 		mask_updated = false;
2938 		goto done;
2939 	}
2940 
2941 	origcmd = cmd_status_dword;
2942 	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
2943 	if (mask)
2944 		newcmd |= PCI_COMMAND_INTX_DISABLE;
2945 	if (newcmd != origcmd)
2946 		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
2947 
2948 done:
2949 	raw_spin_unlock_irqrestore(&pci_lock, flags);
2950 
2951 	return mask_updated;
2952 }
2953 
2954 /**
2955  * pci_check_and_mask_intx - mask INTx on pending interrupt
2956  * @dev: the PCI device to operate on
2957  *
2958  * Check if the device dev has its INTx line asserted, mask it and
2959  * return true in that case. False is returned if no interrupt was
2960  * pending.
2961  */
2962 bool pci_check_and_mask_intx(struct pci_dev *dev)
2963 {
2964 	return pci_check_and_set_intx_mask(dev, true);
2965 }
2966 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2967 
2968 /**
2969  * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
2970  * @dev: the PCI device to operate on
2971  *
2972  * Check if the device dev has its INTx line asserted, unmask it if not
2973  * and return true. False is returned and the mask remains active if
2974  * there was still an interrupt pending.
2975  */
2976 bool pci_check_and_unmask_intx(struct pci_dev *dev)
2977 {
2978 	return pci_check_and_set_intx_mask(dev, false);
2979 }
2980 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
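
/*
 * Illustrative sketch: combined with pci_intx_mask_supported(), these
 * helpers let a hypothetical driver service a shared INTx line without
 * any device-specific interrupt mask register; the line stays masked
 * until the threaded handler calls pci_check_and_unmask_intx():
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct pci_dev *pdev = data;
 *
 *		if (!pci_check_and_mask_intx(pdev))
 *			return IRQ_NONE;
 *
 *		return IRQ_WAKE_THREAD;
 *	}
 */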
2981 
2982 /**
2983  * pci_msi_off - disables any msi or msix capabilities
2984  * @dev: the PCI device to operate on
2985  *
2986  * If you want to use msi see pci_enable_msi and friends.
2987  * This is a lower level primitive that allows us to disable
2988  * msi operation at the device level.
2989  */
2990 void pci_msi_off(struct pci_dev *dev)
2991 {
2992 	int pos;
2993 	u16 control;
2994 
2995 	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2996 	if (pos) {
2997 		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2998 		control &= ~PCI_MSI_FLAGS_ENABLE;
2999 		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3000 	}
3001 	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3002 	if (pos) {
3003 		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3004 		control &= ~PCI_MSIX_FLAGS_ENABLE;
3005 		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3006 	}
3007 }
3008 EXPORT_SYMBOL_GPL(pci_msi_off);
3009 
3010 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3011 {
3012 	return dma_set_max_seg_size(&dev->dev, size);
3013 }
3014 EXPORT_SYMBOL(pci_set_dma_max_seg_size);
3015 
3016 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3017 {
3018 	return dma_set_seg_boundary(&dev->dev, mask);
3019 }
3020 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
3021 
3022 static int pcie_flr(struct pci_dev *dev, int probe)
3023 {
3024 	int i;
3025 	int pos;
3026 	u32 cap;
3027 	u16 status, control;
3028 
3029 	pos = pci_pcie_cap(dev);
3030 	if (!pos)
3031 		return -ENOTTY;
3032 
3033 	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
3034 	if (!(cap & PCI_EXP_DEVCAP_FLR))
3035 		return -ENOTTY;
3036 
3037 	if (probe)
3038 		return 0;
3039 
3040 	/* Wait for the Transaction Pending bit to clear */
3041 	for (i = 0; i < 4; i++) {
3042 		if (i)
3043 			msleep((1 << (i - 1)) * 100);
3044 
3045 		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
3046 		if (!(status & PCI_EXP_DEVSTA_TRPND))
3047 			goto clear;
3048 	}
3049 
3050 	dev_err(&dev->dev, "transaction is not cleared; "
3051 			"proceeding with reset anyway\n");
3052 
3053 clear:
3054 	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
3055 	control |= PCI_EXP_DEVCTL_BCR_FLR;
3056 	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
3057 
3058 	msleep(100);
3059 
3060 	return 0;
3061 }
3062 
3063 static int pci_af_flr(struct pci_dev *dev, int probe)
3064 {
3065 	int i;
3066 	int pos;
3067 	u8 cap;
3068 	u8 status;
3069 
3070 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3071 	if (!pos)
3072 		return -ENOTTY;
3073 
3074 	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3075 	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3076 		return -ENOTTY;
3077 
3078 	if (probe)
3079 		return 0;
3080 
3081 	/* Wait for the Transaction Pending bit to clear */
3082 	for (i = 0; i < 4; i++) {
3083 		if (i)
3084 			msleep((1 << (i - 1)) * 100);
3085 
3086 		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3087 		if (!(status & PCI_AF_STATUS_TP))
3088 			goto clear;
3089 	}
3090 
3091 	dev_err(&dev->dev, "transaction is not cleared; "
3092 			"proceeding with reset anyway\n");
3093 
3094 clear:
3095 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3096 	msleep(100);
3097 
3098 	return 0;
3099 }
3100 
3101 /**
3102  * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3103  * @dev: Device to reset.
3104  * @probe: If set, only check if the device can be reset this way.
3105  *
3106  * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3107  * unset, it will be reinitialized internally when going from PCI_D3hot to
3108  * PCI_D0.  If that's the case and the device is not in a low-power state
3109  * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3110  *
3111  * NOTE: This causes the caller to sleep for twice the device power transition
3112  * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3113  * by default (i.e. unless the @dev's d3_delay field has a different value).
3114  * Moreover, only devices in D0 can be reset by this function.
3115  */
3116 static int pci_pm_reset(struct pci_dev *dev, int probe)
3117 {
3118 	u16 csr;
3119 
3120 	if (!dev->pm_cap)
3121 		return -ENOTTY;
3122 
3123 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3124 	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3125 		return -ENOTTY;
3126 
3127 	if (probe)
3128 		return 0;
3129 
3130 	if (dev->current_state != PCI_D0)
3131 		return -EINVAL;
3132 
3133 	csr &= ~PCI_PM_CTRL_STATE_MASK;
3134 	csr |= PCI_D3hot;
3135 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3136 	pci_dev_d3_sleep(dev);
3137 
3138 	csr &= ~PCI_PM_CTRL_STATE_MASK;
3139 	csr |= PCI_D0;
3140 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3141 	pci_dev_d3_sleep(dev);
3142 
3143 	return 0;
3144 }
3145 
3146 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3147 {
3148 	u16 ctrl;
3149 	struct pci_dev *pdev;
3150 
3151 	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
3152 		return -ENOTTY;
3153 
3154 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3155 		if (pdev != dev)
3156 			return -ENOTTY;
3157 
3158 	if (probe)
3159 		return 0;
3160 
3161 	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3162 	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3163 	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3164 	msleep(100);
3165 
3166 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3167 	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3168 	msleep(100);
3169 
3170 	return 0;
3171 }
3172 
3173 static int __pci_dev_reset(struct pci_dev *dev, int probe)
3174 {
3175 	int rc;
3176 
3177 	might_sleep();
3178 
3179 	rc = pci_dev_specific_reset(dev, probe);
3180 	if (rc != -ENOTTY)
3181 		goto done;
3182 
3183 	rc = pcie_flr(dev, probe);
3184 	if (rc != -ENOTTY)
3185 		goto done;
3186 
3187 	rc = pci_af_flr(dev, probe);
3188 	if (rc != -ENOTTY)
3189 		goto done;
3190 
3191 	rc = pci_pm_reset(dev, probe);
3192 	if (rc != -ENOTTY)
3193 		goto done;
3194 
3195 	rc = pci_parent_bus_reset(dev, probe);
3196 done:
3197 	return rc;
3198 }
3199 
3200 static int pci_dev_reset(struct pci_dev *dev, int probe)
3201 {
3202 	int rc;
3203 
3204 	if (!probe) {
3205 		pci_cfg_access_lock(dev);
3206 		/* block PM suspend, driver probe, etc. */
3207 		device_lock(&dev->dev);
3208 	}
3209 
3210 	rc = __pci_dev_reset(dev, probe);
3211 
3212 	if (!probe) {
3213 		device_unlock(&dev->dev);
3214 		pci_cfg_access_unlock(dev);
3215 	}
3216 	return rc;
3217 }
3218 /**
3219  * __pci_reset_function - reset a PCI device function
3220  * @dev: PCI device to reset
3221  *
3222  * Some devices allow an individual function to be reset without affecting
3223  * other functions in the same device.  The PCI device must be responsive
3224  * to PCI config space in order to use this function.
3225  *
3226  * The device function is presumed to be unused when this function is called.
3227  * Resetting the device will make the contents of PCI configuration space
3228  * random, so any caller of this must be prepared to reinitialise the
3229  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3230  * etc.
3231  *
3232  * Returns 0 if the device function was successfully reset or negative if the
3233  * device doesn't support resetting a single function.
3234  */
3235 int __pci_reset_function(struct pci_dev *dev)
3236 {
3237 	return pci_dev_reset(dev, 0);
3238 }
3239 EXPORT_SYMBOL_GPL(__pci_reset_function);
3240 
3241 /**
3242  * __pci_reset_function_locked - reset a PCI device function while holding
3243  * the @dev mutex lock.
3244  * @dev: PCI device to reset
3245  *
3246  * Some devices allow an individual function to be reset without affecting
3247  * other functions in the same device.  The PCI device must be responsive
3248  * to PCI config space in order to use this function.
3249  *
3250  * The device function is presumed to be unused and the caller is holding
3251  * the device mutex lock when this function is called.
3252  * Resetting the device will make the contents of PCI configuration space
3253  * random, so any caller of this must be prepared to reinitialise the
3254  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3255  * etc.
3256  *
3257  * Returns 0 if the device function was successfully reset or negative if the
3258  * device doesn't support resetting a single function.
3259  */
3260 int __pci_reset_function_locked(struct pci_dev *dev)
3261 {
3262 	return __pci_dev_reset(dev, 0);
3263 }
3264 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3265 
3266 /**
3267  * pci_probe_reset_function - check whether the device can be safely reset
3268  * @dev: PCI device to reset
3269  *
3270  * Some devices allow an individual function to be reset without affecting
3271  * other functions in the same device.  The PCI device must be responsive
3272  * to PCI config space in order to use this function.
3273  *
3274  * Returns 0 if the device function can be reset or negative if the
3275  * device doesn't support resetting a single function.
3276  */
3277 int pci_probe_reset_function(struct pci_dev *dev)
3278 {
3279 	return pci_dev_reset(dev, 1);
3280 }
3281 
3282 /**
3283  * pci_reset_function - quiesce and reset a PCI device function
3284  * @dev: PCI device to reset
3285  *
3286  * Some devices allow an individual function to be reset without affecting
3287  * other functions in the same device.  The PCI device must be responsive
3288  * to PCI config space in order to use this function.
3289  *
3290  * This function does not just reset the PCI portion of a device, but
3291  * clears all the state associated with the device.  This function differs
3292  * from __pci_reset_function in that it saves and restores device state
3293  * over the reset.
3294  *
3295  * Returns 0 if the device function was successfully reset or negative if the
3296  * device doesn't support resetting a single function.
3297  */
3298 int pci_reset_function(struct pci_dev *dev)
3299 {
3300 	int rc;
3301 
3302 	rc = pci_dev_reset(dev, 1);
3303 	if (rc)
3304 		return rc;
3305 
3306 	pci_save_state(dev);
3307 
3308 	/*
3309 	 * both INTx and MSI are disabled after the Interrupt Disable bit
3310 	 * is set and the Bus Master bit is cleared.
3311 	 */
3312 	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3313 
3314 	rc = pci_dev_reset(dev, 0);
3315 
3316 	pci_restore_state(dev);
3317 
3318 	return rc;
3319 }
3320 EXPORT_SYMBOL_GPL(pci_reset_function);
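
/*
 * Illustrative sketch: since pci_reset_function() saves and restores
 * config space around the reset, a hypothetical driver only needs to
 * quiesce its own activity first:
 *
 *	err = pci_reset_function(pdev);
 *	if (err)
 *		dev_warn(&pdev->dev, "no reset method supported\n");
 */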
3321 
3322 /**
3323  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3324  * @dev: PCI device to query
3325  *
3326  * Returns mmrbc: maximum designed memory read count in bytes
3327  *    or appropriate error value.
3328  */
3329 int pcix_get_max_mmrbc(struct pci_dev *dev)
3330 {
3331 	int cap;
3332 	u32 stat;
3333 
3334 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3335 	if (!cap)
3336 		return -EINVAL;
3337 
3338 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3339 		return -EINVAL;
3340 
3341 	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3342 }
3343 EXPORT_SYMBOL(pcix_get_max_mmrbc);
3344 
3345 /**
3346  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3347  * @dev: PCI device to query
3348  *
3349  * Returns mmrbc: maximum memory read count in bytes
3350  *    or appropriate error value.
3351  */
3352 int pcix_get_mmrbc(struct pci_dev *dev)
3353 {
3354 	int cap;
3355 	u16 cmd;
3356 
3357 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3358 	if (!cap)
3359 		return -EINVAL;
3360 
3361 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3362 		return -EINVAL;
3363 
3364 	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3365 }
3366 EXPORT_SYMBOL(pcix_get_mmrbc);
3367 
3368 /**
3369  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3370  * @dev: PCI device to query
3371  * @mmrbc: maximum memory read count in bytes
3372  *    valid values are 512, 1024, 2048, 4096
3373  *
3374  * If possible sets maximum memory read byte count; some bridges have errata
3375  * that prevent this.
3376  */
3377 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3378 {
3379 	int cap;
3380 	u32 stat, v, o;
3381 	u16 cmd;
3382 
3383 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
3384 		return -EINVAL;
3385 
3386 	v = ffs(mmrbc) - 10;
3387 
3388 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3389 	if (!cap)
3390 		return -EINVAL;
3391 
3392 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3393 		return -EINVAL;
3394 
3395 	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3396 		return -E2BIG;
3397 
3398 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3399 		return -EINVAL;
3400 
3401 	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3402 	if (o != v) {
3403 		if (v > o && dev->bus &&
3404 		   (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3405 			return -EIO;
3406 
3407 		cmd &= ~PCI_X_CMD_MAX_READ;
3408 		cmd |= v << 2;
3409 		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3410 			return -EIO;
3411 	}
3412 	return 0;
3413 }
3414 EXPORT_SYMBOL(pcix_set_mmrbc);
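
/*
 * Worked example: the two-bit MAX_READ field encodes 512 << v bytes, so
 * for mmrbc = 2048, ffs(2048) = 12 gives v = 2; the write above only
 * happens if the device's PCI-X status register advertises at least
 * that much.
 */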
3415 
3416 /**
3417  * pcie_get_readrq - get PCI Express read request size
3418  * @dev: PCI device to query
3419  *
3420  * Returns maximum memory read request in bytes
3421  *    or appropriate error value.
3422  */
3423 int pcie_get_readrq(struct pci_dev *dev)
3424 {
3425 	int ret, cap;
3426 	u16 ctl;
3427 
3428 	cap = pci_pcie_cap(dev);
3429 	if (!cap)
3430 		return -EINVAL;
3431 
3432 	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3433 	if (!ret)
3434 		ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3435 
3436 	return ret;
3437 }
3438 EXPORT_SYMBOL(pcie_get_readrq);
3439 
3440 /**
3441  * pcie_set_readrq - set PCI Express maximum memory read request
3442  * @dev: PCI device to query
3443  * @rq: maximum memory read count in bytes
3444  *    valid values are 128, 256, 512, 1024, 2048, 4096
3445  *
3446  * If possible sets maximum memory read request in bytes
3447  */
3448 int pcie_set_readrq(struct pci_dev *dev, int rq)
3449 {
3450 	int cap, err = -EINVAL;
3451 	u16 ctl, v;
3452 
3453 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3454 		goto out;
3455 
3456 	cap = pci_pcie_cap(dev);
3457 	if (!cap)
3458 		goto out;
3459 
3460 	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3461 	if (err)
3462 		goto out;
3463 	/*
3464 	 * If using the "performance" PCIe config, we clamp the
3465 	 * read rq size to the max packet size to prevent the
3466 	 * host bridge generating requests larger than we can
3467 	 * cope with
3468 	 */
3469 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3470 		int mps = pcie_get_mps(dev);
3471 
3472 		if (mps < 0)
3473 			return mps;
3474 		if (mps < rq)
3475 			rq = mps;
3476 	}
3477 
3478 	v = (ffs(rq) - 8) << 12;
3479 
3480 	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3481 		ctl &= ~PCI_EXP_DEVCTL_READRQ;
3482 		ctl |= v;
3483 		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3484 	}
3485 
3486 out:
3487 	return err;
3488 }
3489 EXPORT_SYMBOL(pcie_set_readrq);
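
/*
 * Illustrative sketch: a hypothetical driver issuing many small DMA
 * reads might shrink its maximum read request size during probe:
 *
 *	err = pcie_set_readrq(pdev, 512);
 *
 * Under PCIE_BUS_PERFORMANCE the value may be clamped to the device's
 * maximum payload size, as explained in the function above.
 */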
3490 
3491 /**
3492  * pcie_get_mps - get PCI Express maximum payload size
3493  * @dev: PCI device to query
3494  *
3495  * Returns maximum payload size in bytes
3496  *    or appropriate error value.
3497  */
3498 int pcie_get_mps(struct pci_dev *dev)
3499 {
3500 	int ret, cap;
3501 	u16 ctl;
3502 
3503 	cap = pci_pcie_cap(dev);
3504 	if (!cap)
3505 		return -EINVAL;
3506 
3507 	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3508 	if (!ret)
3509 		ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3510 
3511 	return ret;
3512 }
3513 
3514 /**
3515  * pcie_set_mps - set PCI Express maximum payload size
3516  * @dev: PCI device to query
3517  * @mps: maximum payload size in bytes
3518  *    valid values are 128, 256, 512, 1024, 2048, 4096
3519  *
3520  * If possible sets maximum payload size
3521  */
3522 int pcie_set_mps(struct pci_dev *dev, int mps)
3523 {
3524 	int cap, err = -EINVAL;
3525 	u16 ctl, v;
3526 
3527 	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3528 		goto out;
3529 
3530 	v = ffs(mps) - 8;
3531 	if (v > dev->pcie_mpss)
3532 		goto out;
3533 	v <<= 5;
3534 
3535 	cap = pci_pcie_cap(dev);
3536 	if (!cap)
3537 		goto out;
3538 
3539 	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3540 	if (err)
3541 		goto out;
3542 
3543 	if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3544 		ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3545 		ctl |= v;
3546 		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3547 	}
3548 out:
3549 	return err;
3550 }
3551 
3552 /**
3553  * pci_select_bars - Make BAR mask from the type of resource
3554  * @dev: the PCI device for which BAR mask is made
3555  * @flags: resource type mask to be selected
3556  *
3557  * This helper routine makes a BAR mask from the type of resource.
3558  */
3559 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3560 {
3561 	int i, bars = 0;
3562 	for (i = 0; i < PCI_NUM_RESOURCES; i++)
3563 		if (pci_resource_flags(dev, i) & flags)
3564 			bars |= (1 << i);
3565 	return bars;
3566 }
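
/*
 * Illustrative sketch: pci_select_bars() pairs naturally with
 * pci_request_selected_regions() when a hypothetical driver only cares
 * about its memory BARs:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	int err = pci_request_selected_regions(pdev, bars, "mydrv");
 */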
3567 
3568 /**
3569  * pci_resource_bar - get position of the BAR associated with a resource
3570  * @dev: the PCI device
3571  * @resno: the resource number
3572  * @type: the BAR type to be filled in
3573  *
3574  * Returns BAR position in config space, or 0 if the BAR is invalid.
3575  */
3576 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3577 {
3578 	int reg;
3579 
3580 	if (resno < PCI_ROM_RESOURCE) {
3581 		*type = pci_bar_unknown;
3582 		return PCI_BASE_ADDRESS_0 + 4 * resno;
3583 	} else if (resno == PCI_ROM_RESOURCE) {
3584 		*type = pci_bar_mem32;
3585 		return dev->rom_base_reg;
3586 	} else if (resno < PCI_BRIDGE_RESOURCES) {
3587 		/* device specific resource */
3588 		reg = pci_iov_resource_bar(dev, resno, type);
3589 		if (reg)
3590 			return reg;
3591 	}
3592 
3593 	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3594 	return 0;
3595 }
3596 
3597 /* Some architectures require additional programming to enable VGA */
3598 static arch_set_vga_state_t arch_set_vga_state;
3599 
3600 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3601 {
3602 	arch_set_vga_state = func;	/* NULL disables */
3603 }
3604 
3605 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3606 		      unsigned int command_bits, u32 flags)
3607 {
3608 	if (arch_set_vga_state)
3609 		return arch_set_vga_state(dev, decode, command_bits,
3610 						flags);
3611 	return 0;
3612 }
3613 
3614 /**
3615  * pci_set_vga_state - set VGA decode state on device and parents if requested
3616  * @dev: the PCI device
3617  * @decode: true = enable decoding, false = disable decoding
3618  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3619  * @flags: traverse ancestors and change bridges
3620  * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
3621  */
3622 int pci_set_vga_state(struct pci_dev *dev, bool decode,
3623 		      unsigned int command_bits, u32 flags)
3624 {
3625 	struct pci_bus *bus;
3626 	struct pci_dev *bridge;
3627 	u16 cmd;
3628 	int rc;
3629 
3630 	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3631 
3632 	/* ARCH specific VGA enables */
3633 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3634 	if (rc)
3635 		return rc;
3636 
3637 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3638 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
3639 		if (decode)
3640 			cmd |= command_bits;
3641 		else
3642 			cmd &= ~command_bits;
3643 		pci_write_config_word(dev, PCI_COMMAND, cmd);
3644 	}
3645 
3646 	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3647 		return 0;
3648 
3649 	bus = dev->bus;
3650 	while (bus) {
3651 		bridge = bus->self;
3652 		if (bridge) {
3653 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3654 					     &cmd);
3655 			if (decode)
3656 				cmd |= PCI_BRIDGE_CTL_VGA;
3657 			else
3658 				cmd &= ~PCI_BRIDGE_CTL_VGA;
3659 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3660 					      cmd);
3661 		}
3662 		bus = bus->parent;
3663 	}
3664 	return 0;
3665 }
3666 
3667 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3668 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3669 static DEFINE_SPINLOCK(resource_alignment_lock);
3670 
3671 /**
3672  * pci_specified_resource_alignment - get resource alignment specified by user.
3673  * @dev: the PCI device to get
3674  *
3675  * RETURNS: Resource alignment if it is specified.
3676  *          Zero if it is not specified.
3677  */
3678 resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3679 {
3680 	int seg, bus, slot, func, align_order, count;
3681 	resource_size_t align = 0;
3682 	char *p;
3683 
3684 	spin_lock(&resource_alignment_lock);
3685 	p = resource_alignment_param;
3686 	while (*p) {
3687 		count = 0;
3688 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3689 							p[count] == '@') {
3690 			p += count + 1;
3691 		} else {
3692 			align_order = -1;
3693 		}
3694 		if (sscanf(p, "%x:%x:%x.%x%n",
3695 			&seg, &bus, &slot, &func, &count) != 4) {
3696 			seg = 0;
3697 			if (sscanf(p, "%x:%x.%x%n",
3698 					&bus, &slot, &func, &count) != 3) {
3699 				/* Invalid format */
3700 				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3701 					p);
3702 				break;
3703 			}
3704 		}
3705 		p += count;
3706 		if (seg == pci_domain_nr(dev->bus) &&
3707 			bus == dev->bus->number &&
3708 			slot == PCI_SLOT(dev->devfn) &&
3709 			func == PCI_FUNC(dev->devfn)) {
3710 			if (align_order == -1) {
3711 				align = PAGE_SIZE;
3712 			} else {
3713 				align = 1 << align_order;
3714 			}
3715 			/* Found */
3716 			break;
3717 		}
3718 		if (*p != ';' && *p != ',') {
3719 			/* End of param or invalid format */
3720 			break;
3721 		}
3722 		p++;
3723 	}
3724 	spin_unlock(&resource_alignment_lock);
3725 	return align;
3726 }
3727 
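/*
 * Editor's note: a worked example of the format parsed above.  With
 * "pci=resource_alignment=20@0000:00:02.0" on the command line, this
 * function returns 1 << 20 (1 MiB) for device 0000:00:02.0 and 0 for
 * every other device; omitting the "<order>@" prefix, as in
 * "pci=resource_alignment=0000:00:02.0", requests PAGE_SIZE alignment.
 * Multiple entries may be separated by ';' or ','.
 */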
3728 /**
3729  * pci_is_reassigndev - check if the specified PCI device is a reassignment target
3730  * @dev: the PCI device to check
3731  *
3732  * RETURNS: non-zero if the PCI device is a target device to reassign,
3733  *          zero otherwise.
3734  */
3735 int pci_is_reassigndev(struct pci_dev *dev)
3736 {
3737 	return (pci_specified_resource_alignment(dev) != 0);
3738 }
3739 
3740 /*
3741  * This function disables memory decoding and releases the memory
3742  * resources of the device named by the kernel boot parameter
3743  * 'pci=resource_alignment='.  It also rounds each resource size up to
3744  * the specified alignment.  Later on, the kernel will assign suitably
3745  * aligned memory resources back to the device.
3746  */
3747 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
3748 {
3749 	int i;
3750 	struct resource *r;
3751 	resource_size_t align, size;
3752 	u16 command;
3753 
3754 	if (!pci_is_reassigndev(dev))
3755 		return;
3756 
3757 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
3758 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
3759 		dev_warn(&dev->dev,
3760 			"Can't reassign resources to host bridge.\n");
3761 		return;
3762 	}
3763 
3764 	dev_info(&dev->dev,
3765 		"Disabling memory decoding and releasing memory resources.\n");
3766 	pci_read_config_word(dev, PCI_COMMAND, &command);
3767 	command &= ~PCI_COMMAND_MEMORY;
3768 	pci_write_config_word(dev, PCI_COMMAND, command);
3769 
3770 	align = pci_specified_resource_alignment(dev);
3771 	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
3772 		r = &dev->resource[i];
3773 		if (!(r->flags & IORESOURCE_MEM))
3774 			continue;
3775 		size = resource_size(r);
3776 		if (size < align) {
3777 			size = align;
3778 			dev_info(&dev->dev,
3779 				"Rounding up size of resource #%d to %#llx.\n",
3780 				i, (unsigned long long)size);
3781 		}
3782 		r->end = size - 1;
3783 		r->start = 0;
3784 	}
3785 	/* Need to disable the bridge's resource windows
3786 	 * so that the kernel can reassign new resource
3787 	 * windows later on.
3788 	 */
3789 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
3790 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
3791 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
3792 			r = &dev->resource[i];
3793 			if (!(r->flags & IORESOURCE_MEM))
3794 				continue;
3795 			r->end = resource_size(r) - 1;
3796 			r->start = 0;
3797 		}
3798 		pci_disable_bridge_window(dev);
3799 	}
3800 }
3801 
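/*
 * Editor's note: a short worked example of the rounding above.  With a
 * requested alignment of 1 MiB (align_order 20), a 16 KiB memory BAR is
 * grown so that r->start = 0 and r->end = 0xfffff, i.e. a 1 MiB size;
 * since BARs are assigned on boundaries of their size, the reassigned
 * resource then starts on a 1 MiB boundary.
 */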
3802 ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3803 {
3804 	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3805 		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3806 	spin_lock(&resource_alignment_lock);
3807 	strncpy(resource_alignment_param, buf, count);
3808 	resource_alignment_param[count] = '\0';
3809 	spin_unlock(&resource_alignment_lock);
3810 	return count;
3811 }
3812 
3813 ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3814 {
3815 	size_t count;
3816 	size_t count;

3817 	spin_lock(&resource_alignment_lock);
3818 	spin_unlock(&resource_alignment_lock);
3819 	return count;
3820 }
3821 
3822 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3823 {
3824 	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3825 }
3826 
3827 static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3828 					const char *buf, size_t count)
3829 {
3830 	return pci_set_resource_alignment_param(buf, count);
3831 }
3832 
3833 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3834 					pci_resource_alignment_store);
3835 
3836 static int __init pci_resource_alignment_sysfs_init(void)
3837 {
3838 	return bus_create_file(&pci_bus_type,
3839 					&bus_attr_resource_alignment);
3840 }
3841 
3842 late_initcall(pci_resource_alignment_sysfs_init);
3843 
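/*
 * Editor's sketch (hypothetical shell session): the bus attribute
 * created above appears as /sys/bus/pci/resource_alignment and accepts
 * the same format as the boot parameter, e.g.:
 *
 *	# echo "20@0000:00:02.0" > /sys/bus/pci/resource_alignment
 *	# cat /sys/bus/pci/resource_alignment
 *
 * Only devices scanned after the write observe the new alignment.
 */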
3844 static void __devinit pci_no_domains(void)
3845 {
3846 #ifdef CONFIG_PCI_DOMAINS
3847 	pci_domains_supported = 0;
3848 #endif
3849 }
3850 
3851 /**
3852  * pci_ext_cfg_avail - can we access extended PCI config space?
3853  * @dev: The PCI device of the root bridge.
3854  *
3855  * Returns 1 if we can access PCI extended config space (offsets
3856  * greater than 0xff). This is the default implementation. Architecture
3857  * implementations can override this.
3858  */
3859 int __weak pci_ext_cfg_avail(struct pci_dev *dev)
3860 {
3861 	return 1;
3862 }
3863 
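/*
 * Editor's sketch (hypothetical arch override, not from this file):
 * because the default above is weak, an architecture can supply its own
 * definition, e.g. gating extended config space on a firmware
 * capability:
 *
 *	int pci_ext_cfg_avail(struct pci_dev *dev)
 *	{
 *		return myarch_mmcfg_usable;
 *	}
 *
 * where "myarch_mmcfg_usable" stands in for whatever flag the platform
 * actually checks.
 */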
3864 void __weak pci_fixup_cardbus(struct pci_bus *bus)
3865 {
3866 }
3867 EXPORT_SYMBOL(pci_fixup_cardbus);
3868 
3869 static int __init pci_setup(char *str)
3870 {
3871 	while (str) {
3872 		char *k = strchr(str, ',');
3873 		if (k)
3874 			*k++ = 0;
3875 		if (*str && (str = pcibios_setup(str)) && *str) {
3876 			if (!strcmp(str, "nomsi")) {
3877 				pci_no_msi();
3878 			} else if (!strcmp(str, "noaer")) {
3879 				pci_no_aer();
3880 			} else if (!strncmp(str, "realloc=", 8)) {
3881 				pci_realloc_get_opt(str + 8);
3882 			} else if (!strncmp(str, "realloc", 7)) {
3883 				pci_realloc_get_opt("on");
3884 			} else if (!strcmp(str, "nodomains")) {
3885 				pci_no_domains();
3886 			} else if (!strncmp(str, "noari", 5)) {
3887 				pcie_ari_disabled = true;
3888 			} else if (!strncmp(str, "cbiosize=", 9)) {
3889 				pci_cardbus_io_size = memparse(str + 9, &str);
3890 			} else if (!strncmp(str, "cbmemsize=", 10)) {
3891 				pci_cardbus_mem_size = memparse(str + 10, &str);
3892 			} else if (!strncmp(str, "resource_alignment=", 19)) {
3893 				pci_set_resource_alignment_param(str + 19,
3894 							strlen(str + 19));
3895 			} else if (!strncmp(str, "ecrc=", 5)) {
3896 				pcie_ecrc_get_policy(str + 5);
3897 			} else if (!strncmp(str, "hpiosize=", 9)) {
3898 				pci_hotplug_io_size = memparse(str + 9, &str);
3899 			} else if (!strncmp(str, "hpmemsize=", 10)) {
3900 				pci_hotplug_mem_size = memparse(str + 10, &str);
3901 			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3902 				pcie_bus_config = PCIE_BUS_TUNE_OFF;
3903 			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
3904 				pcie_bus_config = PCIE_BUS_SAFE;
3905 			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
3906 				pcie_bus_config = PCIE_BUS_PERFORMANCE;
3907 			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3908 				pcie_bus_config = PCIE_BUS_PEER2PEER;
3909 			} else if (!strncmp(str, "pcie_scan_all", 13)) {
3910 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
3911 			} else {
3912 				printk(KERN_ERR "PCI: Unknown option `%s'\n",
3913 						str);
3914 			}
3915 		}
3916 		str = k;
3917 	}
3918 	return 0;
3919 }
3920 early_param("pci", pci_setup);
3921 
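/*
 * Editor's note: the options above combine on a single "pci=" boot
 * parameter, comma-separated, e.g. (hypothetical command line):
 *
 *	pci=nomsi,hpmemsize=8M,resource_alignment=20@0000:00:02.0
 *
 * Tokens that pcibios_setup() does not consume and this parser does
 * not recognize end up in the "Unknown option" message.
 */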
3922 EXPORT_SYMBOL(pci_reenable_device);
3923 EXPORT_SYMBOL(pci_enable_device_io);
3924 EXPORT_SYMBOL(pci_enable_device_mem);
3925 EXPORT_SYMBOL(pci_enable_device);
3926 EXPORT_SYMBOL(pcim_enable_device);
3927 EXPORT_SYMBOL(pcim_pin_device);
3928 EXPORT_SYMBOL(pci_disable_device);
3929 EXPORT_SYMBOL(pci_find_capability);
3930 EXPORT_SYMBOL(pci_bus_find_capability);
3931 EXPORT_SYMBOL(pci_release_regions);
3932 EXPORT_SYMBOL(pci_request_regions);
3933 EXPORT_SYMBOL(pci_request_regions_exclusive);
3934 EXPORT_SYMBOL(pci_release_region);
3935 EXPORT_SYMBOL(pci_request_region);
3936 EXPORT_SYMBOL(pci_request_region_exclusive);
3937 EXPORT_SYMBOL(pci_release_selected_regions);
3938 EXPORT_SYMBOL(pci_request_selected_regions);
3939 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3940 EXPORT_SYMBOL(pci_set_master);
3941 EXPORT_SYMBOL(pci_clear_master);
3942 EXPORT_SYMBOL(pci_set_mwi);
3943 EXPORT_SYMBOL(pci_try_set_mwi);
3944 EXPORT_SYMBOL(pci_clear_mwi);
3945 EXPORT_SYMBOL_GPL(pci_intx);
3946 EXPORT_SYMBOL(pci_assign_resource);
3947 EXPORT_SYMBOL(pci_find_parent_resource);
3948 EXPORT_SYMBOL(pci_select_bars);
3949 
3950 EXPORT_SYMBOL(pci_set_power_state);
3951 EXPORT_SYMBOL(pci_save_state);
3952 EXPORT_SYMBOL(pci_restore_state);
3953 EXPORT_SYMBOL(pci_pme_capable);
3954 EXPORT_SYMBOL(pci_pme_active);
3955 EXPORT_SYMBOL(pci_wake_from_d3);
3956 EXPORT_SYMBOL(pci_target_state);
3957 EXPORT_SYMBOL(pci_prepare_to_sleep);
3958 EXPORT_SYMBOL(pci_back_from_sleep);
3959 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
3960