xref: /linux/drivers/pci/pci-acpi.c (revision 9e9f60108423f18a99c9cc93ef7f23490ecc709b)
1 /*
2  * File:	pci-acpi.c
3  * Purpose:	Provide PCI support in ACPI
4  *
5  * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
6  * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
7  * Copyright (C) 2004 Intel Corp.
8  */
9 
10 #include <linux/delay.h>
11 #include <linux/init.h>
12 #include <linux/pci.h>
13 #include <linux/pci_hotplug.h>
14 #include <linux/module.h>
15 #include <linux/pci-aspm.h>
16 #include <linux/pci-acpi.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/pm_qos.h>
19 #include "pci.h"
20 
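/*
 * Evaluate the host bridge's _CBA method, which per the PCI Firmware
 * Specification returns the base address of the bridge's memory-mapped
 * configuration space.  Returns 0 if _CBA is absent or fails.
 */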
21 phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
22 {
23 	acpi_status status = AE_NOT_EXIST;
24 	unsigned long long mcfg_addr;
25 
26 	if (handle)
27 		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
28 					       NULL, &mcfg_addr);
29 	if (ACPI_FAILURE(status))
30 		return 0;
31 
32 	return (phys_addr_t)mcfg_addr;
33 }
34 
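/*
 * Decode a Type 0 (PCI) _HPX setting record.  As the checks below imply, a
 * revision 1 record is a six-element package:
 *   { type (0), revision (1), cache line size, latency timer,
 *     enable SERR, enable PERR }
 */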
35 static acpi_status decode_type0_hpx_record(union acpi_object *record,
36 					   struct hotplug_params *hpx)
37 {
38 	int i;
39 	union acpi_object *fields = record->package.elements;
40 	u32 revision = fields[1].integer.value;
41 
42 	switch (revision) {
43 	case 1:
44 		if (record->package.count != 6)
45 			return AE_ERROR;
46 		for (i = 2; i < 6; i++)
47 			if (fields[i].type != ACPI_TYPE_INTEGER)
48 				return AE_ERROR;
49 		hpx->t0 = &hpx->type0_data;
50 		hpx->t0->revision        = revision;
51 		hpx->t0->cache_line_size = fields[2].integer.value;
52 		hpx->t0->latency_timer   = fields[3].integer.value;
53 		hpx->t0->enable_serr     = fields[4].integer.value;
54 		hpx->t0->enable_perr     = fields[5].integer.value;
55 		break;
56 	default:
57 		printk(KERN_WARNING
58 		       "%s: Type 0 Revision %u record not supported\n",
59 		       __func__, revision);
60 		return AE_ERROR;
61 	}
62 	return AE_OK;
63 }
64 
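/*
 * Decode a Type 1 (PCI-X) _HPX setting record: a five-element package of
 * { type (1), revision (1), max memory read byte count, average max
 *   outstanding split transactions, total max outstanding split transactions }.
 */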
65 static acpi_status decode_type1_hpx_record(union acpi_object *record,
66 					   struct hotplug_params *hpx)
67 {
68 	int i;
69 	union acpi_object *fields = record->package.elements;
70 	u32 revision = fields[1].integer.value;
71 
72 	switch (revision) {
73 	case 1:
74 		if (record->package.count != 5)
75 			return AE_ERROR;
76 		for (i = 2; i < 5; i++)
77 			if (fields[i].type != ACPI_TYPE_INTEGER)
78 				return AE_ERROR;
79 		hpx->t1 = &hpx->type1_data;
80 		hpx->t1->revision      = revision;
81 		hpx->t1->max_mem_read  = fields[2].integer.value;
82 		hpx->t1->avg_max_split = fields[3].integer.value;
83 		hpx->t1->tot_max_split = fields[4].integer.value;
84 		break;
85 	default:
86 		printk(KERN_WARNING
87 		       "%s: Type 1 Revision %u record not supported\n",
88 		       __func__, revision);
89 		return AE_ERROR;
90 	}
91 	return AE_OK;
92 }
93 
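/*
 * Decode a Type 2 (PCI Express) _HPX setting record: an 18-element package
 * of AER mask/severity values plus Device Control and Link Control bits,
 * each expressed as an AND mask and an OR value to apply to the register.
 */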
94 static acpi_status decode_type2_hpx_record(union acpi_object *record,
95 					   struct hotplug_params *hpx)
96 {
97 	int i;
98 	union acpi_object *fields = record->package.elements;
99 	u32 revision = fields[1].integer.value;
100 
101 	switch (revision) {
102 	case 1:
103 		if (record->package.count != 18)
104 			return AE_ERROR;
105 		for (i = 2; i < 18; i++)
106 			if (fields[i].type != ACPI_TYPE_INTEGER)
107 				return AE_ERROR;
108 		hpx->t2 = &hpx->type2_data;
109 		hpx->t2->revision      = revision;
110 		hpx->t2->unc_err_mask_and      = fields[2].integer.value;
111 		hpx->t2->unc_err_mask_or       = fields[3].integer.value;
112 		hpx->t2->unc_err_sever_and     = fields[4].integer.value;
113 		hpx->t2->unc_err_sever_or      = fields[5].integer.value;
114 		hpx->t2->cor_err_mask_and      = fields[6].integer.value;
115 		hpx->t2->cor_err_mask_or       = fields[7].integer.value;
116 		hpx->t2->adv_err_cap_and       = fields[8].integer.value;
117 		hpx->t2->adv_err_cap_or        = fields[9].integer.value;
118 		hpx->t2->pci_exp_devctl_and    = fields[10].integer.value;
119 		hpx->t2->pci_exp_devctl_or     = fields[11].integer.value;
120 		hpx->t2->pci_exp_lnkctl_and    = fields[12].integer.value;
121 		hpx->t2->pci_exp_lnkctl_or     = fields[13].integer.value;
122 		hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
123 		hpx->t2->sec_unc_err_sever_or  = fields[15].integer.value;
124 		hpx->t2->sec_unc_err_mask_and  = fields[16].integer.value;
125 		hpx->t2->sec_unc_err_mask_or   = fields[17].integer.value;
126 		break;
127 	default:
128 		printk(KERN_WARNING
129 		       "%s: Type 2 Revision %u record not supported\n",
130 		       __func__, revision);
131 		return AE_ERROR;
132 	}
133 	return AE_OK;
134 }
135 
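/*
 * _HPX returns a package of setting-record packages.  Element 0 of each
 * record is its type and element 1 its revision; a record of an unknown
 * type is treated as an error.
 */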
136 static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
137 {
138 	acpi_status status;
139 	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
140 	union acpi_object *package, *record, *fields;
141 	u32 type;
142 	int i;
143 
144 	/* Clear the caller's hotplug_params before decoding the _HPX records */
145 	memset(hpx, 0, sizeof(struct hotplug_params));
146 
147 	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
148 	if (ACPI_FAILURE(status))
149 		return status;
150 
151 	package = (union acpi_object *)buffer.pointer;
152 	if (package->type != ACPI_TYPE_PACKAGE) {
153 		status = AE_ERROR;
154 		goto exit;
155 	}
156 
157 	for (i = 0; i < package->package.count; i++) {
158 		record = &package->package.elements[i];
159 		if (record->type != ACPI_TYPE_PACKAGE) {
160 			status = AE_ERROR;
161 			goto exit;
162 		}
163 
164 		fields = record->package.elements;
165 		if (fields[0].type != ACPI_TYPE_INTEGER ||
166 		    fields[1].type != ACPI_TYPE_INTEGER) {
167 			status = AE_ERROR;
168 			goto exit;
169 		}
170 
171 		type = fields[0].integer.value;
172 		switch (type) {
173 		case 0:
174 			status = decode_type0_hpx_record(record, hpx);
175 			if (ACPI_FAILURE(status))
176 				goto exit;
177 			break;
178 		case 1:
179 			status = decode_type1_hpx_record(record, hpx);
180 			if (ACPI_FAILURE(status))
181 				goto exit;
182 			break;
183 		case 2:
184 			status = decode_type2_hpx_record(record, hpx);
185 			if (ACPI_FAILURE(status))
186 				goto exit;
187 			break;
188 		default:
189 			printk(KERN_ERR "%s: Type %u record not supported\n",
190 			       __func__, type);
191 			status = AE_ERROR;
192 			goto exit;
193 		}
194 	}
195  exit:
196 	kfree(buffer.pointer);
197 	return status;
198 }
199 
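/*
 * _HPP is the older hotplug-parameter method: it returns exactly four
 * integers (cache line size, latency timer, enable SERR, enable PERR),
 * which we store as a Type 0 revision 1 record.
 */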
200 static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
201 {
202 	acpi_status status;
203 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
204 	union acpi_object *package, *fields;
205 	int i;
206 
207 	memset(hpp, 0, sizeof(struct hotplug_params));
208 
209 	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
210 	if (ACPI_FAILURE(status))
211 		return status;
212 
213 	package = (union acpi_object *) buffer.pointer;
214 	if (package->type != ACPI_TYPE_PACKAGE ||
215 	    package->package.count != 4) {
216 		status = AE_ERROR;
217 		goto exit;
218 	}
219 
220 	fields = package->package.elements;
221 	for (i = 0; i < 4; i++) {
222 		if (fields[i].type != ACPI_TYPE_INTEGER) {
223 			status = AE_ERROR;
224 			goto exit;
225 		}
226 	}
227 
228 	hpp->t0 = &hpp->type0_data;
229 	hpp->t0->revision        = 1;
230 	hpp->t0->cache_line_size = fields[0].integer.value;
231 	hpp->t0->latency_timer   = fields[1].integer.value;
232 	hpp->t0->enable_serr     = fields[2].integer.value;
233 	hpp->t0->enable_perr     = fields[3].integer.value;
234 
235 exit:
236 	kfree(buffer.pointer);
237 	return status;
238 }
239 
240 /**
241  * pci_get_hp_params - get hotplug parameters from ACPI (_HPX or _HPP)
242  * @dev: the pci_dev for which we want parameters
243  * @hpp: hotplug parameter structure, allocated by the caller
244  */
245 int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
246 {
247 	acpi_status status;
248 	acpi_handle handle, phandle;
249 	struct pci_bus *pbus;
250 
251 	handle = NULL;
252 	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
253 		handle = acpi_pci_get_bridge_handle(pbus);
254 		if (handle)
255 			break;
256 	}
257 
258 	/*
259 	 * _HPP settings apply to all child buses, until another _HPP is
260 	 * encountered. If we don't find an _HPP for the input pci dev,
261 	 * look for it in the parent device scope since that would apply to
262 	 * this pci dev.
263 	 */
264 	while (handle) {
265 		status = acpi_run_hpx(handle, hpp);
266 		if (ACPI_SUCCESS(status))
267 			return 0;
268 		status = acpi_run_hpp(handle, hpp);
269 		if (ACPI_SUCCESS(status))
270 			return 0;
271 		if (acpi_is_root_bridge(handle))
272 			break;
273 		status = acpi_get_parent(handle, &phandle);
274 		if (ACPI_FAILURE(status))
275 			break;
276 		handle = phandle;
277 	}
278 	return -ENODEV;
279 }
280 EXPORT_SYMBOL_GPL(pci_get_hp_params);
281 
282 /**
283  * pci_acpi_wake_bus - Root bus wakeup notification work function.
284  * @work: Work item to handle.
285  */
286 static void pci_acpi_wake_bus(struct work_struct *work)
287 {
288 	struct acpi_device *adev;
289 	struct acpi_pci_root *root;
290 
291 	adev = container_of(work, struct acpi_device, wakeup.context.work);
292 	root = acpi_driver_data(adev);
293 	pci_pme_wakeup_bus(root->bus);
294 }
295 
296 /**
297  * pci_acpi_wake_dev - PCI device wakeup notification work function.
299  * @work: Work item to handle.
300  */
301 static void pci_acpi_wake_dev(struct work_struct *work)
302 {
303 	struct acpi_device_wakeup_context *context;
304 	struct pci_dev *pci_dev;
305 
306 	context = container_of(work, struct acpi_device_wakeup_context, work);
307 	pci_dev = to_pci_dev(context->dev);
308 
309 	if (pci_dev->pme_poll)
310 		pci_dev->pme_poll = false;
311 
312 	if (pci_dev->current_state == PCI_D3cold) {
313 		pci_wakeup_event(pci_dev);
314 		pm_runtime_resume(&pci_dev->dev);
315 		return;
316 	}
317 
318 	/* Clear PME Status if set. */
319 	if (pci_dev->pme_support)
320 		pci_check_pme_status(pci_dev);
321 
322 	pci_wakeup_event(pci_dev);
323 	pm_runtime_resume(&pci_dev->dev);
324 
325 	pci_pme_wakeup_bus(pci_dev->subordinate);
326 }
327 
328 /**
329  * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
330  * @dev: PCI root bridge ACPI device.
331  */
332 acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
333 {
334 	return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
335 }
336 
337 /**
338  * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
339  * @dev: ACPI device to add the notifier for.
340  * @pci_dev: PCI device to check for the PME status if an event is signaled.
341  */
342 acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
343 				     struct pci_dev *pci_dev)
344 {
345 	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
346 }
347 
348 /*
349  * _SxD returns the D-state with the highest power
350  * (lowest D-state number) supported in the S-state "x".
351  *
352  * If the device does not have a _PRW
353  * (Power Resources for Wake) supporting system wakeup from "x"
354  * then the OS is free to choose a lower power (higher number
355  * D-state) than the return value from _SxD.
356  *
357  * But if _PRW is enabled at S-state "x", the OS
358  * must not choose a power lower than _SxD --
359  * unless the device has an _SxW method specifying
360  * the lowest power (highest D-state number) the device
361  * may enter while still able to wake the system.
362  *
363  * i.e., depending on global OS policy:
364  *
365  * if (_PRW at S-state x)
366  *	choose from highest power _SxD to lowest power _SxW
367  * else // no _PRW at S-state x
368  *	choose highest power _SxD or any lower power
369  */
370 
371 static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
372 {
373 	int acpi_state, d_max;
374 
375 	if (pdev->no_d3cold)
376 		d_max = ACPI_STATE_D3_HOT;
377 	else
378 		d_max = ACPI_STATE_D3_COLD;
379 	acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
380 	if (acpi_state < 0)
381 		return PCI_POWER_ERROR;
382 
383 	switch (acpi_state) {
384 	case ACPI_STATE_D0:
385 		return PCI_D0;
386 	case ACPI_STATE_D1:
387 		return PCI_D1;
388 	case ACPI_STATE_D2:
389 		return PCI_D2;
390 	case ACPI_STATE_D3_HOT:
391 		return PCI_D3hot;
392 	case ACPI_STATE_D3_COLD:
393 		return PCI_D3cold;
394 	}
395 	return PCI_POWER_ERROR;
396 }
397 
398 static bool acpi_pci_power_manageable(struct pci_dev *dev)
399 {
400 	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
401 	return adev ? acpi_device_power_manageable(adev) : false;
402 }
403 
404 static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
405 {
406 	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
407 	static const u8 state_conv[] = {
408 		[PCI_D0] = ACPI_STATE_D0,
409 		[PCI_D1] = ACPI_STATE_D1,
410 		[PCI_D2] = ACPI_STATE_D2,
411 		[PCI_D3hot] = ACPI_STATE_D3_COLD,
412 		[PCI_D3cold] = ACPI_STATE_D3_COLD,
413 	};
414 	int error = -EINVAL;
415 
416 	/* If the ACPI device has _EJ0, ignore the device */
417 	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
418 		return -ENODEV;
419 
420 	switch (state) {
421 	case PCI_D3cold:
422 		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
423 				PM_QOS_FLAGS_ALL) {
424 			error = -EBUSY;
425 			break;
426 		}
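		/* Fall through: powering off is permitted, handle D3cold like the rest. */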
427 	case PCI_D0:
428 	case PCI_D1:
429 	case PCI_D2:
430 	case PCI_D3hot:
431 		error = acpi_device_set_power(adev, state_conv[state]);
432 	}
433 
434 	if (!error)
435 		dev_dbg(&dev->dev, "power state changed by ACPI to %s\n",
436 			 acpi_power_state_string(state_conv[state]));
437 
438 	return error;
439 }
440 
441 static bool acpi_pci_can_wakeup(struct pci_dev *dev)
442 {
443 	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
444 	return adev ? acpi_device_can_wakeup(adev) : false;
445 }
446 
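/*
 * Arm (or disarm) wakeup on the nearest upstream bridge that accepts the
 * request; only if no P2P bridge on the path can be armed do we fall back
 * to the host bridge itself.
 */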
447 static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
448 {
449 	while (bus->parent) {
450 		if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable))
451 			return;
452 		bus = bus->parent;
453 	}
454 
455 	/* We have reached the root bus. */
456 	if (bus->bridge)
457 		acpi_pm_device_sleep_wake(bus->bridge, enable);
458 }
459 
460 static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
461 {
462 	if (acpi_pci_can_wakeup(dev))
463 		return acpi_pm_device_sleep_wake(&dev->dev, enable);
464 
465 	acpi_pci_propagate_wakeup_enable(dev->bus, enable);
466 	return 0;
467 }
468 
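/*
 * Walk up the bus hierarchy enabling runtime wake, stopping at the first
 * bridge that already signals PME via an interrupt or whose ACPI companion
 * accepts the request, and falling back to the host bridge otherwise.
 */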
469 static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
470 {
471 	while (bus->parent) {
472 		struct pci_dev *bridge = bus->self;
473 
474 		if (bridge->pme_interrupt)
475 			return;
476 		if (!acpi_pm_device_run_wake(&bridge->dev, enable))
477 			return;
478 		bus = bus->parent;
479 	}
480 
481 	/* We have reached the root bus. */
482 	if (bus->bridge)
483 		acpi_pm_device_run_wake(bus->bridge, enable);
484 }
485 
486 static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
487 {
488 	/*
489 	 * Per PCI Express Base Specification Revision 2.0 section
490 	 * 5.3.3.2 Link Wakeup, platform support is needed to power up the
491 	 * main link when waking from D3cold, even if the device supports
492 	 * PME from D3cold.
493 	 */
494 	if (dev->pme_interrupt && !dev->runtime_d3cold)
495 		return 0;
496 
497 	if (!acpi_pm_device_run_wake(&dev->dev, enable))
498 		return 0;
499 
500 	acpi_pci_propagate_run_wake(dev->bus, enable);
501 	return 0;
502 }
503 
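/*
 * Callbacks handed to the PCI core via pci_set_platform_pm() so that the
 * generic power-management paths defer to ACPI.
 */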
504 static struct pci_platform_pm_ops acpi_pci_platform_pm = {
505 	.is_manageable = acpi_pci_power_manageable,
506 	.set_state = acpi_pci_set_power_state,
507 	.choose_state = acpi_pci_choose_state,
508 	.sleep_wake = acpi_pci_sleep_wake,
509 	.run_wake = acpi_pci_run_wake,
510 };
511 
512 void acpi_pci_add_bus(struct pci_bus *bus)
513 {
514 	if (acpi_pci_disabled || !bus->bridge)
515 		return;
516 
517 	acpi_pci_slot_enumerate(bus);
518 	acpiphp_enumerate_slots(bus);
519 }
520 
521 void acpi_pci_remove_bus(struct pci_bus *bus)
522 {
523 	if (acpi_pci_disabled || !bus->bridge)
524 		return;
525 
526 	acpiphp_remove_slots(bus);
527 	acpi_pci_slot_remove(bus);
528 }
529 
530 /* ACPI bus type */
531 static struct acpi_device *acpi_pci_find_companion(struct device *dev)
532 {
533 	struct pci_dev *pci_dev = to_pci_dev(dev);
534 	bool check_children;
535 	u64 addr;
536 
537 	check_children = pci_is_bridge(pci_dev);
538 	/* _ADR for PCI: device (slot) number in the high word, function in the low word */
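	/* e.g. a device at slot 0x1c, function 2 has an _ADR of 0x001C0002 */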
539 	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
540 	return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
541 				      check_children);
542 }
543 
544 static void pci_acpi_setup(struct device *dev)
545 {
546 	struct pci_dev *pci_dev = to_pci_dev(dev);
547 	struct acpi_device *adev = ACPI_COMPANION(dev);
548 
549 	if (!adev)
550 		return;
551 
552 	pci_acpi_add_pm_notifier(adev, pci_dev);
553 	if (!adev->wakeup.flags.valid)
554 		return;
555 
556 	device_set_wakeup_capable(dev, true);
557 	acpi_pci_sleep_wake(pci_dev, false);
558 	if (adev->wakeup.flags.run_wake)
559 		device_set_run_wake(dev, true);
560 }
561 
562 static void pci_acpi_cleanup(struct device *dev)
563 {
564 	struct acpi_device *adev = ACPI_COMPANION(dev);
565 
566 	if (!adev)
567 		return;
568 
569 	pci_acpi_remove_pm_notifier(adev);
570 	if (adev->wakeup.flags.valid) {
571 		device_set_wakeup_capable(dev, false);
572 		device_set_run_wake(dev, false);
573 	}
574 }
575 
576 static bool pci_acpi_bus_match(struct device *dev)
577 {
578 	return dev_is_pci(dev);
579 }
580 
581 static struct acpi_bus_type acpi_pci_bus = {
582 	.name = "PCI",
583 	.match = pci_acpi_bus_match,
584 	.find_companion = acpi_pci_find_companion,
585 	.setup = pci_acpi_setup,
586 	.cleanup = pci_acpi_cleanup,
587 };
588 
589 static int __init acpi_pci_init(void)
590 {
591 	int ret;
592 
593 	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
594 		pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
595 		pci_no_msi();
596 	}
597 
598 	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
599 		pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
600 		pcie_no_aspm();
601 	}
602 
603 	ret = register_acpi_bus_type(&acpi_pci_bus);
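	/*
	 * If the ACPI bus type can't be registered (e.g. ACPI is disabled),
	 * skip the ACPI-specific setup but don't fail the initcall.
	 */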
604 	if (ret)
605 		return 0;
606 
607 	pci_set_platform_pm(&acpi_pci_platform_pm);
608 	acpi_pci_slot_init();
609 	acpiphp_init();
610 
611 	return 0;
612 }
613 arch_initcall(acpi_pci_init);
614