xref: /linux/drivers/acpi/sleep.c (revision 3c4fc7bf4c9e66fe71abcbf93f62f4ddb89b7f15)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * sleep.c - ACPI sleep support.
4  *
5  * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
6  * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com>
7  * Copyright (c) 2000-2003 Patrick Mochel
8  * Copyright (c) 2003 Open Source Development Lab
9  */
10 
11 #define pr_fmt(fmt) "ACPI: PM: " fmt
12 
13 #include <linux/delay.h>
14 #include <linux/irq.h>
15 #include <linux/dmi.h>
16 #include <linux/device.h>
17 #include <linux/interrupt.h>
18 #include <linux/suspend.h>
19 #include <linux/reboot.h>
20 #include <linux/acpi.h>
21 #include <linux/module.h>
22 #include <linux/syscore_ops.h>
23 #include <asm/io.h>
24 #include <trace/events/power.h>
25 
26 #include "internal.h"
27 #include "sleep.h"
28 
/*
 * Some HW-full platforms do not have _S5, so they may need
 * to leverage efi power off for a shutdown.
 */
bool acpi_no_s5;
/* Nonzero entries mark the ACPI S-states usable on this platform. */
static u8 sleep_states[ACPI_S_STATE_COUNT];
35 
36 static void acpi_sleep_tts_switch(u32 acpi_state)
37 {
38 	acpi_status status;
39 
40 	status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
41 	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
42 		/*
43 		 * OS can't evaluate the _TTS object correctly. Some warning
44 		 * message will be printed. But it won't break anything.
45 		 */
46 		pr_notice("Failure in evaluating _TTS object\n");
47 	}
48 }
49 
/*
 * Reboot notifier callback: evaluate \_TTS for S5 on the way down so the
 * platform is told about the transition even on reboot/poweroff paths.
 * @code and @x are unused; always returns NOTIFY_DONE.
 */
static int tts_notify_reboot(struct notifier_block *this,
			unsigned long code, void *x)
{
	acpi_sleep_tts_switch(ACPI_STATE_S5);
	return NOTIFY_DONE;
}
56 
/* Registered on the reboot notifier chain from acpi_sleep_init(). */
static struct notifier_block tts_notifier = {
	.notifier_call	= tts_notify_reboot,
	.next		= NULL,
	.priority	= 0,
};
62 
/*
 * acpi_sleep_prepare - Firmware-side preparation for entering @acpi_state:
 * set the waking vector (S3 only), enable wakeup devices and run the
 * pre-sleep firmware methods via acpi_enter_sleep_state_prep().
 *
 * Returns -EFAULT when no wakeup address is available for S3, 0 otherwise.
 */
static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
	unsigned long acpi_wakeup_address;

	/* do we have a wakeup address for S2 and S3? */
	if (acpi_state == ACPI_STATE_S3) {
		acpi_wakeup_address = acpi_get_wakeup_address();
		if (!acpi_wakeup_address)
			return -EFAULT;
		acpi_set_waking_vector(acpi_wakeup_address);

	}
#endif
	pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
	acpi_enable_wakeup_devices(acpi_state);
	acpi_enter_sleep_state_prep(acpi_state);
	return 0;
}
82 
83 bool acpi_sleep_state_supported(u8 sleep_state)
84 {
85 	acpi_status status;
86 	u8 type_a, type_b;
87 
88 	status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
89 	return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
90 		|| (acpi_gbl_FADT.sleep_control.address
91 			&& acpi_gbl_FADT.sleep_status.address));
92 }
93 
94 #ifdef CONFIG_ACPI_SLEEP
/* Sleep state the system is currently transitioning into; S0 means none. */
static u32 acpi_target_sleep_state = ACPI_STATE_S0;

/**
 * acpi_target_system_state - Return the target ACPI sleep state.
 */
u32 acpi_target_system_state(void)
{
	return acpi_target_sleep_state;
}
EXPORT_SYMBOL_GPL(acpi_target_system_state);

/* Set during S3 resume when the fixed power button woke up the system. */
static bool pwr_btn_event_pending;
104 
/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * and to restore them during the subsequent resume.  Windows does that also for
 * suspend to RAM.  However, it is known that this mechanism does not work on
 * all machines, so we allow the user to disable it with the help of the
 * 'acpi_sleep=nonvs' kernel command line option.
 */
static bool nvs_nosave;

/* Disable NVS save/restore entirely (see the comment above). */
void __init acpi_nvs_nosave(void)
{
	nvs_nosave = true;
}
118 
/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * but says nothing about saving NVS during S3.  Not all versions of Windows
 * save NVS on S3 suspend either, and it is clear that not all systems need
 * NVS to be saved at S3 time.  To improve suspend/resume time, allow the
 * user to disable saving NVS on S3 if their system does not require it, but
 * continue to save/restore NVS for S4 as specified.
 */
static bool nvs_nosave_s3;

/* Skip the NVS save/restore on S3 suspend (S4 behavior is unaffected). */
void __init acpi_nvs_nosave_s3(void)
{
	nvs_nosave_s3 = true;
}

/* DMI quirk callback: force NVS saving on S3 for platforms that need it. */
static int __init init_nvs_save_s3(const struct dmi_system_id *d)
{
	nvs_nosave_s3 = false;
	return 0;
}
139 
/*
 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
 * user to request that behavior by using the 'acpi_old_suspend_ordering'
 * kernel command line option that causes the following variable to be set.
 */
static bool old_suspend_ordering;

/* Select the pre-ACPI 2.0 suspend ordering (see the comment above). */
void __init acpi_old_suspend_ordering(void)
{
	old_suspend_ordering = true;
}

/* DMI quirk callback wrapper for acpi_old_suspend_ordering(). */
static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
{
	acpi_old_suspend_ordering();
	return 0;
}

/* DMI quirk callback wrapper for acpi_nvs_nosave(). */
static int __init init_nvs_nosave(const struct dmi_system_id *d)
{
	acpi_nvs_nosave();
	return 0;
}
163 
/*
 * Set via DMI quirk for machines that should use S3 rather than
 * suspend-to-idle by default.
 */
bool acpi_sleep_default_s3;

/* DMI quirk callback: make S3 the default system suspend method. */
static int __init init_default_s3(const struct dmi_system_id *d)
{
	acpi_sleep_default_s3 = true;
	return 0;
}
171 
/*
 * Quirk table for machines that need the pre-ACPI 2.0 suspend ordering,
 * non-default NVS save/restore behavior, or S3 as the default suspend mode.
 */
static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
	{
	.callback = init_old_suspend_ordering,
	.ident = "Abit KN9 (nForce4 variant)",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
		DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "HP xw4600 Workstation",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
		DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
		DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Panasonic CF51-2L",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR,
				"Matsushita Electric Industrial Co.,Ltd."),
		DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW41E_H",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW21E",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW21M",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCEB17FX",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-SR11M",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Everex StepNote Series",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCEB1Z1E",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-NW130D",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCCW29FX",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Averatec AV1020-ED2",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
		DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Asus A8N-SLI DELUXE",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
		DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Asus A8N-SLI Premium",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
		DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-SR26GN_P",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCEB1S1E",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW520F",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Asus K54C",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Asus K54HR",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
		},
	},
	{
	.callback = init_nvs_save_s3,
	.ident = "Asus 1025C",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
		DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
		},
	},
	/*
	 * https://bugzilla.kernel.org/show_bug.cgi?id=189431
	 * Lenovo G50-45 is a platform later than 2012, but needs nvs memory
	 * saving during S3.
	 */
	{
	.callback = init_nvs_save_s3,
	.ident = "Lenovo G50-45",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
		DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
		},
	},
	{
	.callback = init_nvs_save_s3,
	.ident = "Lenovo G40-45",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
		DMI_MATCH(DMI_PRODUCT_NAME, "80E1"),
		},
	},
	/*
	 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
	 * the Low Power S0 Idle firmware interface (see
	 * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
	 */
	{
	.callback = init_default_s3,
	.ident = "ThinkPad X1 Tablet(2016)",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
		DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
		},
	},
	/*
	 * ASUS B1400CEAE hangs on resume from suspend (see
	 * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
	 */
	{
	.callback = init_default_s3,
	.ident = "ASUS B1400CEAE",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
		DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
		},
	},
	{},
};
398 
/* When set, skip both the BIOS-year heuristic and the DMI quirk table. */
static bool ignore_blacklist;

/* Disable all sleep-related DMI blacklist handling. */
void __init acpi_sleep_no_blacklist(void)
{
	ignore_blacklist = true;
}
405 
406 static void __init acpi_sleep_dmi_check(void)
407 {
408 	if (ignore_blacklist)
409 		return;
410 
411 	if (dmi_get_bios_year() >= 2012)
412 		acpi_nvs_nosave_s3();
413 
414 	dmi_check_system(acpisleep_dmi_table);
415 }
416 
/**
 * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
 *
 * Always returns 0 so that it can be used directly as a PM callback.
 */
static int acpi_pm_freeze(void)
{
	acpi_disable_all_gpes();
	acpi_os_wait_events_complete();	/* drain in-flight event handling */
	acpi_ec_block_transactions();
	return 0;
}
427 
/**
 * acpi_pm_pre_suspend - "Freeze" ACPI (disable GPEs, block EC) and save NVS.
 *
 * Returns the result of suspend_nvs_save().
 */
static int acpi_pm_pre_suspend(void)
{
	acpi_pm_freeze();
	return suspend_nvs_save();
}
436 
/**
 *	__acpi_pm_prepare - Prepare the platform to enter the target state.
 *
 *	If necessary, set the firmware waking vector and do arch-specific
 *	nastiness to get the wakeup code to the waking vector.
 */
static int __acpi_pm_prepare(void)
{
	int error = acpi_sleep_prepare(acpi_target_sleep_state);
	/* On failure, fall back to S0 so later stages treat this as "awake". */
	if (error)
		acpi_target_sleep_state = ACPI_STATE_S0;

	return error;
}
451 
/**
 *	acpi_pm_prepare - Prepare the platform to enter the target sleep
 *		state and disable the GPEs.
 */
static int acpi_pm_prepare(void)
{
	int error = __acpi_pm_prepare();
	if (!error)
		error = acpi_pm_pre_suspend();

	return error;
}
464 
/**
 *	acpi_pm_finish - Instruct the platform to leave a sleep state.
 *
 *	This is called after we wake back up (or if entering the sleep state
 *	failed).
 */
static void acpi_pm_finish(void)
{
	struct acpi_device *pwr_btn_adev;
	u32 acpi_state = acpi_target_sleep_state;

	acpi_ec_unblock_transactions();
	suspend_nvs_free();

	/* Nothing else to undo if no transition was actually in progress. */
	if (acpi_state == ACPI_STATE_S0)
		return;

	pr_info("Waking up from system sleep state S%d\n", acpi_state);
	acpi_disable_wakeup_devices(acpi_state);
	acpi_leave_sleep_state(acpi_state);

	/* reset firmware waking vector */
	acpi_set_waking_vector(0);

	acpi_target_sleep_state = ACPI_STATE_S0;

	acpi_resume_power_resources();

	/* If we were woken with the fixed power button, provide a small
	 * hint to userspace in the form of a wakeup event on the fixed power
	 * button device (if it can be found).
	 *
	 * We delay the event generation til now, as the PM layer requires
	 * timekeeping to be running before we generate events. */
	if (!pwr_btn_event_pending)
		return;

	pwr_btn_event_pending = false;
	pwr_btn_adev = acpi_dev_get_first_match_dev(ACPI_BUTTON_HID_POWERF,
						    NULL, -1);
	if (pwr_btn_adev) {
		pm_wakeup_event(&pwr_btn_adev->dev, 0);
		acpi_dev_put(pwr_btn_adev);
	}
}
510 
/**
 * acpi_pm_start - Start system PM transition.
 * @acpi_state: Target ACPI sleep state.
 *
 * Record the target state, evaluate _TTS for it and take the ACPI scan
 * lock for the duration of the transition.
 */
static void acpi_pm_start(u32 acpi_state)
{
	acpi_target_sleep_state = acpi_state;
	acpi_sleep_tts_switch(acpi_target_sleep_state);
	acpi_scan_lock_acquire();
}
520 
/**
 * acpi_pm_end - Finish up system PM transition.
 *
 * Releases the scan lock taken by acpi_pm_start() and resets the target
 * state bookkeeping.
 */
static void acpi_pm_end(void)
{
	acpi_turn_off_unused_power_resources();
	acpi_scan_lock_release();
	/*
	 * This is necessary in case acpi_pm_finish() is not called during a
	 * failing transition to a sleep state.
	 */
	acpi_target_sleep_state = ACPI_STATE_S0;
	acpi_sleep_tts_switch(acpi_target_sleep_state);
}
535 #else /* !CONFIG_ACPI_SLEEP */
536 #define sleep_no_lps0	(1)
537 #define acpi_target_sleep_state	ACPI_STATE_S0
538 #define acpi_sleep_default_s3	(1)
539 static inline void acpi_sleep_dmi_check(void) {}
540 #endif /* CONFIG_ACPI_SLEEP */
541 
542 #ifdef CONFIG_SUSPEND
/* Map PM_SUSPEND_* values to the corresponding ACPI S-states. */
static u32 acpi_suspend_states[] = {
	[PM_SUSPEND_ON] = ACPI_STATE_S0,
	[PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
	[PM_SUSPEND_MEM] = ACPI_STATE_S3,
	[PM_SUSPEND_MAX] = ACPI_STATE_S5
};
549 
550 /**
551  *	acpi_suspend_begin - Set the target system sleep state to the state
552  *		associated with given @pm_state, if supported.
553  */
554 static int acpi_suspend_begin(suspend_state_t pm_state)
555 {
556 	u32 acpi_state = acpi_suspend_states[pm_state];
557 	int error;
558 
559 	error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
560 	if (error)
561 		return error;
562 
563 	if (!sleep_states[acpi_state]) {
564 		pr_err("ACPI does not support sleep state S%u\n", acpi_state);
565 		return -ENOSYS;
566 	}
567 	if (acpi_state > ACPI_STATE_S1)
568 		pm_set_suspend_via_firmware();
569 
570 	acpi_pm_start(acpi_state);
571 	return 0;
572 }
573 
/**
 *	acpi_suspend_enter - Actually enter a sleep state.
 *	@pm_state: ignored
 *
 *	Flush caches and go to sleep. For STR we have to call arch-specific
 *	assembly, which in turn call acpi_enter_sleep_state().
 *	It's unfortunate, but it works. Please fix if you're feeling frisky.
 */
static int acpi_suspend_enter(suspend_state_t pm_state)
{
	acpi_status status = AE_OK;
	u32 acpi_state = acpi_target_sleep_state;
	int error;

	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
	/* Only S1 and S3 are expected here (see acpi_suspend_begin()). */
	switch (acpi_state) {
	case ACPI_STATE_S1:
		barrier();
		status = acpi_enter_sleep_state(acpi_state);
		break;

	case ACPI_STATE_S3:
		if (!acpi_suspend_lowlevel)
			return -ENOSYS;
		error = acpi_suspend_lowlevel();
		if (error)
			return error;
		pr_info("Low-level resume complete\n");
		pm_set_resume_via_firmware();
		break;
	}
	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false);

	/* This violates the spec but is required for bug compatibility. */
	acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);

	/* Reprogram control registers */
	acpi_leave_sleep_state_prep(acpi_state);

	/* ACPI 3.0 specs (P62) says that it's the responsibility
	 * of the OSPM to clear the status bit [ implying that the
	 * POWER_BUTTON event should not reach userspace ]
	 *
	 * However, we do generate a small hint for userspace in the form of
	 * a wakeup event. We flag this condition for now and generate the
	 * event later, as we're currently too early in resume to be able to
	 * generate wakeup events.
	 */
	if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
		acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;

		acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);

		if (pwr_btn_status & ACPI_EVENT_FLAG_STATUS_SET) {
			acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
			/* Flag for later */
			pwr_btn_event_pending = true;
		}
	}

	/*
	 * Disable and clear GPE status before interrupt is enabled. Some GPEs
	 * (like wakeup GPE) haven't handler, this can avoid such GPE misfire.
	 * acpi_leave_sleep_state will reenable specific GPEs later
	 */
	acpi_disable_all_gpes();
	/* Allow EC transactions to happen. */
	acpi_ec_unblock_transactions();

	suspend_nvs_restore();

	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
647 
648 static int acpi_suspend_state_valid(suspend_state_t pm_state)
649 {
650 	u32 acpi_state;
651 
652 	switch (pm_state) {
653 	case PM_SUSPEND_ON:
654 	case PM_SUSPEND_STANDBY:
655 	case PM_SUSPEND_MEM:
656 		acpi_state = acpi_suspend_states[pm_state];
657 
658 		return sleep_states[acpi_state];
659 	default:
660 		return 0;
661 	}
662 }
663 
/* Suspend callbacks used with the standard (ACPI 2.0+) suspend ordering. */
static const struct platform_suspend_ops acpi_suspend_ops = {
	.valid = acpi_suspend_state_valid,
	.begin = acpi_suspend_begin,
	.prepare_late = acpi_pm_prepare,
	.enter = acpi_suspend_enter,
	.wake = acpi_pm_finish,
	.end = acpi_pm_end,
};
672 
/**
 *	acpi_suspend_begin_old - Set the target system sleep state to the
 *		state associated with given @pm_state, if supported, and
 *		execute the _PTS control method.  This function is used if the
 *		pre-ACPI 2.0 suspend ordering has been requested.
 */
static int acpi_suspend_begin_old(suspend_state_t pm_state)
{
	int error = acpi_suspend_begin(pm_state);
	if (!error)
		error = __acpi_pm_prepare();

	return error;
}
687 
/*
 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 * been requested.  Note the additional .recover callback for cleaning up
 * after a failed transition.
 */
static const struct platform_suspend_ops acpi_suspend_ops_old = {
	.valid = acpi_suspend_state_valid,
	.begin = acpi_suspend_begin_old,
	.prepare_late = acpi_pm_pre_suspend,
	.enter = acpi_suspend_enter,
	.wake = acpi_pm_finish,
	.end = acpi_pm_end,
	.recover = acpi_pm_finish,
};
701 
/* True between acpi_s2idle_prepare() and acpi_s2idle_restore(). */
static bool s2idle_wakeup;

/* Take the ACPI scan lock at the start of a suspend-to-idle cycle. */
int acpi_s2idle_begin(void)
{
	acpi_scan_lock_acquire();
	return 0;
}
709 
/*
 * acpi_s2idle_prepare - Configure ACPI for suspend-to-idle: arm the SCI
 * for wakeup, enable wakeup devices for S0 and switch GPEs to their
 * wakeup-only configuration.  Always returns 0.
 */
int acpi_s2idle_prepare(void)
{
	if (acpi_sci_irq_valid()) {
		enable_irq_wake(acpi_sci_irq);
		acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
	}

	acpi_enable_wakeup_devices(ACPI_STATE_S0);

	/* Change the configuration of GPEs to avoid spurious wakeup. */
	acpi_enable_all_wakeup_gpes();
	acpi_os_wait_events_complete();

	s2idle_wakeup = true;
	return 0;
}
726 
/*
 * acpi_s2idle_wake - Decide whether a suspend-to-idle wakeup is genuine.
 *
 * Returns true when the pending wakeup should be acted upon; returns
 * false after canceling a spurious SCI-only wakeup and rearming the SCI,
 * so the system can go back to sleep.
 */
bool acpi_s2idle_wake(void)
{
	/* Without a valid SCI, only the generic wakeup state can be checked. */
	if (!acpi_sci_irq_valid())
		return pm_wakeup_pending();

	while (pm_wakeup_pending()) {
		/*
		 * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
		 * SCI has not triggered while suspended, so bail out (the
		 * wakeup is pending anyway and the SCI is not the source of
		 * it).
		 */
		if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
			pm_pr_dbg("Wakeup unrelated to ACPI SCI\n");
			return true;
		}

		/*
		 * If the status bit of any enabled fixed event is set, the
		 * wakeup is regarded as valid.
		 */
		if (acpi_any_fixed_event_status_set()) {
			pm_pr_dbg("ACPI fixed event wakeup\n");
			return true;
		}

		/* Check wakeups from drivers sharing the SCI. */
		if (acpi_check_wakeup_handlers()) {
			pm_pr_dbg("ACPI custom handler wakeup\n");
			return true;
		}

		/*
		 * Check non-EC GPE wakeups and if there are none, cancel the
		 * SCI-related wakeup and dispatch the EC GPE.
		 */
		if (acpi_ec_dispatch_gpe()) {
			pm_pr_dbg("ACPI non-EC GPE wakeup\n");
			return true;
		}

		acpi_os_wait_events_complete();

		/*
		 * The SCI is in the "suspended" state now and it cannot produce
		 * new wakeup events till the rearming below, so if any of them
		 * are pending here, they must be resulting from the processing
		 * of EC events above or coming from somewhere else.
		 */
		if (pm_wakeup_pending()) {
			pm_pr_dbg("Wakeup after ACPI Notify sync\n");
			return true;
		}

		pm_pr_dbg("Rearming ACPI SCI for wakeup\n");

		pm_wakeup_clear(acpi_sci_irq);
		rearm_wake_irq(acpi_sci_irq);
	}

	return false;
}
789 
/*
 * acpi_s2idle_restore - Undo acpi_s2idle_prepare(): drain pending events,
 * restore the runtime GPE configuration and disarm the SCI wakeup.
 */
void acpi_s2idle_restore(void)
{
	/*
	 * Drain pending events before restoring the working-state configuration
	 * of GPEs.
	 */
	acpi_os_wait_events_complete(); /* synchronize GPE processing */
	acpi_ec_flush_work(); /* flush the EC driver's workqueues */
	acpi_os_wait_events_complete(); /* synchronize Notify handling */

	s2idle_wakeup = false;

	acpi_enable_all_runtime_gpes();

	acpi_disable_wakeup_devices(ACPI_STATE_S0);

	if (acpi_sci_irq_valid()) {
		acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE);
		disable_irq_wake(acpi_sci_irq);
	}
}
811 
/* Release the scan lock taken in acpi_s2idle_begin(). */
void acpi_s2idle_end(void)
{
	acpi_scan_lock_release();
}
816 
/* Suspend-to-idle callbacks registered via s2idle_set_ops(). */
static const struct platform_s2idle_ops acpi_s2idle_ops = {
	.begin = acpi_s2idle_begin,
	.prepare = acpi_s2idle_prepare,
	.wake = acpi_s2idle_wake,
	.restore = acpi_s2idle_restore,
	.end = acpi_s2idle_end,
};
824 
/*
 * Default suspend-to-idle setup; declared __weak so it may be overridden
 * (e.g. by arch-specific LPS0 handling).
 */
void __weak acpi_s2idle_setup(void)
{
	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
		pr_info("Efficient low-power S0 idle declared\n");

	s2idle_set_ops(&acpi_s2idle_ops);
}
832 
833 static void acpi_sleep_suspend_setup(void)
834 {
835 	bool suspend_ops_needed = false;
836 	int i;
837 
838 	for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
839 		if (acpi_sleep_state_supported(i)) {
840 			sleep_states[i] = 1;
841 			suspend_ops_needed = true;
842 		}
843 
844 	if (suspend_ops_needed)
845 		suspend_set_ops(old_suspend_ordering ?
846 				&acpi_suspend_ops_old : &acpi_suspend_ops);
847 
848 	acpi_s2idle_setup();
849 }
850 
851 #else /* !CONFIG_SUSPEND */
852 #define s2idle_wakeup		(false)
853 static inline void acpi_sleep_suspend_setup(void) {}
854 #endif /* !CONFIG_SUSPEND */
855 
/* Report whether a suspend-to-idle cycle is currently in progress. */
bool acpi_s2idle_wakeup(void)
{
	return s2idle_wakeup;
}
860 
861 #ifdef CONFIG_PM_SLEEP
862 static u32 saved_bm_rld;
863 
864 static int  acpi_save_bm_rld(void)
865 {
866 	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
867 	return 0;
868 }
869 
870 static void  acpi_restore_bm_rld(void)
871 {
872 	u32 resumed_bm_rld = 0;
873 
874 	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
875 	if (resumed_bm_rld == saved_bm_rld)
876 		return;
877 
878 	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
879 }
880 
/* Save/restore BM_RLD around system suspend via the syscore mechanism. */
static struct syscore_ops acpi_sleep_syscore_ops = {
	.suspend = acpi_save_bm_rld,
	.resume = acpi_restore_bm_rld,
};

/* Hook the BM_RLD save/restore callbacks into the syscore path. */
static void acpi_sleep_syscore_init(void)
{
	register_syscore_ops(&acpi_sleep_syscore_ops);
}
890 #else
891 static inline void acpi_sleep_syscore_init(void) {}
892 #endif /* CONFIG_PM_SLEEP */
893 
894 #ifdef CONFIG_HIBERNATION
/* FACS hardware signature sampled at setup time, compared after resume. */
static unsigned long s4_hardware_signature;
static struct acpi_table_facs *facs;
int acpi_check_s4_hw_signature = -1; /* Default behaviour is just to warn */
898 
/*
 * acpi_hibernation_begin - Allocate NVS (unless disabled) and start the
 * transition into S4.  @stage distinguishes the actual hibernate step
 * from other stages of the hibernation flow.
 */
static int acpi_hibernation_begin(pm_message_t stage)
{
	if (!nvs_nosave) {
		int error = suspend_nvs_alloc();
		if (error)
			return error;
	}

	if (stage.event == PM_EVENT_HIBERNATE)
		pm_set_suspend_via_firmware();

	acpi_pm_start(ACPI_STATE_S4);
	return 0;
}
913 
/* Final step of entering hibernation: hand control to the firmware. */
static int acpi_hibernation_enter(void)
{
	acpi_status status = AE_OK;

	/* This shouldn't return.  If it returns, we have a problem */
	status = acpi_enter_sleep_state(ACPI_STATE_S4);
	/* Reprogram control registers */
	acpi_leave_sleep_state_prep(ACPI_STATE_S4);

	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
925 
/*
 * acpi_hibernation_leave - Early resume from hibernation: re-enable ACPI,
 * verify the hardware signature and restore NVS.
 */
static void acpi_hibernation_leave(void)
{
	pm_set_resume_via_firmware();
	/*
	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
	 * enable it here.
	 */
	acpi_enable();
	/* Reprogram control registers */
	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
	/* Check the hardware signature */
	if (facs && s4_hardware_signature != facs->hardware_signature)
		pr_crit("Hardware changed while hibernated, success doubtful!\n");
	/* Restore the NVS memory area */
	suspend_nvs_restore();
	/* Allow EC transactions to happen. */
	acpi_ec_unblock_transactions();
}
944 
/* Undo acpi_pm_freeze(): unblock the EC and re-enable runtime GPEs. */
static void acpi_pm_thaw(void)
{
	acpi_ec_unblock_transactions();
	acpi_enable_all_runtime_gpes();
}
950 
/* Hibernation callbacks used with the standard (ACPI 2.0+) ordering. */
static const struct platform_hibernation_ops acpi_hibernation_ops = {
	.begin = acpi_hibernation_begin,
	.end = acpi_pm_end,
	.pre_snapshot = acpi_pm_prepare,
	.finish = acpi_pm_finish,
	.prepare = acpi_pm_prepare,
	.enter = acpi_hibernation_enter,
	.leave = acpi_hibernation_leave,
	.pre_restore = acpi_pm_freeze,
	.restore_cleanup = acpi_pm_thaw,
};
962 
/**
 *	acpi_hibernation_begin_old - Set the target system sleep state to
 *		ACPI_STATE_S4 and execute the _PTS control method.  This
 *		function is used if the pre-ACPI 2.0 suspend ordering has been
 *		requested.
 */
static int acpi_hibernation_begin_old(pm_message_t stage)
{
	int error;
	/*
	 * The _TTS object should always be evaluated before the _PTS object.
	 * When the old_suspended_ordering is true, the _PTS object is
	 * evaluated in the acpi_sleep_prepare.
	 */
	acpi_sleep_tts_switch(ACPI_STATE_S4);

	error = acpi_sleep_prepare(ACPI_STATE_S4);
	if (error)
		return error;

	if (!nvs_nosave) {
		error = suspend_nvs_alloc();
		if (error)
			return error;
	}

	if (stage.event == PM_EVENT_HIBERNATE)
		pm_set_suspend_via_firmware();

	/* Open-coded acpi_pm_start() minus _TTS, which ran above already. */
	acpi_target_sleep_state = ACPI_STATE_S4;
	acpi_scan_lock_acquire();
	return 0;
}
996 
/*
 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 * been requested.  Note the additional .recover callback for cleaning up
 * after a failed transition.
 */
static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
	.begin = acpi_hibernation_begin_old,
	.end = acpi_pm_end,
	.pre_snapshot = acpi_pm_pre_suspend,
	.prepare = acpi_pm_freeze,
	.finish = acpi_pm_finish,
	.enter = acpi_hibernation_enter,
	.leave = acpi_hibernation_leave,
	.pre_restore = acpi_pm_freeze,
	.restore_cleanup = acpi_pm_thaw,
	.recover = acpi_pm_finish,
};
1013 
/*
 * acpi_sleep_hibernate_setup - Register hibernation support if the
 * platform can enter S4, and sample the FACS hardware signature for
 * post-resume checking (or for the swsusp header when fully enabled).
 */
static void acpi_sleep_hibernate_setup(void)
{
	if (!acpi_sleep_state_supported(ACPI_STATE_S4))
		return;

	hibernation_set_ops(old_suspend_ordering ?
			&acpi_hibernation_ops_old : &acpi_hibernation_ops);
	sleep_states[ACPI_STATE_S4] = 1;
	/* acpi_check_s4_hw_signature == 0 means "ignore the signature". */
	if (!acpi_check_s4_hw_signature)
		return;

	acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
	if (facs) {
		/*
		 * s4_hardware_signature is the local variable which is just
		 * used to warn about mismatch after we're attempting to
		 * resume (in violation of the ACPI specification.)
		 */
		s4_hardware_signature = facs->hardware_signature;

		if (acpi_check_s4_hw_signature > 0) {
			/*
			 * If we're actually obeying the ACPI specification
			 * then the signature is written out as part of the
			 * swsusp header, in order to allow the boot kernel
			 * to gracefully decline to resume.
			 */
			swsusp_hardware_signature = facs->hardware_signature;
		}
	}
}
1045 #else /* !CONFIG_HIBERNATION */
1046 static inline void acpi_sleep_hibernate_setup(void) {}
1047 #endif /* !CONFIG_HIBERNATION */
1048 
/*
 * Sys-off "prepare" handler: run the S5 firmware preparation and quiesce
 * GPE handling before the actual power-off handler runs.
 */
static int acpi_power_off_prepare(struct sys_off_data *data)
{
	/* Prepare to power off the system */
	acpi_sleep_prepare(ACPI_STATE_S5);
	acpi_disable_all_gpes();
	acpi_os_wait_events_complete();
	return NOTIFY_DONE;
}
1057 
/* Sys-off handler: enter S5.  Not expected to return. */
static int acpi_power_off(struct sys_off_data *data)
{
	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
	pr_debug("%s called\n", __func__);
	local_irq_disable();
	acpi_enter_sleep_state(ACPI_STATE_S5);
	return NOTIFY_DONE;
}
1066 
/*
 * acpi_sleep_init - Initialize system sleep support: apply DMI quirks,
 * detect the supported S-states, register suspend/hibernation/power-off
 * handlers accordingly, and hook _TTS evaluation into the reboot path.
 */
int __init acpi_sleep_init(void)
{
	char supported[ACPI_S_STATE_COUNT * 3 + 1];	/* " S%d" per state */
	char *pos = supported;
	int i;

	acpi_sleep_dmi_check();

	sleep_states[ACPI_STATE_S0] = 1;

	acpi_sleep_syscore_init();
	acpi_sleep_suspend_setup();
	acpi_sleep_hibernate_setup();

	if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
		sleep_states[ACPI_STATE_S5] = 1;

		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF_PREPARE,
					 SYS_OFF_PRIO_FIRMWARE,
					 acpi_power_off_prepare, NULL);

		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
					 SYS_OFF_PRIO_FIRMWARE,
					 acpi_power_off, NULL);
	} else {
		acpi_no_s5 = true;
	}

	/* Build a " S0 S3 ..." style summary of the usable states. */
	supported[0] = 0;
	for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
		if (sleep_states[i])
			pos += sprintf(pos, " S%d", i);
	}
	pr_info("(supports%s)\n", supported);

	/*
	 * Register the tts_notifier to reboot notifier list so that the _TTS
	 * object can also be evaluated when the system enters S5.
	 */
	register_reboot_notifier(&tts_notifier);
	return 0;
}
1109