xref: /linux/arch/x86/kernel/acpi/wakeup_64.S (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
1.text
2#include <linux/linkage.h>
3#include <asm/segment.h>
4#include <asm/pgtable_types.h>
5#include <asm/page_types.h>
6#include <asm/msr.h>
7#include <asm/asm-offsets.h>
8#include <asm/frame.h>
9
10# Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
11
12.code64
13	/*
14	 * Hooray, we are in Long 64-bit mode (but still running in low memory)
15	 */
16ENTRY(wakeup_long64)
	/*
	 * Entry point on resume, reached once the CPU is back in 64-bit
	 * long mode.  First sanity-check saved_magic against the expected
	 * constant (presumably written by the suspend-side setup code —
	 * not visible in this file); on mismatch, hang in bogus_64_magic.
	 */
17	movq	saved_magic, %rax
18	movq	$0x123456789abcdef0, %rdx
19	cmpq	%rdx, %rax
20	jne	bogus_64_magic
21
	/* Reload every data segment register with the kernel data selector */
22	movw	$__KERNEL_DS, %ax
23	movw	%ax, %ss
24	movw	%ax, %ds
25	movw	%ax, %es
26	movw	%ax, %fs
27	movw	%ax, %gs
	/* Switch back to the kernel stack recorded by do_suspend_lowlevel */
28	movq	saved_rsp, %rsp
29
	/* Restore the callee-saved registers stashed before suspend */
30	movq	saved_rbx, %rbx
31	movq	saved_rdi, %rdi
32	movq	saved_rsi, %rsi
33	movq	saved_rbp, %rbp
34
	/*
	 * Jump to the saved resume address; do_suspend_lowlevel stored
	 * .Lresume_point in saved_rip, so control lands there.
	 */
35	movq	saved_rip, %rax
36	jmp	*%rax
37ENDPROC(wakeup_long64)
38
39bogus_64_magic:
	/*
	 * saved_magic did not match: the resume image is untrustworthy,
	 * so there is nothing sane to return to.  Spin forever.
	 */
40	jmp	bogus_64_magic
41
42ENTRY(do_suspend_lowlevel)
	/*
	 * do_suspend_lowlevel - enter ACPI S3 sleep.
	 *
	 * Saves processor state and all general-purpose registers into
	 * saved_context, records .Lresume_point in saved_rip, then calls
	 * x86_acpi_enter_sleep_state(3).  On wakeup, wakeup_long64 (above)
	 * jumps through saved_rip back to .Lresume_point, which restores
	 * the control registers, flags and GPRs and tail-jumps to
	 * restore_processor_state.
	 */
43	FRAME_BEGIN
	/* NOTE(review): pad presumably keeps %rsp 16-byte aligned for the
	 * calls below — undone at the matching addq before sleeping. */
44	subq	$8, %rsp
45	xorl	%eax, %eax
46	call	save_processor_state
47
	/* Save all GPRs into the pt_regs-style slots of saved_context */
48	movq	$saved_context, %rax
49	movq	%rsp, pt_regs_sp(%rax)
50	movq	%rbp, pt_regs_bp(%rax)
51	movq	%rsi, pt_regs_si(%rax)
52	movq	%rdi, pt_regs_di(%rax)
53	movq	%rbx, pt_regs_bx(%rax)
54	movq	%rcx, pt_regs_cx(%rax)
55	movq	%rdx, pt_regs_dx(%rax)
56	movq	%r8, pt_regs_r8(%rax)
57	movq	%r9, pt_regs_r9(%rax)
58	movq	%r10, pt_regs_r10(%rax)
59	movq	%r11, pt_regs_r11(%rax)
60	movq	%r12, pt_regs_r12(%rax)
61	movq	%r13, pt_regs_r13(%rax)
62	movq	%r14, pt_regs_r14(%rax)
63	movq	%r15, pt_regs_r15(%rax)
	/* Capture RFLAGS via the stack into pt_regs_flags */
64	pushfq
65	popq	pt_regs_flags(%rax)
66
	/* Resume address for wakeup_long64's indirect jump */
67	movq	$.Lresume_point, saved_rip(%rip)
68
	/* Registers wakeup_long64 reloads before reaching .Lresume_point */
69	movq	%rsp, saved_rsp
70	movq	%rbp, saved_rbp
71	movq	%rbx, saved_rbx
72	movq	%rdi, saved_rdi
73	movq	%rsi, saved_rsi
74
	/* Undo the alignment pad; arg0 = 3 selects the S3 sleep state */
75	addq	$8, %rsp
76	movl	$3, %edi
77	xorl	%eax, %eax
78	call	x86_acpi_enter_sleep_state
	/* in case something went wrong, restore the machine status and go on */
79	jmp	.Lresume_point
80
81	.align 4
82.Lresume_point:
	/* We don't restore %rax, it must be 0 anyway */
	/* Restore control registers first: CR4, CR3 (page tables),
	 * CR2, then CR0 — before touching any of the saved GPR state. */
83	movq	$saved_context, %rax
84	movq	saved_context_cr4(%rax), %rbx
85	movq	%rbx, %cr4
86	movq	saved_context_cr3(%rax), %rbx
87	movq	%rbx, %cr3
88	movq	saved_context_cr2(%rax), %rbx
89	movq	%rbx, %cr2
90	movq	saved_context_cr0(%rax), %rbx
91	movq	%rbx, %cr0
	/* Restore RFLAGS, then every GPR saved before suspend */
92	pushq	pt_regs_flags(%rax)
93	popfq
94	movq	pt_regs_sp(%rax), %rsp
95	movq	pt_regs_bp(%rax), %rbp
96	movq	pt_regs_si(%rax), %rsi
97	movq	pt_regs_di(%rax), %rdi
98	movq	pt_regs_bx(%rax), %rbx
99	movq	pt_regs_cx(%rax), %rcx
100	movq	pt_regs_dx(%rax), %rdx
101	movq	pt_regs_r8(%rax), %r8
102	movq	pt_regs_r9(%rax), %r9
103	movq	pt_regs_r10(%rax), %r10
104	movq	pt_regs_r11(%rax), %r11
105	movq	pt_regs_r12(%rax), %r12
106	movq	pt_regs_r13(%rax), %r13
107	movq	pt_regs_r14(%rax), %r14
108	movq	pt_regs_r15(%rax), %r15
109
110#ifdef CONFIG_KASAN
111	/*
112	 * The suspend path may have poisoned some areas deeper in the stack,
113	 * which we now need to unpoison.
114	 */
115	movq	%rsp, %rdi
116	call	kasan_unpoison_task_stack_below
117#endif
118
	/* Return 0; tail-jump so restore_processor_state's ret goes
	 * straight back to our caller. */
119	xorl	%eax, %eax
120	addq	$8, %rsp
121	FRAME_END
122	jmp	restore_processor_state
123ENDPROC(do_suspend_lowlevel)
126
126.data
	/*
	 * Save slots written by do_suspend_lowlevel and consumed by
	 * wakeup_long64 on resume.
	 */
127ENTRY(saved_rbp)	.quad	0
128ENTRY(saved_rsi)	.quad	0
129ENTRY(saved_rdi)	.quad	0
130ENTRY(saved_rbx)	.quad	0
131
	/* Resume target (.Lresume_point) and the kernel stack pointer */
132ENTRY(saved_rip)	.quad	0
133ENTRY(saved_rsp)	.quad	0
134
	/* Checked against 0x123456789abcdef0 by wakeup_long64 */
135ENTRY(saved_magic)	.quad	0
137