// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"SEV: " fmt

#include <linux/bug.h>
#include <linux/kernel.h>

#include <asm/cpu_entry_area.h>
#include <asm/msr.h>
#include <asm/ptrace.h>
#include <asm/sev.h>

#include "internal.h"

static __always_inline bool on_vc_stack(struct pt_regs *regs)
{
	unsigned long sp = regs->sp;

	/* User-mode RSP is not trusted */
	if (user_mode(regs))
		return false;

	/* SYSCALL gap still has user-mode RSP */
	if (ip_within_syscall_gap(regs))
		return false;

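	/* Check RSP against this CPU's #VC IST stack boundaries */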
	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
}

/*
 * This function handles the case when an NMI is raised in the #VC
 * exception handler entry code, before the #VC handler has switched off
 * its IST stack. In this case, the IST entry for #VC must be adjusted,
 * so that any nested #VC exception will not overwrite the stack
 * contents of the interrupted #VC handler.
 *
 * The IST entry is adjusted unconditionally so that it can also be
 * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
 * nested sev_es_ist_exit() call may adjust back the IST entry too
 * early.
 *
 * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
 * on the NMI IST stack, as they are only called from NMI handling code
 * right now.
 */
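/*
 * Illustrative sketch of the adjustment (not to scale; the saved slot
 * occupies the 8 bytes just below regs->sp):
 *
 *	(high addresses)
 *	old IST[VC] --->  +----------------------+  top of #VC IST stack
 *	                  | interrupted #VC data |
 *	regs->sp ------>  +----------------------+
 *	                  | saved old IST value  |
 *	new IST[VC] --->  +----------------------+
 *	                  | room for nested #VC  |
 *	(low addresses)
 */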
void noinstr __sev_es_ist_enter(struct pt_regs *regs)
{
	unsigned long old_ist, new_ist;

	/* Read old IST entry */
	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	/*
	 * If NMI happened while on the #VC IST stack, set the new IST
	 * value below regs->sp, so that the interrupted stack frame is
	 * not overwritten by subsequent #VC exceptions.
	 */
	if (on_vc_stack(regs))
		new_ist = regs->sp;

	/*
	 * Reserve additional 8 bytes and store old IST value so this
	 * adjustment can be unrolled in __sev_es_ist_exit().
	 */
	new_ist -= sizeof(old_ist);
	*(unsigned long *)new_ist = old_ist;

	/* Set new IST entry */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}

void noinstr __sev_es_ist_exit(void)
{
	unsigned long ist;

	/* Read IST entry */
	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

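	/*
	 * If the IST entry still points at the stack top, no
	 * __sev_es_ist_enter() adjustment is in effect and there is no
	 * saved value to pop - bail out instead.
	 */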
	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
		return;

	/* Read back old IST entry and write it to the TSS */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}

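/*
 * Tell the hypervisor that the guest has finished handling an NMI so
 * that further NMIs can be injected. Under SEV-ES the hypervisor
 * cannot watch for the IRET that normally ends NMI handling, so the
 * guest signals completion explicitly via this VMGEXIT.
 */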
void noinstr __sev_es_nmi_complete(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

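	/* Publish the GHCB's physical address and exit to the hypervisor */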
	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
	VMGEXIT();

	__sev_put_ghcb(&state);
}

/*
 * Nothing shall interrupt this code path while holding the per-CPU
 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 *
 * Callers must disable local interrupts around it.
 */
noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	if (!sev_cfg.ghcbs_initialized)
		return boot_ghcb;

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (unlikely(data->ghcb_active)) {
		/* GHCB is already in use - save its contents */

		if (unlikely(data->backup_ghcb_active)) {
			/*
			 * Backup-GHCB is also already in use. There is no way
			 * to continue here so just kill the machine. To make
			 * panic() work, mark GHCBs inactive so that messages
			 * can be printed out.
			 */
			data->ghcb_active = false;
			data->backup_ghcb_active = false;

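			/*
			 * panic() is instrumentable, so open an
			 * instrumentation section in this noinstr
			 * function before calling it.
			 */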
			instrumentation_begin();
			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
			instrumentation_end();
		}

		/* Mark backup_ghcb active before writing to it */
		data->backup_ghcb_active = true;

		state->ghcb = &data->backup_ghcb;

		/* Backup GHCB content */
		*state->ghcb = *ghcb;
	} else {
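		/*
		 * Primary GHCB was free - a NULL state->ghcb tells
		 * __sev_put_ghcb() that no backup restore is needed.
		 */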
		state->ghcb = NULL;
		data->ghcb_active = true;
	}

	return ghcb;
}

noinstr void __sev_put_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	if (!sev_cfg.ghcbs_initialized)
		return;

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (state->ghcb) {
		/* Restore GHCB from Backup */
		*ghcb = *state->ghcb;
		data->backup_ghcb_active = false;
		state->ghcb = NULL;
	} else {
		/*
		 * Invalidate the GHCB so a VMGEXIT instruction issued
		 * from userspace won't appear to be valid.
		 */
		vc_ghcb_invalidate(ghcb);
		data->ghcb_active = false;
	}
}