// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */
9
10 #define pr_fmt(fmt) "SEV: " fmt
11
12 #include <linux/percpu-defs.h>
13 #include <linux/cc_platform.h>
14 #include <linux/printk.h>
15 #include <linux/mm_types.h>
16 #include <linux/set_memory.h>
17 #include <linux/memblock.h>
18 #include <linux/kernel.h>
19 #include <linux/mm.h>
20 #include <linux/cpumask.h>
21 #include <linux/efi.h>
22 #include <linux/io.h>
23 #include <linux/psp-sev.h>
24 #include <uapi/linux/sev-guest.h>
25
26 #include <asm/init.h>
27 #include <asm/cpu_entry_area.h>
28 #include <asm/stacktrace.h>
29 #include <asm/sev.h>
30 #include <asm/sev-internal.h>
31 #include <asm/insn-eval.h>
32 #include <asm/fpu/xcr.h>
33 #include <asm/processor.h>
34 #include <asm/realmode.h>
35 #include <asm/setup.h>
36 #include <asm/traps.h>
37 #include <asm/svm.h>
38 #include <asm/smp.h>
39 #include <asm/cpu.h>
40 #include <asm/apic.h>
41 #include <asm/cpuid/api.h>
42 #include <asm/cmdline.h>
43
44 /* Include code shared with pre-decompression boot stage */
45 #include "sev-shared.c"
46
47 void
early_set_pages_state(unsigned long vaddr,unsigned long paddr,unsigned long npages,const struct psc_desc * desc)48 early_set_pages_state(unsigned long vaddr, unsigned long paddr,
49 unsigned long npages, const struct psc_desc *desc)
50 {
51 unsigned long paddr_end;
52
53 vaddr = vaddr & PAGE_MASK;
54
55 paddr = paddr & PAGE_MASK;
56 paddr_end = paddr + (npages << PAGE_SHIFT);
57
58 while (paddr < paddr_end) {
59 __page_state_change(vaddr, paddr, desc);
60
61 vaddr += PAGE_SIZE;
62 paddr += PAGE_SIZE;
63 }
64 }
65
early_snp_set_memory_private(unsigned long vaddr,unsigned long paddr,unsigned long npages)66 void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
67 unsigned long npages)
68 {
69 struct psc_desc d = {
70 SNP_PAGE_STATE_PRIVATE,
71 rip_rel_ptr(&boot_svsm_ca_page),
72 boot_svsm_caa_pa
73 };
74
75 /*
76 * This can be invoked in early boot while running identity mapped, so
77 * use an open coded check for SNP instead of using cc_platform_has().
78 * This eliminates worries about jump tables or checking boot_cpu_data
79 * in the cc_platform_has() function.
80 */
81 if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
82 return;
83
84 /*
85 * Ask the hypervisor to mark the memory pages as private in the RMP
86 * table.
87 */
88 early_set_pages_state(vaddr, paddr, npages, &d);
89 }
90
early_snp_set_memory_shared(unsigned long vaddr,unsigned long paddr,unsigned long npages)91 void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
92 unsigned long npages)
93 {
94 struct psc_desc d = {
95 SNP_PAGE_STATE_SHARED,
96 rip_rel_ptr(&boot_svsm_ca_page),
97 boot_svsm_caa_pa
98 };
99
100 /*
101 * This can be invoked in early boot while running identity mapped, so
102 * use an open coded check for SNP instead of using cc_platform_has().
103 * This eliminates worries about jump tables or checking boot_cpu_data
104 * in the cc_platform_has() function.
105 */
106 if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
107 return;
108
109 /* Ask hypervisor to mark the memory pages shared in the RMP table. */
110 early_set_pages_state(vaddr, paddr, npages, &d);
111 }
112
/*
 * Initial set up of SNP relies on information provided by the
 * Confidential Computing blob, which can be passed to the kernel
 * in the following ways, depending on how it is booted:
 *
 * - when booted via the boot/decompress kernel:
 *   - via boot_params
 *
 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
 *   - via a setup_data entry, as defined by the Linux Boot Protocol
 *
 * Scan for the blob in that order.
 */
find_cc_blob(struct boot_params * bp)126 static struct cc_blob_sev_info *__init find_cc_blob(struct boot_params *bp)
127 {
128 struct cc_blob_sev_info *cc_info;
129
130 /* Boot kernel would have passed the CC blob via boot_params. */
131 if (bp->cc_blob_address) {
132 cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
133 goto found_cc_info;
134 }
135
136 /*
137 * If kernel was booted directly, without the use of the
138 * boot/decompression kernel, the CC blob may have been passed via
139 * setup_data instead.
140 */
141 cc_info = find_cc_blob_setup_data(bp);
142 if (!cc_info)
143 return NULL;
144
145 found_cc_info:
146 if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
147 sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
148
149 return cc_info;
150 }
151
svsm_setup(struct cc_blob_sev_info * cc_info)152 static void __init svsm_setup(struct cc_blob_sev_info *cc_info)
153 {
154 struct snp_secrets_page *secrets = (void *)cc_info->secrets_phys;
155 struct svsm_call call = {};
156 u64 pa;
157
158 /*
159 * Record the SVSM Calling Area address (CAA) if the guest is not
160 * running at VMPL0. The CA will be used to communicate with the
161 * SVSM to perform the SVSM services.
162 */
163 if (!svsm_setup_ca(cc_info, rip_rel_ptr(&boot_svsm_ca_page)))
164 return;
165
166 /*
167 * It is very early in the boot and the kernel is running identity
168 * mapped but without having adjusted the pagetables to where the
169 * kernel was loaded (physbase), so the get the CA address using
170 * RIP-relative addressing.
171 */
172 pa = (u64)rip_rel_ptr(&boot_svsm_ca_page);
173
174 /*
175 * Switch over to the boot SVSM CA while the current CA is still 1:1
176 * mapped and thus addressable with VA == PA. There is no GHCB at this
177 * point so use the MSR protocol.
178 *
179 * SVSM_CORE_REMAP_CA call:
180 * RAX = 0 (Protocol=0, CallID=0)
181 * RCX = New CA GPA
182 */
183 call.caa = (struct svsm_ca *)secrets->svsm_caa;
184 call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
185 call.rcx = pa;
186
187 if (svsm_call_msr_protocol(&call))
188 sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL);
189
190 boot_svsm_caa_pa = pa;
191 }
192
snp_init(struct boot_params * bp)193 bool __init snp_init(struct boot_params *bp)
194 {
195 struct cc_blob_sev_info *cc_info;
196
197 if (!bp)
198 return false;
199
200 cc_info = find_cc_blob(bp);
201 if (!cc_info)
202 return false;
203
204 if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE)
205 sev_secrets_pa = cc_info->secrets_phys;
206 else
207 return false;
208
209 setup_cpuid_table(cc_info);
210
211 svsm_setup(cc_info);
212
213 /*
214 * The CC blob will be used later to access the secrets page. Cache
215 * it here like the boot kernel does.
216 */
217 bp->cc_blob_address = (u32)(unsigned long)cc_info;
218
219 return true;
220 }
221