tdx.c (6162b310bc219d18bac970dbd441d7743097d1b9 → abe8dbab8f9f8370c26e7b79b49ed795c1b6b70f)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2023 Intel Corporation.
 *
 * Intel Trusted Domain Extensions (TDX) support
 */

#define pr_fmt(fmt) "virt/tdx: " fmt

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/cpu.h>
#include <linux/spinlock.h>
#include <linux/percpu-defs.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/minmax.h>
#include <linux/sizes.h>
#include <linux/pfn.h>
#include <asm/msr-index.h>
#include <asm/msr.h>
#include <asm/cpufeature.h>
#include <asm/tdx.h>
#include "tdx.h"

static u32 tdx_global_keyid __ro_after_init;
static u32 tdx_guest_keyid_start __ro_after_init;
static u32 tdx_nr_guest_keyids __ro_after_init;

static DEFINE_PER_CPU(bool, tdx_lp_initialized);

static enum tdx_module_status_t tdx_module_status;
static DEFINE_MUTEX(tdx_module_lock);

/* All TDX-usable memory regions. Protected by mem_hotplug_lock. */
static LIST_HEAD(tdx_memlist);

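/*
 * The tdx_memblock definition lives in the local "tdx.h" header and is
 * not shown in this diff. Judging from how the fields are used below,
 * it is presumably a minimal structure along these lines (an
 * illustrative sketch, not copied from tdx.h):
 *
 *	struct tdx_memblock {
 *		struct list_head list;
 *		unsigned long start_pfn;
 *		unsigned long end_pfn;
 *	};
 */
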
typedef void (*sc_err_func_t)(u64 fn, u64 err, struct tdx_module_args *args);

static inline void seamcall_err(u64 fn, u64 err, struct tdx_module_args *args)
{
	pr_err("SEAMCALL (0x%016llx) failed: 0x%016llx\n", fn, err);
}

static inline void seamcall_err_ret(u64 fn, u64 err,

--- 108 unchanged lines hidden ---

		return ret;

	__this_cpu_write(tdx_lp_initialized, true);

	return 0;
}
EXPORT_SYMBOL_GPL(tdx_cpu_enable);

/*
 * Add a memory region as a TDX memory block. The caller must make sure
 * all memory regions are added in address ascending order and don't
 * overlap.
 */
static int add_tdx_memblock(struct list_head *tmb_list, unsigned long start_pfn,
			    unsigned long end_pfn)
{
	struct tdx_memblock *tmb;

	tmb = kmalloc(sizeof(*tmb), GFP_KERNEL);
	if (!tmb)
		return -ENOMEM;

	INIT_LIST_HEAD(&tmb->list);
	tmb->start_pfn = start_pfn;
	tmb->end_pfn = end_pfn;

	/* @tmb_list is protected by mem_hotplug_lock */
	list_add_tail(&tmb->list, tmb_list);
	return 0;
}

static void free_tdx_memlist(struct list_head *tmb_list)
{
	/* @tmb_list is protected by mem_hotplug_lock */
	while (!list_empty(tmb_list)) {
		struct tdx_memblock *tmb = list_first_entry(tmb_list,
				struct tdx_memblock, list);

		list_del(&tmb->list);
		kfree(tmb);
	}
}

/*
 * Ensure that all memblock memory regions are convertible to TDX
 * memory. Once this has been established, stash the memblock
 * ranges off in a secondary structure because memblock is modified
 * in memory hotplug while TDX memory regions are fixed.
 */
static int build_tdx_memlist(struct list_head *tmb_list)
{
	unsigned long start_pfn, end_pfn;
	int i, ret;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		/*
		 * The first 1MB is not reported as TDX convertible memory.
		 * Although the first 1MB is always reserved and won't end up
		 * in the page allocator, it is still in memblock's memory
		 * regions. Skip it manually to exclude it from TDX memory.
		 */
		start_pfn = max(start_pfn, PHYS_PFN(SZ_1M));
		if (start_pfn >= end_pfn)
			continue;

		/*
		 * Add the memory region as TDX memory. Memblock already
		 * guarantees that the regions are in address ascending
		 * order and don't overlap.
		 */
		ret = add_tdx_memblock(tmb_list, start_pfn, end_pfn);
		if (ret)
			goto err;
	}

	return 0;
err:
	free_tdx_memlist(tmb_list);
	return ret;
}

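/*
 * A small worked example of the 1MB clipping in build_tdx_memlist()
 * above, assuming 4KB pages (so PHYS_PFN(SZ_1M) == 0x100):
 *
 *	memblock PFN range [0x000, 0x8000)  ->  TDX memblock [0x100, 0x8000)
 *	memblock PFN range [0x000, 0x0c0)   ->  skipped (entirely below 1MB)
 *	memblock PFN range [0x900, 0xa00)   ->  TDX memblock [0x900, 0xa00)
 *
 * The example ranges are made up for illustration; only the clipping
 * rule comes from the code above.
 */
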
static int init_tdx_module(void)
{
	int ret;

	/*
	 * To keep things simple, assume that all TDX-protected memory
	 * will come from the page allocator. Make sure all pages in the
	 * page allocator are TDX-usable memory.
	 *
	 * Build the list of "TDX-usable" memory regions which cover all
	 * pages in the page allocator to guarantee that. Do it while
	 * holding mem_hotplug_lock read-lock as the memory hotplug code
	 * path reads the @tdx_memlist to reject any new memory.
	 */
	get_online_mems();

	ret = build_tdx_memlist(&tdx_memlist);
	if (ret)
		goto out_put_tdxmem;

	/*
	 * TODO:
	 *
	 *  - Get TDX module "TD Memory Region" (TDMR) global metadata.
	 *  - Construct a list of TDMRs to cover all TDX-usable memory
	 *    regions.
	 *  - Configure the TDMRs and the global KeyID to the TDX module.
	 *  - Configure the global KeyID on all packages.
	 *  - Initialize all TDMRs.
	 *
	 *  Return error before all steps are done.
	 */
	ret = -EINVAL;
out_put_tdxmem:
	/*
	 * @tdx_memlist is written here and read at memory hotplug time.
	 * Lock out memory hotplug code while building it.
	 */
	put_online_mems();
	return ret;
}

static int __tdx_enable(void)
{
	int ret;

	ret = init_tdx_module();
	if (ret) {

--- 70 unchanged lines hidden ---

	_tdx_keyid_start = _nr_mktme_keyids + 1;

	*tdx_keyid_start = _tdx_keyid_start;
	*nr_tdx_keyids = _nr_tdx_keyids;

	return 0;
}

static bool is_tdx_memory(unsigned long start_pfn, unsigned long end_pfn)
{
	struct tdx_memblock *tmb;

	/*
	 * This check assumes that the start_pfn<->end_pfn range does not
	 * cross multiple @tdx_memlist entries. A single memory online
	 * event across multiple memblocks (from which @tdx_memlist
	 * entries are derived at the time of module initialization) is
	 * not possible. This is because memory offline/online is done
	 * at the granularity of 'struct memory_block', and the
	 * hotpluggable memory region (one memblock) must be a multiple
	 * of the memory_block size.
	 */
	list_for_each_entry(tmb, &tdx_memlist, list) {
		if (start_pfn >= tmb->start_pfn && end_pfn <= tmb->end_pfn)
			return true;
	}
	return false;
}

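/*
 * Illustration of the containment check above (made-up PFN values):
 * with a single @tdx_memlist entry covering [0x100, 0x80000), onlining
 * a block spanning [0x40000, 0x48000) is TDX memory, while a block
 * spanning [0x7c000, 0x84000) is not, because it is not fully covered
 * by any one entry.
 */
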
static int tdx_memory_notifier(struct notifier_block *nb, unsigned long action,
			       void *v)
{
	struct memory_notify *mn = v;

	if (action != MEM_GOING_ONLINE)
		return NOTIFY_OK;

	/*
	 * Empty list means TDX isn't enabled. Allow any memory
	 * to go online.
	 */
	if (list_empty(&tdx_memlist))
		return NOTIFY_OK;

	/*
	 * The TDX memory configuration is static and cannot be
	 * changed. Reject onlining any memory which is outside of
	 * the static configuration, whether it supports TDX or not.
	 */
	if (is_tdx_memory(mn->start_pfn, mn->start_pfn + mn->nr_pages))
		return NOTIFY_OK;

	return NOTIFY_BAD;
}

static struct notifier_block tdx_memory_nb = {
	.notifier_call = tdx_memory_notifier,
};

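/*
 * Note on the notifier above: returning NOTIFY_BAD for MEM_GOING_ONLINE
 * makes the memory online request fail (the notifier core converts
 * NOTIFY_BAD into an error returned to the onlining path), so once
 * @tdx_memlist is populated, memory outside the recorded regions can
 * never reach the page allocator.
 */
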
void __init tdx_init(void)
{
	u32 tdx_keyid_start, nr_tdx_keyids;
	int err;

	err = record_keyid_partitioning(&tdx_keyid_start, &nr_tdx_keyids);
	if (err)
		return;

--- 7 unchanged lines hidden ---

	 * any left for TDX guests thus there's no point to enable TDX
	 * at all.
	 */
	if (nr_tdx_keyids < 2) {
		pr_err("initialization failed: too few private KeyIDs available.\n");
		return;
	}

	err = register_memory_notifier(&tdx_memory_nb);
	if (err) {
		pr_err("initialization failed: register_memory_notifier() failed (%d)\n",
		       err);
		return;
	}

	/*
	 * Just use the first TDX KeyID as the 'global KeyID' and
	 * leave the rest for TDX guests.
	 */
	tdx_global_keyid = tdx_keyid_start;
	tdx_guest_keyid_start = tdx_keyid_start + 1;
	tdx_nr_guest_keyids = nr_tdx_keyids - 1;

	setup_force_cpu_cap(X86_FEATURE_TDX_HOST_PLATFORM);
}
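
/*
 * A rough caller-side sketch of how this API is meant to be used
 * (illustrative only -- tdx_cpu_enable() is exported above, and
 * tdx_enable()/__tdx_enable() sit partly in the lines hidden from this
 * diff; the exact calling convention is defined by <asm/tdx.h> and the
 * eventual KVM user, not by this file):
 *
 *	// On each CPU that may issue SEAMCALLs, e.g. from a CPU hotplug
 *	// callback or an IPI handler:
 *	ret = tdx_cpu_enable();
 *	if (ret)
 *		goto err;
 *
 *	// Then, once, to bring up the TDX module (serialized internally
 *	// by tdx_module_lock):
 *	ret = tdx_enable();
 */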