1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * AMD SVM-SEV support
6 *
7 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
8 */
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11 #include <linux/kvm_types.h>
12 #include <linux/kvm_host.h>
13 #include <linux/kernel.h>
14 #include <linux/highmem.h>
15 #include <linux/psp.h>
16 #include <linux/psp-sev.h>
17 #include <linux/pagemap.h>
18 #include <linux/swap.h>
19 #include <linux/misc_cgroup.h>
20 #include <linux/processor.h>
21 #include <linux/trace_events.h>
22 #include <uapi/linux/sev-guest.h>
23
24 #include <asm/pkru.h>
25 #include <asm/trapnr.h>
26 #include <asm/fpu/xcr.h>
27 #include <asm/fpu/xstate.h>
28 #include <asm/debugreg.h>
29 #include <asm/sev.h>
30
31 #include "mmu.h"
32 #include "x86.h"
33 #include "svm.h"
34 #include "svm_ops.h"
35 #include "cpuid.h"
36 #include "trace.h"
37
38 #define GHCB_VERSION_MAX 2ULL
39 #define GHCB_VERSION_DEFAULT 2ULL
40 #define GHCB_VERSION_MIN 1ULL
41
42 #define GHCB_HV_FT_SUPPORTED (GHCB_HV_FT_SNP | GHCB_HV_FT_SNP_AP_CREATION)
43
44 /* enable/disable SEV support */
45 static bool sev_enabled = true;
46 module_param_named(sev, sev_enabled, bool, 0444);
47
48 /* enable/disable SEV-ES support */
49 static bool sev_es_enabled = true;
50 module_param_named(sev_es, sev_es_enabled, bool, 0444);
51
52 /* enable/disable SEV-SNP support */
53 static bool sev_snp_enabled = true;
54 module_param_named(sev_snp, sev_snp_enabled, bool, 0444);
55
56 /* enable/disable SEV-ES DebugSwap support */
57 static bool sev_es_debug_swap_enabled = true;
58 module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
59 static u64 sev_supported_vmsa_features;
60
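/*
 * Track how a vCPU entered an AP reset hold (via a GHCB NAE event or via the
 * GHCB MSR protocol) so the subsequent wakeup can be signalled the same way.
 */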
61 #define AP_RESET_HOLD_NONE 0
62 #define AP_RESET_HOLD_NAE_EVENT 1
63 #define AP_RESET_HOLD_MSR_PROTO 2
64
65 /* As defined by SEV-SNP Firmware ABI, under "Guest Policy". */
66 #define SNP_POLICY_MASK_API_MINOR GENMASK_ULL(7, 0)
67 #define SNP_POLICY_MASK_API_MAJOR GENMASK_ULL(15, 8)
68 #define SNP_POLICY_MASK_SMT BIT_ULL(16)
69 #define SNP_POLICY_MASK_RSVD_MBO BIT_ULL(17)
70 #define SNP_POLICY_MASK_DEBUG BIT_ULL(19)
71 #define SNP_POLICY_MASK_SINGLE_SOCKET BIT_ULL(20)
72
73 #define SNP_POLICY_MASK_VALID (SNP_POLICY_MASK_API_MINOR | \
74 SNP_POLICY_MASK_API_MAJOR | \
75 SNP_POLICY_MASK_SMT | \
76 SNP_POLICY_MASK_RSVD_MBO | \
77 SNP_POLICY_MASK_DEBUG | \
78 SNP_POLICY_MASK_SINGLE_SOCKET)
79
80 #define INITIAL_VMSA_GPA 0xFFFFFFFFF000
81
82 static u8 sev_enc_bit;
83 static DECLARE_RWSEM(sev_deactivate_lock);
84 static DEFINE_MUTEX(sev_bitmap_lock);
85 unsigned int max_sev_asid;
86 static unsigned int min_sev_asid;
87 static unsigned long sev_me_mask;
88 static unsigned int nr_asids;
89 static unsigned long *sev_asid_bitmap;
90 static unsigned long *sev_reclaim_asid_bitmap;
91
92 static int snp_decommission_context(struct kvm *kvm);
93
94 struct enc_region {
95 struct list_head list;
96 unsigned long npages;
97 struct page **pages;
98 unsigned long uaddr;
99 unsigned long size;
100 };
101
102 /* Called with the sev_bitmap_lock held, or on shutdown */
103 static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
104 {
105 int ret, error = 0;
106 unsigned int asid;
107
108 /* Check if there are any ASIDs to reclaim before performing a flush */
109 asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
110 if (asid > max_asid)
111 return -EBUSY;
112
113 /*
114 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
115 * so it must be guarded.
116 */
117 down_write(&sev_deactivate_lock);
118
119 wbinvd_on_all_cpus();
120
121 if (sev_snp_enabled)
122 ret = sev_do_cmd(SEV_CMD_SNP_DF_FLUSH, NULL, &error);
123 else
124 ret = sev_guest_df_flush(&error);
125
126 up_write(&sev_deactivate_lock);
127
128 if (ret)
129 pr_err("SEV%s: DF_FLUSH failed, ret=%d, error=%#x\n",
130 sev_snp_enabled ? "-SNP" : "", ret, error);
131
132 return ret;
133 }
134
135 static inline bool is_mirroring_enc_context(struct kvm *kvm)
136 {
137 return !!to_kvm_sev_info(kvm)->enc_context_owner;
138 }
139
140 static bool sev_vcpu_has_debug_swap(struct vcpu_svm *svm)
141 {
142 struct kvm_vcpu *vcpu = &svm->vcpu;
143 struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
144
145 return sev->vmsa_features & SVM_SEV_FEAT_DEBUG_SWAP;
146 }
147
148 /* Must be called with the sev_bitmap_lock held */
149 static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
150 {
151 if (sev_flush_asids(min_asid, max_asid))
152 return false;
153
154 /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
155 bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
156 nr_asids);
157 bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);
158
159 return true;
160 }
161
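/*
 * Charge or uncharge a single SEV or SEV-ES ASID against the VM's misc
 * cgroup.  SEV and SEV-ES ASIDs are accounted as separate misc cgroup
 * resources because they are allocated from disjoint ranges.
 */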
162 static int sev_misc_cg_try_charge(struct kvm_sev_info *sev)
163 {
164 enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
165 return misc_cg_try_charge(type, sev->misc_cg, 1);
166 }
167
168 static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
169 {
170 enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
171 misc_cg_uncharge(type, sev->misc_cg, 1);
172 }
173
174 static int sev_asid_new(struct kvm_sev_info *sev)
175 {
176 /*
177 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
178 * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
179 * Note: min ASID can end up larger than the max if basic SEV support is
180 * effectively disabled by disallowing use of ASIDs for SEV guests.
181 */
182 unsigned int min_asid = sev->es_active ? 1 : min_sev_asid;
183 unsigned int max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
184 unsigned int asid;
185 bool retry = true;
186 int ret;
187
188 if (min_asid > max_asid)
189 return -ENOTTY;
190
191 WARN_ON(sev->misc_cg);
192 sev->misc_cg = get_current_misc_cg();
193 ret = sev_misc_cg_try_charge(sev);
194 if (ret) {
195 put_misc_cg(sev->misc_cg);
196 sev->misc_cg = NULL;
197 return ret;
198 }
199
200 mutex_lock(&sev_bitmap_lock);
201
202 again:
203 asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
204 if (asid > max_asid) {
205 if (retry && __sev_recycle_asids(min_asid, max_asid)) {
206 retry = false;
207 goto again;
208 }
209 mutex_unlock(&sev_bitmap_lock);
210 ret = -EBUSY;
211 goto e_uncharge;
212 }
213
214 __set_bit(asid, sev_asid_bitmap);
215
216 mutex_unlock(&sev_bitmap_lock);
217
218 sev->asid = asid;
219 return 0;
220 e_uncharge:
221 sev_misc_cg_uncharge(sev);
222 put_misc_cg(sev->misc_cg);
223 sev->misc_cg = NULL;
224 return ret;
225 }
226
227 static unsigned int sev_get_asid(struct kvm *kvm)
228 {
229 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
230
231 return sev->asid;
232 }
233
234 static void sev_asid_free(struct kvm_sev_info *sev)
235 {
236 struct svm_cpu_data *sd;
237 int cpu;
238
239 mutex_lock(&sev_bitmap_lock);
240
241 __set_bit(sev->asid, sev_reclaim_asid_bitmap);
242
243 for_each_possible_cpu(cpu) {
244 sd = per_cpu_ptr(&svm_data, cpu);
245 sd->sev_vmcbs[sev->asid] = NULL;
246 }
247
248 mutex_unlock(&sev_bitmap_lock);
249
250 sev_misc_cg_uncharge(sev);
251 put_misc_cg(sev->misc_cg);
252 sev->misc_cg = NULL;
253 }
254
255 static void sev_decommission(unsigned int handle)
256 {
257 struct sev_data_decommission decommission;
258
259 if (!handle)
260 return;
261
262 decommission.handle = handle;
263 sev_guest_decommission(&decommission, NULL);
264 }
265
266 /*
267 * Transition a page to hypervisor-owned/shared state in the RMP table. This
268 * should not fail under normal conditions, but leak the page should that
269 * happen since it will no longer be usable by the host due to RMP protections.
270 */
271 static int kvm_rmp_make_shared(struct kvm *kvm, u64 pfn, enum pg_level level)
272 {
273 if (KVM_BUG_ON(rmp_make_shared(pfn, level), kvm)) {
274 snp_leak_pages(pfn, page_level_size(level) >> PAGE_SHIFT);
275 return -EIO;
276 }
277
278 return 0;
279 }
280
281 /*
282 * Certain page-states, such as Pre-Guest and Firmware pages (as documented
283 * in Chapter 5 of the SEV-SNP Firmware ABI under "Page States") cannot be
284 * directly transitioned back to normal/hypervisor-owned state via RMPUPDATE
285 * unless they are reclaimed first.
286 *
287 * Until they are reclaimed and subsequently transitioned via RMPUPDATE, they
288 * might not be usable by the host due to being set as immutable or still
289 * being associated with a guest ASID.
290 *
291 * Bug the VM and leak the page if reclaim fails, or if the RMP entry can't be
292 * converted back to shared, as the page is no longer usable due to RMP
293 * protections, and it's infeasible for the guest to continue on.
294 */
295 static int snp_page_reclaim(struct kvm *kvm, u64 pfn)
296 {
297 struct sev_data_snp_page_reclaim data = {0};
298 int fw_err, rc;
299
300 data.paddr = __sme_set(pfn << PAGE_SHIFT);
301 rc = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &fw_err);
302 if (KVM_BUG(rc, kvm, "Failed to reclaim PFN %llx, rc %d fw_err %d", pfn, rc, fw_err)) {
303 snp_leak_pages(pfn, 1);
304 return -EIO;
305 }
306
307 if (kvm_rmp_make_shared(kvm, pfn, PG_LEVEL_4K))
308 return -EIO;
309
310 return rc;
311 }
312
313 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
314 {
315 struct sev_data_deactivate deactivate;
316
317 if (!handle)
318 return;
319
320 deactivate.handle = handle;
321
322 /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
323 down_read(&sev_deactivate_lock);
324 sev_guest_deactivate(&deactivate, NULL);
325 up_read(&sev_deactivate_lock);
326
327 sev_decommission(handle);
328 }
329
330 /*
331 * This sets up bounce buffers/firmware pages to handle SNP Guest Request
332 * messages (e.g. attestation requests). See "SNP Guest Request" in the GHCB
333 * 2.0 specification for more details.
334 *
335 * Technically, when an SNP Guest Request is issued, the guest will provide its
336 * own request/response pages, which could in theory be passed along directly
337 * to firmware rather than using bounce pages. However, these pages would need
338 * special care:
339 *
340 * - Both pages are from shared guest memory, so they need to be protected
341 * from migration/etc. occurring while firmware reads/writes to them. At a
342 * minimum, this requires elevating the ref counts and potentially needing
343 * an explicit pinning of the memory. This places additional restrictions
344 * on what type of memory backends userspace can use for shared guest
345 * memory since there is some reliance on using refcounted pages.
346 *
347 * - The response page needs to be switched to Firmware-owned[1] state
348 * before the firmware can write to it, which can lead to potential
349 * host RMP #PFs if the guest is misbehaved and hands the host a
350 * guest page that KVM might write to for other reasons (e.g. virtio
351 * buffers/etc.).
352 *
353 * Both of these issues can be avoided completely by using separately-allocated
354 * bounce pages for both the request/response pages and passing those to
355 * firmware instead. So that's what is being set up here.
356 *
357 * Guest requests rely on message sequence numbers to ensure requests are
358 * issued to firmware in the order the guest issues them, so concurrent guest
359 * requests generally shouldn't happen. But a misbehaved guest could issue
360 * concurrent guest requests in theory, so a mutex is used to serialize
361 * access to the bounce buffers.
362 *
363 * [1] See the "Page States" section of the SEV-SNP Firmware ABI for more
364 * details on Firmware-owned pages, along with "RMP and VMPL Access Checks"
365 * in the APM for details on the related RMP restrictions.
366 */
367 static int snp_guest_req_init(struct kvm *kvm)
368 {
369 struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
370 struct page *req_page;
371
372 req_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
373 if (!req_page)
374 return -ENOMEM;
375
376 sev->guest_resp_buf = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
377 if (!sev->guest_resp_buf) {
378 __free_page(req_page);
379 return -EIO;
380 }
381
382 sev->guest_req_buf = page_address(req_page);
383 mutex_init(&sev->guest_req_mutex);
384
385 return 0;
386 }
387
388 static void snp_guest_req_cleanup(struct kvm *kvm)
389 {
390 struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
391
392 if (sev->guest_resp_buf)
393 snp_free_firmware_page(sev->guest_resp_buf);
394
395 if (sev->guest_req_buf)
396 __free_page(virt_to_page(sev->guest_req_buf));
397
398 sev->guest_req_buf = NULL;
399 sev->guest_resp_buf = NULL;
400 }
401
402 static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
403 struct kvm_sev_init *data,
404 unsigned long vm_type)
405 {
406 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
407 struct sev_platform_init_args init_args = {0};
408 bool es_active = vm_type != KVM_X86_SEV_VM;
409 u64 valid_vmsa_features = es_active ? sev_supported_vmsa_features : 0;
410 int ret;
411
412 if (kvm->created_vcpus)
413 return -EINVAL;
414
415 if (data->flags)
416 return -EINVAL;
417
418 if (data->vmsa_features & ~valid_vmsa_features)
419 return -EINVAL;
420
421 if (data->ghcb_version > GHCB_VERSION_MAX || (!es_active && data->ghcb_version))
422 return -EINVAL;
423
424 if (unlikely(sev->active))
425 return -EINVAL;
426
427 sev->active = true;
428 sev->es_active = es_active;
429 sev->vmsa_features = data->vmsa_features;
430 sev->ghcb_version = data->ghcb_version;
431
432 /*
433 * Currently KVM supports the full range of mandatory features defined
434 * by version 2 of the GHCB protocol, so default to that for SEV-ES
435 * guests created via KVM_SEV_INIT2.
436 */
437 if (sev->es_active && !sev->ghcb_version)
438 sev->ghcb_version = GHCB_VERSION_DEFAULT;
439
440 if (vm_type == KVM_X86_SNP_VM)
441 sev->vmsa_features |= SVM_SEV_FEAT_SNP_ACTIVE;
442
443 ret = sev_asid_new(sev);
444 if (ret)
445 goto e_no_asid;
446
447 init_args.probe = false;
448 ret = sev_platform_init(&init_args);
449 if (ret)
450 goto e_free;
451
452 /* This needs to happen after SEV/SNP firmware initialization. */
453 if (vm_type == KVM_X86_SNP_VM) {
454 ret = snp_guest_req_init(kvm);
455 if (ret)
456 goto e_free;
457 }
458
459 INIT_LIST_HEAD(&sev->regions_list);
460 INIT_LIST_HEAD(&sev->mirror_vms);
461 sev->need_init = false;
462
463 kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_SEV);
464
465 return 0;
466
467 e_free:
468 argp->error = init_args.error;
469 sev_asid_free(sev);
470 sev->asid = 0;
471 e_no_asid:
472 sev->vmsa_features = 0;
473 sev->es_active = false;
474 sev->active = false;
475 return ret;
476 }
477
478 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
479 {
480 struct kvm_sev_init data = {
481 .vmsa_features = 0,
482 .ghcb_version = 0,
483 };
484 unsigned long vm_type;
485
486 if (kvm->arch.vm_type != KVM_X86_DEFAULT_VM)
487 return -EINVAL;
488
489 vm_type = (argp->id == KVM_SEV_INIT ? KVM_X86_SEV_VM : KVM_X86_SEV_ES_VM);
490
491 /*
492 * KVM_SEV_ES_INIT has been deprecated by KVM_SEV_INIT2, so it will
493 * continue to only ever support the minimal GHCB protocol version.
494 */
495 if (vm_type == KVM_X86_SEV_ES_VM)
496 data.ghcb_version = GHCB_VERSION_MIN;
497
498 return __sev_guest_init(kvm, argp, &data, vm_type);
499 }
500
501 static int sev_guest_init2(struct kvm *kvm, struct kvm_sev_cmd *argp)
502 {
503 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
504 struct kvm_sev_init data;
505
506 if (!sev->need_init)
507 return -EINVAL;
508
509 if (kvm->arch.vm_type != KVM_X86_SEV_VM &&
510 kvm->arch.vm_type != KVM_X86_SEV_ES_VM &&
511 kvm->arch.vm_type != KVM_X86_SNP_VM)
512 return -EINVAL;
513
514 if (copy_from_user(&data, u64_to_user_ptr(argp->data), sizeof(data)))
515 return -EFAULT;
516
517 return __sev_guest_init(kvm, argp, &data, kvm->arch.vm_type);
518 }
519
520 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
521 {
522 unsigned int asid = sev_get_asid(kvm);
523 struct sev_data_activate activate;
524 int ret;
525
526 /* activate ASID on the given handle */
527 activate.handle = handle;
528 activate.asid = asid;
529 ret = sev_guest_activate(&activate, error);
530
531 return ret;
532 }
533
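/*
 * Issue a SEV firmware command on behalf of userspace.  @fd is expected to
 * refer to the SEV device node opened by the VMM and is validated by
 * sev_issue_cmd_external_user() before the command is forwarded.
 */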
534 static int __sev_issue_cmd(int fd, int id, void *data, int *error)
535 {
536 struct fd f;
537 int ret;
538
539 f = fdget(fd);
540 if (!fd_file(f))
541 return -EBADF;
542
543 ret = sev_issue_cmd_external_user(fd_file(f), id, data, error);
544
545 fdput(f);
546 return ret;
547 }
548
549 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
550 {
551 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
552
553 return __sev_issue_cmd(sev->fd, id, data, error);
554 }
555
556 static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
557 {
558 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
559 struct sev_data_launch_start start;
560 struct kvm_sev_launch_start params;
561 void *dh_blob, *session_blob;
562 int *error = &argp->error;
563 int ret;
564
565 if (!sev_guest(kvm))
566 return -ENOTTY;
567
568 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
569 return -EFAULT;
570
571 memset(&start, 0, sizeof(start));
572
573 dh_blob = NULL;
574 if (params.dh_uaddr) {
575 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
576 if (IS_ERR(dh_blob))
577 return PTR_ERR(dh_blob);
578
579 start.dh_cert_address = __sme_set(__pa(dh_blob));
580 start.dh_cert_len = params.dh_len;
581 }
582
583 session_blob = NULL;
584 if (params.session_uaddr) {
585 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
586 if (IS_ERR(session_blob)) {
587 ret = PTR_ERR(session_blob);
588 goto e_free_dh;
589 }
590
591 start.session_address = __sme_set(__pa(session_blob));
592 start.session_len = params.session_len;
593 }
594
595 start.handle = params.handle;
596 start.policy = params.policy;
597
598 /* create memory encryption context */
599 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
600 if (ret)
601 goto e_free_session;
602
603 /* Bind ASID to this guest */
604 ret = sev_bind_asid(kvm, start.handle, error);
605 if (ret) {
606 sev_decommission(start.handle);
607 goto e_free_session;
608 }
609
610 /* return handle to userspace */
611 params.handle = start.handle;
612 if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params))) {
613 sev_unbind_asid(kvm, start.handle);
614 ret = -EFAULT;
615 goto e_free_session;
616 }
617
618 sev->handle = start.handle;
619 sev->fd = argp->sev_fd;
620
621 e_free_session:
622 kfree(session_blob);
623 e_free_dh:
624 kfree(dh_blob);
625 return ret;
626 }
627
628 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
629 unsigned long ulen, unsigned long *n,
630 int write)
631 {
632 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
633 unsigned long npages, size;
634 int npinned;
635 unsigned long locked, lock_limit;
636 struct page **pages;
637 unsigned long first, last;
638 int ret;
639
640 lockdep_assert_held(&kvm->lock);
641
642 if (ulen == 0 || uaddr + ulen < uaddr)
643 return ERR_PTR(-EINVAL);
644
645 /* Calculate number of pages. */
646 first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
647 last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
648 npages = (last - first + 1);
649
650 locked = sev->pages_locked + npages;
651 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
652 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
653 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
654 return ERR_PTR(-ENOMEM);
655 }
656
657 if (WARN_ON_ONCE(npages > INT_MAX))
658 return ERR_PTR(-EINVAL);
659
660 /* Avoid using vmalloc for smaller buffers. */
661 size = npages * sizeof(struct page *);
662 if (size > PAGE_SIZE)
663 pages = __vmalloc(size, GFP_KERNEL_ACCOUNT);
664 else
665 pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
666
667 if (!pages)
668 return ERR_PTR(-ENOMEM);
669
670 /* Pin the user virtual address. */
671 npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
672 if (npinned != npages) {
673 pr_err("SEV: Failure locking %lu pages.\n", npages);
674 ret = -ENOMEM;
675 goto err;
676 }
677
678 *n = npages;
679 sev->pages_locked = locked;
680
681 return pages;
682
683 err:
684 if (npinned > 0)
685 unpin_user_pages(pages, npinned);
686
687 kvfree(pages);
688 return ERR_PTR(ret);
689 }
690
691 static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
692 unsigned long npages)
693 {
694 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
695
696 unpin_user_pages(pages, npages);
697 kvfree(pages);
698 sev->pages_locked -= npages;
699 }
700
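/*
 * Flush the given pages from the CPU caches.  The flush is skipped on CPUs
 * with X86_FEATURE_SME_COHERENT, where hardware keeps caches coherent across
 * encrypted and unencrypted mappings of the same physical memory.
 */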
701 static void sev_clflush_pages(struct page *pages[], unsigned long npages)
702 {
703 uint8_t *page_virtual;
704 unsigned long i;
705
706 if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
707 pages == NULL)
708 return;
709
710 for (i = 0; i < npages; i++) {
711 page_virtual = kmap_local_page(pages[i]);
712 clflush_cache_range(page_virtual, PAGE_SIZE);
713 kunmap_local(page_virtual);
714 cond_resched();
715 }
716 }
717
718 static unsigned long get_num_contig_pages(unsigned long idx,
719 struct page **inpages, unsigned long npages)
720 {
721 unsigned long paddr, next_paddr;
722 unsigned long i = idx + 1, pages = 1;
723
724 /* find the number of contiguous pages starting from idx */
725 paddr = __sme_page_pa(inpages[idx]);
726 while (i < npages) {
727 next_paddr = __sme_page_pa(inpages[i++]);
728 if ((paddr + PAGE_SIZE) == next_paddr) {
729 pages++;
730 paddr = next_paddr;
731 continue;
732 }
733 break;
734 }
735
736 return pages;
737 }
738
739 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
740 {
741 unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
742 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
743 struct kvm_sev_launch_update_data params;
744 struct sev_data_launch_update_data data;
745 struct page **inpages;
746 int ret;
747
748 if (!sev_guest(kvm))
749 return -ENOTTY;
750
751 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
752 return -EFAULT;
753
754 vaddr = params.uaddr;
755 size = params.len;
756 vaddr_end = vaddr + size;
757
758 /* Lock the user memory. */
759 inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
760 if (IS_ERR(inpages))
761 return PTR_ERR(inpages);
762
763 /*
764 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
765 * place; the cache may contain the data that was written unencrypted.
766 */
767 sev_clflush_pages(inpages, npages);
768
769 data.reserved = 0;
770 data.handle = sev->handle;
771
772 for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
773 int offset, len;
774
775 /*
776 * If the user buffer is not page-aligned, calculate the offset
777 * within the page.
778 */
779 offset = vaddr & (PAGE_SIZE - 1);
780
781 /* Calculate the number of pages that can be encrypted in one go. */
782 pages = get_num_contig_pages(i, inpages, npages);
783
784 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
785
786 data.len = len;
787 data.address = __sme_page_pa(inpages[i]) + offset;
788 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
789 if (ret)
790 goto e_unpin;
791
792 size -= len;
793 next_vaddr = vaddr + len;
794 }
795
796 e_unpin:
797 /* content of memory is updated, mark pages dirty */
798 for (i = 0; i < npages; i++) {
799 set_page_dirty_lock(inpages[i]);
800 mark_page_accessed(inpages[i]);
801 }
802 /* unlock the user pages */
803 sev_unpin_memory(kvm, inpages, npages);
804 return ret;
805 }
806
807 static int sev_es_sync_vmsa(struct vcpu_svm *svm)
808 {
809 struct kvm_vcpu *vcpu = &svm->vcpu;
810 struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
811 struct sev_es_save_area *save = svm->sev_es.vmsa;
812 struct xregs_state *xsave;
813 const u8 *s;
814 u8 *d;
815 int i;
816
817 /* Check some debug related fields before encrypting the VMSA */
818 if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1))
819 return -EINVAL;
820
821 /*
822 * SEV-ES will use a VMSA that is pointed to by the VMCB, not
823 * the traditional VMSA that is part of the VMCB. Copy the
824 * traditional VMSA as it has been built so far (in prep
825 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
826 */
827 memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save));
828
829 /* Sync registers */
830 save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
831 save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
832 save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
833 save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
834 save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
835 save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
836 save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
837 save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
838 #ifdef CONFIG_X86_64
839 save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8];
840 save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9];
841 save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
842 save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
843 save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
844 save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
845 save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
846 save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
847 #endif
848 save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];
849
850 /* Sync some non-GPR registers before encrypting */
851 save->xcr0 = svm->vcpu.arch.xcr0;
852 save->pkru = svm->vcpu.arch.pkru;
853 save->xss = svm->vcpu.arch.ia32_xss;
854 save->dr6 = svm->vcpu.arch.dr6;
855
856 save->sev_features = sev->vmsa_features;
857
858 /*
859 * Skip FPU and AVX setup with KVM_SEV_ES_INIT to avoid
860 * breaking older measurements.
861 */
862 if (vcpu->kvm->arch.vm_type != KVM_X86_DEFAULT_VM) {
863 xsave = &vcpu->arch.guest_fpu.fpstate->regs.xsave;
864 save->x87_dp = xsave->i387.rdp;
865 save->mxcsr = xsave->i387.mxcsr;
866 save->x87_ftw = xsave->i387.twd;
867 save->x87_fsw = xsave->i387.swd;
868 save->x87_fcw = xsave->i387.cwd;
869 save->x87_fop = xsave->i387.fop;
870 save->x87_ds = 0;
871 save->x87_cs = 0;
872 save->x87_rip = xsave->i387.rip;
873
874 for (i = 0; i < 8; i++) {
875 /*
876 * The format of the x87 save area is undocumented and
877 * definitely not what you would expect. It consists of
878 * an 8*8 bytes area with bytes 0-7, and an 8*2 bytes
879 * area with bytes 8-9 of each register.
880 */
881 d = save->fpreg_x87 + i * 8;
882 s = ((u8 *)xsave->i387.st_space) + i * 16;
883 memcpy(d, s, 8);
884 save->fpreg_x87[64 + i * 2] = s[8];
885 save->fpreg_x87[64 + i * 2 + 1] = s[9];
886 }
887 memcpy(save->fpreg_xmm, xsave->i387.xmm_space, 256);
888
889 s = get_xsave_addr(xsave, XFEATURE_YMM);
890 if (s)
891 memcpy(save->fpreg_ymm, s, 256);
892 else
893 memset(save->fpreg_ymm, 0, 256);
894 }
895
896 pr_debug("Virtual Machine Save Area (VMSA):\n");
897 print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);
898
899 return 0;
900 }
901
902 static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
903 int *error)
904 {
905 struct sev_data_launch_update_vmsa vmsa;
906 struct vcpu_svm *svm = to_svm(vcpu);
907 int ret;
908
909 if (vcpu->guest_debug) {
910 pr_warn_once("KVM_SET_GUEST_DEBUG for SEV-ES guest is not supported");
911 return -EINVAL;
912 }
913
914 /* Perform some pre-encryption checks against the VMSA */
915 ret = sev_es_sync_vmsa(svm);
916 if (ret)
917 return ret;
918
919 /*
920 * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
921 * the VMSA memory content (i.e. it will write the same memory region
922 * with the guest's key), so invalidate it first.
923 */
924 clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
925
926 vmsa.reserved = 0;
927 vmsa.handle = to_kvm_sev_info(kvm)->handle;
928 vmsa.address = __sme_pa(svm->sev_es.vmsa);
929 vmsa.len = PAGE_SIZE;
930 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
931 if (ret)
932 return ret;
933
934 /*
935 * SEV-ES guests maintain an encrypted version of their FPU
936 * state which is restored and saved on VMRUN and VMEXIT.
937 * Mark vcpu->arch.guest_fpu->fpstate as scratch so it won't
938 * do xsave/xrstor on it.
939 */
940 fpstate_set_confidential(&vcpu->arch.guest_fpu);
941 vcpu->arch.guest_state_protected = true;
942
943 /*
944 * SEV-ES guests mandate LBR Virtualization to be _always_ ON. Enable it
945 * only after setting guest_state_protected because KVM_SET_MSRS allows
946 * dynamic toggling of LBRV (for performance reasons) on write access to
947 * MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set.
948 */
949 svm_enable_lbrv(vcpu);
950 return 0;
951 }
952
953 static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
954 {
955 struct kvm_vcpu *vcpu;
956 unsigned long i;
957 int ret;
958
959 if (!sev_es_guest(kvm))
960 return -ENOTTY;
961
962 kvm_for_each_vcpu(i, vcpu, kvm) {
963 ret = mutex_lock_killable(&vcpu->mutex);
964 if (ret)
965 return ret;
966
967 ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);
968
969 mutex_unlock(&vcpu->mutex);
970 if (ret)
971 return ret;
972 }
973
974 return 0;
975 }
976
977 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
978 {
979 void __user *measure = u64_to_user_ptr(argp->data);
980 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
981 struct sev_data_launch_measure data;
982 struct kvm_sev_launch_measure params;
983 void __user *p = NULL;
984 void *blob = NULL;
985 int ret;
986
987 if (!sev_guest(kvm))
988 return -ENOTTY;
989
990 if (copy_from_user(&params, measure, sizeof(params)))
991 return -EFAULT;
992
993 memset(&data, 0, sizeof(data));
994
995 /* User wants to query the blob length */
996 if (!params.len)
997 goto cmd;
998
999 p = u64_to_user_ptr(params.uaddr);
1000 if (p) {
1001 if (params.len > SEV_FW_BLOB_MAX_SIZE)
1002 return -EINVAL;
1003
1004 blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
1005 if (!blob)
1006 return -ENOMEM;
1007
1008 data.address = __psp_pa(blob);
1009 data.len = params.len;
1010 }
1011
1012 cmd:
1013 data.handle = sev->handle;
1014 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);
1015
1016 /*
1017 * If userspace was only querying the blob length, the FW responded with the expected data.
1018 */
1019 if (!params.len)
1020 goto done;
1021
1022 if (ret)
1023 goto e_free_blob;
1024
1025 if (blob) {
1026 if (copy_to_user(p, blob, params.len))
1027 ret = -EFAULT;
1028 }
1029
1030 done:
1031 params.len = data.len;
1032 if (copy_to_user(measure, &params, sizeof(params)))
1033 ret = -EFAULT;
1034 e_free_blob:
1035 kfree(blob);
1036 return ret;
1037 }
1038
1039 static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1040 {
1041 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1042 struct sev_data_launch_finish data;
1043
1044 if (!sev_guest(kvm))
1045 return -ENOTTY;
1046
1047 data.handle = sev->handle;
1048 return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
1049 }
1050
1051 static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
1052 {
1053 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1054 struct kvm_sev_guest_status params;
1055 struct sev_data_guest_status data;
1056 int ret;
1057
1058 if (!sev_guest(kvm))
1059 return -ENOTTY;
1060
1061 memset(&data, 0, sizeof(data));
1062
1063 data.handle = sev->handle;
1064 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
1065 if (ret)
1066 return ret;
1067
1068 params.policy = data.policy;
1069 params.state = data.state;
1070 params.handle = data.handle;
1071
1072 if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params)))
1073 ret = -EFAULT;
1074
1075 return ret;
1076 }
1077
1078 static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
1079 unsigned long dst, int size,
1080 int *error, bool enc)
1081 {
1082 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1083 struct sev_data_dbg data;
1084
1085 data.reserved = 0;
1086 data.handle = sev->handle;
1087 data.dst_addr = dst;
1088 data.src_addr = src;
1089 data.len = size;
1090
1091 return sev_issue_cmd(kvm,
1092 enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
1093 &data, error);
1094 }
1095
1096 static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
1097 unsigned long dst_paddr, int sz, int *err)
1098 {
1099 int offset;
1100
1101 /*
1102 * It's safe to read more than was asked for; the caller should ensure
1103 * that the destination has enough space.
1104 */
1105 offset = src_paddr & 15;
1106 src_paddr = round_down(src_paddr, 16);
1107 sz = round_up(sz + offset, 16);
1108
1109 return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
1110 }
1111
1112 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
1113 void __user *dst_uaddr,
1114 unsigned long dst_paddr,
1115 int size, int *err)
1116 {
1117 struct page *tpage = NULL;
1118 int ret, offset;
1119
1120 /* If the inputs are not 16-byte aligned then use an intermediate buffer */
1121 if (!IS_ALIGNED(dst_paddr, 16) ||
1122 !IS_ALIGNED(paddr, 16) ||
1123 !IS_ALIGNED(size, 16)) {
1124 tpage = (void *)alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1125 if (!tpage)
1126 return -ENOMEM;
1127
1128 dst_paddr = __sme_page_pa(tpage);
1129 }
1130
1131 ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
1132 if (ret)
1133 goto e_free;
1134
1135 if (tpage) {
1136 offset = paddr & 15;
1137 if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
1138 ret = -EFAULT;
1139 }
1140
1141 e_free:
1142 if (tpage)
1143 __free_page(tpage);
1144
1145 return ret;
1146 }
1147
1148 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
1149 void __user *vaddr,
1150 unsigned long dst_paddr,
1151 void __user *dst_vaddr,
1152 int size, int *error)
1153 {
1154 struct page *src_tpage = NULL;
1155 struct page *dst_tpage = NULL;
1156 int ret, len = size;
1157
1158 /* If source buffer is not aligned then use an intermediate buffer */
1159 if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
1160 src_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
1161 if (!src_tpage)
1162 return -ENOMEM;
1163
1164 if (copy_from_user(page_address(src_tpage), vaddr, size)) {
1165 __free_page(src_tpage);
1166 return -EFAULT;
1167 }
1168
1169 paddr = __sme_page_pa(src_tpage);
1170 }
1171
1172 /*
1173 * If destination buffer or length is not aligned then do read-modify-write:
1174 * - decrypt destination in an intermediate buffer
1175 * - copy the source buffer in an intermediate buffer
1176 * - use the intermediate buffer as source buffer
1177 */
1178 if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
1179 int dst_offset;
1180
1181 dst_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
1182 if (!dst_tpage) {
1183 ret = -ENOMEM;
1184 goto e_free;
1185 }
1186
1187 ret = __sev_dbg_decrypt(kvm, dst_paddr,
1188 __sme_page_pa(dst_tpage), size, error);
1189 if (ret)
1190 goto e_free;
1191
1192 /*
1193 * If the source is a kernel buffer then use memcpy(), otherwise
1194 * copy_from_user().
1195 */
1196 dst_offset = dst_paddr & 15;
1197
1198 if (src_tpage)
1199 memcpy(page_address(dst_tpage) + dst_offset,
1200 page_address(src_tpage), size);
1201 else {
1202 if (copy_from_user(page_address(dst_tpage) + dst_offset,
1203 vaddr, size)) {
1204 ret = -EFAULT;
1205 goto e_free;
1206 }
1207 }
1208
1209 paddr = __sme_page_pa(dst_tpage);
1210 dst_paddr = round_down(dst_paddr, 16);
1211 len = round_up(size, 16);
1212 }
1213
1214 ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
1215
1216 e_free:
1217 if (src_tpage)
1218 __free_page(src_tpage);
1219 if (dst_tpage)
1220 __free_page(dst_tpage);
1221 return ret;
1222 }
1223
1224 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
1225 {
1226 unsigned long vaddr, vaddr_end, next_vaddr;
1227 unsigned long dst_vaddr;
1228 struct page **src_p, **dst_p;
1229 struct kvm_sev_dbg debug;
1230 unsigned long n;
1231 unsigned int size;
1232 int ret;
1233
1234 if (!sev_guest(kvm))
1235 return -ENOTTY;
1236
1237 if (copy_from_user(&debug, u64_to_user_ptr(argp->data), sizeof(debug)))
1238 return -EFAULT;
1239
1240 if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
1241 return -EINVAL;
1242 if (!debug.dst_uaddr)
1243 return -EINVAL;
1244
1245 vaddr = debug.src_uaddr;
1246 size = debug.len;
1247 vaddr_end = vaddr + size;
1248 dst_vaddr = debug.dst_uaddr;
1249
1250 for (; vaddr < vaddr_end; vaddr = next_vaddr) {
1251 int len, s_off, d_off;
1252
1253 /* lock userspace source and destination page */
1254 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
1255 if (IS_ERR(src_p))
1256 return PTR_ERR(src_p);
1257
1258 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
1259 if (IS_ERR(dst_p)) {
1260 sev_unpin_memory(kvm, src_p, n);
1261 return PTR_ERR(dst_p);
1262 }
1263
1264 /*
1265 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
1266 * the pages; flush the destination too so that future accesses do not
1267 * see stale data.
1268 */
1269 sev_clflush_pages(src_p, 1);
1270 sev_clflush_pages(dst_p, 1);
1271
1272 /*
1273 * Since user buffer may not be page aligned, calculate the
1274 * offset within the page.
1275 */
1276 s_off = vaddr & ~PAGE_MASK;
1277 d_off = dst_vaddr & ~PAGE_MASK;
1278 len = min_t(size_t, (PAGE_SIZE - s_off), size);
1279
1280 if (dec)
1281 ret = __sev_dbg_decrypt_user(kvm,
1282 __sme_page_pa(src_p[0]) + s_off,
1283 (void __user *)dst_vaddr,
1284 __sme_page_pa(dst_p[0]) + d_off,
1285 len, &argp->error);
1286 else
1287 ret = __sev_dbg_encrypt_user(kvm,
1288 __sme_page_pa(src_p[0]) + s_off,
1289 (void __user *)vaddr,
1290 __sme_page_pa(dst_p[0]) + d_off,
1291 (void __user *)dst_vaddr,
1292 len, &argp->error);
1293
1294 sev_unpin_memory(kvm, src_p, n);
1295 sev_unpin_memory(kvm, dst_p, n);
1296
1297 if (ret)
1298 goto err;
1299
1300 next_vaddr = vaddr + len;
1301 dst_vaddr = dst_vaddr + len;
1302 size -= len;
1303 }
1304 err:
1305 return ret;
1306 }
1307
1308 static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
1309 {
1310 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1311 struct sev_data_launch_secret data;
1312 struct kvm_sev_launch_secret params;
1313 struct page **pages;
1314 void *blob, *hdr;
1315 unsigned long n, i;
1316 int ret, offset;
1317
1318 if (!sev_guest(kvm))
1319 return -ENOTTY;
1320
1321 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
1322 return -EFAULT;
1323
1324 pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
1325 if (IS_ERR(pages))
1326 return PTR_ERR(pages);
1327
1328 /*
1329 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
1330 * place; the cache may contain the data that was written unencrypted.
1331 */
1332 sev_clflush_pages(pages, n);
1333
1334 /*
1335 * The secret must be copied into a contiguous memory region, so verify
1336 * that the userspace memory pages are contiguous before issuing the command.
1337 */
1338 if (get_num_contig_pages(0, pages, n) != n) {
1339 ret = -EINVAL;
1340 goto e_unpin_memory;
1341 }
1342
1343 memset(&data, 0, sizeof(data));
1344
1345 offset = params.guest_uaddr & (PAGE_SIZE - 1);
1346 data.guest_address = __sme_page_pa(pages[0]) + offset;
1347 data.guest_len = params.guest_len;
1348
1349 blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
1350 if (IS_ERR(blob)) {
1351 ret = PTR_ERR(blob);
1352 goto e_unpin_memory;
1353 }
1354
1355 data.trans_address = __psp_pa(blob);
1356 data.trans_len = params.trans_len;
1357
1358 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
1359 if (IS_ERR(hdr)) {
1360 ret = PTR_ERR(hdr);
1361 goto e_free_blob;
1362 }
1363 data.hdr_address = __psp_pa(hdr);
1364 data.hdr_len = params.hdr_len;
1365
1366 data.handle = sev->handle;
1367 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);
1368
1369 kfree(hdr);
1370
1371 e_free_blob:
1372 kfree(blob);
1373 e_unpin_memory:
1374 /* content of memory is updated, mark pages dirty */
1375 for (i = 0; i < n; i++) {
1376 set_page_dirty_lock(pages[i]);
1377 mark_page_accessed(pages[i]);
1378 }
1379 sev_unpin_memory(kvm, pages, n);
1380 return ret;
1381 }
1382
1383 static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
1384 {
1385 void __user *report = u64_to_user_ptr(argp->data);
1386 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1387 struct sev_data_attestation_report data;
1388 struct kvm_sev_attestation_report params;
1389 void __user *p;
1390 void *blob = NULL;
1391 int ret;
1392
1393 if (!sev_guest(kvm))
1394 return -ENOTTY;
1395
1396 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
1397 return -EFAULT;
1398
1399 memset(&data, 0, sizeof(data));
1400
1401 /* User wants to query the blob length */
1402 if (!params.len)
1403 goto cmd;
1404
1405 p = u64_to_user_ptr(params.uaddr);
1406 if (p) {
1407 if (params.len > SEV_FW_BLOB_MAX_SIZE)
1408 return -EINVAL;
1409
1410 blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
1411 if (!blob)
1412 return -ENOMEM;
1413
1414 data.address = __psp_pa(blob);
1415 data.len = params.len;
1416 memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
1417 }
1418 cmd:
1419 data.handle = sev->handle;
1420 ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
1421 /*
1422 * If userspace was only querying the blob length, the FW responded with the expected data.
1423 */
1424 if (!params.len)
1425 goto done;
1426
1427 if (ret)
1428 goto e_free_blob;
1429
1430 if (blob) {
1431 if (copy_to_user(p, blob, params.len))
1432 ret = -EFAULT;
1433 }
1434
1435 done:
1436 params.len = data.len;
1437 if (copy_to_user(report, &params, sizeof(params)))
1438 ret = -EFAULT;
1439 e_free_blob:
1440 kfree(blob);
1441 return ret;
1442 }
1443
1444 /* Userspace wants to query session length. */
1445 static int
1446 __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
1447 struct kvm_sev_send_start *params)
1448 {
1449 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1450 struct sev_data_send_start data;
1451 int ret;
1452
1453 memset(&data, 0, sizeof(data));
1454 data.handle = sev->handle;
1455 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
1456
1457 params->session_len = data.session_len;
1458 if (copy_to_user(u64_to_user_ptr(argp->data), params,
1459 sizeof(struct kvm_sev_send_start)))
1460 ret = -EFAULT;
1461
1462 return ret;
1463 }
1464
1465 static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
1466 {
1467 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1468 struct sev_data_send_start data;
1469 struct kvm_sev_send_start params;
1470 void *amd_certs, *session_data;
1471 void *pdh_cert, *plat_certs;
1472 int ret;
1473
1474 if (!sev_guest(kvm))
1475 return -ENOTTY;
1476
1477 if (copy_from_user(&params, u64_to_user_ptr(argp->data),
1478 sizeof(struct kvm_sev_send_start)))
1479 return -EFAULT;
1480
1481 /* if session_len is zero, userspace wants to query the session length */
1482 if (!params.session_len)
1483 return __sev_send_start_query_session_length(kvm, argp,
1484 &params);
1485
1486 /* some sanity checks */
1487 if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
1488 !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
1489 return -EINVAL;
1490
1491 /* allocate the memory to hold the session data blob */
1492 session_data = kzalloc(params.session_len, GFP_KERNEL_ACCOUNT);
1493 if (!session_data)
1494 return -ENOMEM;
1495
1496 /* copy the certificate blobs from userspace */
1497 pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
1498 params.pdh_cert_len);
1499 if (IS_ERR(pdh_cert)) {
1500 ret = PTR_ERR(pdh_cert);
1501 goto e_free_session;
1502 }
1503
1504 plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
1505 params.plat_certs_len);
1506 if (IS_ERR(plat_certs)) {
1507 ret = PTR_ERR(plat_certs);
1508 goto e_free_pdh;
1509 }
1510
1511 amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
1512 params.amd_certs_len);
1513 if (IS_ERR(amd_certs)) {
1514 ret = PTR_ERR(amd_certs);
1515 goto e_free_plat_cert;
1516 }
1517
1518 /* populate the FW SEND_START field with system physical address */
1519 memset(&data, 0, sizeof(data));
1520 data.pdh_cert_address = __psp_pa(pdh_cert);
1521 data.pdh_cert_len = params.pdh_cert_len;
1522 data.plat_certs_address = __psp_pa(plat_certs);
1523 data.plat_certs_len = params.plat_certs_len;
1524 data.amd_certs_address = __psp_pa(amd_certs);
1525 data.amd_certs_len = params.amd_certs_len;
1526 data.session_address = __psp_pa(session_data);
1527 data.session_len = params.session_len;
1528 data.handle = sev->handle;
1529
1530 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
1531
1532 if (!ret && copy_to_user(u64_to_user_ptr(params.session_uaddr),
1533 session_data, params.session_len)) {
1534 ret = -EFAULT;
1535 goto e_free_amd_cert;
1536 }
1537
1538 params.policy = data.policy;
1539 params.session_len = data.session_len;
1540 if (copy_to_user(u64_to_user_ptr(argp->data), &params,
1541 sizeof(struct kvm_sev_send_start)))
1542 ret = -EFAULT;
1543
1544 e_free_amd_cert:
1545 kfree(amd_certs);
1546 e_free_plat_cert:
1547 kfree(plat_certs);
1548 e_free_pdh:
1549 kfree(pdh_cert);
1550 e_free_session:
1551 kfree(session_data);
1552 return ret;
1553 }
1554
1555 /* Userspace wants to query either header or trans length. */
1556 static int
1557 __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
1558 struct kvm_sev_send_update_data *params)
1559 {
1560 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1561 struct sev_data_send_update_data data;
1562 int ret;
1563
1564 memset(&data, 0, sizeof(data));
1565 data.handle = sev->handle;
1566 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
1567
1568 params->hdr_len = data.hdr_len;
1569 params->trans_len = data.trans_len;
1570
1571 if (copy_to_user(u64_to_user_ptr(argp->data), params,
1572 sizeof(struct kvm_sev_send_update_data)))
1573 ret = -EFAULT;
1574
1575 return ret;
1576 }
1577
1578 static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1579 {
1580 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1581 struct sev_data_send_update_data data;
1582 struct kvm_sev_send_update_data params;
1583 void *hdr, *trans_data;
1584 struct page **guest_page;
1585 unsigned long n;
1586 int ret, offset;
1587
1588 if (!sev_guest(kvm))
1589 return -ENOTTY;
1590
1591 if (copy_from_user(&params, u64_to_user_ptr(argp->data),
1592 sizeof(struct kvm_sev_send_update_data)))
1593 return -EFAULT;
1594
1595 /* userspace wants to query either header or trans length */
1596 if (!params.trans_len || !params.hdr_len)
1597 return __sev_send_update_data_query_lengths(kvm, argp, &params);
1598
1599 if (!params.trans_uaddr || !params.guest_uaddr ||
1600 !params.guest_len || !params.hdr_uaddr)
1601 return -EINVAL;
1602
1603 /* Check if we are crossing the page boundary */
1604 offset = params.guest_uaddr & (PAGE_SIZE - 1);
1605 if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
1606 return -EINVAL;
1607
1608 /* Pin guest memory */
1609 guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
1610 PAGE_SIZE, &n, 0);
1611 if (IS_ERR(guest_page))
1612 return PTR_ERR(guest_page);
1613
1614 /* allocate memory for header and transport buffer */
1615 ret = -ENOMEM;
1616 hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
1617 if (!hdr)
1618 goto e_unpin;
1619
1620 trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
1621 if (!trans_data)
1622 goto e_free_hdr;
1623
1624 memset(&data, 0, sizeof(data));
1625 data.hdr_address = __psp_pa(hdr);
1626 data.hdr_len = params.hdr_len;
1627 data.trans_address = __psp_pa(trans_data);
1628 data.trans_len = params.trans_len;
1629
1630 /* The SEND_UPDATE_DATA command requires C-bit to be always set. */
1631 data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
1632 data.guest_address |= sev_me_mask;
1633 data.guest_len = params.guest_len;
1634 data.handle = sev->handle;
1635
1636 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
1637
1638 if (ret)
1639 goto e_free_trans_data;
1640
1641 /* copy transport buffer to user space */
1642 if (copy_to_user(u64_to_user_ptr(params.trans_uaddr),
1643 trans_data, params.trans_len)) {
1644 ret = -EFAULT;
1645 goto e_free_trans_data;
1646 }
1647
1648 /* Copy packet header to userspace. */
1649 if (copy_to_user(u64_to_user_ptr(params.hdr_uaddr), hdr,
1650 params.hdr_len))
1651 ret = -EFAULT;
1652
1653 e_free_trans_data:
1654 kfree(trans_data);
1655 e_free_hdr:
1656 kfree(hdr);
1657 e_unpin:
1658 sev_unpin_memory(kvm, guest_page, n);
1659
1660 return ret;
1661 }
1662
1663 static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1664 {
1665 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1666 struct sev_data_send_finish data;
1667
1668 if (!sev_guest(kvm))
1669 return -ENOTTY;
1670
1671 data.handle = sev->handle;
1672 return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
1673 }
1674
1675 static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
1676 {
1677 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1678 struct sev_data_send_cancel data;
1679
1680 if (!sev_guest(kvm))
1681 return -ENOTTY;
1682
1683 data.handle = sev->handle;
1684 return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
1685 }
1686
1687 static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
1688 {
1689 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1690 struct sev_data_receive_start start;
1691 struct kvm_sev_receive_start params;
1692 int *error = &argp->error;
1693 void *session_data;
1694 void *pdh_data;
1695 int ret;
1696
1697 if (!sev_guest(kvm))
1698 return -ENOTTY;
1699
1700 /* Get the parameters from userspace */
1701 if (copy_from_user(&params, u64_to_user_ptr(argp->data),
1702 sizeof(struct kvm_sev_receive_start)))
1703 return -EFAULT;
1704
1705 /* some sanity checks */
1706 if (!params.pdh_uaddr || !params.pdh_len ||
1707 !params.session_uaddr || !params.session_len)
1708 return -EINVAL;
1709
1710 pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
1711 if (IS_ERR(pdh_data))
1712 return PTR_ERR(pdh_data);
1713
1714 session_data = psp_copy_user_blob(params.session_uaddr,
1715 params.session_len);
1716 if (IS_ERR(session_data)) {
1717 ret = PTR_ERR(session_data);
1718 goto e_free_pdh;
1719 }
1720
1721 memset(&start, 0, sizeof(start));
1722 start.handle = params.handle;
1723 start.policy = params.policy;
1724 start.pdh_cert_address = __psp_pa(pdh_data);
1725 start.pdh_cert_len = params.pdh_len;
1726 start.session_address = __psp_pa(session_data);
1727 start.session_len = params.session_len;
1728
1729 /* create memory encryption context */
1730 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start,
1731 error);
1732 if (ret)
1733 goto e_free_session;
1734
1735 /* Bind ASID to this guest */
1736 ret = sev_bind_asid(kvm, start.handle, error);
1737 if (ret) {
1738 sev_decommission(start.handle);
1739 goto e_free_session;
1740 }
1741
1742 params.handle = start.handle;
1743 if (copy_to_user(u64_to_user_ptr(argp->data),
1744 &params, sizeof(struct kvm_sev_receive_start))) {
1745 ret = -EFAULT;
1746 sev_unbind_asid(kvm, start.handle);
1747 goto e_free_session;
1748 }
1749
1750 sev->handle = start.handle;
1751 sev->fd = argp->sev_fd;
1752
1753 e_free_session:
1754 kfree(session_data);
1755 e_free_pdh:
1756 kfree(pdh_data);
1757
1758 return ret;
1759 }
1760
1761 static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1762 {
1763 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1764 struct kvm_sev_receive_update_data params;
1765 struct sev_data_receive_update_data data;
1766 void *hdr = NULL, *trans = NULL;
1767 struct page **guest_page;
1768 unsigned long n;
1769 int ret, offset;
1770
1771 if (!sev_guest(kvm))
1772 return -EINVAL;
1773
1774 if (copy_from_user(&params, u64_to_user_ptr(argp->data),
1775 sizeof(struct kvm_sev_receive_update_data)))
1776 return -EFAULT;
1777
1778 if (!params.hdr_uaddr || !params.hdr_len ||
1779 !params.guest_uaddr || !params.guest_len ||
1780 !params.trans_uaddr || !params.trans_len)
1781 return -EINVAL;
1782
1783 /* Check if we are crossing the page boundary */
1784 offset = params.guest_uaddr & (PAGE_SIZE - 1);
1785 if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
1786 return -EINVAL;
1787
1788 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
1789 if (IS_ERR(hdr))
1790 return PTR_ERR(hdr);
1791
1792 trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
1793 if (IS_ERR(trans)) {
1794 ret = PTR_ERR(trans);
1795 goto e_free_hdr;
1796 }
1797
1798 memset(&data, 0, sizeof(data));
1799 data.hdr_address = __psp_pa(hdr);
1800 data.hdr_len = params.hdr_len;
1801 data.trans_address = __psp_pa(trans);
1802 data.trans_len = params.trans_len;
1803
1804 /* Pin guest memory */
1805 guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
1806 PAGE_SIZE, &n, 1);
1807 if (IS_ERR(guest_page)) {
1808 ret = PTR_ERR(guest_page);
1809 goto e_free_trans;
1810 }
1811
1812 /*
1813 * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
1814 * encrypts the written data with the guest's key, and the cache may
1815 * contain dirty, unencrypted data.
1816 */
1817 sev_clflush_pages(guest_page, n);
1818
1819 /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
1820 data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
1821 data.guest_address |= sev_me_mask;
1822 data.guest_len = params.guest_len;
1823 data.handle = sev->handle;
1824
1825 ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
1826 &argp->error);
1827
1828 sev_unpin_memory(kvm, guest_page, n);
1829
1830 e_free_trans:
1831 kfree(trans);
1832 e_free_hdr:
1833 kfree(hdr);
1834
1835 return ret;
1836 }
1837
1838 static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1839 {
1840 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1841 struct sev_data_receive_finish data;
1842
1843 if (!sev_guest(kvm))
1844 return -ENOTTY;
1845
1846 data.handle = sev->handle;
1847 return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
1848 }
1849
1850 static bool is_cmd_allowed_from_mirror(u32 cmd_id)
1851 {
1852 /*
1853 * Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA so SEV-ES can be
1854 * enabled on mirror vCPUs. Also allow the debugging and status commands.
1855 */
1856 if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
1857 cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
1858 cmd_id == KVM_SEV_DBG_ENCRYPT)
1859 return true;
1860
1861 return false;
1862 }
1863
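/*
 * Lock two VMs for intra-host migration. The migration_in_progress flags are
 * set before taking kvm->lock so that two VMs trying to migrate to/from each
 * other bail out with -EBUSY instead of deadlocking.
 */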
1864 static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
1865 {
1866 struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
1867 struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
1868 int r = -EBUSY;
1869
1870 if (dst_kvm == src_kvm)
1871 return -EINVAL;
1872
1873 /*
1874 * Bail if these VMs are already involved in a migration to avoid
1875 * deadlock between two VMs trying to migrate to/from each other.
1876 */
1877 if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
1878 return -EBUSY;
1879
1880 if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
1881 goto release_dst;
1882
1883 r = -EINTR;
1884 if (mutex_lock_killable(&dst_kvm->lock))
1885 goto release_src;
1886 if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING))
1887 goto unlock_dst;
1888 return 0;
1889
1890 unlock_dst:
1891 mutex_unlock(&dst_kvm->lock);
1892 release_src:
1893 atomic_set_release(&src_sev->migration_in_progress, 0);
1894 release_dst:
1895 atomic_set_release(&dst_sev->migration_in_progress, 0);
1896 return r;
1897 }
1898
1899 static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
1900 {
1901 struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
1902 struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
1903
1904 mutex_unlock(&dst_kvm->lock);
1905 mutex_unlock(&src_kvm->lock);
1906 atomic_set_release(&dst_sev->migration_in_progress, 0);
1907 atomic_set_release(&src_sev->migration_in_progress, 0);
1908 }
1909
1910 /* vCPU mutex subclasses. */
1911 enum sev_migration_role {
1912 SEV_MIGRATION_SOURCE = 0,
1913 SEV_MIGRATION_TARGET,
1914 SEV_NR_MIGRATION_ROLES,
1915 };
1916
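/*
 * Lock every vCPU mutex in the VM for migration. For lockdep, only the first
 * vCPU's acquisition per role is kept tracked; the remaining acquisitions are
 * released from lockdep bookkeeping so a large vCPU count doesn't overflow
 * the number of locks lockdep can track as held.
 */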
1917 static int sev_lock_vcpus_for_migration(struct kvm *kvm,
1918 enum sev_migration_role role)
1919 {
1920 struct kvm_vcpu *vcpu;
1921 unsigned long i, j;
1922
1923 kvm_for_each_vcpu(i, vcpu, kvm) {
1924 if (mutex_lock_killable_nested(&vcpu->mutex, role))
1925 goto out_unlock;
1926
1927 #ifdef CONFIG_PROVE_LOCKING
1928 if (!i)
1929 /*
1930 * Reset the role to one that avoids colliding with
1931 * the role used for the first vcpu mutex.
1932 */
1933 role = SEV_NR_MIGRATION_ROLES;
1934 else
1935 mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
1936 #endif
1937 }
1938
1939 return 0;
1940
1941 out_unlock:
1942
1943 kvm_for_each_vcpu(j, vcpu, kvm) {
1944 if (i == j)
1945 break;
1946
1947 #ifdef CONFIG_PROVE_LOCKING
1948 if (j)
1949 mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
1950 #endif
1951
1952 mutex_unlock(&vcpu->mutex);
1953 }
1954 return -EINTR;
1955 }
1956
1957 static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
1958 {
1959 struct kvm_vcpu *vcpu;
1960 unsigned long i;
1961 bool first = true;
1962
1963 kvm_for_each_vcpu(i, vcpu, kvm) {
1964 if (first)
1965 first = false;
1966 else
1967 mutex_acquire(&vcpu->mutex.dep_map,
1968 SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
1969
1970 mutex_unlock(&vcpu->mutex);
1971 }
1972 }
1973
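/*
 * Transfer all SEV/SEV-ES state from the source VM to the destination: the
 * ASID, firmware handle, pinned-region list, mirror VMs, and (for SEV-ES)
 * each vCPU's VMSA and GHCB state. The source VM is left inert.
 */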
1974 static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
1975 {
1976 struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
1977 struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info;
1978 struct kvm_vcpu *dst_vcpu, *src_vcpu;
1979 struct vcpu_svm *dst_svm, *src_svm;
1980 struct kvm_sev_info *mirror;
1981 unsigned long i;
1982
1983 dst->active = true;
1984 dst->asid = src->asid;
1985 dst->handle = src->handle;
1986 dst->pages_locked = src->pages_locked;
1987 dst->enc_context_owner = src->enc_context_owner;
1988 dst->es_active = src->es_active;
1989 dst->vmsa_features = src->vmsa_features;
1990
1991 src->asid = 0;
1992 src->active = false;
1993 src->handle = 0;
1994 src->pages_locked = 0;
1995 src->enc_context_owner = NULL;
1996 src->es_active = false;
1997
1998 list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
1999
2000 /*
2001 * If this VM has mirrors, "transfer" each mirror's refcount of the
2002 * source to the destination (this KVM). The caller holds a reference
2003 * to the source, so there's no danger of use-after-free.
2004 */
2005 list_cut_before(&dst->mirror_vms, &src->mirror_vms, &src->mirror_vms);
2006 list_for_each_entry(mirror, &dst->mirror_vms, mirror_entry) {
2007 kvm_get_kvm(dst_kvm);
2008 kvm_put_kvm(src_kvm);
2009 mirror->enc_context_owner = dst_kvm;
2010 }
2011
2012 /*
2013 * If this VM is a mirror, remove the old mirror from the owners list
2014 * and add the new mirror to the list.
2015 */
2016 if (is_mirroring_enc_context(dst_kvm)) {
2017 struct kvm_sev_info *owner_sev_info =
2018 &to_kvm_svm(dst->enc_context_owner)->sev_info;
2019
2020 list_del(&src->mirror_entry);
2021 list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms);
2022 }
2023
2024 kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) {
2025 dst_svm = to_svm(dst_vcpu);
2026
2027 sev_init_vmcb(dst_svm);
2028
2029 if (!dst->es_active)
2030 continue;
2031
2032 /*
2033 * Note, the source is not required to have the same number of
2034 * vCPUs as the destination when migrating a vanilla SEV VM.
2035 */
2036 src_vcpu = kvm_get_vcpu(src_kvm, i);
2037 src_svm = to_svm(src_vcpu);
2038
2039 /*
2040 * Transfer VMSA and GHCB state to the destination. Nullify and
2041 * clear source fields as appropriate, the state now belongs to
2042 * the destination.
2043 */
2044 memcpy(&dst_svm->sev_es, &src_svm->sev_es, sizeof(src_svm->sev_es));
2045 dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
2046 dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa;
2047 dst_vcpu->arch.guest_state_protected = true;
2048
2049 memset(&src_svm->sev_es, 0, sizeof(src_svm->sev_es));
2050 src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE;
2051 src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
2052 src_vcpu->arch.guest_state_protected = false;
2053 }
2054 }
2055
2056 static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
2057 {
2058 struct kvm_vcpu *src_vcpu;
2059 unsigned long i;
2060
2061 if (!sev_es_guest(src))
2062 return 0;
2063
2064 if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
2065 return -EINVAL;
2066
2067 kvm_for_each_vcpu(i, src_vcpu, src) {
2068 if (!src_vcpu->arch.guest_state_protected)
2069 return -EINVAL;
2070 }
2071
2072 return 0;
2073 }
2074
2075 int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
2076 {
2077 struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
2078 struct kvm_sev_info *src_sev, *cg_cleanup_sev;
2079 struct fd f = fdget(source_fd);
2080 struct kvm *source_kvm;
2081 bool charged = false;
2082 int ret;
2083
2084 if (!fd_file(f))
2085 return -EBADF;
2086
2087 if (!file_is_kvm(fd_file(f))) {
2088 ret = -EBADF;
2089 goto out_fput;
2090 }
2091
2092 source_kvm = fd_file(f)->private_data;
2093 ret = sev_lock_two_vms(kvm, source_kvm);
2094 if (ret)
2095 goto out_fput;
2096
2097 if (kvm->arch.vm_type != source_kvm->arch.vm_type ||
2098 sev_guest(kvm) || !sev_guest(source_kvm)) {
2099 ret = -EINVAL;
2100 goto out_unlock;
2101 }
2102
2103 src_sev = &to_kvm_svm(source_kvm)->sev_info;
2104
2105 dst_sev->misc_cg = get_current_misc_cg();
2106 cg_cleanup_sev = dst_sev;
2107 if (dst_sev->misc_cg != src_sev->misc_cg) {
2108 ret = sev_misc_cg_try_charge(dst_sev);
2109 if (ret)
2110 goto out_dst_cgroup;
2111 charged = true;
2112 }
2113
2114 ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
2115 if (ret)
2116 goto out_dst_cgroup;
2117 ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
2118 if (ret)
2119 goto out_dst_vcpu;
2120
2121 ret = sev_check_source_vcpus(kvm, source_kvm);
2122 if (ret)
2123 goto out_source_vcpu;
2124
2125 sev_migrate_from(kvm, source_kvm);
2126 kvm_vm_dead(source_kvm);
2127 cg_cleanup_sev = src_sev;
2128 ret = 0;
2129
2130 out_source_vcpu:
2131 sev_unlock_vcpus_for_migration(source_kvm);
2132 out_dst_vcpu:
2133 sev_unlock_vcpus_for_migration(kvm);
2134 out_dst_cgroup:
2135 /* Operates on the source on success, on the destination on failure. */
2136 if (charged)
2137 sev_misc_cg_uncharge(cg_cleanup_sev);
2138 put_misc_cg(cg_cleanup_sev->misc_cg);
2139 cg_cleanup_sev->misc_cg = NULL;
2140 out_unlock:
2141 sev_unlock_two_vms(kvm, source_kvm);
2142 out_fput:
2143 fdput(f);
2144 return ret;
2145 }
2146
2147 int sev_dev_get_attr(u32 group, u64 attr, u64 *val)
2148 {
2149 if (group != KVM_X86_GRP_SEV)
2150 return -ENXIO;
2151
2152 switch (attr) {
2153 case KVM_X86_SEV_VMSA_FEATURES:
2154 *val = sev_supported_vmsa_features;
2155 return 0;
2156
2157 default:
2158 return -ENXIO;
2159 }
2160 }
2161
2162 /*
2163 * The guest context contains all the information, keys and metadata
2164 * associated with the guest that the firmware tracks to implement SEV
2165 * and SNP features. The firmware stores the guest context in a
2166 * hypervisor-provided page, set up via the SNP_GCTX_CREATE command.
2167 */
2168 static void *snp_context_create(struct kvm *kvm, struct kvm_sev_cmd *argp)
2169 {
2170 struct sev_data_snp_addr data = {};
2171 void *context;
2172 int rc;
2173
2174 /* Allocate memory for context page */
2175 context = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT);
2176 if (!context)
2177 return NULL;
2178
2179 data.address = __psp_pa(context);
2180 rc = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_GCTX_CREATE, &data, &argp->error);
2181 if (rc) {
2182 pr_warn("Failed to create SEV-SNP context, rc %d fw_error %d",
2183 rc, argp->error);
2184 snp_free_firmware_page(context);
2185 return NULL;
2186 }
2187
2188 return context;
2189 }
2190
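/* Bind the ASID allocated for this VM to its SNP guest context via SNP_ACTIVATE. */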
2191 static int snp_bind_asid(struct kvm *kvm, int *error)
2192 {
2193 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2194 struct sev_data_snp_activate data = {0};
2195
2196 data.gctx_paddr = __psp_pa(sev->snp_context);
2197 data.asid = sev_get_asid(kvm);
2198 return sev_issue_cmd(kvm, SEV_CMD_SNP_ACTIVATE, &data, error);
2199 }
2200
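/*
 * Handle KVM_SEV_SNP_LAUNCH_START: validate the requested guest policy,
 * create the SNP guest context page, begin the launch flow with the
 * SNP_LAUNCH_START firmware command, and bind the ASID to the new context.
 */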
2201 static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
2202 {
2203 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2204 struct sev_data_snp_launch_start start = {0};
2205 struct kvm_sev_snp_launch_start params;
2206 int rc;
2207
2208 if (!sev_snp_guest(kvm))
2209 return -ENOTTY;
2210
2211 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
2212 return -EFAULT;
2213
2214 /* Don't allow userspace to allocate memory for more than 1 SNP context. */
2215 if (sev->snp_context)
2216 return -EINVAL;
2217
2218 if (params.flags)
2219 return -EINVAL;
2220
2221 if (params.policy & ~SNP_POLICY_MASK_VALID)
2222 return -EINVAL;
2223
2224 /* Check for policy bits that must be set */
2225 if (!(params.policy & SNP_POLICY_MASK_RSVD_MBO) ||
2226 !(params.policy & SNP_POLICY_MASK_SMT))
2227 return -EINVAL;
2228
2229 if (params.policy & SNP_POLICY_MASK_SINGLE_SOCKET)
2230 return -EINVAL;
2231
2232 sev->snp_context = snp_context_create(kvm, argp);
2233 if (!sev->snp_context)
2234 return -ENOTTY;
2235
2236 start.gctx_paddr = __psp_pa(sev->snp_context);
2237 start.policy = params.policy;
2238 memcpy(start.gosvw, params.gosvw, sizeof(params.gosvw));
2239 rc = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_LAUNCH_START, &start, &argp->error);
2240 if (rc) {
2241 pr_debug("%s: SEV_CMD_SNP_LAUNCH_START firmware command failed, rc %d\n",
2242 __func__, rc);
2243 goto e_free_context;
2244 }
2245
2246 sev->fd = argp->sev_fd;
2247 rc = snp_bind_asid(kvm, &argp->error);
2248 if (rc) {
2249 pr_debug("%s: Failed to bind ASID to SEV-SNP context, rc %d\n",
2250 __func__, rc);
2251 goto e_free_context;
2252 }
2253
2254 return 0;
2255
2256 e_free_context:
2257 snp_decommission_context(kvm);
2258
2259 return rc;
2260 }
2261
2262 struct sev_gmem_populate_args {
2263 __u8 type;
2264 int sev_fd;
2265 int fw_error;
2266 };
2267
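/*
 * Callback for kvm_gmem_populate(): for each gmem page, optionally copy in
 * source data, transition the page to guest-owned in the RMP table, and
 * measure/encrypt it with the SNP_LAUNCH_UPDATE firmware command. On failure,
 * previously converted pages are returned to the shared state.
 */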
2268 static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pfn,
2269 void __user *src, int order, void *opaque)
2270 {
2271 struct sev_gmem_populate_args *sev_populate_args = opaque;
2272 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2273 int n_private = 0, ret, i;
2274 int npages = (1 << order);
2275 gfn_t gfn;
2276
2277 if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src))
2278 return -EINVAL;
2279
2280 for (gfn = gfn_start, i = 0; gfn < gfn_start + npages; gfn++, i++) {
2281 struct sev_data_snp_launch_update fw_args = {0};
2282 bool assigned = false;
2283 int level;
2284
2285 ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level);
2286 if (ret || assigned) {
2287 pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n",
2288 __func__, gfn, ret, assigned);
2289 ret = ret ? -EINVAL : -EEXIST;
2290 goto err;
2291 }
2292
2293 if (src) {
2294 void *vaddr = kmap_local_pfn(pfn + i);
2295
2296 if (copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE)) {
2297 ret = -EFAULT;
2298 goto err;
2299 }
2300 kunmap_local(vaddr);
2301 }
2302
2303 ret = rmp_make_private(pfn + i, gfn << PAGE_SHIFT, PG_LEVEL_4K,
2304 sev_get_asid(kvm), true);
2305 if (ret)
2306 goto err;
2307
2308 n_private++;
2309
2310 fw_args.gctx_paddr = __psp_pa(sev->snp_context);
2311 fw_args.address = __sme_set(pfn_to_hpa(pfn + i));
2312 fw_args.page_size = PG_LEVEL_TO_RMP(PG_LEVEL_4K);
2313 fw_args.page_type = sev_populate_args->type;
2314
2315 ret = __sev_issue_cmd(sev_populate_args->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE,
2316 &fw_args, &sev_populate_args->fw_error);
2317 if (ret)
2318 goto fw_err;
2319 }
2320
2321 return 0;
2322
2323 fw_err:
2324 /*
2325 * If the firmware command failed, handle the reclaim and cleanup of that
2326 * PFN specially vs. prior pages, which can be cleaned up below without
2327 * needing to be reclaimed in advance.
2328 *
2329 * Additionally, when invalid CPUID function entries are detected,
2330 * firmware writes the expected values into the page and leaves it
2331 * unencrypted so it can be used for debugging and error-reporting.
2332 *
2333 * Copy this page back into the source buffer so userspace can use
2334 * it to determine which CPUID leaves/fields failed the CPUID
2335 * validation.
2336 */
2337 if (!snp_page_reclaim(kvm, pfn + i) &&
2338 sev_populate_args->type == KVM_SEV_SNP_PAGE_TYPE_CPUID &&
2339 sev_populate_args->fw_error == SEV_RET_INVALID_PARAM) {
2340 void *vaddr = kmap_local_pfn(pfn + i);
2341
2342 if (copy_to_user(src + i * PAGE_SIZE, vaddr, PAGE_SIZE))
2343 pr_debug("Failed to write CPUID page back to userspace\n");
2344
2345 kunmap_local(vaddr);
2346 }
2347
2348 /* pfn + i is hypervisor-owned now, so skip below cleanup for it. */
2349 n_private--;
2350
2351 err:
2352 pr_debug("%s: exiting with error ret %d (fw_error %d), restoring %d gmem PFNs to shared.\n",
2353 __func__, ret, sev_populate_args->fw_error, n_private);
2354 for (i = 0; i < n_private; i++)
2355 kvm_rmp_make_shared(kvm, pfn + i, PG_LEVEL_4K);
2356
2357 return ret;
2358 }
2359
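/*
 * Handle KVM_SEV_SNP_LAUNCH_UPDATE: validate the request against the backing
 * guest_memfd memslot and populate/measure the initial guest memory via
 * kvm_gmem_populate() and the sev_gmem_post_populate() callback. The params
 * struct is copied back with updated progress so userspace can retry
 * partially completed requests.
 */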
2360 static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
2361 {
2362 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2363 struct sev_gmem_populate_args sev_populate_args = {0};
2364 struct kvm_sev_snp_launch_update params;
2365 struct kvm_memory_slot *memslot;
2366 long npages, count;
2367 void __user *src;
2368 int ret = 0;
2369
2370 if (!sev_snp_guest(kvm) || !sev->snp_context)
2371 return -EINVAL;
2372
2373 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
2374 return -EFAULT;
2375
2376 pr_debug("%s: GFN start 0x%llx length 0x%llx type %d flags %d\n", __func__,
2377 params.gfn_start, params.len, params.type, params.flags);
2378
2379 if (!PAGE_ALIGNED(params.len) || params.flags ||
2380 (params.type != KVM_SEV_SNP_PAGE_TYPE_NORMAL &&
2381 params.type != KVM_SEV_SNP_PAGE_TYPE_ZERO &&
2382 params.type != KVM_SEV_SNP_PAGE_TYPE_UNMEASURED &&
2383 params.type != KVM_SEV_SNP_PAGE_TYPE_SECRETS &&
2384 params.type != KVM_SEV_SNP_PAGE_TYPE_CPUID))
2385 return -EINVAL;
2386
2387 npages = params.len / PAGE_SIZE;
2388
2389 /*
2390 * For each GFN that's being prepared as part of the initial guest
2391 * state, the following pre-conditions are verified:
2392 *
2393 * 1) The backing memslot is a valid private memslot.
2394 * 2) The GFN has been set to private via KVM_SET_MEMORY_ATTRIBUTES
2395 * beforehand.
2396 * 3) The PFN of the guest_memfd has not already been set to private
2397 * in the RMP table.
2398 *
2399 * The KVM MMU relies on kvm->mmu_invalidate_seq to retry nested page
2400 * faults if there's a race between a fault and an attribute update via
2401 * KVM_SET_MEMORY_ATTRIBUTES, and a similar approach could be utilized
2402 * here. However, kvm->slots_lock guards against both this as well as
2403 * concurrent memslot updates occurring while these checks are being
2404 * performed, so use that here to make it easier to reason about the
2405 * initial expected state and better guard against unexpected
2406 * situations.
2407 */
2408 mutex_lock(&kvm->slots_lock);
2409
2410 memslot = gfn_to_memslot(kvm, params.gfn_start);
2411 if (!kvm_slot_can_be_private(memslot)) {
2412 ret = -EINVAL;
2413 goto out;
2414 }
2415
2416 sev_populate_args.sev_fd = argp->sev_fd;
2417 sev_populate_args.type = params.type;
2418 src = params.type == KVM_SEV_SNP_PAGE_TYPE_ZERO ? NULL : u64_to_user_ptr(params.uaddr);
2419
2420 count = kvm_gmem_populate(kvm, params.gfn_start, src, npages,
2421 sev_gmem_post_populate, &sev_populate_args);
2422 if (count < 0) {
2423 argp->error = sev_populate_args.fw_error;
2424 pr_debug("%s: kvm_gmem_populate failed, ret %ld (fw_error %d)\n",
2425 __func__, count, argp->error);
2426 ret = -EIO;
2427 } else {
2428 params.gfn_start += count;
2429 params.len -= count * PAGE_SIZE;
2430 if (params.type != KVM_SEV_SNP_PAGE_TYPE_ZERO)
2431 params.uaddr += count * PAGE_SIZE;
2432
2433 ret = 0;
2434 if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params)))
2435 ret = -EFAULT;
2436 }
2437
2438 out:
2439 mutex_unlock(&kvm->slots_lock);
2440
2441 return ret;
2442 }
2443
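/*
 * Sync each vCPU's register state into its VMSA, transition the VMSA page to
 * a firmware-owned state in the RMP table, and measure/encrypt it with an
 * SNP_LAUNCH_UPDATE of page type VMSA.
 */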
2444 static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
2445 {
2446 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2447 struct sev_data_snp_launch_update data = {};
2448 struct kvm_vcpu *vcpu;
2449 unsigned long i;
2450 int ret;
2451
2452 data.gctx_paddr = __psp_pa(sev->snp_context);
2453 data.page_type = SNP_PAGE_TYPE_VMSA;
2454
2455 kvm_for_each_vcpu(i, vcpu, kvm) {
2456 struct vcpu_svm *svm = to_svm(vcpu);
2457 u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT;
2458
2459 ret = sev_es_sync_vmsa(svm);
2460 if (ret)
2461 return ret;
2462
2463 /* Transition the VMSA page to a firmware state. */
2464 ret = rmp_make_private(pfn, INITIAL_VMSA_GPA, PG_LEVEL_4K, sev->asid, true);
2465 if (ret)
2466 return ret;
2467
2468 /* Issue the SNP command to encrypt the VMSA */
2469 data.address = __sme_pa(svm->sev_es.vmsa);
2470 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE,
2471 &data, &argp->error);
2472 if (ret) {
2473 snp_page_reclaim(kvm, pfn);
2474
2475 return ret;
2476 }
2477
2478 svm->vcpu.arch.guest_state_protected = true;
2479 /*
2480 * SEV-ES (and thus SNP) guests mandate that LBR Virtualization
2481 * be _always_ ON. Enable it only after setting
2482 * guest_state_protected because KVM_SET_MSRS allows dynamic
2483 * toggling of LBRV (for performance reasons) on writes to
2484 * MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set.
2485 */
2486 svm_enable_lbrv(vcpu);
2487 }
2488
2489 return 0;
2490 }
2491
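/*
 * Handle KVM_SEV_SNP_LAUNCH_FINISH: encrypt and measure all vCPU VMSAs, then
 * finalize the launch with SNP_LAUNCH_FINISH, optionally passing the ID block
 * and authentication data supplied by userspace.
 */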
2492 static int snp_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
2493 {
2494 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2495 struct kvm_sev_snp_launch_finish params;
2496 struct sev_data_snp_launch_finish *data;
2497 void *id_block = NULL, *id_auth = NULL;
2498 int ret;
2499
2500 if (!sev_snp_guest(kvm))
2501 return -ENOTTY;
2502
2503 if (!sev->snp_context)
2504 return -EINVAL;
2505
2506 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
2507 return -EFAULT;
2508
2509 if (params.flags)
2510 return -EINVAL;
2511
2512 /* Measure all vCPUs using LAUNCH_UPDATE before finalizing the launch flow. */
2513 ret = snp_launch_update_vmsa(kvm, argp);
2514 if (ret)
2515 return ret;
2516
2517 data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
2518 if (!data)
2519 return -ENOMEM;
2520
2521 if (params.id_block_en) {
2522 id_block = psp_copy_user_blob(params.id_block_uaddr, KVM_SEV_SNP_ID_BLOCK_SIZE);
2523 if (IS_ERR(id_block)) {
2524 ret = PTR_ERR(id_block);
2525 goto e_free;
2526 }
2527
2528 data->id_block_en = 1;
2529 data->id_block_paddr = __sme_pa(id_block);
2530
2531 id_auth = psp_copy_user_blob(params.id_auth_uaddr, KVM_SEV_SNP_ID_AUTH_SIZE);
2532 if (IS_ERR(id_auth)) {
2533 ret = PTR_ERR(id_auth);
2534 goto e_free_id_block;
2535 }
2536
2537 data->id_auth_paddr = __sme_pa(id_auth);
2538
2539 if (params.auth_key_en)
2540 data->auth_key_en = 1;
2541 }
2542
2543 data->vcek_disabled = params.vcek_disabled;
2544
2545 memcpy(data->host_data, params.host_data, KVM_SEV_SNP_FINISH_DATA_SIZE);
2546 data->gctx_paddr = __psp_pa(sev->snp_context);
2547 ret = sev_issue_cmd(kvm, SEV_CMD_SNP_LAUNCH_FINISH, data, &argp->error);
2548
2549 /*
2550 * Now that there will be no more SNP_LAUNCH_UPDATE ioctls, private pages
2551 * can be given to the guest simply by marking the RMP entry as private.
2552 * This can happen on first access and also with KVM_PRE_FAULT_MEMORY.
2553 */
2554 if (!ret)
2555 kvm->arch.pre_fault_allowed = true;
2556
2557 kfree(id_auth);
2558
2559 e_free_id_block:
2560 kfree(id_block);
2561
2562 e_free:
2563 kfree(data);
2564
2565 return ret;
2566 }
2567
2568 int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
2569 {
2570 struct kvm_sev_cmd sev_cmd;
2571 int r;
2572
2573 if (!sev_enabled)
2574 return -ENOTTY;
2575
2576 if (!argp)
2577 return 0;
2578
2579 if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
2580 return -EFAULT;
2581
2582 mutex_lock(&kvm->lock);
2583
2584 /* Only the enc_context_owner handles some memory enc operations. */
2585 if (is_mirroring_enc_context(kvm) &&
2586 !is_cmd_allowed_from_mirror(sev_cmd.id)) {
2587 r = -EINVAL;
2588 goto out;
2589 }
2590
2591 /*
2592 * Once KVM_SEV_INIT2 initializes a KVM instance as an SNP guest, only
2593 * allow the use of SNP-specific commands.
2594 */
2595 if (sev_snp_guest(kvm) && sev_cmd.id < KVM_SEV_SNP_LAUNCH_START) {
2596 r = -EPERM;
2597 goto out;
2598 }
2599
2600 switch (sev_cmd.id) {
2601 case KVM_SEV_ES_INIT:
2602 if (!sev_es_enabled) {
2603 r = -ENOTTY;
2604 goto out;
2605 }
2606 fallthrough;
2607 case KVM_SEV_INIT:
2608 r = sev_guest_init(kvm, &sev_cmd);
2609 break;
2610 case KVM_SEV_INIT2:
2611 r = sev_guest_init2(kvm, &sev_cmd);
2612 break;
2613 case KVM_SEV_LAUNCH_START:
2614 r = sev_launch_start(kvm, &sev_cmd);
2615 break;
2616 case KVM_SEV_LAUNCH_UPDATE_DATA:
2617 r = sev_launch_update_data(kvm, &sev_cmd);
2618 break;
2619 case KVM_SEV_LAUNCH_UPDATE_VMSA:
2620 r = sev_launch_update_vmsa(kvm, &sev_cmd);
2621 break;
2622 case KVM_SEV_LAUNCH_MEASURE:
2623 r = sev_launch_measure(kvm, &sev_cmd);
2624 break;
2625 case KVM_SEV_LAUNCH_FINISH:
2626 r = sev_launch_finish(kvm, &sev_cmd);
2627 break;
2628 case KVM_SEV_GUEST_STATUS:
2629 r = sev_guest_status(kvm, &sev_cmd);
2630 break;
2631 case KVM_SEV_DBG_DECRYPT:
2632 r = sev_dbg_crypt(kvm, &sev_cmd, true);
2633 break;
2634 case KVM_SEV_DBG_ENCRYPT:
2635 r = sev_dbg_crypt(kvm, &sev_cmd, false);
2636 break;
2637 case KVM_SEV_LAUNCH_SECRET:
2638 r = sev_launch_secret(kvm, &sev_cmd);
2639 break;
2640 case KVM_SEV_GET_ATTESTATION_REPORT:
2641 r = sev_get_attestation_report(kvm, &sev_cmd);
2642 break;
2643 case KVM_SEV_SEND_START:
2644 r = sev_send_start(kvm, &sev_cmd);
2645 break;
2646 case KVM_SEV_SEND_UPDATE_DATA:
2647 r = sev_send_update_data(kvm, &sev_cmd);
2648 break;
2649 case KVM_SEV_SEND_FINISH:
2650 r = sev_send_finish(kvm, &sev_cmd);
2651 break;
2652 case KVM_SEV_SEND_CANCEL:
2653 r = sev_send_cancel(kvm, &sev_cmd);
2654 break;
2655 case KVM_SEV_RECEIVE_START:
2656 r = sev_receive_start(kvm, &sev_cmd);
2657 break;
2658 case KVM_SEV_RECEIVE_UPDATE_DATA:
2659 r = sev_receive_update_data(kvm, &sev_cmd);
2660 break;
2661 case KVM_SEV_RECEIVE_FINISH:
2662 r = sev_receive_finish(kvm, &sev_cmd);
2663 break;
2664 case KVM_SEV_SNP_LAUNCH_START:
2665 r = snp_launch_start(kvm, &sev_cmd);
2666 break;
2667 case KVM_SEV_SNP_LAUNCH_UPDATE:
2668 r = snp_launch_update(kvm, &sev_cmd);
2669 break;
2670 case KVM_SEV_SNP_LAUNCH_FINISH:
2671 r = snp_launch_finish(kvm, &sev_cmd);
2672 break;
2673 default:
2674 r = -EINVAL;
2675 goto out;
2676 }
2677
2678 if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
2679 r = -EFAULT;
2680
2681 out:
2682 mutex_unlock(&kvm->lock);
2683 return r;
2684 }
2685
2686 int sev_mem_enc_register_region(struct kvm *kvm,
2687 struct kvm_enc_region *range)
2688 {
2689 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2690 struct enc_region *region;
2691 int ret = 0;
2692
2693 if (!sev_guest(kvm))
2694 return -ENOTTY;
2695
2696 /* If kvm is mirroring the encryption context, it isn't responsible for it */
2697 if (is_mirroring_enc_context(kvm))
2698 return -EINVAL;
2699
2700 if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
2701 return -EINVAL;
2702
2703 region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
2704 if (!region)
2705 return -ENOMEM;
2706
2707 mutex_lock(&kvm->lock);
2708 region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
2709 if (IS_ERR(region->pages)) {
2710 ret = PTR_ERR(region->pages);
2711 mutex_unlock(&kvm->lock);
2712 goto e_free;
2713 }
2714
2715 /*
2716 * The guest may change the memory encryption attribute from C=0 -> C=1
2717 * or vice versa for this memory range. Flush the caches to ensure
2718 * that guest data gets written into memory with the correct
2719 * C-bit. Note, this must be done before dropping kvm->lock,
2720 * as region and its array of pages can be freed by a different task
2721 * once kvm->lock is released.
2722 */
2723 sev_clflush_pages(region->pages, region->npages);
2724
2725 region->uaddr = range->addr;
2726 region->size = range->size;
2727
2728 list_add_tail(&region->list, &sev->regions_list);
2729 mutex_unlock(&kvm->lock);
2730
2731 return ret;
2732
2733 e_free:
2734 kfree(region);
2735 return ret;
2736 }
2737
2738 static struct enc_region *
2739 find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
2740 {
2741 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2742 struct list_head *head = &sev->regions_list;
2743 struct enc_region *i;
2744
2745 list_for_each_entry(i, head, list) {
2746 if (i->uaddr == range->addr &&
2747 i->size == range->size)
2748 return i;
2749 }
2750
2751 return NULL;
2752 }
2753
2754 static void __unregister_enc_region_locked(struct kvm *kvm,
2755 struct enc_region *region)
2756 {
2757 sev_unpin_memory(kvm, region->pages, region->npages);
2758 list_del(&region->list);
2759 kfree(region);
2760 }
2761
2762 int sev_mem_enc_unregister_region(struct kvm *kvm,
2763 struct kvm_enc_region *range)
2764 {
2765 struct enc_region *region;
2766 int ret;
2767
2768 /* If kvm is mirroring the encryption context, it isn't responsible for it */
2769 if (is_mirroring_enc_context(kvm))
2770 return -EINVAL;
2771
2772 mutex_lock(&kvm->lock);
2773
2774 if (!sev_guest(kvm)) {
2775 ret = -ENOTTY;
2776 goto failed;
2777 }
2778
2779 region = find_enc_region(kvm, range);
2780 if (!region) {
2781 ret = -EINVAL;
2782 goto failed;
2783 }
2784
2785 /*
2786 * Ensure that all guest tagged cache entries are flushed before
2787 * releasing the pages back to the system for use. CLFLUSH will
2788 * not do this, so issue a WBINVD.
2789 */
2790 wbinvd_on_all_cpus();
2791
2792 __unregister_enc_region_locked(kvm, region);
2793
2794 mutex_unlock(&kvm->lock);
2795 return 0;
2796
2797 failed:
2798 mutex_unlock(&kvm->lock);
2799 return ret;
2800 }
2801
2802 int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
2803 {
2804 struct fd f = fdget(source_fd);
2805 struct kvm *source_kvm;
2806 struct kvm_sev_info *source_sev, *mirror_sev;
2807 int ret;
2808
2809 if (!fd_file(f))
2810 return -EBADF;
2811
2812 if (!file_is_kvm(fd_file(f))) {
2813 ret = -EBADF;
2814 goto e_source_fput;
2815 }
2816
2817 source_kvm = fd_file(f)->private_data;
2818 ret = sev_lock_two_vms(kvm, source_kvm);
2819 if (ret)
2820 goto e_source_fput;
2821
2822 /*
2823 * Mirrors of mirrors should work, but let's not get silly. Also
2824 * disallow out-of-band SEV/SEV-ES init if the target is already an
2825 * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
2826 * created after SEV/SEV-ES initialization, e.g. to init intercepts.
2827 */
2828 if (sev_guest(kvm) || !sev_guest(source_kvm) ||
2829 is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) {
2830 ret = -EINVAL;
2831 goto e_unlock;
2832 }
2833
2834 /*
2835 * The mirror kvm holds an enc_context_owner ref so its asid can't
2836 * disappear until we're done with it.
2837 */
2838 source_sev = &to_kvm_svm(source_kvm)->sev_info;
2839 kvm_get_kvm(source_kvm);
2840 mirror_sev = &to_kvm_svm(kvm)->sev_info;
2841 list_add_tail(&mirror_sev->mirror_entry, &source_sev->mirror_vms);
2842
2843 /* Set enc_context_owner and copy its encryption context over */
2844 mirror_sev->enc_context_owner = source_kvm;
2845 mirror_sev->active = true;
2846 mirror_sev->asid = source_sev->asid;
2847 mirror_sev->fd = source_sev->fd;
2848 mirror_sev->es_active = source_sev->es_active;
2849 mirror_sev->need_init = false;
2850 mirror_sev->handle = source_sev->handle;
2851 INIT_LIST_HEAD(&mirror_sev->regions_list);
2852 INIT_LIST_HEAD(&mirror_sev->mirror_vms);
2853 ret = 0;
2854
2855 /*
2856 * Do not copy ap_jump_table: the mirror does not share the same
2857 * KVM context as the original, and the two may have different
2858 * memory views.
2859 */
2860
2861 e_unlock:
2862 sev_unlock_two_vms(kvm, source_kvm);
2863 e_source_fput:
2864 fdput(f);
2865 return ret;
2866 }
2867
2868 static int snp_decommission_context(struct kvm *kvm)
2869 {
2870 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2871 struct sev_data_snp_addr data = {};
2872 int ret;
2873
2874 /* If context is not created then do nothing */
2875 if (!sev->snp_context)
2876 return 0;
2877
2878 /* Do the decommission, which will unbind the ASID from the SNP context */
2879 data.address = __sme_pa(sev->snp_context);
2880 down_write(&sev_deactivate_lock);
2881 ret = sev_do_cmd(SEV_CMD_SNP_DECOMMISSION, &data, NULL);
2882 up_write(&sev_deactivate_lock);
2883
2884 if (WARN_ONCE(ret, "Failed to release guest context, ret %d", ret))
2885 return ret;
2886
2887 snp_free_firmware_page(sev->snp_context);
2888 sev->snp_context = NULL;
2889
2890 return 0;
2891 }
2892
2893 void sev_vm_destroy(struct kvm *kvm)
2894 {
2895 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2896 struct list_head *head = &sev->regions_list;
2897 struct list_head *pos, *q;
2898
2899 if (!sev_guest(kvm))
2900 return;
2901
2902 WARN_ON(!list_empty(&sev->mirror_vms));
2903
2904 /* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
2905 if (is_mirroring_enc_context(kvm)) {
2906 struct kvm *owner_kvm = sev->enc_context_owner;
2907
2908 mutex_lock(&owner_kvm->lock);
2909 list_del(&sev->mirror_entry);
2910 mutex_unlock(&owner_kvm->lock);
2911 kvm_put_kvm(owner_kvm);
2912 return;
2913 }
2914
2915 /*
2916 * Ensure that all guest tagged cache entries are flushed before
2917 * releasing the pages back to the system for use. CLFLUSH will
2918 * not do this, so issue a WBINVD.
2919 */
2920 wbinvd_on_all_cpus();
2921
2922 /*
2923 * If userspace was terminated before unregistering the memory regions,
2924 * unpin all of the registered memory.
2925 */
2926 if (!list_empty(head)) {
2927 list_for_each_safe(pos, q, head) {
2928 __unregister_enc_region_locked(kvm,
2929 list_entry(pos, struct enc_region, list));
2930 cond_resched();
2931 }
2932 }
2933
2934 if (sev_snp_guest(kvm)) {
2935 snp_guest_req_cleanup(kvm);
2936
2937 /*
2938 * Decommission handles unbinding of the ASID. If it fails for
2939 * some unexpected reason, just leak the ASID.
2940 */
2941 if (snp_decommission_context(kvm))
2942 return;
2943 } else {
2944 sev_unbind_asid(kvm, sev->handle);
2945 }
2946
2947 sev_asid_free(sev);
2948 }
2949
2950 void __init sev_set_cpu_caps(void)
2951 {
2952 if (sev_enabled) {
2953 kvm_cpu_cap_set(X86_FEATURE_SEV);
2954 kvm_caps.supported_vm_types |= BIT(KVM_X86_SEV_VM);
2955 }
2956 if (sev_es_enabled) {
2957 kvm_cpu_cap_set(X86_FEATURE_SEV_ES);
2958 kvm_caps.supported_vm_types |= BIT(KVM_X86_SEV_ES_VM);
2959 }
2960 if (sev_snp_enabled) {
2961 kvm_cpu_cap_set(X86_FEATURE_SEV_SNP);
2962 kvm_caps.supported_vm_types |= BIT(KVM_X86_SNP_VM);
2963 }
2964 }
2965
2966 void __init sev_hardware_setup(void)
2967 {
2968 unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
2969 bool sev_snp_supported = false;
2970 bool sev_es_supported = false;
2971 bool sev_supported = false;
2972
2973 if (!sev_enabled || !npt_enabled || !nrips)
2974 goto out;
2975
2976 /*
2977 * SEV must obviously be supported in hardware. Sanity check that the
2978 * CPU supports decode assists, which is mandatory for SEV guests to
2979 * support instruction emulation. Ditto for flushing by ASID, as SEV
2980 * guests are bound to a single ASID, i.e. KVM can't rotate to a new
2981 * ASID to effect a TLB flush.
2982 */
2983 if (!boot_cpu_has(X86_FEATURE_SEV) ||
2984 WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) ||
2985 WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_FLUSHBYASID)))
2986 goto out;
2987
2988 /* Retrieve SEV CPUID information */
2989 cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
2990
2991 /* Set encryption bit location for SEV-ES guests */
2992 sev_enc_bit = ebx & 0x3f;
2993
2994 /* Maximum number of encrypted guests supported simultaneously */
2995 max_sev_asid = ecx;
2996 if (!max_sev_asid)
2997 goto out;
2998
2999 /* Minimum ASID value that should be used for SEV guest */
3000 min_sev_asid = edx;
3001 sev_me_mask = 1UL << (ebx & 0x3f);
3002
3003 /*
3004 * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
3005 * even though it's never used, so that the bitmap is indexed by the
3006 * actual ASID.
3007 */
3008 nr_asids = max_sev_asid + 1;
3009 sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
3010 if (!sev_asid_bitmap)
3011 goto out;
3012
3013 sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
3014 if (!sev_reclaim_asid_bitmap) {
3015 bitmap_free(sev_asid_bitmap);
3016 sev_asid_bitmap = NULL;
3017 goto out;
3018 }
3019
3020 if (min_sev_asid <= max_sev_asid) {
3021 sev_asid_count = max_sev_asid - min_sev_asid + 1;
3022 WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
3023 }
3024 sev_supported = true;
3025
3026 /* SEV-ES support requested? */
3027 if (!sev_es_enabled)
3028 goto out;
3029
3030 /*
3031 * SEV-ES requires MMIO caching as KVM doesn't have access to the guest
3032 * instruction stream, i.e. can't emulate in response to a #NPF and
3033 * instead relies on #NPF(RSVD) being reflected into the guest as #VC
3034 * (the guest can then do a #VMGEXIT to request MMIO emulation).
3035 */
3036 if (!enable_mmio_caching)
3037 goto out;
3038
3039 /* Does the CPU support SEV-ES? */
3040 if (!boot_cpu_has(X86_FEATURE_SEV_ES))
3041 goto out;
3042
3043 if (!lbrv) {
3044 WARN_ONCE(!boot_cpu_has(X86_FEATURE_LBRV),
3045 "LBRV must be present for SEV-ES support");
3046 goto out;
3047 }
3048
3049 /* Has the system been allocated ASIDs for SEV-ES? */
3050 if (min_sev_asid == 1)
3051 goto out;
3052
3053 sev_es_asid_count = min_sev_asid - 1;
3054 WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count));
3055 sev_es_supported = true;
3056 sev_snp_supported = sev_snp_enabled && cc_platform_has(CC_ATTR_HOST_SEV_SNP);
3057
3058 out:
3059 if (boot_cpu_has(X86_FEATURE_SEV))
3060 pr_info("SEV %s (ASIDs %u - %u)\n",
3061 sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" :
3062 "unusable" :
3063 "disabled",
3064 min_sev_asid, max_sev_asid);
3065 if (boot_cpu_has(X86_FEATURE_SEV_ES))
3066 pr_info("SEV-ES %s (ASIDs %u - %u)\n",
3067 sev_es_supported ? "enabled" : "disabled",
3068 min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);
3069 if (boot_cpu_has(X86_FEATURE_SEV_SNP))
3070 pr_info("SEV-SNP %s (ASIDs %u - %u)\n",
3071 sev_snp_supported ? "enabled" : "disabled",
3072 min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);
3073
3074 sev_enabled = sev_supported;
3075 sev_es_enabled = sev_es_supported;
3076 sev_snp_enabled = sev_snp_supported;
3077
3078 if (!sev_es_enabled || !cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP) ||
3079 !cpu_feature_enabled(X86_FEATURE_NO_NESTED_DATA_BP))
3080 sev_es_debug_swap_enabled = false;
3081
3082 sev_supported_vmsa_features = 0;
3083 if (sev_es_debug_swap_enabled)
3084 sev_supported_vmsa_features |= SVM_SEV_FEAT_DEBUG_SWAP;
3085 }
3086
3087 void sev_hardware_unsetup(void)
3088 {
3089 if (!sev_enabled)
3090 return;
3091
3092 /* No need to take sev_bitmap_lock, all VMs have been destroyed. */
3093 sev_flush_asids(1, max_sev_asid);
3094
3095 bitmap_free(sev_asid_bitmap);
3096 bitmap_free(sev_reclaim_asid_bitmap);
3097
3098 misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
3099 misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
3100 }
3101
3102 int sev_cpu_init(struct svm_cpu_data *sd)
3103 {
3104 if (!sev_enabled)
3105 return 0;
3106
3107 sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
3108 if (!sd->sev_vmcbs)
3109 return -ENOMEM;
3110
3111 return 0;
3112 }
3113
3114 /*
3115 * Pages used by hardware to hold guest encrypted state must be flushed before
3116 * returning them to the system.
3117 */
3118 static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
3119 {
3120 unsigned int asid = sev_get_asid(vcpu->kvm);
3121
3122 /*
3123 * Note! The address must be a kernel address, as regular page walk
3124 * checks are performed by VM_PAGE_FLUSH, i.e. operating on a user
3125 * address is non-deterministic and unsafe. This function deliberately
3126 * takes a pointer to deter passing in a user address.
3127 */
3128 unsigned long addr = (unsigned long)va;
3129
3130 /*
3131 * If the CPU enforces cache coherency for encrypted mappings of the
3132 * same physical page, use CLFLUSHOPT instead. NOTE: a cache
3133 * flush is still needed in order to work properly with DMA devices.
3134 */
3135 if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
3136 clflush_cache_range(va, PAGE_SIZE);
3137 return;
3138 }
3139
3140 /*
3141 * VM Page Flush takes a host virtual address and a guest ASID. Fall
3142 * back to WBINVD if this faults so as not to make any problems worse
3143 * by leaving stale encrypted data in the cache.
3144 */
3145 if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
3146 goto do_wbinvd;
3147
3148 return;
3149
3150 do_wbinvd:
3151 wbinvd_on_all_cpus();
3152 }
3153
3154 void sev_guest_memory_reclaimed(struct kvm *kvm)
3155 {
3156 /*
3157 * With SNP+gmem, private/encrypted memory is unreachable via the
3158 * hva-based mmu notifiers, so these events only pertain to shared
3159 * pages, where there is no need to perform the WBINVD to flush the
3160 * associated caches.
3161 */
3162 if (!sev_guest(kvm) || sev_snp_guest(kvm))
3163 return;
3164
3165 wbinvd_on_all_cpus();
3166 }
3167
3168 void sev_free_vcpu(struct kvm_vcpu *vcpu)
3169 {
3170 struct vcpu_svm *svm;
3171
3172 if (!sev_es_guest(vcpu->kvm))
3173 return;
3174
3175 svm = to_svm(vcpu);
3176
3177 /*
3178 * If it's an SNP guest, then the VMSA was marked in the RMP table as
3179 * a guest-owned page. Transition the page to hypervisor state before
3180 * releasing it back to the system.
3181 */
3182 if (sev_snp_guest(vcpu->kvm)) {
3183 u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT;
3184
3185 if (kvm_rmp_make_shared(vcpu->kvm, pfn, PG_LEVEL_4K))
3186 goto skip_vmsa_free;
3187 }
3188
3189 if (vcpu->arch.guest_state_protected)
3190 sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);
3191
3192 __free_page(virt_to_page(svm->sev_es.vmsa));
3193
3194 skip_vmsa_free:
3195 if (svm->sev_es.ghcb_sa_free)
3196 kvfree(svm->sev_es.ghcb_sa);
3197 }
3198
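/* Dump the GHCB exit fields to aid debugging of a malformed VMGEXIT request. */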
3199 static void dump_ghcb(struct vcpu_svm *svm)
3200 {
3201 struct ghcb *ghcb = svm->sev_es.ghcb;
3202 unsigned int nbits;
3203
3204 /* Re-use the dump_invalid_vmcb module parameter */
3205 if (!dump_invalid_vmcb) {
3206 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
3207 return;
3208 }
3209
3210 nbits = sizeof(ghcb->save.valid_bitmap) * 8;
3211
3212 pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
3213 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
3214 ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
3215 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
3216 ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
3217 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
3218 ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
3219 pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
3220 ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
3221 pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
3222 }
3223
3224 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
3225 {
3226 struct kvm_vcpu *vcpu = &svm->vcpu;
3227 struct ghcb *ghcb = svm->sev_es.ghcb;
3228
3229 /*
3230 * The GHCB protocol so far allows for the following data
3231 * to be returned:
3232 * GPRs RAX, RBX, RCX, RDX
3233 *
3234 * Copy their values, even if they may not have been written during the
3235 * VM-Exit. It's the guest's responsibility to not consume random data.
3236 */
3237 ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
3238 ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
3239 ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
3240 ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
3241 }
3242
3243 static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
3244 {
3245 struct vmcb_control_area *control = &svm->vmcb->control;
3246 struct kvm_vcpu *vcpu = &svm->vcpu;
3247 struct ghcb *ghcb = svm->sev_es.ghcb;
3248 u64 exit_code;
3249
3250 /*
3251 * The GHCB protocol so far allows for the following data
3252 * to be supplied:
3253 * GPRs RAX, RBX, RCX, RDX
3254 * XCR0
3255 * CPL
3256 *
3257 * VMMCALL allows the guest to provide extra registers. KVM also
3258 * expects RSI for hypercalls, so include that, too.
3259 *
3260 * Copy their values to the appropriate location if supplied.
3261 */
3262 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
3263
3264 BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
3265 memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));
3266
3267 vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
3268 vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
3269 vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
3270 vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
3271 vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);
3272
3273 svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);
3274
3275 if (kvm_ghcb_xcr0_is_valid(svm)) {
3276 vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
3277 kvm_update_cpuid_runtime(vcpu);
3278 }
3279
3280 /* Copy the GHCB exit information into the VMCB fields */
3281 exit_code = ghcb_get_sw_exit_code(ghcb);
3282 control->exit_code = lower_32_bits(exit_code);
3283 control->exit_code_hi = upper_32_bits(exit_code);
3284 control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
3285 control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
3286 svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb);
3287
3288 /* Clear the valid entries fields */
3289 memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
3290 }
3291
3292 static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
3293 {
3294 return (((u64)control->exit_code_hi) << 32) | control->exit_code;
3295 }
3296
3297 static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
3298 {
3299 struct vmcb_control_area *control = &svm->vmcb->control;
3300 struct kvm_vcpu *vcpu = &svm->vcpu;
3301 u64 exit_code;
3302 u64 reason;
3303
3304 /*
3305 * Retrieve the exit code now even though it may not be marked valid
3306 * as it could help with debugging.
3307 */
3308 exit_code = kvm_ghcb_get_sw_exit_code(control);
3309
3310 /* Only GHCB Usage code 0 is supported */
3311 if (svm->sev_es.ghcb->ghcb_usage) {
3312 reason = GHCB_ERR_INVALID_USAGE;
3313 goto vmgexit_err;
3314 }
3315
3316 reason = GHCB_ERR_MISSING_INPUT;
3317
3318 if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
3319 !kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
3320 !kvm_ghcb_sw_exit_info_2_is_valid(svm))
3321 goto vmgexit_err;
3322
3323 switch (exit_code) {
3324 case SVM_EXIT_READ_DR7:
3325 break;
3326 case SVM_EXIT_WRITE_DR7:
3327 if (!kvm_ghcb_rax_is_valid(svm))
3328 goto vmgexit_err;
3329 break;
3330 case SVM_EXIT_RDTSC:
3331 break;
3332 case SVM_EXIT_RDPMC:
3333 if (!kvm_ghcb_rcx_is_valid(svm))
3334 goto vmgexit_err;
3335 break;
3336 case SVM_EXIT_CPUID:
3337 if (!kvm_ghcb_rax_is_valid(svm) ||
3338 !kvm_ghcb_rcx_is_valid(svm))
3339 goto vmgexit_err;
3340 if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd)
3341 if (!kvm_ghcb_xcr0_is_valid(svm))
3342 goto vmgexit_err;
3343 break;
3344 case SVM_EXIT_INVD:
3345 break;
3346 case SVM_EXIT_IOIO:
3347 if (control->exit_info_1 & SVM_IOIO_STR_MASK) {
3348 if (!kvm_ghcb_sw_scratch_is_valid(svm))
3349 goto vmgexit_err;
3350 } else {
3351 if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK))
3352 if (!kvm_ghcb_rax_is_valid(svm))
3353 goto vmgexit_err;
3354 }
3355 break;
3356 case SVM_EXIT_MSR:
3357 if (!kvm_ghcb_rcx_is_valid(svm))
3358 goto vmgexit_err;
3359 if (control->exit_info_1) {
3360 if (!kvm_ghcb_rax_is_valid(svm) ||
3361 !kvm_ghcb_rdx_is_valid(svm))
3362 goto vmgexit_err;
3363 }
3364 break;
3365 case SVM_EXIT_VMMCALL:
3366 if (!kvm_ghcb_rax_is_valid(svm) ||
3367 !kvm_ghcb_cpl_is_valid(svm))
3368 goto vmgexit_err;
3369 break;
3370 case SVM_EXIT_RDTSCP:
3371 break;
3372 case SVM_EXIT_WBINVD:
3373 break;
3374 case SVM_EXIT_MONITOR:
3375 if (!kvm_ghcb_rax_is_valid(svm) ||
3376 !kvm_ghcb_rcx_is_valid(svm) ||
3377 !kvm_ghcb_rdx_is_valid(svm))
3378 goto vmgexit_err;
3379 break;
3380 case SVM_EXIT_MWAIT:
3381 if (!kvm_ghcb_rax_is_valid(svm) ||
3382 !kvm_ghcb_rcx_is_valid(svm))
3383 goto vmgexit_err;
3384 break;
3385 case SVM_VMGEXIT_MMIO_READ:
3386 case SVM_VMGEXIT_MMIO_WRITE:
3387 if (!kvm_ghcb_sw_scratch_is_valid(svm))
3388 goto vmgexit_err;
3389 break;
3390 case SVM_VMGEXIT_AP_CREATION:
3391 if (!sev_snp_guest(vcpu->kvm))
3392 goto vmgexit_err;
3393 if (lower_32_bits(control->exit_info_1) != SVM_VMGEXIT_AP_DESTROY)
3394 if (!kvm_ghcb_rax_is_valid(svm))
3395 goto vmgexit_err;
3396 break;
3397 case SVM_VMGEXIT_NMI_COMPLETE:
3398 case SVM_VMGEXIT_AP_HLT_LOOP:
3399 case SVM_VMGEXIT_AP_JUMP_TABLE:
3400 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
3401 case SVM_VMGEXIT_HV_FEATURES:
3402 case SVM_VMGEXIT_TERM_REQUEST:
3403 break;
3404 case SVM_VMGEXIT_PSC:
3405 if (!sev_snp_guest(vcpu->kvm) || !kvm_ghcb_sw_scratch_is_valid(svm))
3406 goto vmgexit_err;
3407 break;
3408 case SVM_VMGEXIT_GUEST_REQUEST:
3409 case SVM_VMGEXIT_EXT_GUEST_REQUEST:
3410 if (!sev_snp_guest(vcpu->kvm) ||
3411 !PAGE_ALIGNED(control->exit_info_1) ||
3412 !PAGE_ALIGNED(control->exit_info_2) ||
3413 control->exit_info_1 == control->exit_info_2)
3414 goto vmgexit_err;
3415 break;
3416 default:
3417 reason = GHCB_ERR_INVALID_EVENT;
3418 goto vmgexit_err;
3419 }
3420
3421 return 0;
3422
3423 vmgexit_err:
3424 if (reason == GHCB_ERR_INVALID_USAGE) {
3425 vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
3426 svm->sev_es.ghcb->ghcb_usage);
3427 } else if (reason == GHCB_ERR_INVALID_EVENT) {
3428 vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
3429 exit_code);
3430 } else {
3431 vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
3432 exit_code);
3433 dump_ghcb(svm);
3434 }
3435
3436 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
3437 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason);
3438
3439 /* Resume the guest to "return" the error code. */
3440 return 1;
3441 }
3442
3443 void sev_es_unmap_ghcb(struct vcpu_svm *svm)
3444 {
3445 /* Clear any indication that the vCPU is in a type of AP Reset Hold */
3446 svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NONE;
3447
3448 if (!svm->sev_es.ghcb)
3449 return;
3450
3451 if (svm->sev_es.ghcb_sa_free) {
3452 /*
3453 * The scratch area lives outside the GHCB, so there is a
3454 * buffer that, depending on the operation performed, may
3455 * need to be synced, then freed.
3456 */
3457 if (svm->sev_es.ghcb_sa_sync) {
3458 kvm_write_guest(svm->vcpu.kvm,
3459 svm->sev_es.sw_scratch,
3460 svm->sev_es.ghcb_sa,
3461 svm->sev_es.ghcb_sa_len);
3462 svm->sev_es.ghcb_sa_sync = false;
3463 }
3464
3465 kvfree(svm->sev_es.ghcb_sa);
3466 svm->sev_es.ghcb_sa = NULL;
3467 svm->sev_es.ghcb_sa_free = false;
3468 }
3469
3470 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);
3471
3472 sev_es_sync_to_ghcb(svm);
3473
3474 kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
3475 svm->sev_es.ghcb = NULL;
3476 }
3477
3478 void pre_sev_run(struct vcpu_svm *svm, int cpu)
3479 {
3480 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
3481 unsigned int asid = sev_get_asid(svm->vcpu.kvm);
3482
3483 /* Assign the asid allocated with this SEV guest */
3484 svm->asid = asid;
3485
3486 /*
3487 * Flush guest TLB:
3488 *
3489 * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
3490 * 2) when this VMCB was executed on a different host CPU in previous VMRUNs.
3491 */
3492 if (sd->sev_vmcbs[asid] == svm->vmcb &&
3493 svm->vcpu.arch.last_vmentry_cpu == cpu)
3494 return;
3495
3496 sd->sev_vmcbs[asid] = svm->vmcb;
3497 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3498 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
3499 }
3500
3501 #define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
3502 static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
3503 {
3504 struct vmcb_control_area *control = &svm->vmcb->control;
3505 u64 ghcb_scratch_beg, ghcb_scratch_end;
3506 u64 scratch_gpa_beg, scratch_gpa_end;
3507 void *scratch_va;
3508
3509 scratch_gpa_beg = svm->sev_es.sw_scratch;
3510 if (!scratch_gpa_beg) {
3511 pr_err("vmgexit: scratch gpa not provided\n");
3512 goto e_scratch;
3513 }
3514
3515 scratch_gpa_end = scratch_gpa_beg + len;
3516 if (scratch_gpa_end < scratch_gpa_beg) {
3517 pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
3518 len, scratch_gpa_beg);
3519 goto e_scratch;
3520 }
3521
3522 if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
3523 /* Scratch area begins within GHCB */
3524 ghcb_scratch_beg = control->ghcb_gpa +
3525 offsetof(struct ghcb, shared_buffer);
3526 ghcb_scratch_end = control->ghcb_gpa +
3527 offsetof(struct ghcb, reserved_0xff0);
3528
3529 /*
3530 * If the scratch area begins within the GHCB, it must be
3531 * completely contained in the GHCB shared buffer area.
3532 */
3533 if (scratch_gpa_beg < ghcb_scratch_beg ||
3534 scratch_gpa_end > ghcb_scratch_end) {
3535 pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
3536 scratch_gpa_beg, scratch_gpa_end);
3537 goto e_scratch;
3538 }
3539
3540 scratch_va = (void *)svm->sev_es.ghcb;
3541 scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
3542 } else {
3543 /*
3544 * The guest memory must be read into a kernel buffer, so
3545 * limit the size
3546 */
3547 if (len > GHCB_SCRATCH_AREA_LIMIT) {
3548 pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
3549 len, GHCB_SCRATCH_AREA_LIMIT);
3550 goto e_scratch;
3551 }
3552 scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
3553 if (!scratch_va)
3554 return -ENOMEM;
3555
3556 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
3557 /* Unable to copy scratch area from guest */
3558 pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
3559
3560 kvfree(scratch_va);
3561 return -EFAULT;
3562 }
3563
3564 /*
3565 * The scratch area is outside the GHCB. The operation will
3566 * dictate whether the buffer needs to be synced before running
3567 * the vCPU next time (i.e. a read was requested so the data
3568 * must be written back to the guest memory).
3569 */
3570 svm->sev_es.ghcb_sa_sync = sync;
3571 svm->sev_es.ghcb_sa_free = true;
3572 }
3573
3574 svm->sev_es.ghcb_sa = scratch_va;
3575 svm->sev_es.ghcb_sa_len = len;
3576
3577 return 0;
3578
3579 e_scratch:
3580 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
3581 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
3582
3583 return 1;
3584 }
3585
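/* Helpers for reading/writing bitfields of the GHCB MSR protocol value. */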
3586 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
3587 unsigned int pos)
3588 {
3589 svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
3590 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
3591 }
3592
3593 static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
3594 {
3595 return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
3596 }
3597
3598 static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
3599 {
3600 svm->vmcb->control.ghcb_gpa = value;
3601 }
3602
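/* Split a 2M RMP entry into 4K entries, retrying while the entry is in use. */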
3603 static int snp_rmptable_psmash(kvm_pfn_t pfn)
3604 {
3605 int ret;
3606
3607 pfn = pfn & ~(KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) - 1);
3608
3609 /*
3610 * PSMASH_FAIL_INUSE indicates another processor is modifying the
3611 * entry, so retry until that's no longer the case.
3612 */
3613 do {
3614 ret = psmash(pfn);
3615 } while (ret == PSMASH_FAIL_INUSE);
3616
3617 return ret;
3618 }
3619
3620 static int snp_complete_psc_msr(struct kvm_vcpu *vcpu)
3621 {
3622 struct vcpu_svm *svm = to_svm(vcpu);
3623
3624 if (vcpu->run->hypercall.ret)
3625 set_ghcb_msr(svm, GHCB_MSR_PSC_RESP_ERROR);
3626 else
3627 set_ghcb_msr(svm, GHCB_MSR_PSC_RESP);
3628
3629 return 1; /* resume guest */
3630 }
3631
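/*
 * Handle a Page State Change request made via the GHCB MSR protocol by
 * forwarding it to userspace as a single-page KVM_HC_MAP_GPA_RANGE hypercall.
 */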
3632 static int snp_begin_psc_msr(struct vcpu_svm *svm, u64 ghcb_msr)
3633 {
3634 u64 gpa = gfn_to_gpa(GHCB_MSR_PSC_REQ_TO_GFN(ghcb_msr));
3635 u8 op = GHCB_MSR_PSC_REQ_TO_OP(ghcb_msr);
3636 struct kvm_vcpu *vcpu = &svm->vcpu;
3637
3638 if (op != SNP_PAGE_STATE_PRIVATE && op != SNP_PAGE_STATE_SHARED) {
3639 set_ghcb_msr(svm, GHCB_MSR_PSC_RESP_ERROR);
3640 return 1; /* resume guest */
3641 }
3642
3643 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) {
3644 set_ghcb_msr(svm, GHCB_MSR_PSC_RESP_ERROR);
3645 return 1; /* resume guest */
3646 }
3647
3648 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
3649 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
3650 vcpu->run->hypercall.args[0] = gpa;
3651 vcpu->run->hypercall.args[1] = 1;
3652 vcpu->run->hypercall.args[2] = (op == SNP_PAGE_STATE_PRIVATE)
3653 ? KVM_MAP_GPA_RANGE_ENCRYPTED
3654 : KVM_MAP_GPA_RANGE_DECRYPTED;
3655 vcpu->run->hypercall.args[2] |= KVM_MAP_GPA_RANGE_PAGE_SZ_4K;
3656
3657 vcpu->arch.complete_userspace_io = snp_complete_psc_msr;
3658
3659 return 0; /* forward request to userspace */
3660 }
3661
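/*
 * Layout of a PSC request conveyed via the GHCB shared buffer for the full
 * GHCB protocol: a header followed by an array of PSC entries, as defined by
 * the GHCB specification.
 */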
3662 struct psc_buffer {
3663 struct psc_hdr hdr;
3664 struct psc_entry entries[];
3665 } __packed;
3666
3667 static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc);
3668
3669 static void snp_complete_psc(struct vcpu_svm *svm, u64 psc_ret)
3670 {
3671 svm->sev_es.psc_inflight = 0;
3672 svm->sev_es.psc_idx = 0;
3673 svm->sev_es.psc_2m = false;
3674 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, psc_ret);
3675 }
3676
3677 static void __snp_complete_one_psc(struct vcpu_svm *svm)
3678 {
3679 struct psc_buffer *psc = svm->sev_es.ghcb_sa;
3680 struct psc_entry *entries = psc->entries;
3681 struct psc_hdr *hdr = &psc->hdr;
3682 __u16 idx;
3683
3684 /*
3685 * Everything in-flight has been processed successfully. Update the
3686 * corresponding entries in the guest's PSC buffer and zero out the
3687 * count of in-flight PSC entries.
3688 */
3689 for (idx = svm->sev_es.psc_idx; svm->sev_es.psc_inflight;
3690 svm->sev_es.psc_inflight--, idx++) {
3691 struct psc_entry *entry = &entries[idx];
3692
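/*
 * Setting cur_page to the entry's total page count (512 for a 2M entry,
 * 1 for a 4K entry) marks the entry as fully processed.
 */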
3693 entry->cur_page = entry->pagesize ? 512 : 1;
3694 }
3695
3696 hdr->cur_entry = idx;
3697 }
3698
3699 static int snp_complete_one_psc(struct kvm_vcpu *vcpu)
3700 {
3701 struct vcpu_svm *svm = to_svm(vcpu);
3702 struct psc_buffer *psc = svm->sev_es.ghcb_sa;
3703
3704 if (vcpu->run->hypercall.ret) {
3705 snp_complete_psc(svm, VMGEXIT_PSC_ERROR_GENERIC);
3706 return 1; /* resume guest */
3707 }
3708
3709 __snp_complete_one_psc(svm);
3710
3711 /* Handle the next range (if any). */
3712 return snp_begin_psc(svm, psc);
3713 }
3714
3715 static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc)
3716 {
3717 struct psc_entry *entries = psc->entries;
3718 struct kvm_vcpu *vcpu = &svm->vcpu;
3719 struct psc_hdr *hdr = &psc->hdr;
3720 struct psc_entry entry_start;
3721 u16 idx, idx_start, idx_end;
3722 int npages;
3723 bool huge;
3724 u64 gfn;
3725
3726 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) {
3727 snp_complete_psc(svm, VMGEXIT_PSC_ERROR_GENERIC);
3728 return 1;
3729 }
3730
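/*
 * Process the guest's PSC buffer one contiguous range at a time: each range
 * is forwarded to userspace as a KVM_HC_MAP_GPA_RANGE hypercall, and
 * processing resumes here via snp_complete_one_psc().
 */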
3731 next_range:
3732 /* There should be no other PSCs in-flight at this point. */
3733 if (WARN_ON_ONCE(svm->sev_es.psc_inflight)) {
3734 snp_complete_psc(svm, VMGEXIT_PSC_ERROR_GENERIC);
3735 return 1;
3736 }
3737
3738 /*
3739 * The PSC descriptor buffer can be modified by a misbehaved guest after
3740 * validation, so take care to only use validated copies of values used
3741 * for things like array indexing.
3742 */
3743 idx_start = hdr->cur_entry;
3744 idx_end = hdr->end_entry;
3745
3746 if (idx_end >= VMGEXIT_PSC_MAX_COUNT) {
3747 snp_complete_psc(svm, VMGEXIT_PSC_ERROR_INVALID_HDR);
3748 return 1;
3749 }
3750
3751 /* Find the start of the next range which needs processing. */
3752 for (idx = idx_start; idx <= idx_end; idx++, hdr->cur_entry++) {
3753 entry_start = entries[idx];
3754
3755 gfn = entry_start.gfn;
3756 huge = entry_start.pagesize;
3757 npages = huge ? 512 : 1;
3758
3759 if (entry_start.cur_page > npages || !IS_ALIGNED(gfn, npages)) {
3760 snp_complete_psc(svm, VMGEXIT_PSC_ERROR_INVALID_ENTRY);
3761 return 1;
3762 }
3763
3764 if (entry_start.cur_page) {
3765 /*
3766 * If this is a partially-completed 2M range, force 4K handling
3767 * for the remaining pages since they're effectively split at
3768 * this point. Subsequent code should ensure this doesn't get
3769 * combined with adjacent PSC entries where 2M handling is still
3770 * possible.
3771 */
3772 npages -= entry_start.cur_page;
3773 gfn += entry_start.cur_page;
3774 huge = false;
3775 }
3776
3777 if (npages)
3778 break;
3779 }
3780
3781 if (idx > idx_end) {
3782 /* Nothing more to process. */
3783 snp_complete_psc(svm, 0);
3784 return 1;
3785 }
3786
3787 svm->sev_es.psc_2m = huge;
3788 svm->sev_es.psc_idx = idx;
3789 svm->sev_es.psc_inflight = 1;
3790
3791 /*
3792 * Find all subsequent PSC entries that contain adjacent GPA
3793 * ranges/operations and can be combined into a single
3794 * KVM_HC_MAP_GPA_RANGE exit.
3795 */
3796 while (++idx <= idx_end) {
3797 struct psc_entry entry = entries[idx];
3798
3799 if (entry.operation != entry_start.operation ||
3800 entry.gfn != entry_start.gfn + npages ||
3801 entry.cur_page || !!entry.pagesize != huge)
3802 break;
3803
3804 svm->sev_es.psc_inflight++;
3805 npages += huge ? 512 : 1;
3806 }
3807
3808 switch (entry_start.operation) {
3809 case VMGEXIT_PSC_OP_PRIVATE:
3810 case VMGEXIT_PSC_OP_SHARED:
3811 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
3812 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
3813 vcpu->run->hypercall.args[0] = gfn_to_gpa(gfn);
3814 vcpu->run->hypercall.args[1] = npages;
3815 vcpu->run->hypercall.args[2] = entry_start.operation == VMGEXIT_PSC_OP_PRIVATE
3816 ? KVM_MAP_GPA_RANGE_ENCRYPTED
3817 : KVM_MAP_GPA_RANGE_DECRYPTED;
3818 vcpu->run->hypercall.args[2] |= entry_start.pagesize
3819 ? KVM_MAP_GPA_RANGE_PAGE_SZ_2M
3820 : KVM_MAP_GPA_RANGE_PAGE_SZ_4K;
3821 vcpu->arch.complete_userspace_io = snp_complete_one_psc;
3822 return 0; /* forward request to userspace */
3823 default:
3824 /*
3825 * Only shared/private PSC operations are currently supported, so if the
3826 * entire range consists of unsupported operations (e.g. SMASH/UNSMASH),
3827 * then consider the entire range completed and avoid exiting to
3828 * userspace. In theory snp_complete_psc() can always be called directly
3829 * at this point to complete the current range and start the next one,
3830 * but that could lead to unexpected levels of recursion.
3831 */
3832 __snp_complete_one_psc(svm);
3833 goto next_range;
3834 }
3835
3836 unreachable();
3837 }
3838
3839 static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
3840 {
3841 struct vcpu_svm *svm = to_svm(vcpu);
3842
3843 WARN_ON(!mutex_is_locked(&svm->sev_es.snp_vmsa_mutex));
3844
3845 /* Mark the vCPU as offline and not runnable */
3846 vcpu->arch.pv.pv_unhalted = false;
3847 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
3848
3849 /* Clear use of the VMSA */
3850 svm->vmcb->control.vmsa_pa = INVALID_PAGE;
3851
3852 if (VALID_PAGE(svm->sev_es.snp_vmsa_gpa)) {
3853 gfn_t gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa);
3854 struct kvm_memory_slot *slot;
3855 kvm_pfn_t pfn;
3856
3857 slot = gfn_to_memslot(vcpu->kvm, gfn);
3858 if (!slot)
3859 return -EINVAL;
3860
3861 /*
3862 * The new VMSA will be private guest memory, so
3863 * retrieve the PFN from the gmem backend.
3864 */
3865 if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, NULL))
3866 return -EINVAL;
3867
3868 /*
3869 * From this point forward, the VMSA will always be a
3870 * guest-mapped page rather than the initial one allocated
3871 * by KVM in svm->sev_es.vmsa. In theory, svm->sev_es.vmsa
3872 * could be free'd and cleaned up here, but that involves
3873 * cleanups like wbinvd_on_all_cpus() which would ideally
3874 * be handled during teardown rather than guest boot.
3875 * Deferring that also allows the existing logic for SEV-ES
3876 * VMSAs to be re-used with minimal SNP-specific changes.
3877 */
3878 svm->sev_es.snp_has_guest_vmsa = true;
3879
3880 /* Use the new VMSA */
3881 svm->vmcb->control.vmsa_pa = pfn_to_hpa(pfn);
3882
3883 /* Mark the vCPU as runnable */
3884 vcpu->arch.pv.pv_unhalted = false;
3885 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3886
3887 svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
3888
3889 /*
3890 * gmem pages aren't currently migratable, but if this ever
3891 * changes then care should be taken to ensure
3892 * svm->sev_es.vmsa is pinned through some other means.
3893 */
3894 kvm_release_pfn_clean(pfn);
3895 }
3896
3897 /*
3898 * When replacing the VMSA during SEV-SNP AP creation,
3899 * mark the VMCB dirty so that full state is always reloaded.
3900 */
3901 vmcb_mark_all_dirty(svm->vmcb);
3902
3903 return 0;
3904 }
3905
3906 /*
3907 * Invoked as part of svm_vcpu_reset() processing of an init event.
3908 */
3909 void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
3910 {
3911 struct vcpu_svm *svm = to_svm(vcpu);
3912 int ret;
3913
3914 if (!sev_snp_guest(vcpu->kvm))
3915 return;
3916
3917 mutex_lock(&svm->sev_es.snp_vmsa_mutex);
3918
3919 if (!svm->sev_es.snp_ap_waiting_for_reset)
3920 goto unlock;
3921
3922 svm->sev_es.snp_ap_waiting_for_reset = false;
3923
3924 ret = __sev_snp_update_protected_guest_state(vcpu);
3925 if (ret)
3926 vcpu_unimpl(vcpu, "snp: AP state update on init failed\n");
3927
3928 unlock:
3929 mutex_unlock(&svm->sev_es.snp_vmsa_mutex);
3930 }
3931
3932 static int sev_snp_ap_creation(struct vcpu_svm *svm)
3933 {
3934 struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
3935 struct kvm_vcpu *vcpu = &svm->vcpu;
3936 struct kvm_vcpu *target_vcpu;
3937 struct vcpu_svm *target_svm;
3938 unsigned int request;
3939 unsigned int apic_id;
3940 bool kick;
3941 int ret;
3942
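/*
 * Per the GHCB spec's AP Creation NAE event, exit_info_1 encodes the request
 * type in its lower 32 bits and the target APIC ID in its upper 32 bits.
 */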
3943 request = lower_32_bits(svm->vmcb->control.exit_info_1);
3944 apic_id = upper_32_bits(svm->vmcb->control.exit_info_1);
3945
3946 /* Validate the APIC ID */
3947 target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, apic_id);
3948 if (!target_vcpu) {
3949 vcpu_unimpl(vcpu, "vmgexit: invalid AP APIC ID [%#x] from guest\n",
3950 apic_id);
3951 return -EINVAL;
3952 }
3953
3954 ret = 0;
3955
3956 target_svm = to_svm(target_vcpu);
3957
3958 /*
3959 * The target vCPU is valid, so the vCPU will be kicked unless the
3960 * request is for CREATE_ON_INIT. For any errors at this stage, the
3961 * kick will place the vCPU in a non-runnable state.
3962 */
3963 kick = true;
3964
3965 mutex_lock(&target_svm->sev_es.snp_vmsa_mutex);
3966
3967 target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
3968 target_svm->sev_es.snp_ap_waiting_for_reset = true;
3969
3970 /* Interrupt injection mode shouldn't change for AP creation */
3971 if (request < SVM_VMGEXIT_AP_DESTROY) {
3972 u64 sev_features;
3973
3974 sev_features = vcpu->arch.regs[VCPU_REGS_RAX];
3975 sev_features ^= sev->vmsa_features;
3976
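/*
 * XOR against the VM-wide VMSA features yields the bits the guest is
 * attempting to change; only interrupt injection mode changes are
 * rejected here.
 */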
3977 if (sev_features & SVM_SEV_FEAT_INT_INJ_MODES) {
3978 vcpu_unimpl(vcpu, "vmgexit: invalid AP injection mode [%#lx] from guest\n",
3979 vcpu->arch.regs[VCPU_REGS_RAX]);
3980 ret = -EINVAL;
3981 goto out;
3982 }
3983 }
3984
3985 switch (request) {
3986 case SVM_VMGEXIT_AP_CREATE_ON_INIT:
3987 kick = false;
3988 fallthrough;
3989 case SVM_VMGEXIT_AP_CREATE:
3990 if (!page_address_valid(vcpu, svm->vmcb->control.exit_info_2)) {
3991 vcpu_unimpl(vcpu, "vmgexit: invalid AP VMSA address [%#llx] from guest\n",
3992 svm->vmcb->control.exit_info_2);
3993 ret = -EINVAL;
3994 goto out;
3995 }
3996
3997 /*
3998 * A malicious guest can RMPADJUST a large page into a VMSA, which
3999 * would hit the SNP erratum where the CPU incorrectly signals an RMP
4000 * violation #PF if a hugepage collides with the RMP entry of the
4001 * VMSA page. Reject the AP CREATE request if the VMSA address from
4002 * the guest is 2M-aligned.
4003 */
4004 if (IS_ALIGNED(svm->vmcb->control.exit_info_2, PMD_SIZE)) {
4005 vcpu_unimpl(vcpu,
4006 "vmgexit: AP VMSA address [%llx] from guest is unsafe as it is 2M aligned\n",
4007 svm->vmcb->control.exit_info_2);
4008 ret = -EINVAL;
4009 goto out;
4010 }
4011
4012 target_svm->sev_es.snp_vmsa_gpa = svm->vmcb->control.exit_info_2;
4013 break;
4014 case SVM_VMGEXIT_AP_DESTROY:
4015 break;
4016 default:
4017 vcpu_unimpl(vcpu, "vmgexit: invalid AP creation request [%#x] from guest\n",
4018 request);
4019 ret = -EINVAL;
4020 break;
4021 }
4022
4023 out:
4024 if (kick) {
4025 kvm_make_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu);
4026 kvm_vcpu_kick(target_vcpu);
4027 }
4028
4029 mutex_unlock(&target_svm->sev_es.snp_vmsa_mutex);
4030
4031 return ret;
4032 }
4033
4034 static int snp_handle_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_gpa)
4035 {
4036 struct sev_data_snp_guest_request data = {0};
4037 struct kvm *kvm = svm->vcpu.kvm;
4038 struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
4039 sev_ret_code fw_err = 0;
4040 int ret;
4041
4042 if (!sev_snp_guest(kvm))
4043 return -EINVAL;
4044
4045 mutex_lock(&sev->guest_req_mutex);
4046
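/*
 * Stage the exchange through the per-VM intermediate buffers: copy the
 * guest's request message in, issue SNP_GUEST_REQUEST to the PSP firmware,
 * then copy the firmware's response back out to the guest-supplied
 * response page.
 */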
4047 if (kvm_read_guest(kvm, req_gpa, sev->guest_req_buf, PAGE_SIZE)) {
4048 ret = -EIO;
4049 goto out_unlock;
4050 }
4051
4052 data.gctx_paddr = __psp_pa(sev->snp_context);
4053 data.req_paddr = __psp_pa(sev->guest_req_buf);
4054 data.res_paddr = __psp_pa(sev->guest_resp_buf);
4055
4056 /*
4057 * Firmware failures are propagated on to the guest, but any other failure
4058 * condition along the way should be reported to userspace. E.g. if
4059 * the PSP is dead and commands are timing out.
4060 */
4061 ret = sev_issue_cmd(kvm, SEV_CMD_SNP_GUEST_REQUEST, &data, &fw_err);
4062 if (ret && !fw_err)
4063 goto out_unlock;
4064
4065 if (kvm_write_guest(kvm, resp_gpa, sev->guest_resp_buf, PAGE_SIZE)) {
4066 ret = -EIO;
4067 goto out_unlock;
4068 }
4069
4070 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, SNP_GUEST_ERR(0, fw_err));
4071
4072 ret = 1; /* resume guest */
4073
4074 out_unlock:
4075 mutex_unlock(&sev->guest_req_mutex);
4076 return ret;
4077 }
4078
4079 static int snp_handle_ext_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_gpa)
4080 {
4081 struct kvm *kvm = svm->vcpu.kvm;
4082 u8 msg_type;
4083
4084 if (!sev_snp_guest(kvm))
4085 return -EINVAL;
4086
4087 if (kvm_read_guest(kvm, req_gpa + offsetof(struct snp_guest_msg_hdr, msg_type),
4088 &msg_type, 1))
4089 return -EIO;
4090
4091 /*
4092 * As per GHCB spec, requests of type MSG_REPORT_REQ also allow for
4093 * additional certificate data to be provided alongside the attestation
4094 * report via the guest-provided data pages indicated by RAX/RBX. The
4095 * certificate data is optional and requires additional KVM enablement
4096 * to provide an interface for userspace to provide it, but KVM still
4097 * needs to be able to handle extended guest requests either way. So
4098 * provide a stub implementation that will always return an empty
4099 * certificate table in the guest-provided data pages.
4100 */
4101 if (msg_type == SNP_MSG_REPORT_REQ) {
4102 struct kvm_vcpu *vcpu = &svm->vcpu;
4103 u64 data_npages;
4104 gpa_t data_gpa;
4105
4106 if (!kvm_ghcb_rax_is_valid(svm) || !kvm_ghcb_rbx_is_valid(svm))
4107 goto request_invalid;
4108
4109 data_gpa = vcpu->arch.regs[VCPU_REGS_RAX];
4110 data_npages = vcpu->arch.regs[VCPU_REGS_RBX];
4111
4112 if (!PAGE_ALIGNED(data_gpa))
4113 goto request_invalid;
4114
4115 /*
4116 * As per GHCB spec (see "SNP Extended Guest Request"), the
4117 * certificate table is terminated by 24-bytes of zeroes.
4118 */
4119 if (data_npages && kvm_clear_guest(kvm, data_gpa, 24))
4120 return -EIO;
4121 }
4122
4123 return snp_handle_guest_req(svm, req_gpa, resp_gpa);
4124
4125 request_invalid:
4126 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
4127 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
4128 return 1; /* resume guest */
4129 }
4130
4131 static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
4132 {
4133 struct vmcb_control_area *control = &svm->vmcb->control;
4134 struct kvm_vcpu *vcpu = &svm->vcpu;
4135 struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
4136 u64 ghcb_info;
4137 int ret = 1;
4138
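/*
 * For the GHCB MSR protocol, the low bits of the GHCB MSR identify the
 * request/response type and the remaining bits carry request-specific data.
 */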
4139 ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;
4140
4141 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
4142 control->ghcb_gpa);
4143
4144 switch (ghcb_info) {
4145 case GHCB_MSR_SEV_INFO_REQ:
4146 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version,
4147 GHCB_VERSION_MIN,
4148 sev_enc_bit));
4149 break;
4150 case GHCB_MSR_CPUID_REQ: {
4151 u64 cpuid_fn, cpuid_reg, cpuid_value;
4152
4153 cpuid_fn = get_ghcb_msr_bits(svm,
4154 GHCB_MSR_CPUID_FUNC_MASK,
4155 GHCB_MSR_CPUID_FUNC_POS);
4156
4157 /* Initialize the registers needed by the CPUID intercept */
4158 vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
4159 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
4160
4161 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
4162 if (!ret) {
4163 /* Error, keep GHCB MSR value as-is */
4164 break;
4165 }
4166
4167 cpuid_reg = get_ghcb_msr_bits(svm,
4168 GHCB_MSR_CPUID_REG_MASK,
4169 GHCB_MSR_CPUID_REG_POS);
4170 if (cpuid_reg == 0)
4171 cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
4172 else if (cpuid_reg == 1)
4173 cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
4174 else if (cpuid_reg == 2)
4175 cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
4176 else
4177 cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];
4178
4179 set_ghcb_msr_bits(svm, cpuid_value,
4180 GHCB_MSR_CPUID_VALUE_MASK,
4181 GHCB_MSR_CPUID_VALUE_POS);
4182
4183 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
4184 GHCB_MSR_INFO_MASK,
4185 GHCB_MSR_INFO_POS);
4186 break;
4187 }
4188 case GHCB_MSR_AP_RESET_HOLD_REQ:
4189 svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_MSR_PROTO;
4190 ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
4191
4192 /*
4193 * Preset the result to a non-SIPI return and then only set
4194 * the result to non-zero when delivering a SIPI.
4195 */
4196 set_ghcb_msr_bits(svm, 0,
4197 GHCB_MSR_AP_RESET_HOLD_RESULT_MASK,
4198 GHCB_MSR_AP_RESET_HOLD_RESULT_POS);
4199
4200 set_ghcb_msr_bits(svm, GHCB_MSR_AP_RESET_HOLD_RESP,
4201 GHCB_MSR_INFO_MASK,
4202 GHCB_MSR_INFO_POS);
4203 break;
4204 case GHCB_MSR_HV_FT_REQ:
4205 set_ghcb_msr_bits(svm, GHCB_HV_FT_SUPPORTED,
4206 GHCB_MSR_HV_FT_MASK, GHCB_MSR_HV_FT_POS);
4207 set_ghcb_msr_bits(svm, GHCB_MSR_HV_FT_RESP,
4208 GHCB_MSR_INFO_MASK, GHCB_MSR_INFO_POS);
4209 break;
4210 case GHCB_MSR_PREF_GPA_REQ:
4211 if (!sev_snp_guest(vcpu->kvm))
4212 goto out_terminate;
4213
4214 set_ghcb_msr_bits(svm, GHCB_MSR_PREF_GPA_NONE, GHCB_MSR_GPA_VALUE_MASK,
4215 GHCB_MSR_GPA_VALUE_POS);
4216 set_ghcb_msr_bits(svm, GHCB_MSR_PREF_GPA_RESP, GHCB_MSR_INFO_MASK,
4217 GHCB_MSR_INFO_POS);
4218 break;
4219 case GHCB_MSR_REG_GPA_REQ: {
4220 u64 gfn;
4221
4222 if (!sev_snp_guest(vcpu->kvm))
4223 goto out_terminate;
4224
4225 gfn = get_ghcb_msr_bits(svm, GHCB_MSR_GPA_VALUE_MASK,
4226 GHCB_MSR_GPA_VALUE_POS);
4227
4228 svm->sev_es.ghcb_registered_gpa = gfn_to_gpa(gfn);
4229
4230 set_ghcb_msr_bits(svm, gfn, GHCB_MSR_GPA_VALUE_MASK,
4231 GHCB_MSR_GPA_VALUE_POS);
4232 set_ghcb_msr_bits(svm, GHCB_MSR_REG_GPA_RESP, GHCB_MSR_INFO_MASK,
4233 GHCB_MSR_INFO_POS);
4234 break;
4235 }
4236 case GHCB_MSR_PSC_REQ:
4237 if (!sev_snp_guest(vcpu->kvm))
4238 goto out_terminate;
4239
4240 ret = snp_begin_psc_msr(svm, control->ghcb_gpa);
4241 break;
4242 case GHCB_MSR_TERM_REQ: {
4243 u64 reason_set, reason_code;
4244
4245 reason_set = get_ghcb_msr_bits(svm,
4246 GHCB_MSR_TERM_REASON_SET_MASK,
4247 GHCB_MSR_TERM_REASON_SET_POS);
4248 reason_code = get_ghcb_msr_bits(svm,
4249 GHCB_MSR_TERM_REASON_MASK,
4250 GHCB_MSR_TERM_REASON_POS);
4251 pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
4252 reason_set, reason_code);
4253
4254 goto out_terminate;
4255 }
4256 default:
4257 /* Error, keep GHCB MSR value as-is */
4258 break;
4259 }
4260
4261 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
4262 control->ghcb_gpa, ret);
4263
4264 return ret;
4265
4266 out_terminate:
4267 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
4268 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SEV_TERM;
4269 vcpu->run->system_event.ndata = 1;
4270 vcpu->run->system_event.data[0] = control->ghcb_gpa;
4271
4272 return 0;
4273 }
4274
4275 int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
4276 {
4277 struct vcpu_svm *svm = to_svm(vcpu);
4278 struct vmcb_control_area *control = &svm->vmcb->control;
4279 u64 ghcb_gpa, exit_code;
4280 int ret;
4281
4282 /* Validate the GHCB */
4283 ghcb_gpa = control->ghcb_gpa;
4284 if (ghcb_gpa & GHCB_MSR_INFO_MASK)
4285 return sev_handle_vmgexit_msr_protocol(svm);
4286
4287 if (!ghcb_gpa) {
4288 vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
4289
4290 /* Without a GHCB, just return right back to the guest */
4291 return 1;
4292 }
4293
4294 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
4295 /* Unable to map GHCB from guest */
4296 vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
4297 ghcb_gpa);
4298
4299 /* Without a GHCB, just return right back to the guest */
4300 return 1;
4301 }
4302
4303 svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
4304
4305 trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb);
4306
4307 sev_es_sync_from_ghcb(svm);
4308
4309 /* SEV-SNP guest requires that the GHCB GPA must be registered */
4310 if (sev_snp_guest(svm->vcpu.kvm) && !ghcb_gpa_is_registered(svm, ghcb_gpa)) {
4311 vcpu_unimpl(&svm->vcpu, "vmgexit: GHCB GPA [%#llx] is not registered.\n", ghcb_gpa);
4312 return -EINVAL;
4313 }
4314
4315 ret = sev_es_validate_vmgexit(svm);
4316 if (ret)
4317 return ret;
4318
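/* Assume success; the exit handlers below overwrite these on error. */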
4319 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0);
4320 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0);
4321
4322 exit_code = kvm_ghcb_get_sw_exit_code(control);
4323 switch (exit_code) {
4324 case SVM_VMGEXIT_MMIO_READ:
4325 ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
4326 if (ret)
4327 break;
4328
4329 ret = kvm_sev_es_mmio_read(vcpu,
4330 control->exit_info_1,
4331 control->exit_info_2,
4332 svm->sev_es.ghcb_sa);
4333 break;
4334 case SVM_VMGEXIT_MMIO_WRITE:
4335 ret = setup_vmgexit_scratch(svm, false, control->exit_info_2);
4336 if (ret)
4337 break;
4338
4339 ret = kvm_sev_es_mmio_write(vcpu,
4340 control->exit_info_1,
4341 control->exit_info_2,
4342 svm->sev_es.ghcb_sa);
4343 break;
4344 case SVM_VMGEXIT_NMI_COMPLETE:
4345 ++vcpu->stat.nmi_window_exits;
4346 svm->nmi_masked = false;
4347 kvm_make_request(KVM_REQ_EVENT, vcpu);
4348 ret = 1;
4349 break;
4350 case SVM_VMGEXIT_AP_HLT_LOOP:
4351 svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NAE_EVENT;
4352 ret = kvm_emulate_ap_reset_hold(vcpu);
4353 break;
4354 case SVM_VMGEXIT_AP_JUMP_TABLE: {
4355 struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
4356
4357 switch (control->exit_info_1) {
4358 case 0:
4359 /* Set AP jump table address */
4360 sev->ap_jump_table = control->exit_info_2;
4361 break;
4362 case 1:
4363 /* Get AP jump table address */
4364 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table);
4365 break;
4366 default:
4367 pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
4368 control->exit_info_1);
4369 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
4370 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
4371 }
4372
4373 ret = 1;
4374 break;
4375 }
4376 case SVM_VMGEXIT_HV_FEATURES:
4377 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_HV_FT_SUPPORTED);
4378
4379 ret = 1;
4380 break;
4381 case SVM_VMGEXIT_TERM_REQUEST:
4382 pr_info("SEV-ES guest requested termination: reason %#llx info %#llx\n",
4383 control->exit_info_1, control->exit_info_2);
4384 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
4385 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SEV_TERM;
4386 vcpu->run->system_event.ndata = 1;
4387 vcpu->run->system_event.data[0] = control->ghcb_gpa;
4388 break;
4389 case SVM_VMGEXIT_PSC:
4390 ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
4391 if (ret)
4392 break;
4393
4394 ret = snp_begin_psc(svm, svm->sev_es.ghcb_sa);
4395 break;
4396 case SVM_VMGEXIT_AP_CREATION:
4397 ret = sev_snp_ap_creation(svm);
4398 if (ret) {
4399 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
4400 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
4401 }
4402
4403 ret = 1;
4404 break;
4405 case SVM_VMGEXIT_GUEST_REQUEST:
4406 ret = snp_handle_guest_req(svm, control->exit_info_1, control->exit_info_2);
4407 break;
4408 case SVM_VMGEXIT_EXT_GUEST_REQUEST:
4409 ret = snp_handle_ext_guest_req(svm, control->exit_info_1, control->exit_info_2);
4410 break;
4411 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
4412 vcpu_unimpl(vcpu,
4413 "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
4414 control->exit_info_1, control->exit_info_2);
4415 ret = -EINVAL;
4416 break;
4417 default:
4418 ret = svm_invoke_exit_handler(vcpu, exit_code);
4419 }
4420
4421 return ret;
4422 }
4423
4424 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
4425 {
4426 int count;
4427 int bytes;
4428 int r;
4429
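/*
 * exit_info_2 holds the string I/O repeat count; bound it and guard the
 * count * size calculation against overflow since it sizes the scratch
 * buffer.
 */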
4430 if (svm->vmcb->control.exit_info_2 > INT_MAX)
4431 return -EINVAL;
4432
4433 count = svm->vmcb->control.exit_info_2;
4434 if (unlikely(check_mul_overflow(count, size, &bytes)))
4435 return -EINVAL;
4436
4437 r = setup_vmgexit_scratch(svm, in, bytes);
4438 if (r)
4439 return r;
4440
4441 return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
4442 count, in);
4443 }
4444
4445 static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
4446 {
4447 struct kvm_vcpu *vcpu = &svm->vcpu;
4448
4449 if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
4450 bool v_tsc_aux = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
4451 guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
4452
4453 set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
4454 }
4455
4456 /*
4457 * For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
4458 * the host/guest supports its use.
4459 *
4460 * guest_can_use() checks a number of requirements on the host/guest to
4461 * ensure that MSR_IA32_XSS is available, but it might report true even
4462 * if X86_FEATURE_XSAVES isn't configured in the guest to ensure host
4463 * MSR_IA32_XSS is always properly restored. For SEV-ES, it is better
4464 * to further check that the guest CPUID actually supports
4465 * X86_FEATURE_XSAVES so that accesses to MSR_IA32_XSS by misbehaved
4466 * guests will still get intercepted and caught in the normal
4467 * kvm_emulate_rdmsr()/kvm_emulate_wrmsr() paths.
4468 */
4469 if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
4470 guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
4471 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1);
4472 else
4473 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0);
4474 }
4475
4476 void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
4477 {
4478 struct kvm_vcpu *vcpu = &svm->vcpu;
4479 struct kvm_cpuid_entry2 *best;
4480
4481 /* For sev guests, the memory encryption bit is not reserved in CR3. */
4482 best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
4483 if (best)
4484 vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
4485
4486 if (sev_es_guest(svm->vcpu.kvm))
4487 sev_es_vcpu_after_set_cpuid(svm);
4488 }
4489
4490 static void sev_es_init_vmcb(struct vcpu_svm *svm)
4491 {
4492 struct vmcb *vmcb = svm->vmcb01.ptr;
4493 struct kvm_vcpu *vcpu = &svm->vcpu;
4494
4495 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
4496
4497 /*
4498 * An SEV-ES guest requires a VMSA area that is separate from the
4499 * VMCB page. Do not include the encryption mask on the VMSA physical
4500 * address since hardware will access it using the guest key. Note,
4501 * the VMSA will be NULL if this vCPU is the destination for intrahost
4502 * migration, and will be copied later.
4503 */
4504 if (svm->sev_es.vmsa && !svm->sev_es.snp_has_guest_vmsa)
4505 svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
4506
4507 /* Can't intercept CR register access, HV can't modify CR registers */
4508 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
4509 svm_clr_intercept(svm, INTERCEPT_CR4_READ);
4510 svm_clr_intercept(svm, INTERCEPT_CR8_READ);
4511 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
4512 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
4513 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
4514
4515 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);
4516
4517 /* Track EFER/CR register changes */
4518 svm_set_intercept(svm, TRAP_EFER_WRITE);
4519 svm_set_intercept(svm, TRAP_CR0_WRITE);
4520 svm_set_intercept(svm, TRAP_CR4_WRITE);
4521 svm_set_intercept(svm, TRAP_CR8_WRITE);
4522
4523 vmcb->control.intercepts[INTERCEPT_DR] = 0;
4524 if (!sev_vcpu_has_debug_swap(svm)) {
4525 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
4526 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
4527 recalc_intercepts(svm);
4528 } else {
4529 /*
4530 * Disable #DB intercept iff DebugSwap is enabled. KVM doesn't
4531 * allow debugging SEV-ES guests, and enables DebugSwap iff
4532 * NO_NESTED_DATA_BP is supported, so there's no reason to
4533 * intercept #DB when DebugSwap is enabled. For simplicity
4534 * with respect to guest debug, intercept #DB for other VMs
4535 * even if NO_NESTED_DATA_BP is supported, i.e. even if the
4536 * guest can't DoS the CPU with infinite #DB vectoring.
4537 */
4538 clr_exception_intercept(svm, DB_VECTOR);
4539 }
4540
4541 /* Can't intercept XSETBV, HV can't modify XCR0 directly */
4542 svm_clr_intercept(svm, INTERCEPT_XSETBV);
4543
4544 /* Clear intercepts on selected MSRs */
4545 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
4546 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
4547 }
4548
4549 void sev_init_vmcb(struct vcpu_svm *svm)
4550 {
4551 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
4552 clr_exception_intercept(svm, UD_VECTOR);
4553
4554 /*
4555 * Don't intercept #GP for SEV guests, e.g. for the VMware backdoor, as
4556 * KVM can't decrypt guest memory to decode the faulting instruction.
4557 */
4558 clr_exception_intercept(svm, GP_VECTOR);
4559
4560 if (sev_es_guest(svm->vcpu.kvm))
4561 sev_es_init_vmcb(svm);
4562 }
4563
4564 void sev_es_vcpu_reset(struct vcpu_svm *svm)
4565 {
4566 struct kvm_vcpu *vcpu = &svm->vcpu;
4567 struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
4568
4569 /*
4570 * Set the GHCB MSR value as per the GHCB specification when emulating
4571 * vCPU RESET for an SEV-ES guest.
4572 */
4573 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version,
4574 GHCB_VERSION_MIN,
4575 sev_enc_bit));
4576
4577 mutex_init(&svm->sev_es.snp_vmsa_mutex);
4578 }
4579
4580 void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa)
4581 {
4582 /*
4583 * All host state for SEV-ES guests is categorized into three swap types
4584 * based on how it is handled by hardware during a world switch:
4585 *
4586 * A: VMRUN: Host state saved in host save area
4587 * VMEXIT: Host state loaded from host save area
4588 *
4589 * B: VMRUN: Host state _NOT_ saved in host save area
4590 * VMEXIT: Host state loaded from host save area
4591 *
4592 * C: VMRUN: Host state _NOT_ saved in host save area
4593 * VMEXIT: Host state initialized to default(reset) values
4594 *
4595 * Manually save type-B state, i.e. state that is loaded by VMEXIT but
4596 * isn't saved by VMRUN, that isn't already saved by VMSAVE (performed
4597 * by common SVM code).
4598 */
4599 hostsa->xcr0 = kvm_host.xcr0;
4600 hostsa->pkru = read_pkru();
4601 hostsa->xss = kvm_host.xss;
4602
4603 /*
4604 * If DebugSwap is enabled, debug registers are loaded but NOT saved by
4605 * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both
4606 * saves and loads debug registers (Type-A).
4607 */
4608 if (sev_vcpu_has_debug_swap(svm)) {
4609 hostsa->dr0 = native_get_debugreg(0);
4610 hostsa->dr1 = native_get_debugreg(1);
4611 hostsa->dr2 = native_get_debugreg(2);
4612 hostsa->dr3 = native_get_debugreg(3);
4613 hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0);
4614 hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1);
4615 hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2);
4616 hostsa->dr3_addr_mask = amd_get_dr_addr_mask(3);
4617 }
4618 }
4619
4620 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
4621 {
4622 struct vcpu_svm *svm = to_svm(vcpu);
4623
4624 /* First SIPI: Use the values as initially set by the VMM */
4625 if (!svm->sev_es.received_first_sipi) {
4626 svm->sev_es.received_first_sipi = true;
4627 return;
4628 }
4629
4630 /* Subsequent SIPI */
4631 switch (svm->sev_es.ap_reset_hold_type) {
4632 case AP_RESET_HOLD_NAE_EVENT:
4633 /*
4634 * Return from an AP Reset Hold VMGEXIT, where the guest will
4635 * set the CS and RIP. Set SW_EXIT_INFO_2 to a non-zero value.
4636 */
4637 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
4638 break;
4639 case AP_RESET_HOLD_MSR_PROTO:
4640 /*
4641 * Return from an AP Reset Hold VMGEXIT, where the guest will
4642 * set the CS and RIP. Set GHCB data field to a non-zero value.
4643 */
4644 set_ghcb_msr_bits(svm, 1,
4645 GHCB_MSR_AP_RESET_HOLD_RESULT_MASK,
4646 GHCB_MSR_AP_RESET_HOLD_RESULT_POS);
4647
4648 set_ghcb_msr_bits(svm, GHCB_MSR_AP_RESET_HOLD_RESP,
4649 GHCB_MSR_INFO_MASK,
4650 GHCB_MSR_INFO_POS);
4651 break;
4652 default:
4653 break;
4654 }
4655 }
4656
4657 struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
4658 {
4659 unsigned long pfn;
4660 struct page *p;
4661
4662 if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
4663 return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
4664
4665 /*
4666 * Allocate an SNP-safe page to work around the SNP erratum where
4667 * the CPU will incorrectly signal an RMP violation #PF if a
4668 * hugepage (2MB or 1GB) collides with the RMP entry of a
4669 * 2MB-aligned VMCB, VMSA, or AVIC backing page.
4670 *
4671 * Allocate one extra page, choose a page which is not
4672 * 2MB-aligned, and free the other.
4673 */
4674 p = alloc_pages_node(node, gfp | __GFP_ZERO, 1);
4675 if (!p)
4676 return NULL;
4677
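/*
 * split_page() turns the order-1 allocation into two independently
 * refcounted order-0 pages so that whichever one isn't used can be
 * freed below.
 */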
4678 split_page(p, 1);
4679
4680 pfn = page_to_pfn(p);
4681 if (IS_ALIGNED(pfn, PTRS_PER_PMD))
4682 __free_page(p++);
4683 else
4684 __free_page(p + 1);
4685
4686 return p;
4687 }
4688
4689 void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
4690 {
4691 struct kvm_memory_slot *slot;
4692 struct kvm *kvm = vcpu->kvm;
4693 int order, rmp_level, ret;
4694 bool assigned;
4695 kvm_pfn_t pfn;
4696 gfn_t gfn;
4697
4698 gfn = gpa >> PAGE_SHIFT;
4699
4700 /*
4701 * The only time RMP faults occur for shared pages is when the guest is
4702 * triggering an RMP fault for an implicit page-state change from
4703 * shared->private. Implicit page-state changes are forwarded to
4704 * userspace via KVM_EXIT_MEMORY_FAULT events, however, so RMP faults
4705 * for shared pages should not end up here.
4706 */
4707 if (!kvm_mem_is_private(kvm, gfn)) {
4708 pr_warn_ratelimited("SEV: Unexpected RMP fault for non-private GPA 0x%llx\n",
4709 gpa);
4710 return;
4711 }
4712
4713 slot = gfn_to_memslot(kvm, gfn);
4714 if (!kvm_slot_can_be_private(slot)) {
4715 pr_warn_ratelimited("SEV: Unexpected RMP fault, non-private slot for GPA 0x%llx\n",
4716 gpa);
4717 return;
4718 }
4719
4720 ret = kvm_gmem_get_pfn(kvm, slot, gfn, &pfn, &order);
4721 if (ret) {
4722 pr_warn_ratelimited("SEV: Unexpected RMP fault, no backing page for private GPA 0x%llx\n",
4723 gpa);
4724 return;
4725 }
4726
4727 ret = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
4728 if (ret || !assigned) {
4729 pr_warn_ratelimited("SEV: Unexpected RMP fault, no assigned RMP entry found for GPA 0x%llx PFN 0x%llx error %d\n",
4730 gpa, pfn, ret);
4731 goto out_no_trace;
4732 }
4733
4734 /*
4735 * There are 2 cases where a PSMASH may be needed to resolve an #NPF
4736 * with PFERR_GUEST_RMP_BIT set:
4737 *
4738 * 1) RMPADJUST/PVALIDATE can trigger an #NPF with PFERR_GUEST_SIZEM
4739 * bit set if the guest issues them with a smaller granularity than
4740 * what is indicated by the page-size bit in the 2MB RMP entry for
4741 * the PFN that backs the GPA.
4742 *
4743 * 2) Guest access via NPT can trigger an #NPF if the NPT mapping is
4744 * smaller than what is indicated by the 2MB RMP entry for the PFN
4745 * that backs the GPA.
4746 *
4747 * In both these cases, the corresponding 2M RMP entry needs to
4748 * be PSMASH'd to 512 4K RMP entries. If the RMP entry is already
4749 * split into 4K RMP entries, then this is likely a spurious case which
4750 * can occur when there are concurrent accesses by the guest to a 2MB
4751 * GPA range that is backed by a 2MB-aligned PFN whose RMP entry is in
4752 * the process of being PSMASH'd into 4K entries. These cases should
4753 * resolve automatically on subsequent accesses, so just ignore them
4754 * here.
4755 */
4756 if (rmp_level == PG_LEVEL_4K)
4757 goto out;
4758
4759 ret = snp_rmptable_psmash(pfn);
4760 if (ret) {
4761 /*
4762 * Look it up again. If it's 4K now then the PSMASH may have
4763 * raced with another process and the issue has already resolved
4764 * itself.
4765 */
4766 if (!snp_lookup_rmpentry(pfn, &assigned, &rmp_level) &&
4767 assigned && rmp_level == PG_LEVEL_4K)
4768 goto out;
4769
4770 pr_warn_ratelimited("SEV: Unable to split RMP entry for GPA 0x%llx PFN 0x%llx ret %d\n",
4771 gpa, pfn, ret);
4772 }
4773
4774 kvm_zap_gfn_range(kvm, gfn, gfn + PTRS_PER_PMD);
4775 out:
4776 trace_kvm_rmp_fault(vcpu, gpa, pfn, error_code, rmp_level, ret);
4777 out_no_trace:
4778 put_page(pfn_to_page(pfn));
4779 }
4780
4781 static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)
4782 {
4783 kvm_pfn_t pfn = start;
4784
4785 while (pfn < end) {
4786 int ret, rmp_level;
4787 bool assigned;
4788
4789 ret = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
4790 if (ret) {
4791 pr_warn_ratelimited("SEV: Failed to retrieve RMP entry: PFN 0x%llx GFN start 0x%llx GFN end 0x%llx RMP level %d error %d\n",
4792 pfn, start, end, rmp_level, ret);
4793 return false;
4794 }
4795
4796 if (assigned) {
4797 pr_debug("%s: overlap detected, PFN 0x%llx start 0x%llx end 0x%llx RMP level %d\n",
4798 __func__, pfn, start, end, rmp_level);
4799 return false;
4800 }
4801
4802 pfn++;
4803 }
4804
4805 return true;
4806 }
4807
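/*
 * Translate a gmem allocation order into the largest page level it can back:
 * an order covering at least 512 contiguous 4K pages allows a 2M mapping,
 * anything smaller is limited to 4K.
 */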
4808 static u8 max_level_for_order(int order)
4809 {
4810 if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
4811 return PG_LEVEL_2M;
4812
4813 return PG_LEVEL_4K;
4814 }
4815
4816 static bool is_large_rmp_possible(struct kvm *kvm, kvm_pfn_t pfn, int order)
4817 {
4818 kvm_pfn_t pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
4819
4820 /*
4821 * If this is a large folio, and the entire 2M range containing the
4822 * PFN is currently shared, then the entire 2M-aligned range can be
4823 * set to private via a single 2M RMP entry.
4824 */
4825 if (max_level_for_order(order) > PG_LEVEL_4K &&
4826 is_pfn_range_shared(pfn_aligned, pfn_aligned + PTRS_PER_PMD))
4827 return true;
4828
4829 return false;
4830 }
4831
4832 int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
4833 {
4834 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
4835 kvm_pfn_t pfn_aligned;
4836 gfn_t gfn_aligned;
4837 int level, rc;
4838 bool assigned;
4839
4840 if (!sev_snp_guest(kvm))
4841 return 0;
4842
4843 rc = snp_lookup_rmpentry(pfn, &assigned, &level);
4844 if (rc) {
4845 pr_err_ratelimited("SEV: Failed to look up RMP entry: GFN %llx PFN %llx error %d\n",
4846 gfn, pfn, rc);
4847 return -ENOENT;
4848 }
4849
4850 if (assigned) {
4851 pr_debug("%s: already assigned: gfn %llx pfn %llx max_order %d level %d\n",
4852 __func__, gfn, pfn, max_order, level);
4853 return 0;
4854 }
4855
4856 if (is_large_rmp_possible(kvm, pfn, max_order)) {
4857 level = PG_LEVEL_2M;
4858 pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
4859 gfn_aligned = ALIGN_DOWN(gfn, PTRS_PER_PMD);
4860 } else {
4861 level = PG_LEVEL_4K;
4862 pfn_aligned = pfn;
4863 gfn_aligned = gfn;
4864 }
4865
4866 rc = rmp_make_private(pfn_aligned, gfn_to_gpa(gfn_aligned), level, sev->asid, false);
4867 if (rc) {
4868 pr_err_ratelimited("SEV: Failed to update RMP entry: GFN %llx PFN %llx level %d error %d\n",
4869 gfn, pfn, level, rc);
4870 return -EINVAL;
4871 }
4872
4873 pr_debug("%s: updated: gfn %llx pfn %llx pfn_aligned %llx max_order %d level %d\n",
4874 __func__, gfn, pfn, pfn_aligned, max_order, level);
4875
4876 return 0;
4877 }
4878
4879 void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
4880 {
4881 kvm_pfn_t pfn;
4882
4883 if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
4884 return;
4885
4886 pr_debug("%s: PFN start 0x%llx PFN end 0x%llx\n", __func__, start, end);
4887
4888 for (pfn = start; pfn < end;) {
4889 bool use_2m_update = false;
4890 int rc, rmp_level;
4891 bool assigned;
4892
4893 rc = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
4894 if (rc || !assigned)
4895 goto next_pfn;
4896
4897 use_2m_update = IS_ALIGNED(pfn, PTRS_PER_PMD) &&
4898 end >= (pfn + PTRS_PER_PMD) &&
4899 rmp_level > PG_LEVEL_4K;
4900
4901 /*
4902 * If an unaligned PFN corresponds to a 2M region assigned as a
4903 * large page in the RMP table, PSMASH the region into individual
4904 * 4K RMP entries before attempting to convert a 4K sub-page.
4905 */
4906 if (!use_2m_update && rmp_level > PG_LEVEL_4K) {
4907 /*
4908 * This shouldn't fail, but if it does, report it and still
4909 * try to update the RMP entry to shared in the hope that this
4910 * was a spurious error that can be addressed later.
4911 */
4912 rc = snp_rmptable_psmash(pfn);
4913 WARN_ONCE(rc, "SEV: Failed to PSMASH RMP entry for PFN 0x%llx error %d\n",
4914 pfn, rc);
4915 }
4916
4917 rc = rmp_make_shared(pfn, use_2m_update ? PG_LEVEL_2M : PG_LEVEL_4K);
4918 if (WARN_ONCE(rc, "SEV: Failed to update RMP entry for PFN 0x%llx error %d\n",
4919 pfn, rc))
4920 goto next_pfn;
4921
4922 /*
4923 * SEV-ES avoids host/guest cache coherency issues through
4924 * WBINVD hooks issued via MMU notifiers during run-time, and
4925 * KVM's VM destroy path at shutdown. Those MMU notifier events
4926 * don't cover gmem since there is no requirement to map pages
4927 * to a HVA in order to use them for a running guest. While the
4928 * shutdown path would still likely cover things for SNP guests,
4929 * userspace may also free gmem pages during run-time via
4930 * hole-punching operations on the guest_memfd, so flush the
4931 * cache entries for these pages before free'ing them back to
4932 * the host.
4933 */
4934 clflush_cache_range(__va(pfn_to_hpa(pfn)),
4935 use_2m_update ? PMD_SIZE : PAGE_SIZE);
4936 next_pfn:
4937 pfn += use_2m_update ? PTRS_PER_PMD : 1;
4938 cond_resched();
4939 }
4940 }
4941
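/*
 * The NPT mapping level for private memory must not exceed the page size
 * recorded in the RMP entry, so report the RMP level as the maximum;
 * unassigned PFNs are conservatively limited to 4K.
 */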
4942 int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
4943 {
4944 int level, rc;
4945 bool assigned;
4946
4947 if (!sev_snp_guest(kvm))
4948 return 0;
4949
4950 rc = snp_lookup_rmpentry(pfn, &assigned, &level);
4951 if (rc || !assigned)
4952 return PG_LEVEL_4K;
4953
4954 return level;
4955 }
4956