xref: /linux/arch/x86/kvm/svm/sev.c (revision ca64d84e93762f4e587e040a44ad9f6089afc777)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * AMD SVM-SEV support
6  *
7  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
8  */
9 
10 #include <linux/kvm_types.h>
11 #include <linux/kvm_host.h>
12 #include <linux/kernel.h>
13 #include <linux/highmem.h>
14 #include <linux/psp-sev.h>
15 #include <linux/swap.h>
16 
17 #include "x86.h"
18 #include "svm.h"
19 
20 static int sev_flush_asids(void);
21 static DECLARE_RWSEM(sev_deactivate_lock);
22 static DEFINE_MUTEX(sev_bitmap_lock);
23 unsigned int max_sev_asid;
24 static unsigned int min_sev_asid;
25 static unsigned long *sev_asid_bitmap;
26 static unsigned long *sev_reclaim_asid_bitmap;
27 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
28 
29 struct enc_region {
30 	struct list_head list;
31 	unsigned long npages;
32 	struct page **pages;
33 	unsigned long uaddr;
34 	unsigned long size;
35 };
36 
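/*
 * Flush all CPU caches and then issue a firmware DF_FLUSH so that ASIDs of
 * previously DEACTIVATEd guests can be reused.
 */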
37 static int sev_flush_asids(void)
38 {
39 	int ret, error = 0;
40 
41 	/*
42 	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
43 	 * so it must be guarded.
44 	 */
45 	down_write(&sev_deactivate_lock);
46 
47 	wbinvd_on_all_cpus();
48 	ret = sev_guest_df_flush(&error);
49 
50 	up_write(&sev_deactivate_lock);
51 
52 	if (ret)
53 		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);
54 
55 	return ret;
56 }
57 
58 /* Must be called with the sev_bitmap_lock held */
59 static bool __sev_recycle_asids(void)
60 {
61 	int pos;
62 
63 	/* Check if there are any ASIDs to reclaim before performing a flush */
64 	pos = find_next_bit(sev_reclaim_asid_bitmap,
65 			    max_sev_asid, min_sev_asid - 1);
66 	if (pos >= max_sev_asid)
67 		return false;
68 
69 	if (sev_flush_asids())
70 		return false;
71 
72 	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
73 		   max_sev_asid);
74 	bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);
75 
76 	return true;
77 }
78 
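/*
 * Allocate an unused ASID in [min_sev_asid, max_sev_asid]. If none is free,
 * try once to recycle ASIDs that are pending reclaim. Returns a 1-based ASID
 * on success or -EBUSY if the pool is exhausted.
 */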
79 static int sev_asid_new(void)
80 {
81 	bool retry = true;
82 	int pos;
83 
84 	mutex_lock(&sev_bitmap_lock);
85 
86 	/*
87 	 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
88 	 */
89 again:
90 	pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
91 	if (pos >= max_sev_asid) {
92 		if (retry && __sev_recycle_asids()) {
93 			retry = false;
94 			goto again;
95 		}
96 		mutex_unlock(&sev_bitmap_lock);
97 		return -EBUSY;
98 	}
99 
100 	__set_bit(pos, sev_asid_bitmap);
101 
102 	mutex_unlock(&sev_bitmap_lock);
103 
104 	return pos + 1;
105 }
106 
107 static int sev_get_asid(struct kvm *kvm)
108 {
109 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
110 
111 	return sev->asid;
112 }
113 
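/*
 * Mark the ASID for deferred reclaim; it only becomes allocatable again after
 * the next DF_FLUSH in __sev_recycle_asids(). Also drop the per-CPU cached
 * VMCB pointers associated with the ASID.
 */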
114 static void sev_asid_free(int asid)
115 {
116 	struct svm_cpu_data *sd;
117 	int cpu, pos;
118 
119 	mutex_lock(&sev_bitmap_lock);
120 
121 	pos = asid - 1;
122 	__set_bit(pos, sev_reclaim_asid_bitmap);
123 
124 	for_each_possible_cpu(cpu) {
125 		sd = per_cpu(svm_data, cpu);
126 		sd->sev_vmcbs[pos] = NULL;
127 	}
128 
129 	mutex_unlock(&sev_bitmap_lock);
130 }
131 
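/*
 * DEACTIVATE the firmware handle (detaching it from its ASID) and then
 * DECOMMISSION it to release the firmware's guest context.
 */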
132 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
133 {
134 	struct sev_data_decommission *decommission;
135 	struct sev_data_deactivate *data;
136 
137 	if (!handle)
138 		return;
139 
140 	data = kzalloc(sizeof(*data), GFP_KERNEL);
141 	if (!data)
142 		return;
143 
144 	/* deactivate handle */
145 	data->handle = handle;
146 
147 	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
148 	down_read(&sev_deactivate_lock);
149 	sev_guest_deactivate(data, NULL);
150 	up_read(&sev_deactivate_lock);
151 
152 	kfree(data);
153 
154 	decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
155 	if (!decommission)
156 		return;
157 
158 	/* decommission handle */
159 	decommission->handle = handle;
160 	sev_guest_decommission(decommission, NULL);
161 
162 	kfree(decommission);
163 }
164 
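/*
 * KVM_SEV_INIT: allocate an ASID for this VM and initialize the SEV platform,
 * marking the VM as an active SEV guest.
 */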
165 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
166 {
167 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
168 	int asid, ret;
169 
170 	ret = -EBUSY;
171 	if (unlikely(sev->active))
172 		return ret;
173 
174 	asid = sev_asid_new();
175 	if (asid < 0)
176 		return ret;
177 
178 	ret = sev_platform_init(&argp->error);
179 	if (ret)
180 		goto e_free;
181 
182 	sev->active = true;
183 	sev->asid = asid;
184 	INIT_LIST_HEAD(&sev->regions_list);
185 
186 	return 0;
187 
188 e_free:
189 	sev_asid_free(asid);
190 	return ret;
191 }
192 
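/* ACTIVATE: bind this guest's ASID to the firmware handle from LAUNCH_START. */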
193 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
194 {
195 	struct sev_data_activate *data;
196 	int asid = sev_get_asid(kvm);
197 	int ret;
198 
199 	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
200 	if (!data)
201 		return -ENOMEM;
202 
203 	/* activate ASID on the given handle */
204 	data->handle = handle;
205 	data->asid   = asid;
206 	ret = sev_guest_activate(data, error);
207 	kfree(data);
208 
209 	return ret;
210 }
211 
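/*
 * Issue a SEV firmware command on behalf of userspace. @fd must refer to the
 * SEV device (/dev/sev); sev_issue_cmd_external_user() validates it before
 * forwarding the command to the PSP driver.
 */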
212 static int __sev_issue_cmd(int fd, int id, void *data, int *error)
213 {
214 	struct fd f;
215 	int ret;
216 
217 	f = fdget(fd);
218 	if (!f.file)
219 		return -EBADF;
220 
221 	ret = sev_issue_cmd_external_user(f.file, id, data, error);
222 
223 	fdput(f);
224 	return ret;
225 }
226 
227 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
228 {
229 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
230 
231 	return __sev_issue_cmd(sev->fd, id, data, error);
232 }
233 
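/*
 * KVM_SEV_LAUNCH_START: create the guest's memory encryption context in the
 * firmware, bind the guest's ASID to the returned handle and hand the handle
 * back to userspace.
 */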
234 static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
235 {
236 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
237 	struct sev_data_launch_start *start;
238 	struct kvm_sev_launch_start params;
239 	void *dh_blob, *session_blob;
240 	int *error = &argp->error;
241 	int ret;
242 
243 	if (!sev_guest(kvm))
244 		return -ENOTTY;
245 
246 	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
247 		return -EFAULT;
248 
249 	start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
250 	if (!start)
251 		return -ENOMEM;
252 
253 	dh_blob = NULL;
254 	if (params.dh_uaddr) {
255 		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
256 		if (IS_ERR(dh_blob)) {
257 			ret = PTR_ERR(dh_blob);
258 			goto e_free;
259 		}
260 
261 		start->dh_cert_address = __sme_set(__pa(dh_blob));
262 		start->dh_cert_len = params.dh_len;
263 	}
264 
265 	session_blob = NULL;
266 	if (params.session_uaddr) {
267 		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
268 		if (IS_ERR(session_blob)) {
269 			ret = PTR_ERR(session_blob);
270 			goto e_free_dh;
271 		}
272 
273 		start->session_address = __sme_set(__pa(session_blob));
274 		start->session_len = params.session_len;
275 	}
276 
277 	start->handle = params.handle;
278 	start->policy = params.policy;
279 
280 	/* create memory encryption context */
281 	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
282 	if (ret)
283 		goto e_free_session;
284 
285 	/* Bind ASID to this guest */
286 	ret = sev_bind_asid(kvm, start->handle, error);
287 	if (ret)
288 		goto e_free_session;
289 
290 	/* return handle to userspace */
291 	params.handle = start->handle;
292 	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
293 		sev_unbind_asid(kvm, start->handle);
294 		ret = -EFAULT;
295 		goto e_free_session;
296 	}
297 
298 	sev->handle = start->handle;
299 	sev->fd = argp->sev_fd;
300 
301 e_free_session:
302 	kfree(session_blob);
303 e_free_dh:
304 	kfree(dh_blob);
305 e_free:
306 	kfree(start);
307 	return ret;
308 }
309 
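/*
 * Pin a range of user memory and charge it against RLIMIT_MEMLOCK. On success
 * the pinned page array is returned and *n is set to the number of pages;
 * returns NULL on failure.
 */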
310 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
311 				    unsigned long ulen, unsigned long *n,
312 				    int write)
313 {
314 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
315 	unsigned long npages, npinned, size;
316 	unsigned long locked, lock_limit;
317 	struct page **pages;
318 	unsigned long first, last;
319 
320 	if (ulen == 0 || uaddr + ulen < uaddr)
321 		return NULL;
322 
323 	/* Calculate number of pages. */
324 	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
325 	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
326 	npages = (last - first + 1);
327 
328 	locked = sev->pages_locked + npages;
329 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
330 	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
331 		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
332 		return NULL;
333 	}
334 
335 	/* Avoid using vmalloc for smaller buffers. */
336 	size = npages * sizeof(struct page *);
337 	if (size > PAGE_SIZE)
338 		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO,
339 				  PAGE_KERNEL);
340 	else
341 		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
342 
343 	if (!pages)
344 		return NULL;
345 
346 	/* Pin the user virtual address. */
347 	npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
348 	if (npinned != npages) {
349 		pr_err("SEV: Failure locking %lu pages.\n", npages);
350 		goto err;
351 	}
352 
353 	*n = npages;
354 	sev->pages_locked = locked;
355 
356 	return pages;
357 
358 err:
359 	if (npinned > 0)
360 		release_pages(pages, npinned);
361 
362 	kvfree(pages);
363 	return NULL;
364 }
365 
366 static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
367 			     unsigned long npages)
368 {
369 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
370 
371 	release_pages(pages, npages);
372 	kvfree(pages);
373 	sev->pages_locked -= npages;
374 }
375 
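/*
 * Flush the given pages from the CPU caches so that no stale cache lines with
 * the wrong C-bit remain when the memory is accessed with a new encryption
 * attribute.
 */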
376 static void sev_clflush_pages(struct page *pages[], unsigned long npages)
377 {
378 	uint8_t *page_virtual;
379 	unsigned long i;
380 
381 	if (npages == 0 || pages == NULL)
382 		return;
383 
384 	for (i = 0; i < npages; i++) {
385 		page_virtual = kmap_atomic(pages[i]);
386 		clflush_cache_range(page_virtual, PAGE_SIZE);
387 		kunmap_atomic(page_virtual);
388 	}
389 }
390 
391 static unsigned long get_num_contig_pages(unsigned long idx,
392 				struct page **inpages, unsigned long npages)
393 {
394 	unsigned long paddr, next_paddr;
395 	unsigned long i = idx + 1, pages = 1;
396 
397 	/* find the number of contiguous pages starting from idx */
398 	paddr = __sme_page_pa(inpages[idx]);
399 	while (i < npages) {
400 		next_paddr = __sme_page_pa(inpages[i++]);
401 		if ((paddr + PAGE_SIZE) == next_paddr) {
402 			pages++;
403 			paddr = next_paddr;
404 			continue;
405 		}
406 		break;
407 	}
408 
409 	return pages;
410 }
411 
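/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: pin the user memory, flush it from the caches
 * and encrypt it in place, issuing one LAUNCH_UPDATE_DATA command per
 * physically contiguous chunk.
 */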
412 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
413 {
414 	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
415 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
416 	struct kvm_sev_launch_update_data params;
417 	struct sev_data_launch_update_data *data;
418 	struct page **inpages;
419 	int ret;
420 
421 	if (!sev_guest(kvm))
422 		return -ENOTTY;
423 
424 	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
425 		return -EFAULT;
426 
427 	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
428 	if (!data)
429 		return -ENOMEM;
430 
431 	vaddr = params.uaddr;
432 	size = params.len;
433 	vaddr_end = vaddr + size;
434 
435 	/* Lock the user memory. */
436 	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
437 	if (!inpages) {
438 		ret = -ENOMEM;
439 		goto e_free;
440 	}
441 
442 	/*
443 	 * The LAUNCH_UPDATE command will perform in-place encryption of the
444 	 * memory content (i.e. it will write the same memory region with C=1).
445 	 * It's possible that the cache may contain the data with C=0, i.e.,
446 	 * unencrypted; invalidate it first.
447 	 */
448 	sev_clflush_pages(inpages, npages);
449 
450 	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
451 		int offset, len;
452 
453 		/*
454 		 * If the user buffer is not page-aligned, calculate the offset
455 		 * within the page.
456 		 */
457 		offset = vaddr & (PAGE_SIZE - 1);
458 
459 		/* Calculate the number of pages that can be encrypted in one go. */
460 		pages = get_num_contig_pages(i, inpages, npages);
461 
462 		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
463 
464 		data->handle = sev->handle;
465 		data->len = len;
466 		data->address = __sme_page_pa(inpages[i]) + offset;
467 		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
468 		if (ret)
469 			goto e_unpin;
470 
471 		size -= len;
472 		next_vaddr = vaddr + len;
473 	}
474 
475 e_unpin:
476 	/* content of memory is updated, mark pages dirty */
477 	for (i = 0; i < npages; i++) {
478 		set_page_dirty_lock(inpages[i]);
479 		mark_page_accessed(inpages[i]);
480 	}
481 	/* unlock the user pages */
482 	sev_unpin_memory(kvm, inpages, npages);
483 e_free:
484 	kfree(data);
485 	return ret;
486 }
487 
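/*
 * KVM_SEV_LAUNCH_MEASURE: retrieve the launch measurement from the firmware,
 * or only the required blob length when userspace passes params.len == 0.
 */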
488 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
489 {
490 	void __user *measure = (void __user *)(uintptr_t)argp->data;
491 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
492 	struct sev_data_launch_measure *data;
493 	struct kvm_sev_launch_measure params;
494 	void __user *p = NULL;
495 	void *blob = NULL;
496 	int ret;
497 
498 	if (!sev_guest(kvm))
499 		return -ENOTTY;
500 
501 	if (copy_from_user(&params, measure, sizeof(params)))
502 		return -EFAULT;
503 
504 	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
505 	if (!data)
506 		return -ENOMEM;
507 
508 	/* User wants to query the blob length */
509 	if (!params.len)
510 		goto cmd;
511 
512 	p = (void __user *)(uintptr_t)params.uaddr;
513 	if (p) {
514 		if (params.len > SEV_FW_BLOB_MAX_SIZE) {
515 			ret = -EINVAL;
516 			goto e_free;
517 		}
518 
519 		ret = -ENOMEM;
520 		blob = kmalloc(params.len, GFP_KERNEL);
521 		if (!blob)
522 			goto e_free;
523 
524 		data->address = __psp_pa(blob);
525 		data->len = params.len;
526 	}
527 
528 cmd:
529 	data->handle = sev->handle;
530 	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
531 
532 	/*
533 	 * When only the blob length is queried, the firmware still returns the
534 	 * required length in data->len, so report it back regardless of ret.
534 	 */
535 	if (!params.len)
536 		goto done;
537 
538 	if (ret)
539 		goto e_free_blob;
540 
541 	if (blob) {
542 		if (copy_to_user(p, blob, params.len))
543 			ret = -EFAULT;
544 	}
545 
546 done:
547 	params.len = data->len;
548 	if (copy_to_user(measure, &params, sizeof(params)))
549 		ret = -EFAULT;
550 e_free_blob:
551 	kfree(blob);
552 e_free:
553 	kfree(data);
554 	return ret;
555 }
556 
557 static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
558 {
559 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
560 	struct sev_data_launch_finish *data;
561 	int ret;
562 
563 	if (!sev_guest(kvm))
564 		return -ENOTTY;
565 
566 	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
567 	if (!data)
568 		return -ENOMEM;
569 
570 	data->handle = sev->handle;
571 	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
572 
573 	kfree(data);
574 	return ret;
575 }
576 
577 static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
578 {
579 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
580 	struct kvm_sev_guest_status params;
581 	struct sev_data_guest_status *data;
582 	int ret;
583 
584 	if (!sev_guest(kvm))
585 		return -ENOTTY;
586 
587 	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
588 	if (!data)
589 		return -ENOMEM;
590 
591 	data->handle = sev->handle;
592 	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
593 	if (ret)
594 		goto e_free;
595 
596 	params.policy = data->policy;
597 	params.state = data->state;
598 	params.handle = data->handle;
599 
600 	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
601 		ret = -EFAULT;
602 e_free:
603 	kfree(data);
604 	return ret;
605 }
606 
607 static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
608 			       unsigned long dst, int size,
609 			       int *error, bool enc)
610 {
611 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
612 	struct sev_data_dbg *data;
613 	int ret;
614 
615 	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
616 	if (!data)
617 		return -ENOMEM;
618 
619 	data->handle = sev->handle;
620 	data->dst_addr = dst;
621 	data->src_addr = src;
622 	data->len = size;
623 
624 	ret = sev_issue_cmd(kvm,
625 			    enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
626 			    data, error);
627 	kfree(data);
628 	return ret;
629 }
630 
631 static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
632 			     unsigned long dst_paddr, int sz, int *err)
633 {
634 	int offset;
635 
636 	/*
637 	 * It's safe to read more than was asked for; the caller should ensure
638 	 * that the destination has enough space.
639 	 */
640 	src_paddr = round_down(src_paddr, 16);
641 	offset = src_paddr & 15;
642 	sz = round_up(sz + offset, 16);
643 
644 	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
645 }
646 
647 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
648 				  unsigned long __user dst_uaddr,
649 				  unsigned long dst_paddr,
650 				  int size, int *err)
651 {
652 	struct page *tpage = NULL;
653 	int ret, offset;
654 
655 	/* If the inputs are not 16-byte aligned, use an intermediate buffer. */
656 	if (!IS_ALIGNED(dst_paddr, 16) ||
657 	    !IS_ALIGNED(paddr,     16) ||
658 	    !IS_ALIGNED(size,      16)) {
659 		tpage = (void *)alloc_page(GFP_KERNEL);
660 		if (!tpage)
661 			return -ENOMEM;
662 
663 		dst_paddr = __sme_page_pa(tpage);
664 	}
665 
666 	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
667 	if (ret)
668 		goto e_free;
669 
670 	if (tpage) {
671 		offset = paddr & 15;
672 		if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
673 				 page_address(tpage) + offset, size))
674 			ret = -EFAULT;
675 	}
676 
677 e_free:
678 	if (tpage)
679 		__free_page(tpage);
680 
681 	return ret;
682 }
683 
684 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
685 				  unsigned long __user vaddr,
686 				  unsigned long dst_paddr,
687 				  unsigned long __user dst_vaddr,
688 				  int size, int *error)
689 {
690 	struct page *src_tpage = NULL;
691 	struct page *dst_tpage = NULL;
692 	int ret, len = size;
693 
694 	/* If the source buffer is not 16-byte aligned, use an intermediate buffer. */
695 	if (!IS_ALIGNED(vaddr, 16)) {
696 		src_tpage = alloc_page(GFP_KERNEL);
697 		if (!src_tpage)
698 			return -ENOMEM;
699 
700 		if (copy_from_user(page_address(src_tpage),
701 				(void __user *)(uintptr_t)vaddr, size)) {
702 			__free_page(src_tpage);
703 			return -EFAULT;
704 		}
705 
706 		paddr = __sme_page_pa(src_tpage);
707 	}
708 
709 	/*
710 	 *  If the destination buffer or length is not aligned, do a read-modify-write:
711 	 *   - decrypt the destination into an intermediate buffer
712 	 *   - copy the source buffer into the intermediate buffer
713 	 *   - use the intermediate buffer as the source buffer
714 	 */
715 	if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
716 		int dst_offset;
717 
718 		dst_tpage = alloc_page(GFP_KERNEL);
719 		if (!dst_tpage) {
720 			ret = -ENOMEM;
721 			goto e_free;
722 		}
723 
724 		ret = __sev_dbg_decrypt(kvm, dst_paddr,
725 					__sme_page_pa(dst_tpage), size, error);
726 		if (ret)
727 			goto e_free;
728 
729 		/*
730 		 *  If the source is already in a kernel buffer, use memcpy();
731 		 *  otherwise use copy_from_user().
732 		 */
733 		dst_offset = dst_paddr & 15;
734 
735 		if (src_tpage)
736 			memcpy(page_address(dst_tpage) + dst_offset,
737 			       page_address(src_tpage), size);
738 		else {
739 			if (copy_from_user(page_address(dst_tpage) + dst_offset,
740 					   (void __user *)(uintptr_t)vaddr, size)) {
741 				ret = -EFAULT;
742 				goto e_free;
743 			}
744 		}
745 
746 		paddr = __sme_page_pa(dst_tpage);
747 		dst_paddr = round_down(dst_paddr, 16);
748 		len = round_up(size, 16);
749 	}
750 
751 	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
752 
753 e_free:
754 	if (src_tpage)
755 		__free_page(src_tpage);
756 	if (dst_tpage)
757 		__free_page(dst_tpage);
758 	return ret;
759 }
760 
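/*
 * KVM_SEV_DBG_{DECRYPT,ENCRYPT}: walk the source range one page at a time,
 * pinning the source and destination pages and issuing a debug command for
 * each chunk; unaligned addresses and sizes are handled via intermediate
 * pages.
 */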
761 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
762 {
763 	unsigned long vaddr, vaddr_end, next_vaddr;
764 	unsigned long dst_vaddr;
765 	struct page **src_p, **dst_p;
766 	struct kvm_sev_dbg debug;
767 	unsigned long n;
768 	unsigned int size;
769 	int ret;
770 
771 	if (!sev_guest(kvm))
772 		return -ENOTTY;
773 
774 	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
775 		return -EFAULT;
776 
777 	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
778 		return -EINVAL;
779 	if (!debug.dst_uaddr)
780 		return -EINVAL;
781 
782 	vaddr = debug.src_uaddr;
783 	size = debug.len;
784 	vaddr_end = vaddr + size;
785 	dst_vaddr = debug.dst_uaddr;
786 
787 	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
788 		int len, s_off, d_off;
789 
790 		/* lock userspace source and destination page */
791 		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
792 		if (!src_p)
793 			return -EFAULT;
794 
795 		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
796 		if (!dst_p) {
797 			sev_unpin_memory(kvm, src_p, n);
798 			return -EFAULT;
799 		}
800 
801 		/*
802 		 * The DBG_{DE,EN}CRYPT commands will perform {de,en}cryption of the
803 		 * memory content (i.e. it will write the same memory region with C=1).
804 		 * It's possible that the cache may contain the data with C=0, i.e.,
805 		 * unencrypted; invalidate it first.
806 		 */
807 		sev_clflush_pages(src_p, 1);
808 		sev_clflush_pages(dst_p, 1);
809 
810 		/*
811 		 * Since the user buffer may not be page-aligned, calculate the
812 		 * offset within the page.
813 		 */
814 		s_off = vaddr & ~PAGE_MASK;
815 		d_off = dst_vaddr & ~PAGE_MASK;
816 		len = min_t(size_t, (PAGE_SIZE - s_off), size);
817 
818 		if (dec)
819 			ret = __sev_dbg_decrypt_user(kvm,
820 						     __sme_page_pa(src_p[0]) + s_off,
821 						     dst_vaddr,
822 						     __sme_page_pa(dst_p[0]) + d_off,
823 						     len, &argp->error);
824 		else
825 			ret = __sev_dbg_encrypt_user(kvm,
826 						     __sme_page_pa(src_p[0]) + s_off,
827 						     vaddr,
828 						     __sme_page_pa(dst_p[0]) + d_off,
829 						     dst_vaddr,
830 						     len, &argp->error);
831 
832 		sev_unpin_memory(kvm, src_p, n);
833 		sev_unpin_memory(kvm, dst_p, n);
834 
835 		if (ret)
836 			goto err;
837 
838 		next_vaddr = vaddr + len;
839 		dst_vaddr = dst_vaddr + len;
840 		size -= len;
841 	}
842 err:
843 	return ret;
844 }
845 
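/*
 * KVM_SEV_LAUNCH_SECRET: inject a guest-owner provided secret into guest
 * memory with LAUNCH_UPDATE_SECRET. The target guest pages must be physically
 * contiguous.
 */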
846 static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
847 {
848 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
849 	struct sev_data_launch_secret *data;
850 	struct kvm_sev_launch_secret params;
851 	struct page **pages;
852 	void *blob, *hdr;
853 	unsigned long n;
854 	int ret, offset;
855 
856 	if (!sev_guest(kvm))
857 		return -ENOTTY;
858 
859 	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
860 		return -EFAULT;
861 
862 	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
863 	if (!pages)
864 		return -ENOMEM;
865 
866 	/*
867 	 * The secret must be copied into a contiguous memory region, so verify
868 	 * that the userspace memory pages are contiguous before issuing the command.
869 	 */
870 	if (get_num_contig_pages(0, pages, n) != n) {
871 		ret = -EINVAL;
872 		goto e_unpin_memory;
873 	}
874 
875 	ret = -ENOMEM;
876 	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
877 	if (!data)
878 		goto e_unpin_memory;
879 
880 	offset = params.guest_uaddr & (PAGE_SIZE - 1);
881 	data->guest_address = __sme_page_pa(pages[0]) + offset;
882 	data->guest_len = params.guest_len;
883 
884 	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
885 	if (IS_ERR(blob)) {
886 		ret = PTR_ERR(blob);
887 		goto e_free;
888 	}
889 
890 	data->trans_address = __psp_pa(blob);
891 	data->trans_len = params.trans_len;
892 
893 	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
894 	if (IS_ERR(hdr)) {
895 		ret = PTR_ERR(hdr);
896 		goto e_free_blob;
897 	}
898 	data->hdr_address = __psp_pa(hdr);
899 	data->hdr_len = params.hdr_len;
900 
901 	data->handle = sev->handle;
902 	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
903 
904 	kfree(hdr);
905 
906 e_free_blob:
907 	kfree(blob);
908 e_free:
909 	kfree(data);
910 e_unpin_memory:
911 	sev_unpin_memory(kvm, pages, n);
912 	return ret;
913 }
914 
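/*
 * Dispatcher for the KVM_MEMORY_ENCRYPT_OP VM ioctl: copy in the struct
 * kvm_sev_cmd, route it to the matching KVM_SEV_* handler under kvm->lock and
 * copy the (possibly updated) command back to userspace. A minimal usage
 * sketch from userspace (illustrative only):
 *
 *	struct kvm_sev_cmd cmd = { .id = KVM_SEV_INIT };
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 */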
915 int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
916 {
917 	struct kvm_sev_cmd sev_cmd;
918 	int r;
919 
920 	if (!svm_sev_enabled())
921 		return -ENOTTY;
922 
923 	if (!argp)
924 		return 0;
925 
926 	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
927 		return -EFAULT;
928 
929 	mutex_lock(&kvm->lock);
930 
931 	switch (sev_cmd.id) {
932 	case KVM_SEV_INIT:
933 		r = sev_guest_init(kvm, &sev_cmd);
934 		break;
935 	case KVM_SEV_LAUNCH_START:
936 		r = sev_launch_start(kvm, &sev_cmd);
937 		break;
938 	case KVM_SEV_LAUNCH_UPDATE_DATA:
939 		r = sev_launch_update_data(kvm, &sev_cmd);
940 		break;
941 	case KVM_SEV_LAUNCH_MEASURE:
942 		r = sev_launch_measure(kvm, &sev_cmd);
943 		break;
944 	case KVM_SEV_LAUNCH_FINISH:
945 		r = sev_launch_finish(kvm, &sev_cmd);
946 		break;
947 	case KVM_SEV_GUEST_STATUS:
948 		r = sev_guest_status(kvm, &sev_cmd);
949 		break;
950 	case KVM_SEV_DBG_DECRYPT:
951 		r = sev_dbg_crypt(kvm, &sev_cmd, true);
952 		break;
953 	case KVM_SEV_DBG_ENCRYPT:
954 		r = sev_dbg_crypt(kvm, &sev_cmd, false);
955 		break;
956 	case KVM_SEV_LAUNCH_SECRET:
957 		r = sev_launch_secret(kvm, &sev_cmd);
958 		break;
959 	default:
960 		r = -EINVAL;
961 		goto out;
962 	}
963 
964 	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
965 		r = -EFAULT;
966 
967 out:
968 	mutex_unlock(&kvm->lock);
969 	return r;
970 }
971 
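/*
 * KVM_MEMORY_ENCRYPT_REG_REGION: pin the userspace range for the lifetime of
 * the VM and track it on sev->regions_list so it can be unpinned on
 * unregister or VM destruction.
 */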
972 int svm_register_enc_region(struct kvm *kvm,
973 			    struct kvm_enc_region *range)
974 {
975 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
976 	struct enc_region *region;
977 	int ret = 0;
978 
979 	if (!sev_guest(kvm))
980 		return -ENOTTY;
981 
982 	if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
983 		return -EINVAL;
984 
985 	region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
986 	if (!region)
987 		return -ENOMEM;
988 
989 	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
990 	if (!region->pages) {
991 		ret = -ENOMEM;
992 		goto e_free;
993 	}
994 
995 	/*
996 	 * The guest may change the memory encryption attribute from C=0 -> C=1
997 	 * or vice versa for this memory range. Flush the caches to make sure
998 	 * that guest data gets written into memory with the correct
999 	 * C-bit.
1000 	 */
1001 	sev_clflush_pages(region->pages, region->npages);
1002 
1003 	region->uaddr = range->addr;
1004 	region->size = range->size;
1005 
1006 	mutex_lock(&kvm->lock);
1007 	list_add_tail(&region->list, &sev->regions_list);
1008 	mutex_unlock(&kvm->lock);
1009 
1010 	return ret;
1011 
1012 e_free:
1013 	kfree(region);
1014 	return ret;
1015 }
1016 
1017 static struct enc_region *
1018 find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
1019 {
1020 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1021 	struct list_head *head = &sev->regions_list;
1022 	struct enc_region *i;
1023 
1024 	list_for_each_entry(i, head, list) {
1025 		if (i->uaddr == range->addr &&
1026 		    i->size == range->size)
1027 			return i;
1028 	}
1029 
1030 	return NULL;
1031 }
1032 
1033 static void __unregister_enc_region_locked(struct kvm *kvm,
1034 					   struct enc_region *region)
1035 {
1036 	sev_unpin_memory(kvm, region->pages, region->npages);
1037 	list_del(&region->list);
1038 	kfree(region);
1039 }
1040 
1041 int svm_unregister_enc_region(struct kvm *kvm,
1042 			      struct kvm_enc_region *range)
1043 {
1044 	struct enc_region *region;
1045 	int ret;
1046 
1047 	mutex_lock(&kvm->lock);
1048 
1049 	if (!sev_guest(kvm)) {
1050 		ret = -ENOTTY;
1051 		goto failed;
1052 	}
1053 
1054 	region = find_enc_region(kvm, range);
1055 	if (!region) {
1056 		ret = -EINVAL;
1057 		goto failed;
1058 	}
1059 
1060 	/*
1061 	 * Ensure that all guest tagged cache entries are flushed before
1062 	 * releasing the pages back to the system for use. CLFLUSH will
1063 	 * not do this, so issue a WBINVD.
1064 	 */
1065 	wbinvd_on_all_cpus();
1066 
1067 	__unregister_enc_region_locked(kvm, region);
1068 
1069 	mutex_unlock(&kvm->lock);
1070 	return 0;
1071 
1072 failed:
1073 	mutex_unlock(&kvm->lock);
1074 	return ret;
1075 }
1076 
1077 void sev_vm_destroy(struct kvm *kvm)
1078 {
1079 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1080 	struct list_head *head = &sev->regions_list;
1081 	struct list_head *pos, *q;
1082 
1083 	if (!sev_guest(kvm))
1084 		return;
1085 
1086 	mutex_lock(&kvm->lock);
1087 
1088 	/*
1089 	 * Ensure that all guest tagged cache entries are flushed before
1090 	 * releasing the pages back to the system for use. CLFLUSH will
1091 	 * not do this, so issue a WBINVD.
1092 	 */
1093 	wbinvd_on_all_cpus();
1094 
1095 	/*
1096 	 * If userspace was terminated before unregistering the memory regions,
1097 	 * unpin all of the registered memory.
1098 	 */
1099 	if (!list_empty(head)) {
1100 		list_for_each_safe(pos, q, head) {
1101 			__unregister_enc_region_locked(kvm,
1102 				list_entry(pos, struct enc_region, list));
1103 		}
1104 	}
1105 
1106 	mutex_unlock(&kvm->lock);
1107 
1108 	sev_unbind_asid(kvm, sev->handle);
1109 	sev_asid_free(sev->asid);
1110 }
1111 
1112 int __init sev_hardware_setup(void)
1113 {
1114 	struct sev_user_data_status *status;
1115 	int rc;
1116 
1117 	/* Maximum number of encrypted guests supported simultaneously */
1118 	max_sev_asid = cpuid_ecx(0x8000001F);
1119 
1120 	if (!max_sev_asid)
1121 		return 1;
1122 
1123 	/* Minimum ASID value that should be used for an SEV guest */
1124 	min_sev_asid = cpuid_edx(0x8000001F);
1125 
1126 	/* Initialize SEV ASID bitmaps */
1127 	sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1128 	if (!sev_asid_bitmap)
1129 		return 1;
1130 
1131 	sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1132 	if (!sev_reclaim_asid_bitmap)
1133 		return 1;
1134 
1135 	status = kmalloc(sizeof(*status), GFP_KERNEL);
1136 	if (!status)
1137 		return 1;
1138 
1139 	/*
1140 	 * Check SEV platform status.
1141 	 *
1142 	 * PLATFORM_STATUS can be called in any state. If the query fails, then
1143 	 * either the PSP firmware does not support the SEV feature or the SEV
1144 	 * firmware is dead.
1145 	 */
1146 	rc = sev_platform_status(status, NULL);
1147 	if (rc)
1148 		goto err;
1149 
1150 	pr_info("SEV supported\n");
1151 
1152 err:
1153 	kfree(status);
1154 	return rc;
1155 }
1156 
1157 void sev_hardware_teardown(void)
1158 {
1159 	bitmap_free(sev_asid_bitmap);
1160 	bitmap_free(sev_reclaim_asid_bitmap);
1161 
1162 	sev_flush_asids();
1163 }
1164 
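/*
 * Called before VMRUN of an SEV guest: program the guest's ASID into the VMCB
 * and request an ASID TLB flush unless this exact VMCB already ran with this
 * ASID on this CPU.
 */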
1165 void pre_sev_run(struct vcpu_svm *svm, int cpu)
1166 {
1167 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
1168 	int asid = sev_get_asid(svm->vcpu.kvm);
1169 
1170 	/* Assign the ASID allocated to this SEV guest */
1171 	svm->vmcb->control.asid = asid;
1172 
1173 	/*
1174 	 * Flush guest TLB:
1175 	 *
1176 	 * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
1177 	 * 2) when this VMCB was executed on a different host CPU in previous VMRUNs.
1178 	 */
1179 	if (sd->sev_vmcbs[asid] == svm->vmcb &&
1180 	    svm->last_cpu == cpu)
1181 		return;
1182 
1183 	svm->last_cpu = cpu;
1184 	sd->sev_vmcbs[asid] = svm->vmcb;
1185 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
1186 	mark_dirty(svm->vmcb, VMCB_ASID);
1187 }
1188