xref: /linux/arch/s390/kvm/gmap.c (revision f45f8f0ed4c6d3a9be27ff27347408e1c1bbb364)
// SPDX-License-Identifier: GPL-2.0
/*
 * Guest memory management for KVM/s390
 *
 * Copyright IBM Corp. 2008, 2020, 2024
 *
 *    Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>
 *               David Hildenbrand <david@redhat.com>
 *               Janosch Frank <frankja@linux.vnet.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/pgtable.h>
#include <linux/pagemap.h>

#include <asm/lowcore.h>
#include <asm/gmap.h>
#include <asm/uv.h>

#include "gmap.h"

/**
 * gmap_make_secure() - make one guest page secure
 * @gmap: the guest gmap
 * @gaddr: the guest address that needs to be made secure
 * @uvcb: the UVCB specifying which operation needs to be performed
 *
 * Context: needs to be called with kvm->srcu held.
 * Return: 0 on success, < 0 in case of error.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct kvm *kvm = gmap->private;
	unsigned long vmaddr;

	lockdep_assert_held(&kvm->srcu);

	vmaddr = gfn_to_hva(kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return -EFAULT;
	return make_hva_secure(gmap->mm, vmaddr, uvcb);
}
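
/*
 * Illustrative caller sketch (not part of the build): gmap_make_secure()
 * must run inside an SRCU read-side critical section on kvm->srcu, as the
 * lockdep assertion above documents. The function and variable names here
 * are hypothetical; real callers live in the KVM protected-virtualization
 * handling paths.
 */
#if 0
static int example_secure_one_page(struct kvm *kvm, struct gmap *gmap,
				   unsigned long gaddr, void *uvcb)
{
	int srcu_idx, rc;

	srcu_idx = srcu_read_lock(&kvm->srcu);	/* satisfies lockdep_assert_held() */
	rc = gmap_make_secure(gmap, gaddr, uvcb);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return rc;
}
#endif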

int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
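
/*
 * The build-a-UVCB-and-delegate pattern above serves other UV calls as
 * well. A minimal sketch (not built), modeled on the image-unpack path:
 * struct uv_cb_unp and UVC_CMD_UNPACK_IMG come from asm/uv.h, while the
 * helper name and parameters are illustrative assumptions.
 */
#if 0
static int example_unpack_one(struct gmap *gmap, unsigned long gaddr,
			      unsigned long tweak, unsigned long offset)
{
	struct uv_cb_unp uvcb = {
		.header.cmd = UVC_CMD_UNPACK_IMG,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
		.tweak[0] = tweak,
		.tweak[1] = offset,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
#endif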

/**
 * __gmap_destroy_page() - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @page: the page to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 *
 * Context: must be called holding the mm lock for gmap->mm
 */
static int __gmap_destroy_page(struct gmap *gmap, struct page *page)
{
	struct folio *folio = page_folio(page);
	int rc;

	/*
	 * See gmap_make_secure(): large folios cannot be secure. Small
	 * folio implies FW_LEVEL_PTE.
	 */
	if (folio_test_large(folio))
		return -EFAULT;

	rc = uv_destroy_folio(folio);
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress, and it will not be able to destroy
	 * the page. In that case we do not want to terminate the process,
	 * we instead try to export the page.
	 */
	if (rc)
		rc = uv_convert_from_secure_folio(folio);

	return rc;
}

/**
 * gmap_destroy_page() - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 *
 * Context: may sleep.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct page *page;
	int rc = 0;

	mmap_read_lock(gmap->mm);
	page = gfn_to_page(gmap->private, gpa_to_gfn(gaddr));
	if (page)
		rc = __gmap_destroy_page(gmap, page);
	kvm_release_page_clean(page);
	mmap_read_unlock(gmap->mm);
	return rc;
}
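
/*
 * Illustrative only (not built): gmap_destroy_page() takes the mmap read
 * lock on gmap->mm itself and may sleep, so a caller just passes the
 * guest address. The handler name below is hypothetical; real callers
 * sit in the KVM intercept handling for ultravisor requests.
 */
#if 0
static int example_handle_destroy_page(struct kvm_vcpu *vcpu,
				       unsigned long gaddr)
{
	return gmap_destroy_page(vcpu->arch.gmap, gaddr);
}
#endif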