// SPDX-License-Identifier: GPL-2.0
/*
 * Guest memory management for KVM/s390
 *
 * Copyright IBM Corp. 2008, 2020, 2024
 *
 *    Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>
 *               David Hildenbrand <david@redhat.com>
 *               Janosch Frank <frankja@linux.vnet.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/pgtable.h>
#include <linux/pagemap.h>

#include <asm/lowcore.h>
#include <asm/gmap.h>
#include <asm/uv.h>

#include "gmap.h"

/**
 * should_export_before_import - Determine whether an export is needed
 *				 before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although considered as one, the Unpin Page UVC is not an actual import,
 * so it is not affected.
 *
 * An export is also not needed when there is only one protected VM, because
 * the page cannot belong to the wrong VM in that case (there is no "other
 * VM" it can belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
	/*
	 * The misc feature indicates, among other things, that importing a
	 * shared page from a different protected VM will automatically also
	 * transfer its ownership.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC))
		return false;
	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
		return false;
	return atomic_read(&mm->context.protected_count) > 1;
}

static int __gmap_make_secure(struct gmap *gmap, struct page *page, void *uvcb)
{
	struct folio *folio = page_folio(page);
	int rc;

	/*
	 * Secure pages cannot be huge, and userspace should not combine
	 * both. If userspace does it anyway, the unpack will fail with
	 * -EFAULT and the guest will never reach secure mode.
	 * If userspace plays dirty tricks and decides to map huge pages at a
	 * later point in time, it will receive a segmentation fault or
	 * KVM_RUN will return -EFAULT.
	 */
	if (folio_test_hugetlb(folio))
		return -EFAULT;
	if (folio_test_large(folio)) {
		mmap_read_unlock(gmap->mm);
		rc = kvm_s390_wiggle_split_folio(gmap->mm, folio, true);
		mmap_read_lock(gmap->mm);
		if (rc)
			return rc;
		folio = page_folio(page);
	}

	if (!folio_trylock(folio))
		return -EAGAIN;
	if (should_export_before_import(uvcb, gmap->mm))
		uv_convert_from_secure(folio_to_phys(folio));
	rc = make_folio_secure(folio, uvcb);
	folio_unlock(folio);

	/*
	 * In theory a race is possible and the folio might have become
	 * large again before the folio_trylock() above. In that case, no
	 * action is performed and -EAGAIN is returned; the callers will
	 * have to try again later.
	 * In most cases this implies running the VM again, getting the same
	 * exception again, and making another attempt in this function.
	 * This is expected to happen extremely rarely.
	 */
	if (rc == -E2BIG)
		return -EAGAIN;
	/* The folio has too many references, try to shake some off */
	if (rc == -EBUSY) {
		mmap_read_unlock(gmap->mm);
		kvm_s390_wiggle_split_folio(gmap->mm, folio, false);
		mmap_read_lock(gmap->mm);
		return -EAGAIN;
	}

	return rc;
}

/**
 * gmap_make_secure() - make one guest page secure
 * @gmap: the guest gmap
 * @gaddr: the guest address that needs to be made secure
 * @uvcb: the UVCB specifying which operation needs to be performed
 *
 * Context: needs to be called with kvm->srcu held.
 * Return: 0 on success, < 0 in case of error (see __gmap_make_secure()).
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct kvm *kvm = gmap->private;
	struct page *page;
	int rc = 0;

	lockdep_assert_held(&kvm->srcu);

	page = gfn_to_page(kvm, gpa_to_gfn(gaddr));
	mmap_read_lock(gmap->mm);
	if (page)
		rc = __gmap_make_secure(gmap, page, uvcb);
	kvm_release_page_clean(page);
	mmap_read_unlock(gmap->mm);

	return rc;
}

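/*
 * Illustrative sketch, not part of the original source: gmap_make_secure()
 * reports -EAGAIN when the operation should simply be retried (e.g. the
 * folio was large, or held too many references). A hypothetical caller
 * that retries in place, rather than through the exception path, could
 * look like this:
 *
 *	int rc;
 *
 *	do {
 *		rc = gmap_make_secure(gmap, gaddr, uvcb);
 *	} while (rc == -EAGAIN);
 *
 * In practice the retry usually happens implicitly: the VM is resumed,
 * takes the same exception again, and ends up back in this function.
 */
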
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}

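/*
 * For comparison, a sketch of how another import-like operation could be
 * funneled through gmap_make_secure() with only the UVCB differing. This
 * hypothetical wrapper for the Unpin Page Shared UVC (mirroring
 * gmap_convert_to_secure() above) is illustrative only and not part of
 * this file; reusing struct uv_cb_cts for it is an assumption:
 *
 *	int gmap_unpin_page_shared(struct gmap *gmap, unsigned long gaddr)
 *	{
 *		struct uv_cb_cts uvcb = {
 *			.header.cmd = UVC_CMD_UNPIN_PAGE_SHARED,
 *			.header.len = sizeof(uvcb),
 *			.guest_handle = gmap->guest_handle,
 *			.gaddr = gaddr,
 *		};
 *
 *		return gmap_make_secure(gmap, gaddr, &uvcb);
 *	}
 *
 * As noted in should_export_before_import(), this command is handled as
 * an import even though it is not an actual one.
 */
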
/**
 * __gmap_destroy_page() - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @page: the page to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 *
 * Context: must be called holding the mm lock for gmap->mm
 * Return: 0 on success, a negative value on failure.
 */
static int __gmap_destroy_page(struct gmap *gmap, struct page *page)
{
	struct folio *folio = page_folio(page);
	int rc;

	/*
	 * See gmap_make_secure(): large folios cannot be secure. Small
	 * folio implies FW_LEVEL_PTE.
	 */
	if (folio_test_large(folio))
		return -EFAULT;

	rc = uv_destroy_folio(folio);
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress, but it will no longer be able to
	 * destroy the page. In that case we do not want to terminate the
	 * process; instead, we try to export the page.
	 */
	if (rc)
		rc = uv_convert_from_secure_folio(folio);

	return rc;
}

/**
 * gmap_destroy_page() - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 *
 * Context: may sleep.
 * Return: 0 on success, a negative value on failure.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct page *page;
	int rc = 0;

	mmap_read_lock(gmap->mm);
	page = gfn_to_page(gmap->private, gpa_to_gfn(gaddr));
	if (page)
		rc = __gmap_destroy_page(gmap, page);
	kvm_release_page_clean(page);
	mmap_read_unlock(gmap->mm);
	return rc;
}
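
/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * helper that destroys a page-aligned range of guest memory one page at
 * a time. gmap_destroy_page() takes and drops the mmap lock itself, so
 * the loop needs no extra locking:
 *
 *	int gmap_destroy_range(struct gmap *gmap, unsigned long gaddr,
 *			       unsigned long len)
 *	{
 *		unsigned long end = gaddr + len;
 *		int rc;
 *
 *		for (; gaddr < end; gaddr += PAGE_SIZE) {
 *			rc = gmap_destroy_page(gmap, gaddr);
 *			if (rc)
 *				return rc;
 *		}
 *		return 0;
 *	}
 */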