// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN: Memory mapping management
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *	Fei Li <lei1.li@intel.com>
 *	Shuo Liu <shuo.a.liu@intel.com>
 */

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "acrn_drv.h"

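/*
 * Wrap a single &struct vm_memory_region_op in a one-entry
 * &struct vm_memory_region_batch and hand it to the hypervisor via
 * hcall_set_memory_regions(), which updates the EPT of the User VM.
 */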
static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region)
{
	struct vm_memory_region_batch *regions;
	int ret;

	regions = kzalloc(sizeof(*regions), GFP_KERNEL);
	if (!regions)
		return -ENOMEM;

	regions->vmid = vm->vmid;
	regions->regions_num = 1;
	regions->regions_gpa = virt_to_phys(region);

	ret = hcall_set_memory_regions(virt_to_phys(regions));
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Failed to set memory region for VM[%u]!\n", vm->vmid);

	kfree(regions);
	return ret;
}

/**
 * acrn_mm_region_add() - Set up the EPT mapping of a memory region.
 * @vm: User VM.
 * @user_gpa: A GPA of the User VM.
 * @service_gpa: A GPA of the Service VM.
 * @size: Size of the region.
 * @mem_type: Combination of ACRN_MEM_TYPE_*.
 * @mem_access_right: Combination of ACRN_MEM_ACCESS_*.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,
		       u64 size, u32 mem_type, u32 mem_access_right)
{
	struct vm_memory_region_op *region;
	int ret = 0;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->type = ACRN_MEM_REGION_ADD;
	region->user_vm_pa = user_gpa;
	region->service_vm_pa = service_gpa;
	region->size = size;
	region->attr = ((mem_type & ACRN_MEM_TYPE_MASK) |
			(mem_access_right & ACRN_MEM_ACCESS_RIGHT_MASK));
	ret = modify_region(vm, region);

	dev_dbg(acrn_dev.this_device,
		"%s: user-GPA[%pK] service-GPA[%pK] size[0x%llx].\n",
		__func__, (void *)user_gpa, (void *)service_gpa, size);
	kfree(region);
	return ret;
}

/**
 * acrn_mm_region_del() - Delete the EPT mapping of a memory region.
 * @vm: User VM.
 * @user_gpa: A GPA of the User VM.
 * @size: Size of the region.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size)
{
	struct vm_memory_region_op *region;
	int ret = 0;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->type = ACRN_MEM_REGION_DEL;
	region->user_vm_pa = user_gpa;
	region->service_vm_pa = 0UL;
	region->size = size;
	region->attr = 0U;

	ret = modify_region(vm, region);

	dev_dbg(acrn_dev.this_device, "%s: user-GPA[%pK] size[0x%llx].\n",
		__func__, (void *)user_gpa, size);
	kfree(region);
	return ret;
}

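/**
 * acrn_vm_memseg_map() - Map a memory segment of a User VM.
 * @vm: User VM.
 * @memmap: Info of the memory mapping request.
 *
 * RAM mappings are handled by acrn_vm_ram_map(); MMIO mappings are set up
 * directly as uncached (ACRN_MEM_TYPE_UC) EPT regions. Any other memmap
 * type is rejected.
 *
 * Return: 0 on success, <0 on error.
 */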
int acrn_vm_memseg_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	int ret;

	if (memmap->type == ACRN_MEMMAP_RAM)
		return acrn_vm_ram_map(vm, memmap);

	if (memmap->type != ACRN_MEMMAP_MMIO) {
		dev_dbg(acrn_dev.this_device,
			"Invalid memmap type: %u\n", memmap->type);
		return -EINVAL;
	}

	ret = acrn_mm_region_add(vm, memmap->user_vm_pa,
				 memmap->service_vm_pa, memmap->len,
				 ACRN_MEM_TYPE_UC, memmap->attr);
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Add memory region failed, VM[%u]!\n", vm->vmid);

	return ret;
}

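/**
 * acrn_vm_memseg_unmap() - Unmap an MMIO memory segment of a User VM.
 * @vm: User VM.
 * @memmap: Info of the memory mapping to remove.
 *
 * Only MMIO mappings can be removed individually; pinned RAM mappings are
 * released all at once by acrn_vm_all_ram_unmap().
 *
 * Return: 0 on success, <0 on error.
 */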
int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	int ret;

	if (memmap->type != ACRN_MEMMAP_MMIO) {
		dev_dbg(acrn_dev.this_device,
			"Invalid memmap type: %u\n", memmap->type);
		return -EINVAL;
	}

	ret = acrn_mm_region_del(vm, memmap->user_vm_pa, memmap->len);
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Del memory region failed, VM[%u]!\n", vm->vmid);

	return ret;
}

/**
 * acrn_vm_ram_map() - Create a RAM EPT mapping of a User VM.
 * @vm: The User VM pointer.
 * @memmap: Info of the EPT mapping.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	struct vm_memory_region_batch *regions_info;
	int nr_pages, i, order, nr_regions = 0;
	struct vm_memory_mapping *region_mapping;
	struct vm_memory_region_op *vm_region;
	struct page **pages = NULL, *page;
	void *remap_vaddr;
	int ret, pinned;
	u64 user_vm_pa;
	struct vm_area_struct *vma;

	if (!vm || !memmap)
		return -EINVAL;

	/* Get the number of pages in the map region */
	nr_pages = memmap->len >> PAGE_SHIFT;
	if (!nr_pages)
		return -EINVAL;

	mmap_read_lock(current->mm);
	vma = vma_lookup(current->mm, memmap->vma_base);
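	/*
	 * A VM_PFNMAP VMA (e.g. MMIO remapped by another driver) has no
	 * struct page backing to pin. Walk its PTEs instead, validate that
	 * the range is contiguous and not refcounted, and pass the physical
	 * range straight to the hypervisor.
	 */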
	if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
		unsigned long start_pfn, cur_pfn;
		bool writable;

		if ((memmap->vma_base + memmap->len) > vma->vm_end) {
			mmap_read_unlock(current->mm);
			return -EINVAL;
		}

		for (i = 0; i < nr_pages; i++) {
			struct follow_pfnmap_args args = {
				.vma = vma,
				.address = memmap->vma_base + i * PAGE_SIZE,
			};

			ret = follow_pfnmap_start(&args);
			if (ret)
				break;

			cur_pfn = args.pfn;
			if (i == 0)
				start_pfn = cur_pfn;
			writable = args.writable;
			follow_pfnmap_end(&args);

			/* Disallow write access if the PTE is not writable. */
			if (!writable &&
			    (memmap->attr & ACRN_MEM_ACCESS_WRITE)) {
				ret = -EFAULT;
				break;
			}

			/* Disallow refcounted pages. */
			if (pfn_valid(cur_pfn) &&
			    !PageReserved(pfn_to_page(cur_pfn))) {
				ret = -EFAULT;
				break;
			}

			/* Disallow non-contiguous ranges. */
			if (cur_pfn != start_pfn + i) {
				ret = -EINVAL;
				break;
			}
		}
		mmap_read_unlock(current->mm);

		if (ret) {
			dev_dbg(acrn_dev.this_device,
				"Failed to lookup PFN at VMA:%pK.\n",
				(void *)memmap->vma_base);
			return ret;
		}

		return acrn_mm_region_add(vm, memmap->user_vm_pa,
					  PFN_PHYS(start_pfn), memmap->len,
					  ACRN_MEM_TYPE_WB, memmap->attr);
	}
	mmap_read_unlock(current->mm);

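	/*
	 * Otherwise the region is normal RAM: pin the user pages, map them
	 * into the Service VM kernel, and describe them to the hypervisor
	 * in one batched hypercall.
	 */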
	pages = vzalloc(array_size(nr_pages, sizeof(*pages)));
	if (!pages)
		return -ENOMEM;

	/* Pin the pages of the user memory map region */
	pinned = pin_user_pages_fast(memmap->vma_base,
				     nr_pages, FOLL_WRITE | FOLL_LONGTERM,
				     pages);
	if (pinned < 0) {
		ret = pinned;
		goto free_pages;
	} else if (pinned != nr_pages) {
		ret = -EFAULT;
		goto put_pages;
	}

	/* Create a kernel map for the map region */
	remap_vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!remap_vaddr) {
		ret = -ENOMEM;
		goto put_pages;
	}

	/* Record Service VM va <-> User VM pa mapping */
	mutex_lock(&vm->regions_mapping_lock);
	region_mapping = &vm->regions_mapping[vm->regions_mapping_count];
	if (vm->regions_mapping_count < ACRN_MEM_MAPPING_MAX) {
		region_mapping->pages = pages;
		region_mapping->npages = nr_pages;
		region_mapping->size = memmap->len;
		region_mapping->service_vm_va = remap_vaddr;
		region_mapping->user_vm_pa = memmap->user_vm_pa;
		vm->regions_mapping_count++;
	} else {
		dev_warn(acrn_dev.this_device,
			 "Run out of memory mapping slots!\n");
		ret = -ENOMEM;
		mutex_unlock(&vm->regions_mapping_lock);
		goto unmap_no_count;
	}
	mutex_unlock(&vm->regions_mapping_lock);

	/*
	 * Count the vm_memory_region_op entries: pinned pages may sit in
	 * compound (huge) pages, and each compound page can be covered by
	 * a single region of PAGE_SIZE << order bytes.
	 */
	for (i = 0; i < nr_pages; i += 1 << order) {
		page = pages[i];
		VM_BUG_ON_PAGE(PageTail(page), page);
		order = compound_order(page);
		nr_regions++;
	}

	/* Prepare the vm_memory_region_batch */
	regions_info = kzalloc(struct_size(regions_info, regions_op,
					   nr_regions), GFP_KERNEL);
	if (!regions_info) {
		ret = -ENOMEM;
		goto unmap_kernel_map;
	}
	regions_info->regions_num = nr_regions;

	/* Fill each vm_memory_region_op */
	vm_region = regions_info->regions_op;
	regions_info->vmid = vm->vmid;
	regions_info->regions_gpa = virt_to_phys(vm_region);
	user_vm_pa = memmap->user_vm_pa;
	for (i = 0; i < nr_pages; i += 1 << order) {
		u32 region_size;

		page = pages[i];
		VM_BUG_ON_PAGE(PageTail(page), page);
		order = compound_order(page);
		region_size = PAGE_SIZE << order;
		vm_region->type = ACRN_MEM_REGION_ADD;
		vm_region->user_vm_pa = user_vm_pa;
		vm_region->service_vm_pa = page_to_phys(page);
		vm_region->size = region_size;
		vm_region->attr = (ACRN_MEM_TYPE_WB & ACRN_MEM_TYPE_MASK) |
				  (memmap->attr & ACRN_MEM_ACCESS_RIGHT_MASK);

		vm_region++;
		user_vm_pa += region_size;
	}

	/* Inform the ACRN Hypervisor to set up EPT mappings */
	ret = hcall_set_memory_regions(virt_to_phys(regions_info));
	if (ret < 0) {
		dev_dbg(acrn_dev.this_device,
			"Failed to set regions, VM[%u]!\n", vm->vmid);
		goto unset_region;
	}
	kfree(regions_info);

	dev_dbg(acrn_dev.this_device,
		"%s: VM[%u] service-GVA[%pK] user-GPA[%pK] size[0x%llx]\n",
		__func__, vm->vmid,
		remap_vaddr, (void *)memmap->user_vm_pa, memmap->len);
	return ret;

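	/*
	 * Error unwind: each label releases only what was set up before the
	 * failure point, in reverse order of acquisition.
	 */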
unset_region:
	kfree(regions_info);
unmap_kernel_map:
	mutex_lock(&vm->regions_mapping_lock);
	vm->regions_mapping_count--;
	mutex_unlock(&vm->regions_mapping_lock);
unmap_no_count:
	vunmap(remap_vaddr);
put_pages:
	for (i = 0; i < pinned; i++)
		unpin_user_page(pages[i]);
free_pages:
	vfree(pages);
	return ret;
}

/**
 * acrn_vm_all_ram_unmap() - Destroy all RAM EPT mappings of a User VM.
 * @vm: The User VM.
 */
void acrn_vm_all_ram_unmap(struct acrn_vm *vm)
{
	struct vm_memory_mapping *region_mapping;
	int i, j;

	mutex_lock(&vm->regions_mapping_lock);
	for (i = 0; i < vm->regions_mapping_count; i++) {
		region_mapping = &vm->regions_mapping[i];
		vunmap(region_mapping->service_vm_va);
		for (j = 0; j < region_mapping->npages; j++)
			unpin_user_page(region_mapping->pages[j]);
		vfree(region_mapping->pages);
	}
	mutex_unlock(&vm->regions_mapping_lock);
}
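
/*
 * Minimal usage sketch (hypothetical caller; the real entry points are the
 * HSM ioctl handlers and the VM teardown path -- "uva" and "ram_size" below
 * are placeholder names, not identifiers from this driver):
 *
 *	struct acrn_vm_memmap memmap = {
 *		.type       = ACRN_MEMMAP_RAM,
 *		.user_vm_pa = 0,		// GPA inside the User VM
 *		.vma_base   = uva,		// user VA of the backing pages
 *		.len        = ram_size,		// page-aligned size
 *		.attr       = ACRN_MEM_ACCESS_RWX,
 *	};
 *
 *	ret = acrn_vm_memseg_map(vm, &memmap);	// pins pages, sets up EPT
 *	...
 *	acrn_vm_all_ram_unmap(vm);		// on VM destruction
 */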