// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN: Memory mapping management
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *	Fei Li <lei1.li@intel.com>
 *	Shuo Liu <shuo.a.liu@intel.com>
 */

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "acrn_drv.h"

static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region)
{
	struct vm_memory_region_batch *regions;
	int ret;

	regions = kzalloc(sizeof(*regions), GFP_KERNEL);
	if (!regions)
		return -ENOMEM;

	regions->vmid = vm->vmid;
	regions->regions_num = 1;
	regions->regions_gpa = virt_to_phys(region);

	ret = hcall_set_memory_regions(virt_to_phys(regions));
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Failed to set memory region for VM[%u]!\n", vm->vmid);

	kfree(regions);
	return ret;
}

/**
 * acrn_mm_region_add() - Set up the EPT mapping of a memory region.
 * @vm:			User VM.
 * @user_gpa:		A GPA of User VM.
 * @service_gpa:	A GPA of Service VM.
 * @size:		Size of the region.
 * @mem_type:		Combination of ACRN_MEM_TYPE_*.
 * @mem_access_right:	Combination of ACRN_MEM_ACCESS_*.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,
		       u64 size, u32 mem_type, u32 mem_access_right)
{
	struct vm_memory_region_op *region;
	int ret = 0;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->type = ACRN_MEM_REGION_ADD;
	region->user_vm_pa = user_gpa;
	region->service_vm_pa = service_gpa;
	region->size = size;
	region->attr = ((mem_type & ACRN_MEM_TYPE_MASK) |
			(mem_access_right & ACRN_MEM_ACCESS_RIGHT_MASK));
	ret = modify_region(vm, region);

	dev_dbg(acrn_dev.this_device,
		"%s: user-GPA[%pK] service-GPA[%pK] size[0x%llx].\n",
		__func__, (void *)user_gpa, (void *)service_gpa, size);
	kfree(region);
	return ret;
}
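/*
 * Usage sketch (illustrative only, not part of the driver): mapping and
 * unmapping a single uncached page when both guest physical addresses
 * are already known. The GPA value is hypothetical, and the
 * ACRN_MEM_ACCESS_READ/ACRN_MEM_ACCESS_WRITE flags are assumed to come
 * from acrn_drv.h, which also declares both helpers.
 */
static int __maybe_unused example_one_page_region(struct acrn_vm *vm)
{
	u64 gpa = 0x100000000UL;	/* hypothetical guest physical address */
	int ret;

	/* Establish a one-page, uncached, read/write EPT mapping. */
	ret = acrn_mm_region_add(vm, gpa, gpa, PAGE_SIZE, ACRN_MEM_TYPE_UC,
				 ACRN_MEM_ACCESS_READ | ACRN_MEM_ACCESS_WRITE);
	if (ret < 0)
		return ret;

	/* Tear the same mapping down again. */
	return acrn_mm_region_del(vm, gpa, PAGE_SIZE);
}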
/**
 * acrn_mm_region_del() - Delete the EPT mapping of a memory region.
 * @vm:		User VM.
 * @user_gpa:	A GPA of the User VM.
 * @size:	Size of the region.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size)
{
	struct vm_memory_region_op *region;
	int ret = 0;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->type = ACRN_MEM_REGION_DEL;
	region->user_vm_pa = user_gpa;
	region->service_vm_pa = 0UL;
	region->size = size;
	region->attr = 0U;

	ret = modify_region(vm, region);

	dev_dbg(acrn_dev.this_device, "%s: user-GPA[%pK] size[0x%llx].\n",
		__func__, (void *)user_gpa, size);
	kfree(region);
	return ret;
}

/**
 * acrn_vm_memseg_map() - Map a memory segment of the User VM.
 * @vm:		User VM.
 * @memmap:	Info of the mapping (ACRN_MEMMAP_RAM or ACRN_MEMMAP_MMIO).
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_vm_memseg_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	int ret;

	if (memmap->type == ACRN_MEMMAP_RAM)
		return acrn_vm_ram_map(vm, memmap);

	if (memmap->type != ACRN_MEMMAP_MMIO) {
		dev_dbg(acrn_dev.this_device,
			"Invalid memmap type: %u\n", memmap->type);
		return -EINVAL;
	}

	ret = acrn_mm_region_add(vm, memmap->user_vm_pa,
				 memmap->service_vm_pa, memmap->len,
				 ACRN_MEM_TYPE_UC, memmap->attr);
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Add memory region failed, VM[%u]!\n", vm->vmid);

	return ret;
}

/**
 * acrn_vm_memseg_unmap() - Unmap an MMIO memory segment of the User VM.
 * @vm:		User VM.
 * @memmap:	Info of the mapping (only ACRN_MEMMAP_MMIO is accepted).
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	int ret;

	if (memmap->type != ACRN_MEMMAP_MMIO) {
		dev_dbg(acrn_dev.this_device,
			"Invalid memmap type: %u\n", memmap->type);
		return -EINVAL;
	}

	ret = acrn_mm_region_del(vm, memmap->user_vm_pa, memmap->len);
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Del memory region failed, VM[%u]!\n", vm->vmid);

	return ret;
}
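/*
 * Sketch (illustrative only, values hypothetical): building the
 * acrn_vm_memmap that acrn_vm_memseg_map() consumes for an MMIO
 * passthrough region. For ACRN_MEMMAP_MMIO the union member
 * service_vm_pa carries the Service VM side GPA; for ACRN_MEMMAP_RAM
 * the same storage is read as vma_base, a userspace virtual address.
 * The ACRN_MEM_ACCESS_READ flag is assumed to come from acrn_drv.h.
 */
static int __maybe_unused example_map_mmio_segment(struct acrn_vm *vm)
{
	struct acrn_vm_memmap memmap = {
		.type		= ACRN_MEMMAP_MMIO,
		.user_vm_pa	= 0xc0000000UL,	/* hypothetical guest GPA */
		.service_vm_pa	= 0xfed00000UL,	/* hypothetical host GPA */
		.len		= PAGE_SIZE,
		.attr		= ACRN_MEM_ACCESS_READ | ACRN_MEM_ACCESS_WRITE,
	};

	return acrn_vm_memseg_map(vm, &memmap);
}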
/**
 * acrn_vm_ram_map() - Create a RAM EPT mapping of User VM.
 * @vm:		The User VM pointer
 * @memmap:	Info of the EPT mapping
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	struct vm_memory_region_batch *regions_info;
	int nr_pages, i, order, nr_regions = 0;
	struct vm_memory_mapping *region_mapping;
	struct vm_memory_region_op *vm_region;
	struct page **pages = NULL, *page;
	void *remap_vaddr;
	int ret, pinned;
	u64 user_vm_pa;
	struct vm_area_struct *vma;

	if (!vm || !memmap)
		return -EINVAL;

	/* Get the number of pages of the map region */
	nr_pages = memmap->len >> PAGE_SHIFT;
	if (!nr_pages)
		return -EINVAL;

	mmap_read_lock(current->mm);
	vma = vma_lookup(current->mm, memmap->vma_base);
	if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
		unsigned long start_pfn, cur_pfn;
		spinlock_t *ptl;
		bool writable;
		pte_t *ptep;

		if ((memmap->vma_base + memmap->len) > vma->vm_end) {
			mmap_read_unlock(current->mm);
			return -EINVAL;
		}

		for (i = 0; i < nr_pages; i++) {
			ret = follow_pte(vma, memmap->vma_base + i * PAGE_SIZE,
					 &ptep, &ptl);
			if (ret)
				break;

			cur_pfn = pte_pfn(ptep_get(ptep));
			if (i == 0)
				start_pfn = cur_pfn;
			writable = !!pte_write(ptep_get(ptep));
			pte_unmap_unlock(ptep, ptl);

			/* Disallow write access if the PTE is not writable. */
			if (!writable &&
			    (memmap->attr & ACRN_MEM_ACCESS_WRITE)) {
				ret = -EFAULT;
				break;
			}

			/* Disallow refcounted pages. */
			if (pfn_valid(cur_pfn) &&
			    !PageReserved(pfn_to_page(cur_pfn))) {
				ret = -EFAULT;
				break;
			}

			/* Disallow non-contiguous ranges. */
			if (cur_pfn != start_pfn + i) {
				ret = -EINVAL;
				break;
			}
		}
		mmap_read_unlock(current->mm);

		if (ret) {
			dev_dbg(acrn_dev.this_device,
				"Failed to lookup PFN at VMA:%pK.\n",
				(void *)memmap->vma_base);
			return ret;
		}

		return acrn_mm_region_add(vm, memmap->user_vm_pa,
					  PFN_PHYS(start_pfn), memmap->len,
					  ACRN_MEM_TYPE_WB, memmap->attr);
	}
	mmap_read_unlock(current->mm);

	pages = vzalloc(array_size(nr_pages, sizeof(*pages)));
	if (!pages)
		return -ENOMEM;

	/* Lock the pages of user memory map region */
	pinned = pin_user_pages_fast(memmap->vma_base,
				     nr_pages, FOLL_WRITE | FOLL_LONGTERM,
				     pages);
	if (pinned < 0) {
		ret = pinned;
		goto free_pages;
	} else if (pinned != nr_pages) {
		ret = -EFAULT;
		goto put_pages;
	}

	/* Create a kernel map for the map region */
	remap_vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!remap_vaddr) {
		ret = -ENOMEM;
		goto put_pages;
	}

	/* Record Service VM va <-> User VM pa mapping */
	mutex_lock(&vm->regions_mapping_lock);
	region_mapping = &vm->regions_mapping[vm->regions_mapping_count];
	if (vm->regions_mapping_count < ACRN_MEM_MAPPING_MAX) {
		region_mapping->pages = pages;
		region_mapping->npages = nr_pages;
		region_mapping->size = memmap->len;
		region_mapping->service_vm_va = remap_vaddr;
		region_mapping->user_vm_pa = memmap->user_vm_pa;
		vm->regions_mapping_count++;
	} else {
		dev_warn(acrn_dev.this_device,
			 "Run out of memory mapping slots!\n");
		ret = -ENOMEM;
		mutex_unlock(&vm->regions_mapping_lock);
		goto unmap_no_count;
	}
	mutex_unlock(&vm->regions_mapping_lock);

	/* Calculate count of vm_memory_region_op */
	for (i = 0; i < nr_pages; i += 1 << order) {
		page = pages[i];
		VM_BUG_ON_PAGE(PageTail(page), page);
		order = compound_order(page);
		nr_regions++;
	}

	/* Prepare the vm_memory_region_batch */
	regions_info = kzalloc(struct_size(regions_info, regions_op,
					   nr_regions), GFP_KERNEL);
	if (!regions_info) {
		ret = -ENOMEM;
		goto unmap_kernel_map;
	}
	regions_info->regions_num = nr_regions;

	/* Fill each vm_memory_region_op */
	vm_region = regions_info->regions_op;
	regions_info->vmid = vm->vmid;
	regions_info->regions_gpa = virt_to_phys(vm_region);
	user_vm_pa = memmap->user_vm_pa;
	for (i = 0; i < nr_pages; i += 1 << order) {
		u32 region_size;

		page = pages[i];
		VM_BUG_ON_PAGE(PageTail(page), page);
		order = compound_order(page);
		region_size = PAGE_SIZE << order;
		vm_region->type = ACRN_MEM_REGION_ADD;
		vm_region->user_vm_pa = user_vm_pa;
		vm_region->service_vm_pa = page_to_phys(page);
		vm_region->size = region_size;
		vm_region->attr = (ACRN_MEM_TYPE_WB & ACRN_MEM_TYPE_MASK) |
				  (memmap->attr & ACRN_MEM_ACCESS_RIGHT_MASK);

		vm_region++;
		user_vm_pa += region_size;
	}

	/* Inform the ACRN Hypervisor to set up EPT mappings */
	ret = hcall_set_memory_regions(virt_to_phys(regions_info));
	if (ret < 0) {
		dev_dbg(acrn_dev.this_device,
			"Failed to set regions, VM[%u]!\n", vm->vmid);
		goto unset_region;
	}
	kfree(regions_info);

	dev_dbg(acrn_dev.this_device,
		"%s: VM[%u] service-GVA[%pK] user-GPA[%pK] size[0x%llx]\n",
		__func__, vm->vmid,
		remap_vaddr, (void *)memmap->user_vm_pa, memmap->len);
	return ret;

unset_region:
	kfree(regions_info);
unmap_kernel_map:
	mutex_lock(&vm->regions_mapping_lock);
	vm->regions_mapping_count--;
	mutex_unlock(&vm->regions_mapping_lock);
unmap_no_count:
	vunmap(remap_vaddr);
put_pages:
	for (i = 0; i < pinned; i++)
		unpin_user_page(pages[i]);
free_pages:
	vfree(pages);
	return ret;
}
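/*
 * Sketch (illustrative only): mapping a userspace buffer as User VM RAM
 * through acrn_vm_ram_map(). vma_base must point at a page-aligned
 * buffer in the caller's address space; the pages are pinned with
 * FOLL_LONGTERM, so they stay in place for the whole lifetime of the
 * mapping. ACRN_MEM_ACCESS_RWX is assumed to be the read/write/execute
 * combination defined in acrn_drv.h.
 */
static int __maybe_unused example_map_ram_segment(struct acrn_vm *vm,
						  unsigned long uaddr,
						  u64 user_gpa, u64 len)
{
	struct acrn_vm_memmap memmap = {
		.type		= ACRN_MEMMAP_RAM,
		.user_vm_pa	= user_gpa,
		.vma_base	= uaddr,
		.len		= len,
		.attr		= ACRN_MEM_ACCESS_RWX,
	};

	return acrn_vm_ram_map(vm, &memmap);
}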
/**
 * acrn_vm_all_ram_unmap() - Destroy all RAM EPT mappings of a User VM.
 * @vm:	The User VM
 */
void acrn_vm_all_ram_unmap(struct acrn_vm *vm)
{
	struct vm_memory_mapping *region_mapping;
	int i, j;

	mutex_lock(&vm->regions_mapping_lock);
	for (i = 0; i < vm->regions_mapping_count; i++) {
		region_mapping = &vm->regions_mapping[i];
		vunmap(region_mapping->service_vm_va);
		for (j = 0; j < region_mapping->npages; j++)
			unpin_user_page(region_mapping->pages[j]);
		vfree(region_mapping->pages);
	}
	mutex_unlock(&vm->regions_mapping_lock);
}
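/*
 * Teardown sketch (illustrative only): the order a caller might use
 * when a User VM goes away. acrn_vm_all_ram_unmap() releases every RAM
 * mapping recorded in vm->regions_mapping in one pass; MMIO regions are
 * not tracked in that array, so a region added with acrn_vm_memseg_map()
 * has to be removed individually beforehand.
 */
static void __maybe_unused example_vm_teardown(struct acrn_vm *vm,
					       struct acrn_vm_memmap *mmio)
{
	if (mmio)
		acrn_vm_memseg_unmap(vm, mmio);
	acrn_vm_all_ram_unmap(vm);
}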