// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN: Memory mapping management
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *	Fei Li <lei1.li@intel.com>
 *	Shuo Liu <shuo.a.liu@intel.com>
 */

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "acrn_drv.h"

/*
 * Ask the hypervisor to apply a single memory region operation by wrapping
 * it in a one-entry &struct vm_memory_region_batch.
 */
static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region)
{
	struct vm_memory_region_batch *regions;
	int ret;

	regions = kzalloc(sizeof(*regions), GFP_KERNEL);
	if (!regions)
		return -ENOMEM;

	regions->vmid = vm->vmid;
	regions->regions_num = 1;
	regions->regions_gpa = virt_to_phys(region);

	ret = hcall_set_memory_regions(virt_to_phys(regions));
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Failed to set memory region for VM[%u]!\n", vm->vmid);

	kfree(regions);
	return ret;
}

/**
 * acrn_mm_region_add() - Set up the EPT mapping of a memory region.
 * @vm: User VM.
 * @user_gpa: A GPA of User VM.
 * @service_gpa: A GPA of Service VM.
 * @size: Size of the region.
 * @mem_type: Combination of ACRN_MEM_TYPE_*.
 * @mem_access_right: Combination of ACRN_MEM_ACCESS_*.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,
		       u64 size, u32 mem_type, u32 mem_access_right)
{
	struct vm_memory_region_op *region;
	int ret = 0;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->type = ACRN_MEM_REGION_ADD;
	region->user_vm_pa = user_gpa;
	region->service_vm_pa = service_gpa;
	region->size = size;
	region->attr = ((mem_type & ACRN_MEM_TYPE_MASK) |
			(mem_access_right & ACRN_MEM_ACCESS_RIGHT_MASK));
	ret = modify_region(vm, region);

	dev_dbg(acrn_dev.this_device,
		"%s: user-GPA[%pK] service-GPA[%pK] size[0x%llx].\n",
		__func__, (void *)user_gpa, (void *)service_gpa, size);
	kfree(region);
	return ret;
}

/**
 * acrn_mm_region_del() - Delete the EPT mapping of a memory region.
 * @vm: User VM.
 * @user_gpa: A GPA of the User VM.
 * @size: Size of the region.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size)
{
	struct vm_memory_region_op *region;
	int ret = 0;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->type = ACRN_MEM_REGION_DEL;
	region->user_vm_pa = user_gpa;
	region->service_vm_pa = 0UL;
	region->size = size;
	region->attr = 0U;

	ret = modify_region(vm, region);

	dev_dbg(acrn_dev.this_device, "%s: user-GPA[%pK] size[0x%llx].\n",
		__func__, (void *)user_gpa, size);
	kfree(region);
	return ret;
}

/**
 * acrn_vm_memseg_map() - Map a memory segment of a User VM.
 * @vm: User VM.
 * @memmap: Info of the EPT mapping.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_vm_memseg_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	int ret;

	if (memmap->type == ACRN_MEMMAP_RAM)
		return acrn_vm_ram_map(vm, memmap);

	if (memmap->type != ACRN_MEMMAP_MMIO) {
		dev_dbg(acrn_dev.this_device,
			"Invalid memmap type: %u\n", memmap->type);
		return -EINVAL;
	}

	ret = acrn_mm_region_add(vm, memmap->user_vm_pa,
				 memmap->service_vm_pa, memmap->len,
				 ACRN_MEM_TYPE_UC, memmap->attr);
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Add memory region failed, VM[%u]!\n", vm->vmid);

	return ret;
}

/**
 * acrn_vm_memseg_unmap() - Unmap a memory segment of a User VM.
 * @vm: User VM.
 * @memmap: Info of the EPT mapping.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	int ret;

	if (memmap->type != ACRN_MEMMAP_MMIO) {
		dev_dbg(acrn_dev.this_device,
			"Invalid memmap type: %u\n", memmap->type);
		return -EINVAL;
	}

	ret = acrn_mm_region_del(vm, memmap->user_vm_pa, memmap->len);
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Del memory region failed, VM[%u]!\n", vm->vmid);

	return ret;
}

/**
 * acrn_vm_ram_map() - Create a RAM EPT mapping of a User VM memory region.
 * @vm: The User VM pointer.
 * @memmap: Info of the EPT mapping.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	struct vm_memory_region_batch *regions_info;
	int nr_pages, i = 0, order, nr_regions = 0;
	struct vm_memory_mapping *region_mapping;
	struct vm_memory_region_op *vm_region;
	struct page **pages = NULL, *page;
	void *remap_vaddr;
	int ret, pinned;
	u64 user_vm_pa;

	if (!vm || !memmap)
		return -EINVAL;

	/* Get the number of pages of the memory map region */
	nr_pages = memmap->len >> PAGE_SHIFT;
	pages = vzalloc(nr_pages * sizeof(struct page *));
	if (!pages)
		return -ENOMEM;

	/* Lock the pages of the user memory map region */
	pinned = pin_user_pages_fast(memmap->vma_base,
				     nr_pages, FOLL_WRITE | FOLL_LONGTERM,
				     pages);
	if (pinned < 0) {
		ret = pinned;
		goto free_pages;
	} else if (pinned != nr_pages) {
		ret = -EFAULT;
		goto put_pages;
	}

	/* Create a kernel map for the map region */
	remap_vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!remap_vaddr) {
		ret = -ENOMEM;
		goto put_pages;
	}

	/* Record Service VM va <-> User VM pa mapping */
	mutex_lock(&vm->regions_mapping_lock);
	region_mapping = &vm->regions_mapping[vm->regions_mapping_count];
	if (vm->regions_mapping_count < ACRN_MEM_MAPPING_MAX) {
		region_mapping->pages = pages;
		region_mapping->npages = nr_pages;
		region_mapping->size = memmap->len;
		region_mapping->service_vm_va = remap_vaddr;
		region_mapping->user_vm_pa = memmap->user_vm_pa;
		vm->regions_mapping_count++;
	} else {
		dev_warn(acrn_dev.this_device,
			 "Run out of memory mapping slots!\n");
		ret = -ENOMEM;
		mutex_unlock(&vm->regions_mapping_lock);
		goto unmap_no_count;
	}
	mutex_unlock(&vm->regions_mapping_lock);

	/* Calculate count of vm_memory_region_op */
	while (i < nr_pages) {
		page = pages[i];
		VM_BUG_ON_PAGE(PageTail(page), page);
		order = compound_order(page);
		nr_regions++;
		i += 1 << order;
	}

	/* Prepare the vm_memory_region_batch */
	regions_info = kzalloc(sizeof(*regions_info) +
			       sizeof(*vm_region) * nr_regions,
			       GFP_KERNEL);
	if (!regions_info) {
		ret = -ENOMEM;
		goto unmap_kernel_map;
	}

	/* Fill each vm_memory_region_op */
	vm_region = (struct vm_memory_region_op *)(regions_info + 1);
	regions_info->vmid = vm->vmid;
	regions_info->regions_num = nr_regions;
	regions_info->regions_gpa = virt_to_phys(vm_region);
	user_vm_pa = memmap->user_vm_pa;
	i = 0;
	while (i < nr_pages) {
		u32 region_size;

		page = pages[i];
		VM_BUG_ON_PAGE(PageTail(page), page);
		order = compound_order(page);
		region_size = PAGE_SIZE << order;
		vm_region->type = ACRN_MEM_REGION_ADD;
		vm_region->user_vm_pa = user_vm_pa;
		vm_region->service_vm_pa = page_to_phys(page);
		vm_region->size = region_size;
		vm_region->attr = (ACRN_MEM_TYPE_WB & ACRN_MEM_TYPE_MASK) |
				  (memmap->attr & ACRN_MEM_ACCESS_RIGHT_MASK);

		vm_region++;
		user_vm_pa += region_size;
		i += 1 << order;
	}

	/* Inform the ACRN Hypervisor to set up EPT mappings */
	ret = hcall_set_memory_regions(virt_to_phys(regions_info));
	if (ret < 0) {
		dev_dbg(acrn_dev.this_device,
			"Failed to set regions, VM[%u]!\n", vm->vmid);
		goto unset_region;
	}
	kfree(regions_info);

	dev_dbg(acrn_dev.this_device,
		"%s: VM[%u] service-GVA[%pK] user-GPA[%pK] size[0x%llx]\n",
		__func__, vm->vmid,
		remap_vaddr, (void *)memmap->user_vm_pa, memmap->len);
	return ret;

unset_region:
	kfree(regions_info);
unmap_kernel_map:
	mutex_lock(&vm->regions_mapping_lock);
	vm->regions_mapping_count--;
	mutex_unlock(&vm->regions_mapping_lock);
unmap_no_count:
	vunmap(remap_vaddr);
put_pages:
	for (i = 0; i < pinned; i++)
		unpin_user_page(pages[i]);
free_pages:
	vfree(pages);
	return ret;
}

/**
 * acrn_vm_all_ram_unmap() - Destroy all RAM EPT mappings of a User VM.
 * @vm: The User VM.
 */
void acrn_vm_all_ram_unmap(struct acrn_vm *vm)
{
	struct vm_memory_mapping *region_mapping;
	int i, j;

	mutex_lock(&vm->regions_mapping_lock);
	for (i = 0; i < vm->regions_mapping_count; i++) {
		region_mapping = &vm->regions_mapping[i];
		vunmap(region_mapping->service_vm_va);
		for (j = 0; j < region_mapping->npages; j++)
			unpin_user_page(region_mapping->pages[j]);
		vfree(region_mapping->pages);
	}
	mutex_unlock(&vm->regions_mapping_lock);
}
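
/*
 * Illustrative sketch, not part of the original file: one way a caller could
 * fill in &struct acrn_vm_memmap and hand it to acrn_vm_memseg_map(). The
 * helper name and its parameters are hypothetical; only the structure fields
 * and acrn_vm_memseg_map() itself come from this file.
 */
static int __maybe_unused example_map_user_ram(struct acrn_vm *vm,
					       u64 vma_base, u64 user_gpa,
					       u64 len, u32 access_right)
{
	struct acrn_vm_memmap memmap = {
		.type		= ACRN_MEMMAP_RAM,
		.user_vm_pa	= user_gpa,	/* GPA inside the User VM */
		.vma_base	= vma_base,	/* Service VM user VA of the backing buffer */
		.len		= len,		/* expected to be a multiple of PAGE_SIZE */
		.attr		= access_right,	/* combination of ACRN_MEM_ACCESS_* bits */
	};

	/* For ACRN_MEMMAP_RAM this pins the pages and builds the EPT mapping. */
	return acrn_vm_memseg_map(vm, &memmap);
}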