// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN: Memory mapping management
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *	Fei Li <lei1.li@intel.com>
 *	Shuo Liu <shuo.a.liu@intel.com>
 */

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "acrn_drv.h"

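/*
 * modify_region() - Pass a single region operation to the hypervisor.
 *
 * The hypercall consumes the batch descriptor and the region operation by
 * physical address (virt_to_phys()), so both structures must live in
 * direct-mapped kernel memory (kzalloc()), not in vmalloc()'d memory.
 */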
static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region)
{
	struct vm_memory_region_batch *regions;
	int ret;

	regions = kzalloc(sizeof(*regions), GFP_KERNEL);
	if (!regions)
		return -ENOMEM;

	regions->vmid = vm->vmid;
	regions->regions_num = 1;
	regions->regions_gpa = virt_to_phys(region);

	ret = hcall_set_memory_regions(virt_to_phys(regions));
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Failed to set memory region for VM[%u]!\n", vm->vmid);

	kfree(regions);
	return ret;
}

/**
 * acrn_mm_region_add() - Set up the EPT mapping of a memory region.
 * @vm:			User VM.
 * @user_gpa:		A GPA of the User VM.
 * @service_gpa:	A GPA of the Service VM.
 * @size:		Size of the region.
 * @mem_type:		Combination of ACRN_MEM_TYPE_*.
 * @mem_access_right:	Combination of ACRN_MEM_ACCESS_*.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,
		       u64 size, u32 mem_type, u32 mem_access_right)
{
	struct vm_memory_region_op *region;
	int ret = 0;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->type = ACRN_MEM_REGION_ADD;
	region->user_vm_pa = user_gpa;
	region->service_vm_pa = service_gpa;
	region->size = size;
	region->attr = ((mem_type & ACRN_MEM_TYPE_MASK) |
			(mem_access_right & ACRN_MEM_ACCESS_RIGHT_MASK));
	ret = modify_region(vm, region);

	dev_dbg(acrn_dev.this_device,
		"%s: user-GPA[%pK] service-GPA[%pK] size[0x%llx].\n",
		__func__, (void *)user_gpa, (void *)service_gpa, size);
	kfree(region);
	return ret;
}

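/*
 * Usage sketch (illustrative only, mirrors acrn_vm_memseg_map() below):
 * mapping a passthrough MMIO range of a User VM as uncached memory.
 *
 *	ret = acrn_mm_region_add(vm, memmap->user_vm_pa,
 *				 memmap->service_vm_pa, memmap->len,
 *				 ACRN_MEM_TYPE_UC, memmap->attr);
 *	if (ret < 0)
 *		dev_dbg(acrn_dev.this_device, "Add memory region failed!\n");
 */
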
/**
 * acrn_mm_region_del() - Delete the EPT mapping of a memory region.
 * @vm:		User VM.
 * @user_gpa:	A GPA of the User VM.
 * @size:	Size of the region.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size)
{
	struct vm_memory_region_op *region;
	int ret = 0;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->type = ACRN_MEM_REGION_DEL;
	region->user_vm_pa = user_gpa;
	region->service_vm_pa = 0UL;
	region->size = size;
	region->attr = 0U;

	ret = modify_region(vm, region);

	dev_dbg(acrn_dev.this_device, "%s: user-GPA[%pK] size[0x%llx].\n",
		__func__, (void *)user_gpa, size);
	kfree(region);
	return ret;
}

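/*
 * Usage sketch (illustrative only, mirrors acrn_vm_memseg_unmap() below):
 * tearing down the mapping established in the sketch above.
 *
 *	ret = acrn_mm_region_del(vm, memmap->user_vm_pa, memmap->len);
 */
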
int acrn_vm_memseg_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	int ret;

	if (memmap->type == ACRN_MEMMAP_RAM)
		return acrn_vm_ram_map(vm, memmap);

	if (memmap->type != ACRN_MEMMAP_MMIO) {
		dev_dbg(acrn_dev.this_device,
			"Invalid memmap type: %u\n", memmap->type);
		return -EINVAL;
	}

	ret = acrn_mm_region_add(vm, memmap->user_vm_pa,
				 memmap->service_vm_pa, memmap->len,
				 ACRN_MEM_TYPE_UC, memmap->attr);
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Add memory region failed, VM[%u]!\n", vm->vmid);

	return ret;
}

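/*
 * Illustrative request as it might arrive from the HSM ioctl path; the
 * addresses, length and access flags below are made-up examples (the flags
 * are drawn from the ACRN_MEM_ACCESS_* family referenced in the kernel-doc
 * above), not values taken from a real platform.
 *
 *	struct acrn_vm_memmap memmap = {
 *		.type		= ACRN_MEMMAP_MMIO,
 *		.user_vm_pa	= 0xc0000000UL,
 *		.service_vm_pa	= 0xfed00000UL,
 *		.len		= 0x1000UL,
 *		.attr		= ACRN_MEM_ACCESS_READ | ACRN_MEM_ACCESS_WRITE,
 *	};
 *
 *	ret = acrn_vm_memseg_map(vm, &memmap);
 */
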
int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	int ret;

	if (memmap->type != ACRN_MEMMAP_MMIO) {
		dev_dbg(acrn_dev.this_device,
			"Invalid memmap type: %u\n", memmap->type);
		return -EINVAL;
	}

	ret = acrn_mm_region_del(vm, memmap->user_vm_pa, memmap->len);
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Del memory region failed, VM[%u]!\n", vm->vmid);

	return ret;
}

/**
 * acrn_vm_ram_map() - Create a RAM EPT mapping of a User VM.
 * @vm:		The User VM pointer
 * @memmap:	Info of the EPT mapping
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	struct vm_memory_region_batch *regions_info;
	int nr_pages, i, order, nr_regions = 0;
	struct vm_memory_mapping *region_mapping;
	struct vm_memory_region_op *vm_region;
	struct page **pages = NULL, *page;
	void *remap_vaddr;
	int ret, pinned;
	u64 user_vm_pa;
	struct vm_area_struct *vma;

	if (!vm || !memmap)
		return -EINVAL;

	/* Get the number of pages in the mapped region */
	nr_pages = memmap->len >> PAGE_SHIFT;
	if (!nr_pages)
		return -EINVAL;

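	/*
	 * Fast path for VM_PFNMAP VMAs (e.g. device MMIO remapped into user
	 * space): walk the page tables to find the backing PFNs, require the
	 * range to be writable (if write access was requested), non-refcounted
	 * and physically contiguous, and map it as a single write-back region.
	 */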
	mmap_read_lock(current->mm);
	vma = vma_lookup(current->mm, memmap->vma_base);
	if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
		unsigned long start_pfn, cur_pfn;
		spinlock_t *ptl;
		bool writable;
		pte_t *ptep;

		if ((memmap->vma_base + memmap->len) > vma->vm_end) {
			mmap_read_unlock(current->mm);
			return -EINVAL;
		}

		for (i = 0; i < nr_pages; i++) {
			ret = follow_pte(vma, memmap->vma_base + i * PAGE_SIZE,
					 &ptep, &ptl);
			if (ret)
				break;

			cur_pfn = pte_pfn(ptep_get(ptep));
			if (i == 0)
				start_pfn = cur_pfn;
			writable = !!pte_write(ptep_get(ptep));
			pte_unmap_unlock(ptep, ptl);

			/* Disallow write access if the PTE is not writable. */
			if (!writable &&
			    (memmap->attr & ACRN_MEM_ACCESS_WRITE)) {
				ret = -EFAULT;
				break;
			}

			/* Disallow refcounted pages. */
			if (pfn_valid(cur_pfn) &&
			    !PageReserved(pfn_to_page(cur_pfn))) {
				ret = -EFAULT;
				break;
			}

			/* Disallow non-contiguous ranges. */
			if (cur_pfn != start_pfn + i) {
				ret = -EINVAL;
				break;
			}
		}
		mmap_read_unlock(current->mm);

		if (ret) {
			dev_dbg(acrn_dev.this_device,
				"Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base);
			return ret;
		}

		return acrn_mm_region_add(vm, memmap->user_vm_pa,
			 PFN_PHYS(start_pfn), memmap->len,
			 ACRN_MEM_TYPE_WB, memmap->attr);
	}
	mmap_read_unlock(current->mm);

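	/*
	 * Slow path for ordinary RAM: pin the backing pages for the long term
	 * (they stay mapped into the guest until teardown), vmap() them so the
	 * Service VM can access guest memory directly, and batch one region
	 * operation per pinned (compound) page.
	 */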
	pages = vzalloc(array_size(nr_pages, sizeof(*pages)));
	if (!pages)
		return -ENOMEM;

	/* Pin the pages of the user memory map region */
	pinned = pin_user_pages_fast(memmap->vma_base,
				     nr_pages, FOLL_WRITE | FOLL_LONGTERM,
				     pages);
	if (pinned < 0) {
		ret = pinned;
		goto free_pages;
	} else if (pinned != nr_pages) {
		ret = -EFAULT;
		goto put_pages;
	}

	/* Create a kernel map for the map region */
	remap_vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!remap_vaddr) {
		ret = -ENOMEM;
		goto put_pages;
	}

	/* Record Service VM va <-> User VM pa mapping */
	mutex_lock(&vm->regions_mapping_lock);
	region_mapping = &vm->regions_mapping[vm->regions_mapping_count];
	if (vm->regions_mapping_count < ACRN_MEM_MAPPING_MAX) {
		region_mapping->pages = pages;
		region_mapping->npages = nr_pages;
		region_mapping->size = memmap->len;
		region_mapping->service_vm_va = remap_vaddr;
		region_mapping->user_vm_pa = memmap->user_vm_pa;
		vm->regions_mapping_count++;
	} else {
		dev_warn(acrn_dev.this_device,
			"Run out of memory mapping slots!\n");
		ret = -ENOMEM;
		mutex_unlock(&vm->regions_mapping_lock);
		goto unmap_no_count;
	}
	mutex_unlock(&vm->regions_mapping_lock);

	/* Count the vm_memory_region_op entries: one per (compound) page */
	for (i = 0; i < nr_pages; i += 1 << order) {
		page = pages[i];
		VM_BUG_ON_PAGE(PageTail(page), page);
		order = compound_order(page);
		nr_regions++;
	}

	/* Prepare the vm_memory_region_batch */
	regions_info = kzalloc(struct_size(regions_info, regions_op,
					   nr_regions), GFP_KERNEL);
	if (!regions_info) {
		ret = -ENOMEM;
		goto unmap_kernel_map;
	}
	regions_info->regions_num = nr_regions;

	/* Fill each vm_memory_region_op */
	vm_region = regions_info->regions_op;
	regions_info->vmid = vm->vmid;
	regions_info->regions_gpa = virt_to_phys(vm_region);
	user_vm_pa = memmap->user_vm_pa;
	for (i = 0; i < nr_pages; i += 1 << order) {
		u32 region_size;

		page = pages[i];
		VM_BUG_ON_PAGE(PageTail(page), page);
		order = compound_order(page);
		region_size = PAGE_SIZE << order;
		vm_region->type = ACRN_MEM_REGION_ADD;
		vm_region->user_vm_pa = user_vm_pa;
		vm_region->service_vm_pa = page_to_phys(page);
		vm_region->size = region_size;
		vm_region->attr = (ACRN_MEM_TYPE_WB & ACRN_MEM_TYPE_MASK) |
				  (memmap->attr & ACRN_MEM_ACCESS_RIGHT_MASK);

		vm_region++;
		user_vm_pa += region_size;
	}

	/* Inform the ACRN Hypervisor to set up EPT mappings */
	ret = hcall_set_memory_regions(virt_to_phys(regions_info));
	if (ret < 0) {
		dev_dbg(acrn_dev.this_device,
			"Failed to set regions, VM[%u]!\n", vm->vmid);
		goto unset_region;
	}
	kfree(regions_info);

	dev_dbg(acrn_dev.this_device,
		"%s: VM[%u] service-GVA[%pK] user-GPA[%pK] size[0x%llx]\n",
		__func__, vm->vmid,
		remap_vaddr, (void *)memmap->user_vm_pa, memmap->len);
	return ret;

unset_region:
	kfree(regions_info);
unmap_kernel_map:
	mutex_lock(&vm->regions_mapping_lock);
	vm->regions_mapping_count--;
	mutex_unlock(&vm->regions_mapping_lock);
unmap_no_count:
	vunmap(remap_vaddr);
put_pages:
	for (i = 0; i < pinned; i++)
		unpin_user_page(pages[i]);
free_pages:
	vfree(pages);
	return ret;
}

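/*
 * Illustration of the batching above (the numbers are only an example): a
 * 4 MiB pinned range backed by two 2 MiB transparent huge pages yields
 * nr_regions == 2, so the hypervisor receives two region operations of
 * PAGE_SIZE << 9 bytes each instead of 1024 individual 4 KiB operations.
 */
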
/**
 * acrn_vm_all_ram_unmap() - Destroy all RAM EPT mappings of a User VM.
 * @vm:	The User VM
 */
void acrn_vm_all_ram_unmap(struct acrn_vm *vm)
{
	struct vm_memory_mapping *region_mapping;
	int i, j;

	mutex_lock(&vm->regions_mapping_lock);
	for (i = 0; i < vm->regions_mapping_count; i++) {
		region_mapping = &vm->regions_mapping[i];
		vunmap(region_mapping->service_vm_va);
		for (j = 0; j < region_mapping->npages; j++)
			unpin_user_page(region_mapping->pages[j]);
		vfree(region_mapping->pages);
	}
	mutex_unlock(&vm->regions_mapping_lock);
}