xref: /freebsd/sys/dev/vmm/vmm_mem.c (revision c76c2a19ae3763d17aa6a60a5831ed24cbc16e83)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 */

#include <sys/types.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/systm.h>

#include <machine/vmm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_mem.h>

static void vm_free_memmap(struct vm *vm, int ident);

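/*
 * Initialize the VM's memory state: set up the lock that protects the
 * memory segment and memory map arrays.
 */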
void
vm_mem_init(struct vm_mem *mem)
{
	sx_init(&mem->mem_segs_lock, "vm_mem_segs");
}

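/*
 * Return true if memory map slot 'idx' is in use and is backed by a
 * system memory segment (as opposed to device memory).
 */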
static bool
sysmem_mapping(struct vm_mem *mem, int idx)
{
	if (mem->mem_maps[idx].len != 0 &&
	    mem->mem_segs[mem->mem_maps[idx].segid].sysmem)
		return (true);
	else
		return (false);
}

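/*
 * Return whether memory segment 'ident' describes system memory;
 * out-of-range identifiers return false.  The memseg lock must be held.
 */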
bool
vm_memseg_sysmem(struct vm *vm, int ident)
{
	struct vm_mem *mem;

	mem = vm_mem(vm);
	vm_assert_memseg_locked(vm);

	if (ident < 0 || ident >= VM_MAX_MEMSEGS)
		return (false);

	return (mem->mem_segs[ident].sysmem);
}

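/*
 * Tear down the non-system (device) memory mappings on a VM reset;
 * system memory mappings are only removed when the VM is destroyed
 * (see the comment below).
 */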
void
vm_mem_cleanup(struct vm *vm)
{
	struct vm_mem *mem;

	mem = vm_mem(vm);

	/*
	 * System memory is removed from the guest address space only when
	 * the VM is destroyed. This is because the mapping remains the same
	 * across VM reset.
	 *
	 * Device memory can be relocated by the guest (e.g. using PCI BARs)
	 * so those mappings are removed on a VM reset.
	 */
	for (int i = 0; i < VM_MAX_MEMMAPS; i++) {
		if (!sysmem_mapping(mem, i))
			vm_free_memmap(vm, i);
	}
}

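/*
 * Final teardown at VM destruction: remove the remaining (system memory)
 * mappings, free all memory segments, and destroy the memseg lock.  The
 * caller must hold the lock exclusively; it is released here.
 */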
void
vm_mem_destroy(struct vm *vm)
{
	struct vm_mem *mem;

	mem = vm_mem(vm);
	vm_assert_memseg_xlocked(vm);

	for (int i = 0; i < VM_MAX_MEMMAPS; i++) {
		if (sysmem_mapping(mem, i))
			vm_free_memmap(vm, i);
	}

	for (int i = 0; i < VM_MAX_MEMSEGS; i++)
		vm_free_memseg(vm, i);

	sx_xunlock(&mem->mem_segs_lock);
	sx_destroy(&mem->mem_segs_lock);
}

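/*
 * Helpers to acquire, release, and assert ownership of the memseg lock.
 */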
void
vm_slock_memsegs(struct vm *vm)
{
	sx_slock(&vm_mem(vm)->mem_segs_lock);
}

void
vm_xlock_memsegs(struct vm *vm)
{
	sx_xlock(&vm_mem(vm)->mem_segs_lock);
}

void
vm_unlock_memsegs(struct vm *vm)
{
	sx_unlock(&vm_mem(vm)->mem_segs_lock);
}

void
vm_assert_memseg_locked(struct vm *vm)
{
	sx_assert(&vm_mem(vm)->mem_segs_lock, SX_LOCKED);
}

void
vm_assert_memseg_xlocked(struct vm *vm)
{
	sx_assert(&vm_mem(vm)->mem_segs_lock, SX_XLOCKED);
}

/*
 * Return 'true' if 'gpa' is allocated in the guest address space.
 *
 * This function is called in the context of a running vcpu which acts as
 * an implicit lock on 'vm->mem_maps[]'.
 */
bool
vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa)
{
	struct vm *vm = vcpu_vm(vcpu);
	struct vm_mem_map *mm;
	int i;

#ifdef INVARIANTS
	int hostcpu, state;
	state = vcpu_get_state(vcpu, &hostcpu);
	KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
	    ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
#endif

	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm_mem(vm)->mem_maps[i];
		if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
			return (true);		/* 'gpa' is sysmem or devmem */
	}

	return (false);
}

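/*
 * Allocate memory segment 'ident' of 'len' bytes, backed by an anonymous
 * (swap-backed) VM object.  'len' must be a non-zero multiple of the page
 * size.  Returns EEXIST if an identical segment is already allocated and
 * EINVAL if a conflicting one is.
 */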
int
vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem)
{
	struct vm_mem *mem;
	struct vm_mem_seg *seg;
	vm_object_t obj;

	mem = vm_mem(vm);
	vm_assert_memseg_xlocked(vm);

	if (ident < 0 || ident >= VM_MAX_MEMSEGS)
		return (EINVAL);

	if (len == 0 || (len & PAGE_MASK))
		return (EINVAL);

	seg = &mem->mem_segs[ident];
	if (seg->object != NULL) {
		if (seg->len == len && seg->sysmem == sysmem)
			return (EEXIST);
		else
			return (EINVAL);
	}

	obj = vm_object_allocate(OBJT_SWAP, len >> PAGE_SHIFT);
	if (obj == NULL)
		return (ENOMEM);

	seg->len = len;
	seg->object = obj;
	seg->sysmem = sysmem;
	return (0);
}

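/*
 * Look up memory segment 'ident' and return its length, type, and backing
 * object through the optional out parameters.
 */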
int
vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    vm_object_t *objptr)
{
	struct vm_mem *mem;
	struct vm_mem_seg *seg;

	mem = vm_mem(vm);

	vm_assert_memseg_locked(vm);

	if (ident < 0 || ident >= VM_MAX_MEMSEGS)
		return (EINVAL);

	seg = &mem->mem_segs[ident];
	if (len)
		*len = seg->len;
	if (sysmem)
		*sysmem = seg->sysmem;
	if (objptr)
		*objptr = seg->object;
	return (0);
}

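/*
 * Release the backing object of memory segment 'ident', if any, and clear
 * the segment.
 */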
void
vm_free_memseg(struct vm *vm, int ident)
{
	struct vm_mem_seg *seg;

	KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS,
	    ("%s: invalid memseg ident %d", __func__, ident));

	seg = &vm_mem(vm)->mem_segs[ident];
	if (seg->object != NULL) {
		vm_object_deallocate(seg->object);
		bzero(seg, sizeof(struct vm_mem_seg));
	}
}

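/*
 * Map 'len' bytes of memory segment 'segid', starting at segment offset
 * 'first', into the guest address space at 'gpa'.  The mapping is recorded
 * in a free memory map slot and is optionally wired (VM_MEMMAP_F_WIRED).
 */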
int
vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
    size_t len, int prot, int flags)
{
	struct vm_mem *mem;
	struct vm_mem_seg *seg;
	struct vm_mem_map *m, *map;
	struct vmspace *vmspace;
	vm_ooffset_t last;
	int i, error;

	if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0)
		return (EINVAL);

	if (flags & ~VM_MEMMAP_F_WIRED)
		return (EINVAL);

	if (segid < 0 || segid >= VM_MAX_MEMSEGS)
		return (EINVAL);

	mem = vm_mem(vm);
	seg = &mem->mem_segs[segid];
	if (seg->object == NULL)
		return (EINVAL);

	last = first + len;
	if (first < 0 || first >= last || last > seg->len)
		return (EINVAL);

	if ((gpa | first | last) & PAGE_MASK)
		return (EINVAL);

	map = NULL;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		m = &mem->mem_maps[i];
		if (m->len == 0) {
			map = m;
			break;
		}
	}
	if (map == NULL)
		return (ENOSPC);

	vmspace = vm_vmspace(vm);
	error = vm_map_find(&vmspace->vm_map, seg->object, first, &gpa,
	    len, 0, VMFS_NO_SPACE, prot, prot, 0);
	if (error != KERN_SUCCESS)
		return (EFAULT);

	vm_object_reference(seg->object);

	if (flags & VM_MEMMAP_F_WIRED) {
		error = vm_map_wire(&vmspace->vm_map, gpa, gpa + len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		if (error != KERN_SUCCESS) {
			vm_map_remove(&vmspace->vm_map, gpa, gpa + len);
			return (error == KERN_RESOURCE_SHORTAGE ? ENOMEM :
			    EFAULT);
		}
	}

	map->gpa = gpa;
	map->len = len;
	map->segoff = first;
	map->segid = segid;
	map->prot = prot;
	map->flags = flags;
	return (0);
}

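/*
 * Remove the guest mapping that starts at 'gpa' and is exactly 'len' bytes
 * long.  Mappings flagged VM_MEMMAP_F_IOMMU (where supported) are not
 * eligible for removal.
 */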
int
vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	struct vm_mem *mem;
	struct vm_mem_map *m;
	int i;

	mem = vm_mem(vm);
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		m = &mem->mem_maps[i];
#ifdef VM_MEMMAP_F_IOMMU
		if ((m->flags & VM_MEMMAP_F_IOMMU) != 0)
			continue;
#endif
		if (m->gpa == gpa && m->len == len) {
			vm_free_memmap(vm, i);
			return (0);
		}
	}

	return (EINVAL);
}

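/*
 * Find the memory mapping with the lowest guest physical address that is
 * greater than or equal to '*gpa' and return its attributes through the
 * optional out parameters.  Returns ENOENT if there is no such mapping.
 */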
int
vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
	struct vm_mem *mem;
	struct vm_mem_map *mm, *mmnext;
	int i;

	mem = vm_mem(vm);

	mmnext = NULL;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &mem->mem_maps[i];
		if (mm->len == 0 || mm->gpa < *gpa)
			continue;
		if (mmnext == NULL || mm->gpa < mmnext->gpa)
			mmnext = mm;
	}

	if (mmnext != NULL) {
		*gpa = mmnext->gpa;
		if (segid)
			*segid = mmnext->segid;
		if (segoff)
			*segoff = mmnext->segoff;
		if (len)
			*len = mmnext->len;
		if (prot)
			*prot = mmnext->prot;
		if (flags)
			*flags = mmnext->flags;
		return (0);
	} else {
		return (ENOENT);
	}
}

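/*
 * Remove memory map slot 'ident' from the guest address space and clear it.
 */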
static void
vm_free_memmap(struct vm *vm, int ident)
{
	struct vm_mem_map *mm;
	int error __diagused;

	mm = &vm_mem(vm)->mem_maps[ident];
	if (mm->len) {
		error = vm_map_remove(&vm_vmspace(vm)->vm_map, mm->gpa,
		    mm->gpa + mm->len);
		KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d",
		    __func__, error));
		bzero(mm, sizeof(struct vm_mem_map));
	}
}

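/*
 * Return the highest guest physical address covered by a system memory
 * mapping, or 0 if there are none.
 */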
vm_paddr_t
vmm_sysmem_maxaddr(struct vm *vm)
{
	struct vm_mem *mem;
	struct vm_mem_map *mm;
	vm_paddr_t maxaddr;
	int i;

	mem = vm_mem(vm);
	maxaddr = 0;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &mem->mem_maps[i];
		if (sysmem_mapping(mem, i)) {
			if (maxaddr < mm->gpa + mm->len)
				maxaddr = mm->gpa + mm->len;
		}
	}
	return (maxaddr);
}

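/*
 * Wire the host page backing guest physical address 'gpa' and return a
 * pointer to it via the direct map.  'len' must not cross a page boundary.
 * The wired page is returned through '*cookie' for a later vm_gpa_release().
 */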
static void *
_vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
	struct vm_mem_map *mm;
	vm_page_t m;
	int i, count, pageoff;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = 0;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm_mem(vm)->mem_maps[i];
		if (gpa >= mm->gpa && gpa < mm->gpa + mm->len) {
			count = vm_fault_quick_hold_pages(
			    &vm_vmspace(vm)->vm_map, trunc_page(gpa),
			    PAGE_SIZE, reqprot, &m, 1);
			break;
		}
	}

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}

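/*
 * Hold the page backing 'gpa' on behalf of 'vcpu'.  The vcpu must be
 * frozen so that the VM's memory maps cannot change underneath us.
 */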
void *
vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
#ifdef INVARIANTS
	/*
	 * The current vcpu should be frozen to ensure 'vm->mem_maps[]'
	 * stability.
	 */
	int state = vcpu_get_state(vcpu, NULL);
	KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
	    __func__, state));
#endif
	return (_vm_gpa_hold(vcpu_vm(vcpu), gpa, len, reqprot, cookie));
}

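/*
 * Variant of vm_gpa_hold() that does not require a vcpu; the caller must
 * hold the memseg lock to keep the memory maps stable.
 */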
void *
vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
	vm_assert_memseg_locked(vm);
	return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie));
}

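/*
 * Release a page wired by vm_gpa_hold() or vm_gpa_hold_global().
 */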
void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_unwire(m, PQ_ACTIVE);
}
460