/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 */

#include <sys/types.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sx.h>
#include <sys/systm.h>

#include <machine/vmm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_mem.h>

static void vm_free_memmap(struct vm *vm, int ident);

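/*
 * Initialize the per-VM memory state: set up the sx lock that serializes
 * access to the memory segment array.
 */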
void
vm_mem_init(struct vm_mem *mem)
{
	sx_init(&mem->mem_segs_lock, "vm_mem_segs");
}

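/*
 * Returns true if memory map slot 'idx' is in use and backed by a system
 * memory segment (as opposed to device memory).
 */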
static bool
sysmem_mapping(struct vm_mem *mem, int idx)
{
	if (mem->mem_maps[idx].len != 0 &&
	    mem->mem_segs[mem->mem_maps[idx].segid].sysmem)
		return (true);
	else
		return (false);
}

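/*
 * Report whether memory segment 'ident' describes system memory.  The caller
 * must hold the memseg lock; out-of-range identifiers are treated as
 * non-system memory.
 */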
bool
vm_memseg_sysmem(struct vm *vm, int ident)
{
	struct vm_mem *mem;

	mem = vm_mem(vm);
	vm_assert_memseg_locked(vm);

	if (ident < 0 || ident >= VM_MAX_MEMSEGS)
		return (false);

	return (mem->mem_segs[ident].sysmem);
}

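/*
 * Remove device memory mappings from the guest address space.  System
 * memory mappings persist across a VM reset and are only removed by
 * vm_mem_destroy().
 */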
void
vm_mem_cleanup(struct vm *vm)
{
	struct vm_mem *mem;

	mem = vm_mem(vm);

	/*
	 * System memory is removed from the guest address space only when
	 * the VM is destroyed. This is because the mapping remains the same
	 * across VM reset.
	 *
	 * Device memory can be relocated by the guest (e.g. using PCI BARs)
	 * so those mappings are removed on a VM reset.
	 */
	for (int i = 0; i < VM_MAX_MEMMAPS; i++) {
		if (!sysmem_mapping(mem, i))
			vm_free_memmap(vm, i);
	}
}

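/*
 * Final teardown: release the remaining (system memory) mappings and all
 * memory segments, then drop and destroy the memseg lock, which the caller
 * must hold exclusively on entry.
 */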
void
vm_mem_destroy(struct vm *vm)
{
	struct vm_mem *mem;

	mem = vm_mem(vm);
	vm_assert_memseg_xlocked(vm);

	for (int i = 0; i < VM_MAX_MEMMAPS; i++) {
		if (sysmem_mapping(mem, i))
			vm_free_memmap(vm, i);
	}

	for (int i = 0; i < VM_MAX_MEMSEGS; i++)
		vm_free_memseg(vm, i);

	sx_xunlock(&mem->mem_segs_lock);
	sx_destroy(&mem->mem_segs_lock);
}

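/*
 * Locking helpers for the memory segment array.  Readers take the sx lock
 * shared; code that creates or destroys segments takes it exclusive.  The
 * assert variants are used by functions that require the lock to be held.
 */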
void
vm_slock_memsegs(struct vm *vm)
{
	sx_slock(&vm_mem(vm)->mem_segs_lock);
}

void
vm_xlock_memsegs(struct vm *vm)
{
	sx_xlock(&vm_mem(vm)->mem_segs_lock);
}

void
vm_unlock_memsegs(struct vm *vm)
{
	sx_unlock(&vm_mem(vm)->mem_segs_lock);
}

void
vm_assert_memseg_locked(struct vm *vm)
{
	sx_assert(&vm_mem(vm)->mem_segs_lock, SX_LOCKED);
}

void
vm_assert_memseg_xlocked(struct vm *vm)
{
	sx_assert(&vm_mem(vm)->mem_segs_lock, SX_XLOCKED);
}

/*
 * Return 'true' if 'gpa' is allocated in the guest address space.
 *
 * This function is called in the context of a running vcpu which acts as
 * an implicit lock on 'vm->mem_maps[]'.
 */
bool
vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa)
{
	struct vm *vm = vcpu_vm(vcpu);
	struct vm_mem_map *mm;
	int i;

#ifdef INVARIANTS
	int hostcpu, state;
	state = vcpu_get_state(vcpu, &hostcpu);
	KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
	    ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
#endif

	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm_mem(vm)->mem_maps[i];
		if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
			return (true);		/* 'gpa' is sysmem or devmem */
	}

	return (false);
}

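/*
 * Allocate memory segment 'ident': create a swap-backed VM object of 'len'
 * bytes (page-aligned) to back it, optionally constrained to the NUMA
 * domains in 'obj_domainset'.  Returns EEXIST if an identical segment is
 * already present and EINVAL if the slot is in use with different
 * parameters.  The caller must hold the memseg lock exclusively.
 */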
int
vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem,
    struct domainset *obj_domainset)
{
	struct vm_mem_seg *seg;
	struct vm_mem *mem;
	vm_object_t obj;

	mem = vm_mem(vm);
	vm_assert_memseg_xlocked(vm);

	if (ident < 0 || ident >= VM_MAX_MEMSEGS)
		return (EINVAL);

	if (len == 0 || (len & PAGE_MASK))
		return (EINVAL);

	seg = &mem->mem_segs[ident];
	if (seg->object != NULL) {
		if (seg->len == len && seg->sysmem == sysmem)
			return (EEXIST);
		else
			return (EINVAL);
	}

	/*
	 * When given an impossible policy, signal an
	 * error to the user.
	 */
	if (obj_domainset != NULL && domainset_empty_vm(obj_domainset))
		return (EINVAL);
	obj = vm_object_allocate(OBJT_SWAP, len >> PAGE_SHIFT);
	if (obj == NULL)
		return (ENOMEM);

	seg->len = len;
	seg->object = obj;
	if (obj_domainset != NULL)
		seg->object->domain.dr_policy = obj_domainset;
	seg->sysmem = sysmem;

	return (0);
}

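/*
 * Look up memory segment 'ident' and return whichever of its length, type
 * and backing object the caller asked for.  The memseg lock must be held.
 */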
int
vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    vm_object_t *objptr)
{
	struct vm_mem *mem;
	struct vm_mem_seg *seg;

	mem = vm_mem(vm);

	vm_assert_memseg_locked(vm);

	if (ident < 0 || ident >= VM_MAX_MEMSEGS)
		return (EINVAL);

	seg = &mem->mem_segs[ident];
	if (len)
		*len = seg->len;
	if (sysmem)
		*sysmem = seg->sysmem;
	if (objptr)
		*objptr = seg->object;
	return (0);
}

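/*
 * Release memory segment 'ident': drop the reference on its backing object
 * and clear the slot.  Freeing an unused slot is a no-op.
 */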
void
vm_free_memseg(struct vm *vm, int ident)
{
	struct vm_mem_seg *seg;

	KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS,
	    ("%s: invalid memseg ident %d", __func__, ident));

	seg = &vm_mem(vm)->mem_segs[ident];
	if (seg->object != NULL) {
		vm_object_deallocate(seg->object);
		bzero(seg, sizeof(struct vm_mem_seg));
	}
}

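/*
 * Map 'len' bytes of memory segment 'segid', starting at offset 'first'
 * within the segment, at guest physical address 'gpa'.  The range is
 * entered into the VM's vmspace and, if VM_MEMMAP_F_WIRED is set, wired
 * into physical memory.  The mapping is recorded in a free mem_maps[]
 * slot; ENOSPC is returned if no slot is available.
 */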
int
vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
    size_t len, int prot, int flags)
{
	struct vm_mem *mem;
	struct vm_mem_seg *seg;
	struct vm_mem_map *m, *map;
	struct vmspace *vmspace;
	vm_ooffset_t last;
	int i, error;

	if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0)
		return (EINVAL);

	if (flags & ~VM_MEMMAP_F_WIRED)
		return (EINVAL);

	if (segid < 0 || segid >= VM_MAX_MEMSEGS)
		return (EINVAL);

	mem = vm_mem(vm);
	seg = &mem->mem_segs[segid];
	if (seg->object == NULL)
		return (EINVAL);

	last = first + len;
	if (first < 0 || first >= last || last > seg->len)
		return (EINVAL);

	if ((gpa | first | last) & PAGE_MASK)
		return (EINVAL);

	map = NULL;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		m = &mem->mem_maps[i];
		if (m->len == 0) {
			map = m;
			break;
		}
	}
	if (map == NULL)
		return (ENOSPC);

	vmspace = vm_vmspace(vm);
	error = vm_map_find(&vmspace->vm_map, seg->object, first, &gpa,
	    len, 0, VMFS_NO_SPACE, prot, prot, 0);
	if (error != KERN_SUCCESS)
		return (EFAULT);

	vm_object_reference(seg->object);

	if (flags & VM_MEMMAP_F_WIRED) {
		error = vm_map_wire(&vmspace->vm_map, gpa, gpa + len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		if (error != KERN_SUCCESS) {
			vm_map_remove(&vmspace->vm_map, gpa, gpa + len);
			return (error == KERN_RESOURCE_SHORTAGE ? ENOMEM :
			    EFAULT);
		}
	}

	map->gpa = gpa;
	map->len = len;
	map->segoff = first;
	map->segid = segid;
	map->prot = prot;
	map->flags = flags;
	return (0);
}

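/*
 * Remove the memory map that covers exactly [gpa, gpa + len).  Mappings
 * marked VM_MEMMAP_F_IOMMU (where that flag exists) are skipped.  Returns
 * EINVAL if no such mapping exists.
 */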
int
vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	struct vm_mem *mem;
	struct vm_mem_map *m;
	int i;

	mem = vm_mem(vm);
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		m = &mem->mem_maps[i];
#ifdef VM_MEMMAP_F_IOMMU
		if ((m->flags & VM_MEMMAP_F_IOMMU) != 0)
			continue;
#endif
		if (m->gpa == gpa && m->len == len) {
			vm_free_memmap(vm, i);
			return (0);
		}
	}

	return (EINVAL);
}

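/*
 * Iterator over the memory maps: find the mapping with the lowest guest
 * physical address that is >= '*gpa' and return its attributes, updating
 * '*gpa' to that address.  Returns ENOENT once no further mappings exist.
 * Callers typically start with '*gpa' of 0 and pass the previous mapping's
 * gpa + len to walk all mappings in address order.
 */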
int
vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
	struct vm_mem *mem;
	struct vm_mem_map *mm, *mmnext;
	int i;

	mem = vm_mem(vm);

	mmnext = NULL;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &mem->mem_maps[i];
		if (mm->len == 0 || mm->gpa < *gpa)
			continue;
		if (mmnext == NULL || mm->gpa < mmnext->gpa)
			mmnext = mm;
	}

	if (mmnext != NULL) {
		*gpa = mmnext->gpa;
		if (segid)
			*segid = mmnext->segid;
		if (segoff)
			*segoff = mmnext->segoff;
		if (len)
			*len = mmnext->len;
		if (prot)
			*prot = mmnext->prot;
		if (flags)
			*flags = mmnext->flags;
		return (0);
	} else {
		return (ENOENT);
	}
}

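/*
 * Tear down memory map slot 'ident': remove the guest physical range from
 * the VM's vmspace and clear the slot.  Unused slots are left untouched.
 */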
static void
vm_free_memmap(struct vm *vm, int ident)
{
	struct vm_mem_map *mm;
	int error __diagused;

	mm = &vm_mem(vm)->mem_maps[ident];
	if (mm->len) {
		error = vm_map_remove(&vm_vmspace(vm)->vm_map, mm->gpa,
		    mm->gpa + mm->len);
		KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d",
		    __func__, error));
		bzero(mm, sizeof(struct vm_mem_map));
	}
}

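/*
 * Return the guest physical address immediately past the highest system
 * memory mapping, or 0 if no system memory is mapped.
 */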
vm_paddr_t
vmm_sysmem_maxaddr(struct vm *vm)
{
	struct vm_mem *mem;
	struct vm_mem_map *mm;
	vm_paddr_t maxaddr;
	int i;

	mem = vm_mem(vm);
	maxaddr = 0;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &mem->mem_maps[i];
		if (sysmem_mapping(mem, i)) {
			if (maxaddr < mm->gpa + mm->len)
				maxaddr = mm->gpa + mm->len;
		}
	}
	return (maxaddr);
}

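/*
 * Common helper for vm_gpa_hold() and vm_gpa_hold_global(): fault in and
 * wire the single guest page containing 'gpa', returning a host virtual
 * address (via the direct map) for the requested range and a cookie for
 * vm_gpa_release().  The range must not cross a page boundary.  Returns
 * NULL if 'gpa' is not covered by any memory map or the access check fails.
 */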
static void *
_vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
	struct vm_mem_map *mm;
	vm_page_t m;
	int i, count, pageoff;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = 0;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm_mem(vm)->mem_maps[i];
		if (gpa >= mm->gpa && gpa < mm->gpa + mm->len) {
			count = vm_fault_quick_hold_pages(
			    &vm_vmspace(vm)->vm_map, trunc_page(gpa),
			    PAGE_SIZE, reqprot, &m, 1);
			break;
		}
	}

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}

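/*
 * Hold a guest page on behalf of a vcpu.  The vcpu must be frozen so that
 * the memory map array cannot change underneath the caller.
 */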
void *
vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
#ifdef INVARIANTS
	/*
	 * The current vcpu should be frozen to ensure 'vm_memmap[]'
	 * stability.
	 */
	int state = vcpu_get_state(vcpu, NULL);
	KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
	    __func__, state));
#endif
	return (_vm_gpa_hold(vcpu_vm(vcpu), gpa, len, reqprot, cookie));
}

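/*
 * Hold a guest page without a vcpu context; the caller must instead hold
 * the memseg lock to keep the memory maps stable.
 */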
void *
vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
	vm_assert_memseg_locked(vm);
	return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie));
}

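/*
 * Release a page held by vm_gpa_hold() or vm_gpa_hold_global(), using the
 * cookie returned by those functions.
 */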
void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_unwire(m, PQ_ACTIVE);
}