xref: /freebsd/sys/dev/vmm/vmm_mem.h (revision 059b0b7046639121f3dca48f5de051e019f9d57c)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2011 NetApp, Inc.
5  * All rights reserved.
6  */
7 
8 #ifndef _DEV_VMM_MEM_H_
9 #define	_DEV_VMM_MEM_H_
10 
/* Maximum number of NUMA domains in a guest. */
#define VM_MAXMEMDOM 8
/* One system-memory segment identifier is reserved per NUMA domain. */
#define VM_MAXSYSMEM VM_MAXMEMDOM

/*
 * Identifiers for memory segments.
 * Each guest NUMA domain is represented by a single system
 * memory segment from [VM_SYSMEM, VM_MAXSYSMEM).
 * The remaining identifiers can be used to create devmem segments.
 */
enum {
	VM_SYSMEM = 0,
	VM_BOOTROM = VM_MAXSYSMEM,
	VM_FRAMEBUFFER,
	VM_PCIROM,
	VM_MEMSEG_END		/* must be last: doubles as the segment count */
};

/* Size of the per-VM segment table (see struct vm_mem below). */
#define	VM_MAX_MEMSEGS	VM_MEMSEG_END
/* Size of the per-VM mapping table; allows up to two mappings per segment. */
#define	VM_MAX_MEMMAPS	(VM_MAX_MEMSEGS * 2)
31 
32 #ifdef _KERNEL
33 
34 #include <sys/types.h>
35 #include <sys/_sx.h>
36 
/*
 * Forward declarations: this header only uses pointers to these types,
 * so the full definitions are not required.
 *
 * NOTE(review): domainset, vcpu and vm_guest_paging were previously
 * undeclared at file scope even though the prototypes below take pointers
 * to them; a struct tag that first appears inside a parameter list has
 * prototype scope only (C11 6.2.1p4), which draws compiler warnings and
 * yields types incompatible with the real file-scope tags.
 */
struct domainset;
struct vcpu;
struct vm;
struct vm_guest_paging;
struct vm_object;
struct vmspace;
40 
/*
 * A memory segment: a chunk of host memory that backs (part of) the
 * guest's physical address space.  Segments are identified by the enum
 * constants above and mapped into the guest via struct vm_mem_map.
 */
struct vm_mem_seg {
	size_t	len;			/* segment length in bytes */
	bool	sysmem;			/* guest system memory (vs. devmem) */
	struct vm_object *object;	/* backing VM object */
};
46 
/*
 * A mapping of a range of a memory segment into the guest physical
 * address space.
 */
struct vm_mem_map {
	vm_paddr_t	gpa;	/* guest-physical base address */
	size_t		len;	/* length of the mapping in bytes */
	vm_ooffset_t	segoff;	/* starting offset within the segment */
	int		segid;	/* identifier of the backing segment */
	int		prot;	/* protection bits for the mapping */
	int		flags;
};
55 
/*
 * Per-VM memory state: the segment and mapping tables, the lock that
 * protects them, and the host vmspace representing the guest physical
 * address space.
 */
struct vm_mem {
	struct vm_mem_map	mem_maps[VM_MAX_MEMMAPS];
	struct vm_mem_seg	mem_segs[VM_MAX_MEMSEGS];
	struct sx		mem_segs_lock;	/* protects mem_segs/mem_maps */
	struct vmspace		*mem_vmspace;
};
62 
/*
 * Initialize a VM's memory state covering guest-physical addresses
 * [lo, hi); returns 0 on success or an error number.
 * NOTE(review): the [lo, hi) range semantics are inferred from the
 * parameter names -- confirm against the implementation.
 */
int	vm_mem_init(struct vm_mem *mem, vm_offset_t lo, vm_offset_t hi);
/* Tear down a VM's memory state (cleanup vs. final destruction). */
void	vm_mem_cleanup(struct vm *vm);
void	vm_mem_destroy(struct vm *vm);

/* Return the host vmspace backing the guest physical address space. */
struct vmspace *vm_vmspace(struct vm *vm);
68 
/*
 * APIs that modify the guest memory map require all vcpus to be frozen.
 */
/* Acquire/release the memseg lock, shared (slock) or exclusive (xlock). */
void vm_slock_memsegs(struct vm *vm);
void vm_xlock_memsegs(struct vm *vm);
void vm_unlock_memsegs(struct vm *vm);
/* Assert the memseg lock is held (any mode / exclusively). */
void vm_assert_memseg_locked(struct vm *vm);
void vm_assert_memseg_xlocked(struct vm *vm);
/*
 * Map [off, off + len) of segment 'segid' at guest-physical address
 * 'gpa' with the given protection and flags.
 */
int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot, int flags);
/* Remove the guest-physical mapping covering [gpa, gpa + len). */
int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len);
/*
 * Allocate a segment of 'len' bytes under identifier 'ident'.
 * 'obj_domainset', when non-NULL, presumably constrains the NUMA
 * domain(s) backing the segment's object -- confirm with callers.
 */
int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem,
    struct domainset *obj_domainset);
/* Free the segment identified by 'ident'. */
void vm_free_memseg(struct vm *vm, int ident);
83 
/*
 * APIs that inspect the guest memory map require only a *single* vcpu to
 * be frozen. This acts like a read lock on the guest memory map since any
 * modification requires *all* vcpus to be frozen.
 */
/*
 * Iterate the memory map: given *gpa, report the next mapping at or
 * above it through the out parameters.
 */
int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
/* Return whether segment 'ident' is guest system memory. */
bool vm_memseg_sysmem(struct vm *vm, int ident);
/* Look up segment 'ident'; any out parameter may be NULL to skip it --
 * NOTE(review): NULL-skipping inferred from idiom, confirm in impl. */
int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    struct vm_object **objptr);
/* Highest guest-physical address backed by system memory. */
vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
/*
 * Wire guest memory [gpa, gpa + len) for host access with protection
 * 'prot'; *cookie must later be passed to vm_gpa_release().  The
 * _global variant takes a VM rather than a vcpu.
 */
void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len,
    int prot, void **cookie);
void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
    int prot, void **cookie);
void vm_gpa_release(void *cookie);
/* Return whether guest-physical address 'gpa' is backed by a mapping. */
bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa);

/*
 * Translate guest-linear address 'gla' to guest-physical under 'paging';
 * *is_fault is set when the translation would fault instead of erroring.
 */
int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *is_fault);
104 
105 #endif /* _KERNEL */
106 
107 #endif /* !_DEV_VMM_MEM_H_ */
108