xref: /freebsd/sys/dev/vmm/vmm_mem.h (revision dafba19e42e78cd3d7c9264ece49ddd3d7d70da5)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2011 NetApp, Inc.
5  * All rights reserved.
6  */
7 
8 #ifndef _DEV_VMM_MEM_H_
9 #define	_DEV_VMM_MEM_H_
10 
11 /* Maximum number of NUMA domains in a guest. */
12 #define VM_MAXMEMDOM 8
13 #define VM_MAXSYSMEM VM_MAXMEMDOM	/* one system memory segment per NUMA domain */
14 
15 /*
16  * Identifiers for memory segments.
17  * Each guest NUMA domain is represented by a single system
18  * memory segment from [VM_SYSMEM, VM_MAXSYSMEM).
19  * The remaining identifiers can be used to create devmem segments.
20  */
21 enum {
22         VM_SYSMEM = 0,			/* system memory ids: [VM_SYSMEM, VM_MAXSYSMEM) */
23         VM_BOOTROM = VM_MAXSYSMEM,	/* first devmem segment identifier */
24         VM_FRAMEBUFFER,
25         VM_PCIROM,
26         VM_MEMSEG_END			/* count of identifiers; not itself a valid id */
27 };
28 
29 #define	VM_MAX_MEMSEGS	VM_MEMSEG_END		/* total number of memory segments */
30 #define	VM_MAX_MEMMAPS	(VM_MAX_MEMSEGS * 2)	/* twice the segment count */
31 
32 #ifdef _KERNEL
33 
34 #include <sys/types.h>
35 #include <sys/_sx.h>
36 
37 struct domainset;
38 struct vcpu;
39 struct vm;
40 struct vm_guest_paging;
41 struct vm_object;
42 struct vmspace;
43 
/* A single segment of memory backing part of the guest. */
44 struct vm_mem_seg {
45 	size_t	len;			/* segment length in bytes */
46 	bool	sysmem;			/* true: guest system memory; false: devmem */
47 	struct vm_object *object;	/* backing VM object */
48 };
49 
/* A mapping of a slice of a memory segment into the guest physical address space. */
50 struct vm_mem_map {
51 	vm_paddr_t	gpa;		/* guest physical address of the mapping */
52 	size_t		len;		/* mapping length in bytes */
53 	vm_ooffset_t	segoff;		/* offset into the backing segment */
54 	int		segid;		/* identifier of the backing segment */
55 	int		prot;		/* protection bits */
56 	int		flags;		/* mapping flags */
57 };
58 
/* Per-VM guest memory state: mappings, segments, and the guest vmspace. */
59 struct vm_mem {
60 	struct vm_mem_map	mem_maps[VM_MAX_MEMMAPS];	/* guest memory mappings */
61 	struct vm_mem_seg	mem_segs[VM_MAX_MEMSEGS];	/* backing segments */
62 	struct sx		mem_segs_lock;	/* sx lock; presumably protects mem_segs/mem_maps — confirm in vmm_mem.c */
63 	struct vmspace		*mem_vmspace;	/* guest vmspace (see vm_vmspace()) */
64 };
65 
/*
 * Set up and tear down per-VM memory state.
 * NOTE(review): [lo, hi) presumably bounds the guest physical address range
 * managed by the vmspace — confirm against the implementation.
 */
66 int	vm_mem_init(struct vm_mem *mem, vm_offset_t lo, vm_offset_t hi);
67 void	vm_mem_cleanup(struct vm *vm);
68 void	vm_mem_destroy(struct vm *vm);
69 
/* Return the vmspace associated with 'vm'. */
70 struct vmspace *vm_vmspace(struct vm *vm);
71 
72 /*
73  * APIs that modify the guest memory map require all vcpus to be frozen.
74  */
/* Shared/exclusive acquire and release of the memseg lock, plus assertions. */
75 void vm_slock_memsegs(struct vm *vm);
76 void vm_xlock_memsegs(struct vm *vm);
77 void vm_unlock_memsegs(struct vm *vm);
78 void vm_assert_memseg_locked(struct vm *vm);
79 void vm_assert_memseg_xlocked(struct vm *vm);
/* Map [off, off + len) of segment 'segid' at guest physical address 'gpa'. */
80 int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
81     size_t len, int prot, int flags);
/* Remove the mapping covering [gpa, gpa + len) from the guest memory map. */
82 int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len);
/*
 * Allocate memory segment 'ident' of 'len' bytes.  'obj_domainset', when
 * non-NULL, presumably selects the NUMA domain(s) backing the object —
 * confirm against the implementation.
 */
83 int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem,
84     struct domainset *obj_domainset);
/* Free the memory segment identified by 'ident'. */
85 void vm_free_memseg(struct vm *vm, int ident);
86 
87 /*
88  * APIs that inspect the guest memory map require only a *single* vcpu to
89  * be frozen. This acts like a read lock on the guest memory map since any
90  * modification requires *all* vcpus to be frozen.
91  */
/* Iterate memory map entries starting at *gpa; details returned via out params. */
92 int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
93     vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
/* True if segment 'ident' is guest system memory (as opposed to devmem). */
94 bool vm_memseg_sysmem(struct vm *vm, int ident);
/* Look up segment 'ident'; length, sysmem flag and object returned via out params. */
95 int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
96     struct vm_object **objptr);
/* Maximum guest physical address backed by system memory — confirm whether
   the returned value is inclusive or one-past-the-end. */
97 vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
/*
 * NOTE(review): these presumably pin guest physical range [gpa, gpa + len)
 * and return a host-accessible pointer, released via vm_gpa_release(*cookie);
 * the _global variant operates on the vm rather than a vcpu.  Confirm in
 * vmm_mem.c.
 */
98 void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len,
99     int prot, void **cookie);
100 void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
101     int prot, void **cookie);
102 void vm_gpa_release(void *cookie);
/* True if guest physical address 'gpa' is covered by an allocated mapping. */
103 bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa);
104 
/* Translate guest linear address 'gla' to a guest physical address without
   injecting a fault into the guest; *is_fault reports whether the
   translation would have faulted. */
105 int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
106     uint64_t gla, int prot, uint64_t *gpa, int *is_fault);
107 
108 #endif /* _KERNEL */
109 
110 #endif /* !_DEV_VMM_MEM_H_ */
111