/* xref: /freebsd/sys/dev/vmm/vmm_mem.h (revision a4197ea477771d525c2970d0c42acab727e43f16) */
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 */
7 
#ifndef _DEV_VMM_MEM_H_
#define	_DEV_VMM_MEM_H_

/* Maximum number of NUMA domains in a guest. */
#define VM_MAXMEMDOM 8
/* One system memory segment is allocated per guest NUMA domain. */
#define VM_MAXSYSMEM VM_MAXMEMDOM
14 
/*
 * Identifiers for memory segments.
 * Each guest NUMA domain is represented by a single system
 * memory segment from [VM_SYSMEM, VM_MAXSYSMEM).
 * The remaining identifiers can be used to create devmem segments.
 */
enum {
        VM_SYSMEM = 0,			/* first system memory segment */
        VM_BOOTROM = VM_MAXSYSMEM,	/* boot ROM devmem segment */
        VM_FRAMEBUFFER,			/* framebuffer devmem segment */
        VM_PCIROM,			/* PCI option ROM devmem segment */
        VM_MEMSEG_END			/* total number of segment identifiers */
};
28 
/* Sizes of the per-VM segment and mapping tables in struct vm_mem. */
#define	VM_MAX_MEMSEGS	VM_MEMSEG_END
#define	VM_MAX_MEMMAPS	(VM_MAX_MEMSEGS * 2)
31 
32 #ifdef _KERNEL
33 
34 #include <sys/types.h>
35 #include <sys/_sx.h>
36 
37 struct vm;
38 struct vm_object;
39 
/*
 * A guest memory segment: a chunk of guest memory backed by a VM object.
 */
struct vm_mem_seg {
	size_t	len;			/* segment length in bytes */
	bool	sysmem;			/* true: system memory, false: devmem */
	struct vm_object *object;	/* backing VM object */
};
45 
/*
 * A mapping of (part of) a memory segment into the guest physical
 * address space.
 */
struct vm_mem_map {
	vm_paddr_t	gpa;	/* guest-physical base address of the mapping */
	size_t		len;	/* mapping length in bytes */
	vm_ooffset_t	segoff;	/* starting offset into the backing segment */
	int		segid;	/* identifier of the backing segment */
	int		prot;	/* protection bits for the mapping */
	int		flags;	/* mapping flags */
};
54 
/*
 * Per-VM memory state: the mapping and segment tables plus the lock that
 * serializes access to them (see the locking APIs declared below).
 */
struct vm_mem {
	struct vm_mem_map	mem_maps[VM_MAX_MEMMAPS];	/* guest memory map */
	struct vm_mem_seg	mem_segs[VM_MAX_MEMSEGS];	/* memory segments */
	struct sx		mem_segs_lock;			/* shared/exclusive lock */
};
60 
/* Lifecycle: initialize the embedded state, then cleanup/destroy at VM exit. */
void	vm_mem_init(struct vm_mem *mem);
void	vm_mem_cleanup(struct vm *vm);
void	vm_mem_destroy(struct vm *vm);

/*
 * APIs that modify the guest memory map require all vcpus to be frozen.
 */
void vm_slock_memsegs(struct vm *vm);		/* shared (read) lock */
void vm_xlock_memsegs(struct vm *vm);		/* exclusive (write) lock */
void vm_unlock_memsegs(struct vm *vm);
void vm_assert_memseg_locked(struct vm *vm);
void vm_assert_memseg_xlocked(struct vm *vm);
/*
 * Map [off, off + len) of segment 'segid' into the guest physical address
 * space at [gpa, gpa + len) with protection 'prot'.
 */
int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot, int flags);
/* Remove the guest mapping covering [gpa, gpa + len). */
int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len);
/*
 * Create memory segment 'ident' of 'len' bytes; 'sysmem' selects system
 * memory vs. devmem.  NOTE(review): 'obj_domainset' presumably constrains
 * the NUMA domain(s) of the backing object — confirm in vmm_mem.c.
 */
int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem,
    struct domainset *obj_domainset);
void vm_free_memseg(struct vm *vm, int ident);
79 
/*
 * APIs that inspect the guest memory map require only a *single* vcpu to
 * be frozen. This acts like a read lock on the guest memory map since any
 * modification requires *all* vcpus to be frozen.
 */
/* Iterate over the memory map, reporting one mapping per call via the
 * out parameters.  NOTE(review): presumably returns the mapping at or
 * above '*gpa' — confirm against vmm_mem.c. */
int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
/* Return true if segment 'ident' is a system memory segment. */
bool vm_memseg_sysmem(struct vm *vm, int ident);
/* Look up segment 'ident' and report its length/type/backing object. */
int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    struct vm_object **objptr);
/* Upper bound of guest system memory addresses. */
vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
/*
 * Pin guest physical memory [gpa, gpa + len) and return a host pointer to
 * it; the pointer remains valid until vm_gpa_release(*cookie).
 */
void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len,
    int prot, void **cookie);
/* As vm_gpa_hold(), but not tied to a specific vcpu. */
void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
    int prot, void **cookie);
void vm_gpa_release(void *cookie);
/* Return true if guest physical address 'gpa' is backed by a mapping. */
bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa);

/*
 * Translate guest linear address 'gla' to a guest physical address without
 * injecting a fault into the guest; '*is_fault' reports whether the
 * translation faulted.
 */
int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *is_fault);
100 
101 #endif /* _KERNEL */
102 
103 #endif /* !_DEV_VMM_MEM_H_ */
104