/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 */

#ifndef _DEV_VMM_MEM_H_
#define	_DEV_VMM_MEM_H_

#ifdef _KERNEL

#include <sys/types.h>
#include <sys/_sx.h>

struct vcpu;
struct vm;
struct vm_guest_paging;
struct vm_object;

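/*
 * Guest memory is described in two parts: mem_segs[] describes the segments
 * (a length, a "system memory" flag and the backing VM object), while
 * mem_maps[] installs ranges of those segments into the guest physical
 * address space.  The mem_segs_lock sx lock guards this state; the locking
 * rules are spelled out with the function prototypes below.
 */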
struct vm_mem_seg {
	size_t	len;			/* segment length in bytes */
	bool	sysmem;			/* true if guest system memory (RAM) */
	struct vm_object *object;	/* backing VM object */
};

struct vm_mem_map {
	vm_paddr_t	gpa;		/* guest physical base address */
	size_t		len;		/* mapping length in bytes */
	vm_ooffset_t	segoff;		/* offset into the backing segment */
	int		segid;		/* identifies the backing segment */
	int		prot;		/* VM_PROT_* permissions */
	int		flags;		/* VM_MEMMAP_F_* flags */
};

#define	VM_MAX_MEMSEGS	4
#define	VM_MAX_MEMMAPS	8

struct vm_mem {
	struct vm_mem_map	mem_maps[VM_MAX_MEMMAPS];
	struct vm_mem_seg	mem_segs[VM_MAX_MEMSEGS];
	struct sx		mem_segs_lock;
};

void	vm_mem_init(struct vm_mem *mem);
void	vm_mem_cleanup(struct vm *vm);
void	vm_mem_destroy(struct vm *vm);

/*
 * APIs that modify the guest memory map require all vcpus to be frozen.
 */
void vm_slock_memsegs(struct vm *vm);
void vm_xlock_memsegs(struct vm *vm);
void vm_unlock_memsegs(struct vm *vm);
void vm_assert_memseg_locked(struct vm *vm);
void vm_assert_memseg_xlocked(struct vm *vm);
int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot, int flags);
int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem);
void vm_free_memseg(struct vm *vm, int ident);
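
/*
 * Example (a sketch, not lifted from an existing caller): create a 1 MB
 * system-memory segment in slot 0 and map it at guest physical address 0.
 * Per the rule above, the caller must already have all vcpus frozen.
 * VM_PROT_ALL is the usual protection value from <vm/vm.h>, and error
 * handling is abbreviated.
 *
 *	int error;
 *
 *	vm_xlock_memsegs(vm);
 *	error = vm_alloc_memseg(vm, 0, 1024 * 1024, true);
 *	if (error == 0)
 *		error = vm_mmap_memseg(vm, 0, 0, 0, 1024 * 1024,
 *		    VM_PROT_ALL, 0);
 *	vm_unlock_memsegs(vm);
 */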

/*
 * APIs that inspect the guest memory map require only a *single* vcpu to
 * be frozen. This acts like a read lock on the guest memory map since any
 * modification requires *all* vcpus to be frozen.
 */
int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
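
/*
 * Example (a sketch): walking the guest memory map with vm_mmap_getnext(),
 * assuming it fills in the lowest mapping at or above *gpa and returns 0,
 * and returns an error once no further mappings exist.
 *
 *	vm_paddr_t gpa;
 *	vm_ooffset_t segoff;
 *	size_t len;
 *	int segid, prot, flags;
 *
 *	gpa = 0;
 *	while (vm_mmap_getnext(vm, &gpa, &segid, &segoff, &len,
 *	    &prot, &flags) == 0) {
 *		... inspect the mapping [gpa, gpa + len) ...
 *		gpa += len;
 *	}
 */
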
bool vm_memseg_sysmem(struct vm *vm, int ident);
int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    struct vm_object **objptr);
vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len,
    int prot, void **cookie);
void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
    int prot, void **cookie);
void vm_gpa_release(void *cookie);
bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa);
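
/*
 * Example (a sketch): temporarily wiring a page of guest memory and
 * accessing it through a host kernel virtual address.  'gpa' is assumed
 * to be page-aligned, the returned pointer is only valid until the
 * matching vm_gpa_release(), and VM_PROT_READ comes from <vm/vm.h>.
 *
 *	void *cookie, *cp;
 *
 *	cp = vm_gpa_hold_global(vm, gpa, PAGE_SIZE, VM_PROT_READ, &cookie);
 *	if (cp != NULL) {
 *		... read guest memory through cp ...
 *		vm_gpa_release(cookie);
 *	}
 */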

/*
 * Translate the guest linear address 'gla' to a guest physical address
 * using the paging mode described by 'paging'.  No page fault is injected
 * into the guest; instead *is_fault reports whether the translation would
 * have faulted.
 */
int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *is_fault);

#endif /* _KERNEL */

#endif /* !_DEV_VMM_MEM_H_ */