/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 */

#ifndef _DEV_VMM_VM_H_
#define	_DEV_VMM_VM_H_

#ifdef _KERNEL
#include <sys/_cpuset.h>

#include <machine/vmm.h>

#include <dev/vmm/vmm_param.h>
#include <dev/vmm/vmm_mem.h>

struct vcpu;

enum vcpu_state {
	VCPU_IDLE,
	VCPU_FROZEN,
	VCPU_RUNNING,
	VCPU_SLEEPING,
};

/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
	struct mtx	mtx;		/* (o) protects 'state' and 'hostcpu' */
	enum vcpu_state	state;		/* (o) vcpu state */
	int		vcpuid;		/* (o) */
	int		hostcpu;	/* (o) vcpu's host cpu */
	int		reqidle;	/* (i) request vcpu to idle */
	struct vm	*vm;		/* (o) */
	void		*cookie;	/* (i) cpu-specific data */
	void		*stats;		/* (a,i) statistics */

	VMM_VCPU_MD_FIELDS;
};

#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock_destroy(v)	mtx_destroy(&((v)->mtx))
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)

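/*
 * Example (illustrative sketch, not part of this header): the spin lock
 * installed by vcpu_lock_init() guards 'state' and 'hostcpu', so a
 * consistent snapshot of both is taken under vcpu_lock(), much as the
 * vcpu_get_state() implementation does:
 *
 *	enum vcpu_state state;
 *	int hostcpu;
 *
 *	vcpu_lock(vcpu);
 *	state = vcpu->state;
 *	hostcpu = vcpu->hostcpu;
 *	vcpu_unlock(vcpu);
 */
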
extern int vmm_ipinum;

int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state state, bool from_idle);
int vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle);
int vcpu_set_state_all(struct vm *vm, enum vcpu_state state);
enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu);
void vcpu_notify_event(struct vcpu *vcpu);
void vcpu_notify_event_locked(struct vcpu *vcpu);
int vcpu_debugged(struct vcpu *vcpu);

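/*
 * Example (illustrative sketch, not part of this header): code that needs
 * exclusive access to a vcpu freezes it first and returns it to the idle
 * state when done.  Passing from_idle=true makes vcpu_set_state() wait
 * until the vcpu is idle before performing the transition:
 *
 *	error = vcpu_set_state(vcpu, VCPU_FROZEN, true);
 *	if (error == 0) {
 *		... operate on the frozen vcpu ...
 *		(void)vcpu_set_state(vcpu, VCPU_IDLE, false);
 *	}
 */
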
static inline void *
vcpu_stats(struct vcpu *vcpu)
{
	return (vcpu->stats);
}

static inline struct vm *
vcpu_vm(struct vcpu *vcpu)
{
	return (vcpu->vm);
}

static inline int
vcpu_vcpuid(struct vcpu *vcpu)
{
	return (vcpu->vcpuid);
}

static int __inline
vcpu_is_running(struct vcpu *vcpu, int *hostcpu)
{
	return (vcpu_get_state(vcpu, hostcpu) == VCPU_RUNNING);
}

#ifdef _SYS_PROC_H_
static int __inline
vcpu_should_yield(struct vcpu *vcpu)
{
	struct thread *td;

	td = curthread;
	return (td->td_ast != 0 || td->td_owepreempt != 0);
}
#endif

typedef void (*vm_rendezvous_func_t)(struct vcpu *vcpu, void *arg);
int vm_handle_rendezvous(struct vcpu *vcpu);

/*
 * Rendezvous all vcpus specified in 'dest' and execute 'func(arg)'.
 * The rendezvous 'func(arg)' is not allowed to do anything that will
 * cause the thread to be put to sleep.
 *
 * The caller cannot hold any locks when initiating the rendezvous.
 *
 * The implementation of this API may cause vcpus other than those specified
 * by 'dest' to be stalled. The caller should not rely on any vcpus making
 * forward progress when the rendezvous is in progress.
 */
int vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg);

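/*
 * Example (illustrative sketch, not part of this header): run a non-sleeping
 * callback on every active vcpu from the context of the initiating vcpu.
 * The callback name is hypothetical:
 *
 *	static void
 *	sample_cb(struct vcpu *vcpu, void *arg)
 *	{
 *		... per-vcpu work that must not sleep ...
 *	}
 *
 *	error = vm_smp_rendezvous(vcpu, vm_active_cpus(vcpu_vm(vcpu)),
 *	    sample_cb, NULL);
 */
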
/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 *
 * Locking:
 * [m] mem_segs_lock
 * [r] rendezvous_mtx
 * [v] reads require one frozen vcpu, writes require freezing all vcpus
 */
struct vm {
	void		*cookie;		/* (i) cpu-specific data */
	struct vcpu	**vcpu;			/* (o) guest vcpus */
	struct vm_mem	mem;			/* (i) [m+v] guest memory */

	char		name[VM_MAX_NAMELEN + 1]; /* (o) virtual machine name */
	struct sx	vcpus_init_lock;	/* (o) */

	bool		dying;			/* (o) is dying */
	int		suspend;		/* (i) stop VM execution */

	volatile cpuset_t active_cpus;		/* (i) active vcpus */
	volatile cpuset_t debug_cpus;		/* (i) vcpus stopped for debug */
	volatile cpuset_t suspended_cpus;	/* (i) suspended vcpus */
	volatile cpuset_t halted_cpus;		/* (x) cpus in a hard halt */

	cpuset_t	rendezvous_req_cpus;	/* (x) [r] rendezvous requested */
	cpuset_t	rendezvous_done_cpus;	/* (x) [r] rendezvous finished */
	void		*rendezvous_arg;	/* (x) [r] rendezvous func/arg */
	vm_rendezvous_func_t rendezvous_func;
	struct mtx	rendezvous_mtx;		/* (o) rendezvous lock */

	uint16_t	sockets;		/* (o) num of sockets */
	uint16_t	cores;			/* (o) num of cores/socket */
	uint16_t	threads;		/* (o) num of threads/core */
	uint16_t	maxcpus;		/* (o) max pluggable cpus */

	VMM_VM_MD_FIELDS;
};

int vm_create(const char *name, struct vm **retvm);
struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm);
void vm_reset(struct vm *vm);

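/*
 * Example (illustrative sketch, not part of this header): a typical VM
 * lifecycle.  vm_alloc_vcpu() returns NULL when 'vcpuid' is out of range
 * or the VM is dying:
 *
 *	struct vm *vm;
 *	struct vcpu *vcpu;
 *	int error;
 *
 *	error = vm_create("guest0", &vm);
 *	if (error == 0) {
 *		vcpu = vm_alloc_vcpu(vm, 0);
 *		...
 *		vm_destroy(vm);
 *	}
 */
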
void vm_lock_vcpus(struct vm *vm);
void vm_unlock_vcpus(struct vm *vm);
void vm_disable_vcpu_creation(struct vm *vm);

int vm_suspend(struct vm *vm, enum vm_suspend_how how);
int vm_activate_cpu(struct vcpu *vcpu);
int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu);
int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu);

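/*
 * Example (illustrative sketch, not part of this header): a debugger stops
 * one vcpu and later lets it run again; the vcpu shows up in
 * vm_debug_cpus(vm) while it is stopped:
 *
 *	error = vm_suspend_cpu(vm, vcpu);
 *	... inspect the stopped vcpu ...
 *	error = vm_resume_cpu(vm, vcpu);
 */
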
cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_debug_cpus(struct vm *vm);
cpuset_t vm_suspended_cpus(struct vm *vm);

uint16_t vm_get_maxcpus(struct vm *vm);
void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus);
int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus);

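/*
 * Example (illustrative sketch, not part of this header): describe a guest
 * as 2 sockets x 2 cores/socket x 1 thread/core.  The product of the three
 * counts may not exceed vm_get_maxcpus(vm); vm_set_topology() rejects a
 * larger product with EINVAL:
 *
 *	error = vm_set_topology(vm, 2, 2, 1, vm_get_maxcpus(vm));
 */
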
static inline const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

static inline struct vm_mem *
vm_mem(struct vm *vm)
{
	return (&vm->mem);
}

static inline struct vcpu *
vm_vcpu(struct vm *vm, int vcpuid)
{
	return (vm->vcpu[vcpuid]);
}

struct vm_eventinfo {
	cpuset_t *rptr;		/* rendezvous cookie */
	int	*sptr;		/* suspend cookie */
	int	*iptr;		/* reqidle cookie */
};

static inline int
vcpu_rendezvous_pending(struct vcpu *vcpu, struct vm_eventinfo *info)
{
	/*
	 * This check isn't done with atomic operations or under a lock because
	 * there's no need to. If the vcpuid bit is set, the vcpu is part of a
	 * rendezvous and the bit won't be cleared until the vcpu enters the
	 * rendezvous. On rendezvous exit, the cpuset is cleared and the vcpu
	 * will see an empty cpuset. So, the races are harmless.
	 */
	return (CPU_ISSET(vcpu_vcpuid(vcpu), info->rptr));
}

static inline int
vcpu_suspended(struct vm_eventinfo *info)
{
	return (*info->sptr);
}

static inline int
vcpu_reqidle(struct vm_eventinfo *info)
{
	return (*info->iptr);
}
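
/*
 * Example (illustrative sketch, not part of this header): the run loop
 * polls these predicates before entering the guest, with 'info' pointing
 * at the VM's rendezvous_req_cpus and suspend fields and at the vcpu's
 * reqidle flag:
 *
 *	if (vcpu_suspended(info))
 *		... leave the loop to handle VM suspension ...
 *	else if (vcpu_rendezvous_pending(vcpu, info))
 *		... leave the loop to handle the rendezvous ...
 *	else if (vcpu_reqidle(info))
 *		... leave the loop so the vcpu can go idle ...
 */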
#endif /* _KERNEL */

#endif /* !_DEV_VMM_VM_H_ */