/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <machine/smp.h>

#include <dev/vmm/vmm_vm.h>

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL);

int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

/*
 * Invoke the rendezvous function on the specified vcpu if applicable.
 * Return true if the rendezvous is finished, false otherwise.
 */
static bool
vm_rendezvous(struct vcpu *vcpu)
{
	struct vm *vm = vcpu->vm;
	int vcpuid;

	mtx_assert(&vm->rendezvous_mtx, MA_OWNED);
	KASSERT(vm->rendezvous_func != NULL,
	    ("vm_rendezvous: no rendezvous pending"));

	/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
	CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus,
	    &vm->active_cpus);

	vcpuid = vcpu->vcpuid;
	if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
	    !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
		(*vm->rendezvous_func)(vcpu, vm->rendezvous_arg);
		CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
	}
	if (CPU_CMP(&vm->rendezvous_req_cpus,
	    &vm->rendezvous_done_cpus) == 0) {
		CPU_ZERO(&vm->rendezvous_req_cpus);
		vm->rendezvous_func = NULL;
		wakeup(&vm->rendezvous_func);
		return (true);
	}
	return (false);
}
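
/*
 * For illustration only, compiled out: a minimal sketch of the initiator
 * side that vm_rendezvous() services, modeled on the vm_smp_rendezvous()
 * interface mentioned below.  The rendezvous fields are the ones used in
 * this file; the function name and overall shape are assumptions, not
 * the canonical implementation.
 */
#if 0
static int
example_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg)
{
	struct vm *vm = vcpu->vm;

	mtx_lock(&vm->rendezvous_mtx);
	KASSERT(vm->rendezvous_func == NULL,
	    ("example_smp_rendezvous: rendezvous already in progress"));

	/* Publish the request; vm_rendezvous() tears it down when done. */
	vm->rendezvous_req_cpus = dest;
	CPU_ZERO(&vm->rendezvous_done_cpus);
	vm->rendezvous_arg = arg;
	vm->rendezvous_func = func;
	mtx_unlock(&vm->rendezvous_mtx);

	/* Prod the targets so they trap out and run the callback. */
	for (int i = 0; i < vm->maxcpus; i++) {
		if (CPU_ISSET(i, &dest))
			vcpu_notify_event(vm_vcpu(vm, i));
	}

	/* Participate on our own behalf and wait for completion. */
	return (vm_handle_rendezvous(vcpu));
}
#endif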
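
/*
 * Service a pending rendezvous on behalf of the calling vcpu thread,
 * sleeping until the rendezvous completes.  Returns non-zero only if
 * the calling thread needs to suspend itself (see thread_check_susp()).
 */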
int
vm_handle_rendezvous(struct vcpu *vcpu)
{
	struct vm *vm;
	struct thread *td;

	td = curthread;
	vm = vcpu->vm;

	mtx_lock(&vm->rendezvous_mtx);
	while (vm->rendezvous_func != NULL) {
		if (vm_rendezvous(vcpu))
			break;

		mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
		    "vmrndv", hz);
		if (td_ast_pending(td, TDA_SUSPEND)) {
			int error;

			mtx_unlock(&vm->rendezvous_mtx);
			error = thread_check_susp(td, true);
			if (error != 0)
				return (error);
			mtx_lock(&vm->rendezvous_mtx);
		}
	}
	mtx_unlock(&vm->rendezvous_mtx);
	return (0);
}
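
/*
 * Ask a vcpu that is not idle to return to the idle state, and wait up
 * to one second for it to get there.  Callers loop as needed; the vcpu
 * lock must be held.
 */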
static void
vcpu_wait_idle(struct vcpu *vcpu)
{
	KASSERT(vcpu->state != VCPU_IDLE, ("vcpu already idle"));

	vcpu->reqidle = 1;
	vcpu_notify_event_locked(vcpu);
	msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
}

int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle)
{
	int error;

	vcpu_assert_locked(vcpu);

	/*
	 * State transitions from vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state.  This guarantees that there is only a
	 * single ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE)
			vcpu_wait_idle(vcpu);
	} else {
		KASSERT(vcpu->state != VCPU_IDLE,
		    ("invalid transition from vcpu idle state"));
	}

	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(vcpu->hostcpu == curcpu,
		    ("curcpu %d and hostcpu %d mismatch for running vcpu",
		    curcpu, vcpu->hostcpu));
	} else {
		KASSERT(vcpu->hostcpu == NOCPU,
		    ("invalid hostcpu %d for a vcpu that is not running",
		    vcpu->hostcpu));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;
	if (newstate == VCPU_RUNNING)
		vcpu->hostcpu = curcpu;
	else
		vcpu->hostcpu = NOCPU;

	if (newstate == VCPU_IDLE)
		wakeup(&vcpu->state);

	return (0);
}
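
/*
 * For illustration only, compiled out: the freeze/unfreeze pattern the
 * comment above expects vmmdev_ioctl() to follow.  The helper name is
 * hypothetical; the vcpu_set_state() calls are the real interface.
 */
#if 0
static int
example_ioctl_op(struct vcpu *vcpu)
{
	int error;

	/* IDLE -> FROZEN: waits for the vcpu to go idle, then freezes it. */
	error = vcpu_set_state(vcpu, VCPU_FROZEN, true);
	if (error != 0)
		return (error);

	/* Operate on the vcpu; no other ioctl can run on it concurrently. */

	/* FROZEN -> IDLE: wakes threads sleeping in vcpu_wait_idle(). */
	(void)vcpu_set_state(vcpu, VCPU_IDLE, false);
	return (0);
}
#endif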

/*
 * Try to lock all of the vCPUs in the VM while taking care to avoid
 * deadlocks with vm_smp_rendezvous().
 *
 * The complexity here suggests that the rendezvous mechanism needs a
 * rethink.
 */
int
vcpu_set_state_all(struct vm *vm, enum vcpu_state newstate)
{
	cpuset_t locked;
	struct vcpu *vcpu;
	int error, i;
	uint16_t maxcpus;

	KASSERT(newstate != VCPU_IDLE,
	    ("vcpu_set_state_all: invalid target state %d", newstate));

	error = 0;
	CPU_ZERO(&locked);
	maxcpus = vm->maxcpus;

	mtx_lock(&vm->rendezvous_mtx);
restart:
	if (vm->rendezvous_func != NULL) {
		/*
		 * If we have a pending rendezvous, then the initiator may
		 * be blocked waiting for other vCPUs to execute the
		 * callback.  The current thread may be a vCPU thread, so
		 * we must not block waiting for the initiator; otherwise
		 * we get a deadlock.  Thus, execute the callback on behalf
		 * of any idle vCPUs.
		 */
		for (i = 0; i < maxcpus; i++) {
			vcpu = vm_vcpu(vm, i);
			if (vcpu == NULL)
				continue;
			vcpu_lock(vcpu);
			if (vcpu->state == VCPU_IDLE) {
				(void)vcpu_set_state_locked(vcpu,
				    VCPU_FROZEN, true);
				CPU_SET(i, &locked);
			}
			if (CPU_ISSET(i, &locked)) {
				/*
				 * We can safely execute the callback on
				 * this vCPU's behalf.
				 */
				vcpu_unlock(vcpu);
				(void)vm_rendezvous(vcpu);
				vcpu_lock(vcpu);
			}
			vcpu_unlock(vcpu);
		}
	}

	/*
	 * Now wait for remaining vCPUs to become idle.  This may include
	 * the initiator of a rendezvous that is currently blocked on the
	 * rendezvous mutex.
	 */
	CPU_FOREACH_ISCLR(i, &locked) {
		if (i >= maxcpus)
			break;
		vcpu = vm_vcpu(vm, i);
		if (vcpu == NULL)
			continue;
		vcpu_lock(vcpu);
		while (vcpu->state != VCPU_IDLE) {
			mtx_unlock(&vm->rendezvous_mtx);
			vcpu_wait_idle(vcpu);
			vcpu_unlock(vcpu);
			mtx_lock(&vm->rendezvous_mtx);
			if (vm->rendezvous_func != NULL)
				goto restart;
			vcpu_lock(vcpu);
		}
		error = vcpu_set_state_locked(vcpu, newstate, true);
		vcpu_unlock(vcpu);
		if (error != 0) {
			/* Roll back the vCPUs we already froze. */
			CPU_FOREACH_ISSET(i, &locked)
				(void)vcpu_set_state(vm_vcpu(vm, i),
				    VCPU_IDLE, false);
			break;
		}
		CPU_SET(i, &locked);
	}
	mtx_unlock(&vm->rendezvous_mtx);
	return (error);
}
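
/*
 * For illustration only, compiled out: a caller that needs every vcpu
 * quiesced, e.g. around a VM-wide operation, freezes them all and then
 * releases them one by one.  The helper name is hypothetical; the
 * release loop mirrors what an unlock path is expected to do.
 */
#if 0
static int
example_freeze_all(struct vm *vm)
{
	int error;

	error = vcpu_set_state_all(vm, VCPU_FROZEN);
	if (error != 0)
		return (error);

	/* Perform the VM-wide operation here. */

	for (int i = 0; i < vm->maxcpus; i++) {
		struct vcpu *vcpu = vm_vcpu(vm, i);

		if (vcpu != NULL)
			(void)vcpu_set_state(vcpu, VCPU_IDLE, false);
	}
	return (0);
}
#endif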

int
vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle)
{
	int error;

	vcpu_lock(vcpu);
	error = vcpu_set_state_locked(vcpu, newstate, from_idle);
	vcpu_unlock(vcpu);

	return (error);
}

enum vcpu_state
vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
{
	enum vcpu_state state;

	vcpu_lock(vcpu);
	state = vcpu->state;
	if (hostcpu != NULL)
		*hostcpu = vcpu->hostcpu;
	vcpu_unlock(vcpu);

	return (state);
}

/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be
 *   directed to the host_cpu to cause the vcpu to trap into the
 *   hypervisor.
 */
void
vcpu_notify_event_locked(struct vcpu *vcpu)
{
	int hostcpu;

	hostcpu = vcpu->hostcpu;
	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
		if (hostcpu != curcpu) {
			ipi_cpu(hostcpu, vmm_ipinum);
		} else {
			/*
			 * If the 'vcpu' is running on 'curcpu' then it must
			 * be sending a notification to itself (e.g. SELF_IPI).
			 * The pending event will be picked up when the vcpu
			 * transitions back to guest context.
			 */
		}
	} else {
		KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
		    "with hostcpu %d", vcpu->state, hostcpu));
		if (vcpu->state == VCPU_SLEEPING)
			wakeup_one(vcpu);
	}
}

void
vcpu_notify_event(struct vcpu *vcpu)
{
	vcpu_lock(vcpu);
	vcpu_notify_event_locked(vcpu);
	vcpu_unlock(vcpu);
}
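
/*
 * For illustration only, compiled out: an event source pairs a state
 * update with the notification above.  'pending_event' is a hypothetical
 * field; the point is that the update must be made while holding the
 * vcpu lock, before the vcpu is prodded.
 */
#if 0
static void
example_post_event(struct vcpu *vcpu, int event)
{
	vcpu_lock(vcpu);
	vcpu->pending_event = event;	/* hypothetical field */
	vcpu_notify_event_locked(vcpu);	/* wakeup or IPI as appropriate */
	vcpu_unlock(vcpu);
}
#endif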

int
vcpu_debugged(struct vcpu *vcpu)
{
	return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus));
}

void
vm_lock_vcpus(struct vm *vm)
{
	sx_xlock(&vm->vcpus_init_lock);
}

void
vm_unlock_vcpus(struct vm *vm)
{
	sx_unlock(&vm->vcpus_init_lock);
}

void
vm_disable_vcpu_creation(struct vm *vm)
{
	sx_xlock(&vm->vcpus_init_lock);
	vm->dying = true;
	sx_xunlock(&vm->vcpus_init_lock);
}

uint16_t
vm_get_maxcpus(struct vm *vm)
{
	return (vm->maxcpus);
}

void
vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus)
{
	*sockets = vm->sockets;
	*cores = vm->cores;
	*threads = vm->threads;
	*maxcpus = vm->maxcpus;
}

int
vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus __unused)
{
	/* Ignore maxcpus. */
	if (sockets * cores * threads > vm->maxcpus)
		return (EINVAL);
	vm->sockets = sockets;
	vm->cores = cores;
	vm->threads = threads;
	return (0);
}
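
/*
 * For illustration only, compiled out: the product of the topology
 * parameters must not exceed the vcpu limit.  Assuming a VM created with
 * maxcpus of 16, 2 sockets x 4 cores x 2 threads (16) is accepted while
 * 4 x 4 x 2 (32) is rejected.
 */
#if 0
static void
example_topology(struct vm *vm)
{
	int error;

	error = vm_set_topology(vm, 2, 4, 2, 0);	/* 16 <= 16: ok */
	KASSERT(error == 0, ("unexpected error %d", error));
	error = vm_set_topology(vm, 4, 4, 2, 0);	/* 32 > 16: rejected */
	KASSERT(error == EINVAL, ("expected EINVAL, got %d", error));
}
#endif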

int
vm_suspend(struct vm *vm, enum vm_suspend_how how)
{
	int i;

	if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
		return (EINVAL);

	if (atomic_cmpset_int(&vm->suspend, 0, how) == 0)
		return (EALREADY);

	/*
	 * Notify all active vcpus that they are now suspended.
	 */
	for (i = 0; i < vm->maxcpus; i++) {
		if (CPU_ISSET(i, &vm->active_cpus))
			vcpu_notify_event(vm_vcpu(vm, i));
	}

	return (0);
}
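
/*
 * For illustration only, compiled out: a vcpu run loop is expected to
 * poll vm->suspend once vm_suspend() has latched a reason and then
 * record a suspend exit.  vm_exit_suspended() is the existing helper
 * for the latter; the loop shape and the helper name used here are
 * otherwise assumptions.
 */
#if 0
static bool
example_check_suspend(struct vcpu *vcpu, uint64_t pc)
{
	struct vm *vm = vcpu->vm;

	if (vm->suspend == 0)
		return (false);
	vm_exit_suspended(vcpu, pc);	/* report the suspend exit */
	return (true);
}
#endif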

int
vm_reinit(struct vm *vm)
{
	int error;

	/*
	 * A virtual machine can be reset only if all vcpus are suspended.
	 */
	if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
		vm_reset(vm);
		error = 0;
	} else {
		error = EBUSY;
	}

	return (error);
}

int
vm_activate_cpu(struct vcpu *vcpu)
{
	struct vm *vm = vcpu->vm;

	if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
		return (EBUSY);

	CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus);
	return (0);
}
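
/*
 * Move the given vcpu, or every active vcpu if 'vcpu' is NULL, into the
 * set of vcpus halted for debugging, and notify them so that they stop
 * executing guest code.
 */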
int
vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
{
	if (vcpu == NULL) {
		vm->debug_cpus = vm->active_cpus;
		for (int i = 0; i < vm->maxcpus; i++) {
			if (CPU_ISSET(i, &vm->active_cpus))
				vcpu_notify_event(vm_vcpu(vm, i));
		}
	} else {
		if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
			return (EINVAL);

		CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
		vcpu_notify_event(vcpu);
	}
	return (0);
}

int
vm_resume_cpu(struct vm *vm, struct vcpu *vcpu)
{
	if (vcpu == NULL) {
		CPU_ZERO(&vm->debug_cpus);
	} else {
		if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus))
			return (EINVAL);

		CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
	}
	return (0);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{
	return (vm->active_cpus);
}

cpuset_t
vm_debug_cpus(struct vm *vm)
{
	return (vm->debug_cpus);
}

cpuset_t
vm_suspended_cpus(struct vm *vm)
{
	return (vm->suspended_cpus);
}