/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */
/* This file is dual-licensed; see usr/src/contrib/bhyve/LICENSE */

/*
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2025 Oxide Computer Company
 * Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
 */

#ifndef _VMM_KERNEL_H_
#define	_VMM_KERNEL_H_

#include <sys/sdt.h>
#include <x86/segments.h>
#include <sys/vmm.h>
#include <sys/vmm_data.h>
#include <sys/linker_set.h>

SDT_PROVIDER_DECLARE(vmm);

struct vm;
struct vm_exception;
struct seg_desc;
struct vm_exit;
struct vie;
struct vm_run;
struct vhpet;
struct vioapic;
struct vlapic;
struct vmspace;
struct vm_client;
struct vm_object;
struct vm_guest_paging;
struct vmm_data_req;

/* Return values for architecture-specific calculation of the TSC multiplier */
typedef enum {
	FR_VALID,			/* valid multiplier, scaling needed */
	FR_SCALING_NOT_NEEDED,		/* scaling not required */
	FR_SCALING_NOT_SUPPORTED,	/* scaling not supported by platform */
	FR_OUT_OF_RANGE,		/* freq ratio out of supported range */
} freqratio_res_t;

typedef int (*vmm_init_func_t)(void);
typedef int (*vmm_cleanup_func_t)(void);
typedef void (*vmm_resume_func_t)(void);
typedef void * (*vmi_init_func_t)(struct vm *vm);
typedef int (*vmi_run_func_t)(void *vmi, int vcpu, uint64_t rip);
typedef void (*vmi_cleanup_func_t)(void *vmi);
typedef int (*vmi_get_register_t)(void *vmi, int vcpu, int num,
    uint64_t *retval);
typedef int (*vmi_set_register_t)(void *vmi, int vcpu, int num,
    uint64_t val);
typedef int (*vmi_get_desc_t)(void *vmi, int vcpu, int num,
    struct seg_desc *desc);
typedef int (*vmi_set_desc_t)(void *vmi, int vcpu, int num,
    const struct seg_desc *desc);
typedef int (*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int (*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
typedef struct vlapic *(*vmi_vlapic_init)(void *vmi, int vcpu);
typedef void (*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);
typedef void (*vmi_savectx)(void *vmi, int vcpu);
typedef void (*vmi_restorectx)(void *vmi, int vcpu);
typedef void (*vmi_pause_t)(void *vmi, int vcpu);

typedef int (*vmi_get_msr_t)(void *vmi, int vcpu, uint32_t msr,
    uint64_t *valp);
typedef int (*vmi_set_msr_t)(void *vmi, int vcpu, uint32_t msr,
    uint64_t val);
typedef freqratio_res_t (*vmi_freqratio_t)(uint64_t guest_hz,
    uint64_t host_hz, uint64_t *mult);
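
/*
 * Illustrative sketch (not part of this interface): a backend freq-ratio
 * handler maps a guest/host TSC frequency pair onto one of the
 * freqratio_res_t codes above, producing a multiplier only for FR_VALID.
 * The example_* helpers and EXAMPLE_FRAC_BITS are hypothetical; a real
 * backend derives the fixed-point layout from its fr_intsize/fr_fracsize.
 *
 *	static freqratio_res_t
 *	example_freqratio(uint64_t guest_hz, uint64_t host_hz, uint64_t *mult)
 *	{
 *		if (guest_hz == host_hz)
 *			return (FR_SCALING_NOT_NEEDED);
 *		if (!example_platform_has_tsc_scaling())
 *			return (FR_SCALING_NOT_SUPPORTED);
 *		if (guest_hz > example_max_ratio() * host_hz)
 *			return (FR_OUT_OF_RANGE);
 *		*mult = vmm_calc_freq_multiplier(guest_hz, host_hz,
 *		    EXAMPLE_FRAC_BITS);
 *		return (FR_VALID);
 *	}
 */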

struct vmm_ops {
	vmm_init_func_t		init;		/* module wide initialization */
	vmm_cleanup_func_t	cleanup;
	vmm_resume_func_t	resume;

	vmi_init_func_t		vminit;		/* vm-specific initialization */
	vmi_run_func_t		vmrun;
	vmi_cleanup_func_t	vmcleanup;
	vmi_get_register_t	vmgetreg;
	vmi_set_register_t	vmsetreg;
	vmi_get_desc_t		vmgetdesc;
	vmi_set_desc_t		vmsetdesc;
	vmi_get_cap_t		vmgetcap;
	vmi_set_cap_t		vmsetcap;
	vmi_vlapic_init		vlapic_init;
	vmi_vlapic_cleanup	vlapic_cleanup;
	vmi_pause_t		vmpause;

	vmi_savectx		vmsavectx;
	vmi_restorectx		vmrestorectx;

	vmi_get_msr_t		vmgetmsr;
	vmi_set_msr_t		vmsetmsr;

	vmi_freqratio_t		vmfreqratio;
	uint32_t		fr_intsize;
	uint32_t		fr_fracsize;
};
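
/*
 * Illustrative sketch (hypothetical backend, not part of this interface):
 * each backend supplies a fully-populated ops table which the generic VMM
 * layer dispatches through, in the manner of vmm_ops_intel and vmm_ops_amd
 * below. The example_* functions and fixed-point sizes are assumptions.
 *
 *	struct vmm_ops vmm_ops_example = {
 *		.init		= example_modinit,
 *		.cleanup	= example_modcleanup,
 *		.resume		= example_modresume,
 *		.vminit		= example_vminit,
 *		.vmrun		= example_vmrun,
 *		.vmcleanup	= example_vmcleanup,
 *		.vmgetreg	= example_getreg,
 *		.vmsetreg	= example_setreg,
 *		.vmgetdesc	= example_getdesc,
 *		.vmsetdesc	= example_setdesc,
 *		.vmgetcap	= example_getcap,
 *		.vmsetcap	= example_setcap,
 *		.vlapic_init	= example_vlapic_init,
 *		.vlapic_cleanup	= example_vlapic_cleanup,
 *		.vmpause	= example_pause,
 *		.vmsavectx	= example_savectx,
 *		.vmrestorectx	= example_restorectx,
 *		.vmgetmsr	= example_getmsr,
 *		.vmsetmsr	= example_setmsr,
 *		.vmfreqratio	= example_freqratio,
 *		.fr_intsize	= 8,
 *		.fr_fracsize	= 32,
 *	};
 */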

extern struct vmm_ops vmm_ops_intel;
extern struct vmm_ops vmm_ops_amd;

int vm_create(uint64_t flags, struct vm **retvm);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm, uint64_t);
uint16_t vm_get_maxcpus(struct vm *vm);
void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus);
int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus);

int vm_pause_instance(struct vm *);
int vm_resume_instance(struct vm *);
bool vm_is_paused(struct vm *);

/*
 * APIs that race against hardware.
 */
int vm_track_dirty_pages(struct vm *, uint64_t, size_t, uint8_t *);
int vm_npt_do_operation(struct vm *, uint64_t, size_t, uint32_t, uint8_t *,
    int *);

/*
 * APIs that modify the guest memory map require all vcpus to be frozen.
 */
int vm_mmap_memseg(struct vm *, vm_paddr_t, int, uintptr_t, size_t, int, int);
int vm_munmap_memseg(struct vm *, vm_paddr_t, size_t);
int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem);
void vm_free_memseg(struct vm *vm, int ident);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_assign_pptdev(struct vm *vm, int pptfd);
int vm_unassign_pptdev(struct vm *vm, int pptfd);

/*
 * APIs that inspect the guest memory map require only a *single* vcpu to
 * be frozen. This acts like a read lock on the guest memory map since any
 * modification requires *all* vcpus to be frozen.
 */
int vm_mmap_getnext(struct vm *, vm_paddr_t *, int *, uintptr_t *, size_t *,
    int *, int *);
int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    struct vm_object **objptr);
vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
bool vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa);
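
/*
 * Illustrative sketch (not part of this interface, and the iteration
 * semantics are an assumption): walking the guest memory map with
 * vm_mmap_getnext() under the single-frozen-vcpu "read lock" described
 * above, visiting each mapping at or above the requested address. The
 * example_visit_mapping() helper is hypothetical.
 *
 *	vm_paddr_t gpa = 0;
 *	uintptr_t segoff;
 *	size_t len;
 *	int segid, prot, flags;
 *
 *	while (vm_mmap_getnext(vm, &gpa, &segid, &segoff, &len,
 *	    &prot, &flags) == 0) {
 *		example_visit_mapping(gpa, len, prot);
 *		gpa += len;
 *	}
 */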

int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    const struct seg_desc *desc);
int vm_get_run_state(struct vm *vm, int vcpuid, uint32_t *state,
    uint8_t *sipi_vec);
int vm_set_run_state(struct vm *vm, int vcpuid, uint32_t state,
    uint8_t sipi_vec);
int vm_get_fpu(struct vm *vm, int vcpuid, void *buf, size_t len);
int vm_set_fpu(struct vm *vm, int vcpuid, void *buf, size_t len);
int vm_run(struct vm *vm, int vcpuid, const struct vm_entry *);
int vm_suspend(struct vm *, enum vm_suspend_how, int);
int vm_inject_nmi(struct vm *vm, int vcpu);
bool vm_nmi_pending(struct vm *vm, int vcpuid);
void vm_nmi_clear(struct vm *vm, int vcpuid);
int vm_inject_extint(struct vm *vm, int vcpu);
bool vm_extint_pending(struct vm *vm, int vcpuid);
void vm_extint_clear(struct vm *vm, int vcpuid);
int vm_inject_init(struct vm *vm, int vcpuid);
int vm_inject_sipi(struct vm *vm, int vcpuid, uint8_t vec);
struct vlapic *vm_lapic(struct vm *vm, int cpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
int vm_apicid2vcpuid(struct vm *vm, int apicid);
int vm_activate_cpu(struct vm *vm, int vcpu);
int vm_suspend_cpu(struct vm *vm, int vcpu);
int vm_resume_cpu(struct vm *vm, int vcpu);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
struct vie *vm_vie_ctx(struct vm *vm, int vcpuid);
void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_run_state(struct vm *vm, int vcpuid, uint64_t rip);
int vm_service_mmio_read(struct vm *vm, int cpuid, uint64_t gpa, uint64_t *rval,
    int rsize);
int vm_service_mmio_write(struct vm *vm, int cpuid, uint64_t gpa, uint64_t wval,
    int wsize);

#ifdef _SYS__CPUSET_H_
cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_debug_cpus(struct vm *vm);
#endif /* _SYS__CPUSET_H_ */

bool vcpu_entry_bailout_checks(struct vm *vm, int vcpuid, uint64_t rip);
bool vcpu_run_state_pending(struct vm *vm, int vcpuid);
int vcpu_arch_reset(struct vm *vm, int vcpuid, bool init_only);
int vm_vcpu_barrier(struct vm *, int);

/*
 * Return true if device indicated by bus/slot/func is supposed to be a
 * pci passthrough device.
 *
 * Return false otherwise.
 */
bool vmm_is_pptdev(int bus, int slot, int func);

void *vm_iommu_domain(struct vm *vm);

enum vcpu_state {
	VCPU_IDLE,
	VCPU_FROZEN,
	VCPU_RUNNING,
	VCPU_SLEEPING,
};

int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
    bool from_idle);
enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);
void vcpu_block_run(struct vm *, int);
void vcpu_unblock_run(struct vm *, int);

uint64_t vcpu_tsc_offset(struct vm *vm, int vcpuid, bool phys_adj);
hrtime_t vm_normalize_hrtime(struct vm *, hrtime_t);
hrtime_t vm_denormalize_hrtime(struct vm *, hrtime_t);
uint64_t vm_get_freq_multiplier(struct vm *);
static __inline bool
vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
{
	return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
}

#ifdef _SYS_THREAD_H
static __inline int
vcpu_should_yield(struct vm *vm, int vcpu)
{
	if (curthread->t_astflag)
		return (1);
	else if (CPU->cpu_runrun)
		return (1);
	else
		return (0);
}
#endif /* _SYS_THREAD_H */
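
/*
 * Illustrative sketch (not part of this interface): backend run loops are
 * expected to poll vcpu_should_yield() between guest entries so that host
 * ASTs and preemption are not starved. When it fires, the exit can be
 * recorded via vm_exit_astpending() before surrendering the cpu; 'rip'
 * here stands for the current guest instruction pointer.
 *
 *	if (vcpu_should_yield(vm, vcpu)) {
 *		vm_exit_astpending(vm, vcpu, rip);
 *		break;
 *	}
 */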

typedef enum vcpu_notify {
	VCPU_NOTIFY_NONE,
	VCPU_NOTIFY_APIC,	/* Posted intr notification (if possible) */
	VCPU_NOTIFY_EXIT,	/* IPI to cause VM exit */
} vcpu_notify_t;

void *vcpu_stats(struct vm *vm, int vcpu);
void vcpu_notify_event(struct vm *vm, int vcpuid);
void vcpu_notify_event_type(struct vm *vm, int vcpuid, vcpu_notify_t);
void *vm_get_cookie(struct vm *);
struct vmspace *vm_get_vmspace(struct vm *vm);
struct vm_client *vm_get_vmclient(struct vm *vm, int vcpuid);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
struct vpmtmr *vm_pmtmr(struct vm *vm);
struct vrtc *vm_rtc(struct vm *vm);

/*
 * Inject exception 'vector' into the guest vcpu. This function returns 0 on
 * success and non-zero on failure.
 *
 * Wrapper functions like 'vm_inject_gp()' should be preferred to calling
 * this function directly because they enforce the trap-like or fault-like
 * behavior of an exception.
 *
 * This function should only be called in the context of the thread that is
 * executing this vcpu.
 */
int vm_inject_exception(struct vm *vm, int vcpuid, uint8_t vector,
    bool err_valid, uint32_t errcode, bool restart_instruction);
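
/*
 * Illustrative sketch (not part of this interface): a fault-like wrapper
 * in the style of vm_inject_gp() supplies the vector and error code and
 * requests instruction restart, which is what gives a fault its
 * re-execute-the-faulting-instruction behavior (IDT_GP comes from the
 * included x86/segments.h):
 *
 *	void
 *	example_inject_gp(struct vm *vm, int vcpuid)
 *	{
 *		(void) vm_inject_exception(vm, vcpuid, IDT_GP,
 *		    true, 0, true);
 *	}
 */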

/*
 * This function is called after a VM-exit that occurred during exception or
 * interrupt delivery through the IDT. The format of 'intinfo' is described
 * in Figure 15-1, "EXITINTINFO for All Intercepts", APM, Vol 2.
 *
 * If a VM-exit handler completes the event delivery successfully then it
 * should call vm_exit_intinfo() to extinguish the pending event. For
 * example, if the task switch emulation is triggered via a task gate then
 * it should call this function with 'intinfo=0' to indicate that the
 * external event is not pending anymore.
 *
 * Return value is 0 on success and non-zero on failure.
 */
int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);

/*
 * This function is called before every VM-entry to retrieve a pending
 * event that should be injected into the guest. This function combines
 * nested events into a double or triple fault.
 *
 * Returns false if there are no events that need to be injected into the
 * guest.
 */
bool vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);

int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);
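
/*
 * Illustrative sketch (not part of this interface): the expected pairing
 * of the intinfo functions above, as seen from a hypothetical backend run
 * loop. On entry, any event pending from a prior exit (possibly combined
 * into a double fault) is fetched and injected; on an exit which
 * interrupted event delivery, the EXITINTINFO value is stashed so the
 * event is retried on the next entry. The example_* names are assumed.
 *
 *	uint64_t intinfo;
 *
 *	if (vm_entry_intinfo(vm, vcpuid, &intinfo))
 *		example_backend_inject(vmi, vcpuid, intinfo);
 *
 *	(guest runs, VM-exit occurs)
 *
 *	if (example_exit_during_event_delivery(vmi, vcpuid))
 *		(void) vm_exit_intinfo(vm, vcpuid, exitintinfo);
 */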

enum vm_reg_name vm_segment_name(int seg_encoding);

struct vm_copyinfo {
	uint64_t	gpa;
	size_t		len;
	int		prot;
	void		*hva;
	void		*cookie;
};

/*
 * Set up 'copyinfo[]' to copy to/from guest linear address space starting
 * at 'gla' and 'len' bytes long. The 'prot' should be set to PROT_READ for
 * a copyin or PROT_WRITE for a copyout.
 *
 * retval	is_fault	Interpretation
 *   0		   0		Success
 *   0		   1		An exception was injected into the guest
 * EFAULT	  N/A		Unrecoverable error
 *
 * The 'copyinfo[]' can be passed to 'vm_copyin()' or 'vm_copyout()' only if
 * the return value is 0. The 'copyinfo[]' resources should be freed by
 * calling 'vm_copy_teardown()' after the copy is done.
 */
int vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    uint_t num_copyinfo, int *is_fault);
void vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    uint_t num_copyinfo);
void vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    void *kaddr, size_t len);
void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len);
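
/*
 * Illustrative sketch (not part of this interface): a guest-linear copyin
 * following the contract above. Sizing copyinfo[] at two entries covers a
 * region straddling one page boundary; 'paging', 'gla', 'len', and 'buf'
 * are assumed to be supplied by the caller.
 *
 *	struct vm_copyinfo copyinfo[2];
 *	int error, fault;
 *
 *	error = vm_copy_setup(vm, vcpuid, paging, gla, len, PROT_READ,
 *	    copyinfo, 2, &fault);
 *	if (error == 0 && fault == 0) {
 *		vm_copyin(vm, vcpuid, copyinfo, buf, len);
 *		vm_copy_teardown(vm, vcpuid, copyinfo, 2);
 *	}
 *
 * Per the table above, error == 0 with fault != 0 means an exception was
 * injected into the guest, while EFAULT is unrecoverable.
 */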

int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
int vcpu_trap_wbinvd(struct vm *vm, int vcpuid);

void vm_inject_ud(struct vm *vm, int vcpuid);
void vm_inject_gp(struct vm *vm, int vcpuid);
void vm_inject_ac(struct vm *vm, int vcpuid, uint32_t errcode);
void vm_inject_ss(struct vm *vm, int vcpuid, uint32_t errcode);
void vm_inject_pf(struct vm *vm, int vcpuid, uint32_t errcode, uint64_t cr2);

/*
 * Both SVM and VMX have complex logic for injecting events such as exceptions
 * or interrupts into the guest. Within those two backends, the progress of
 * event injection is tracked by event_inject_state, hopefully making it easier
 * to reason about.
 */
enum event_inject_state {
	EIS_CAN_INJECT = 0,	/* exception/interrupt can be injected */
	EIS_EV_EXISTING = 1,	/* blocked by existing event */
	EIS_EV_INJECTED = 2,	/* blocked by injected event */
	EIS_GI_BLOCK = 3,	/* blocked by guest interruptibility */

	/*
	 * Flag to request an immediate exit from VM context after event
	 * injection in order to perform more processing.
	 */
	EIS_REQ_EXIT = (1 << 15),
};
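
/*
 * Illustrative sketch (not part of this interface): how a backend entry
 * path might consume event_inject_state. The low values describe why
 * injection is (or is not) possible, while EIS_REQ_EXIT is a flag OR-ed
 * on top, so it is masked off before comparing. The example_* functions
 * are hypothetical.
 *
 *	enum event_inject_state eis;
 *
 *	eis = example_inject_events(vmi, vcpu);
 *	if ((eis & EIS_REQ_EXIT) != 0)
 *		example_force_immediate_exit(vmi, vcpu);
 *	if ((eis & ~EIS_REQ_EXIT) == EIS_CAN_INJECT)
 *		example_consider_pending_interrupts(vmi, vcpu);
 */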

/* Possible result codes for MSR access emulation */
typedef enum vm_msr_result {
	VMR_OK = 0,		/* successfully emulated */
	VMR_GP = 1,		/* #GP should be injected */
	VMR_UNHANLDED = 2,	/* handle in userspace, kernel cannot emulate */
} vm_msr_result_t;
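
/*
 * Illustrative sketch (not part of this interface): an rdmsr-style
 * emulation path mapping onto the result codes above. The MSR constants
 * and helpers are hypothetical.
 *
 *	static vm_msr_result_t
 *	example_rdmsr(struct vm *vm, int vcpuid, uint32_t msr, uint64_t *valp)
 *	{
 *		switch (msr) {
 *		case MSR_EXAMPLE:
 *			*valp = example_msr_value(vm, vcpuid);
 *			return (VMR_OK);
 *		case MSR_EXAMPLE_WRITE_ONLY:
 *			return (VMR_GP);
 *		default:
 *			return (VMR_UNHANLDED);
 *		}
 *	}
 */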

enum vm_cpuid_capability {
	VCC_NONE,
	VCC_NO_EXECUTE,
	VCC_FFXSR,
	VCC_TCE,
	VCC_LAST
};

/* Possible flags and entry count limit defined in sys/vmm.h */
typedef struct vcpu_cpuid_config {
	uint32_t		vcc_flags;
	uint32_t		vcc_nent;
	struct vcpu_cpuid_entry	*vcc_entries;
} vcpu_cpuid_config_t;

vcpu_cpuid_config_t *vm_cpuid_config(struct vm *, int);
int vm_get_cpuid(struct vm *, int, vcpu_cpuid_config_t *);
int vm_set_cpuid(struct vm *, int, const vcpu_cpuid_config_t *);
void vcpu_emulate_cpuid(struct vm *, int, uint64_t *, uint64_t *, uint64_t *,
    uint64_t *);
void legacy_emulate_cpuid(struct vm *, int, uint32_t *, uint32_t *, uint32_t *,
    uint32_t *);
void vcpu_cpuid_init(vcpu_cpuid_config_t *);
void vcpu_cpuid_cleanup(vcpu_cpuid_config_t *);
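
/*
 * Illustrative sketch (not part of this interface, and the lifecycle
 * shown is an assumption): initializing a vcpu_cpuid_config_t, applying
 * it to a vcpu, and releasing its entry storage. The legal flag values
 * and the entry count limit are defined in sys/vmm.h.
 *
 *	vcpu_cpuid_config_t cfg;
 *
 *	vcpu_cpuid_init(&cfg);
 *	(populate cfg.vcc_flags, cfg.vcc_nent, and cfg.vcc_entries)
 *	if (vm_set_cpuid(vm, vcpuid, &cfg) != 0)
 *		example_handle_error();
 *	vcpu_cpuid_cleanup(&cfg);
 */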

bool vm_cpuid_capability(struct vm *, int, enum vm_cpuid_capability);
bool validate_guest_xcr0(uint64_t, uint64_t);

void vmm_sol_glue_init(void);
void vmm_sol_glue_cleanup(void);

void *vmm_contig_alloc(size_t);
void vmm_contig_free(void *, size_t);

int vmm_mod_load(void);
int vmm_mod_unload(void);

bool vmm_check_iommu(void);

void vmm_call_trap(uint64_t);

uint64_t vmm_host_tsc_delta(void);

/*
 * Because of tangled headers, this is not exposed directly via the vmm_drv
 * interface, but rather mirrored as vmm_drv_iop_cb_t in vmm_drv.h.
 */
typedef int (*ioport_handler_t)(void *, bool, uint16_t, uint8_t, uint32_t *);

int vm_ioport_access(struct vm *vm, int vcpuid, bool in, uint16_t port,
    uint8_t bytes, uint32_t *val);

int vm_ioport_attach(struct vm *vm, uint16_t port, ioport_handler_t func,
    void *arg, void **cookie);
int vm_ioport_detach(struct vm *vm, void **cookie, ioport_handler_t *old_func,
    void **old_arg);

int vm_ioport_hook(struct vm *, uint16_t, ioport_handler_t, void *, void **);
void vm_ioport_unhook(struct vm *, void **);
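
/*
 * Illustrative sketch (not part of this interface): a handler matching
 * ioport_handler_t, attached with vm_ioport_attach(). 'in' selects the
 * direction; the handler fills *val on reads and consumes it on writes.
 * The device state type, port constant, and field names are hypothetical.
 *
 *	static int
 *	example_ioport(void *arg, bool in, uint16_t port, uint8_t bytes,
 *	    uint32_t *val)
 *	{
 *		struct example_dev *dev = arg;
 *
 *		if (bytes != 1)
 *			return (EINVAL);
 *		if (in)
 *			*val = dev->status;
 *		else
 *			dev->command = (uint8_t)*val;
 *		return (0);
 *	}
 *
 * Attached with the cookie retained for a later vm_ioport_detach():
 *
 *	error = vm_ioport_attach(vm, EXAMPLE_PORT, example_ioport, dev,
 *	    &cookie);
 */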

enum vcpu_ustate {
	VU_INIT = 0,	/* initialized but has not yet attempted to run */
	VU_RUN,		/* running in guest context */
	VU_IDLE,	/* idle (HLTed, wait-for-SIPI, etc) */
	VU_EMU_KERN,	/* emulation performed in-kernel */
	VU_EMU_USER,	/* emulation performed in userspace */
	VU_SCHED,	/* off-cpu for interrupt, preempt, lock contention */
	VU_MAX
};

void vcpu_ustate_change(struct vm *, int, enum vcpu_ustate);

typedef struct vmm_kstats {
	kstat_named_t	vk_name;
} vmm_kstats_t;

typedef struct vmm_vcpu_kstats {
	kstat_named_t	vvk_vcpu;
	kstat_named_t	vvk_time_init;
	kstat_named_t	vvk_time_run;
	kstat_named_t	vvk_time_idle;
	kstat_named_t	vvk_time_emu_kern;
	kstat_named_t	vvk_time_emu_user;
	kstat_named_t	vvk_time_sched;
} vmm_vcpu_kstats_t;

#define	VMM_KSTAT_CLASS	"misc"

int vmm_kstat_update_vcpu(struct kstat *, int);

typedef struct vmm_data_req {
	uint16_t	vdr_class;
	uint16_t	vdr_version;
	uint32_t	vdr_flags;
	uint32_t	vdr_len;
	void		*vdr_data;
	uint32_t	*vdr_result_len;
	int		vdr_vcpuid;
} vmm_data_req_t;

typedef int (*vmm_data_writef_t)(void *, const vmm_data_req_t *);
typedef int (*vmm_data_readf_t)(void *, const vmm_data_req_t *);
typedef int (*vmm_data_vcpu_writef_t)(struct vm *, int, const vmm_data_req_t *);
typedef int (*vmm_data_vcpu_readf_t)(struct vm *, int, const vmm_data_req_t *);

typedef struct vmm_data_version_entry {
	uint16_t	vdve_class;
	uint16_t	vdve_version;

	/*
	 * If these handlers accept/emit a single item of a fixed length, it
	 * should be specified in vdve_len_expect. The vmm-data logic will then
	 * ensure that requests possess at least that specified length before
	 * calling into the defined handlers.
	 */
	uint16_t	vdve_len_expect;

	/*
	 * For handlers which deal with (potentially) multiple items of a fixed
	 * length, vdve_len_per_item is used to hint (via the VDC_VERSION class)
	 * to userspace what that item size is. Although not strictly mutually
	 * exclusive with vdve_len_expect, it is nonsensical to set them both.
	 */
	uint16_t	vdve_len_per_item;

	/*
	 * A vmm-data handler is expected to provide read/write functions which
	 * are either VM-wide (via vdve_readf and vdve_writef) or per-vCPU
	 * (via vdve_vcpu_readf and vdve_vcpu_writef). Providing both is not
	 * allowed (but is not currently checked at compile time).
	 */

	/* VM-wide handlers */
	vmm_data_readf_t	vdve_readf;
	vmm_data_writef_t	vdve_writef;

	/* Per-vCPU handlers */
	vmm_data_vcpu_readf_t	vdve_vcpu_readf;
	vmm_data_vcpu_writef_t	vdve_vcpu_writef;

	/*
	 * The vdve_vcpu_readf/writef handlers can rely on vcpuid to be within
	 * the [0, VM_MAXCPU) bounds. If they also can handle vcpuid == -1 (for
	 * VM-wide data), then they can opt into such cases by setting
	 * vdve_vcpu_wildcard to true.
	 *
	 * At a later time, it would make sense to improve the logic so a
	 * vmm-data class could define both the VM-wide and per-vCPU handlers,
	 * letting the incoming vcpuid determine which would be called. Until
	 * then, vdve_vcpu_wildcard is the stopgap.
	 */
	bool		vdve_vcpu_wildcard;
} vmm_data_version_entry_t;

#define	VMM_DATA_VERSION(sym)	SET_ENTRY(vmm_data_version_entries, sym)

int vmm_data_read(struct vm *, const vmm_data_req_t *);
int vmm_data_write(struct vm *, const vmm_data_req_t *);
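
/*
 * Illustrative sketch (not part of this interface): defining and
 * registering a VM-wide vmm-data class. The class constant, payload
 * struct, and handler internals are hypothetical; real class identifiers
 * live in sys/vmm_data.h.
 *
 *	static int
 *	example_data_read(void *arg, const vmm_data_req_t *req)
 *	{
 *		struct example_payload *out = req->vdr_data;
 *
 *		example_fill_payload(arg, out);
 *		*req->vdr_result_len = sizeof (struct example_payload);
 *		return (0);
 *	}
 *
 *	static const vmm_data_version_entry_t example_v1 = {
 *		.vdve_class = VDC_EXAMPLE,
 *		.vdve_version = 1,
 *		.vdve_len_expect = sizeof (struct example_payload),
 *		.vdve_readf = example_data_read,
 *		.vdve_writef = example_data_write,
 *	};
 *	VMM_DATA_VERSION(example_v1);
 */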

/*
 * TSC Scaling
 */
uint64_t vmm_calc_freq_multiplier(uint64_t guest_hz, uint64_t host_hz,
    uint32_t frac);

/* represents a multiplier for a guest in which no scaling is required */
#define	VM_TSCM_NOSCALE	0
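
/*
 * Illustrative sketch (not part of this interface): callers of
 * vm_get_freq_multiplier() can treat VM_TSCM_NOSCALE as "guest TSC runs
 * at host frequency". The example_program_tsc_multiplier() helper is
 * hypothetical.
 *
 *	uint64_t mult = vm_get_freq_multiplier(vm);
 *
 *	if (mult != VM_TSCM_NOSCALE)
 *		example_program_tsc_multiplier(vmi, mult);
 */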

#endif /* _VMM_KERNEL_H_ */