/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2024 Oxide Computer Company
 * Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
 */

#ifndef _VMM_KERNEL_H_
#define	_VMM_KERNEL_H_

#include <sys/sdt.h>
#include <x86/segments.h>
#include <sys/vmm.h>
#include <sys/vmm_data.h>
#include <sys/linker_set.h>

SDT_PROVIDER_DECLARE(vmm);

struct vm;
struct vm_exception;
struct seg_desc;
struct vm_exit;
struct vie;
struct vm_run;
struct vhpet;
struct vioapic;
struct vlapic;
struct vmspace;
struct vm_client;
struct vm_object;
struct vm_guest_paging;
struct vmm_data_req;

/* Return values for architecture-specific calculation of the TSC multiplier */
typedef enum {
	FR_VALID,			/* valid multiplier, scaling needed */
	FR_SCALING_NOT_NEEDED,		/* scaling not required */
	FR_SCALING_NOT_SUPPORTED,	/* scaling not supported by platform */
	FR_OUT_OF_RANGE,		/* freq ratio out of supported range */
} freqratio_res_t;

typedef int	(*vmm_init_func_t)(void);
typedef int	(*vmm_cleanup_func_t)(void);
typedef void	(*vmm_resume_func_t)(void);
typedef void *	(*vmi_init_func_t)(struct vm *vm);
typedef int	(*vmi_run_func_t)(void *vmi, int vcpu, uint64_t rip);
typedef void	(*vmi_cleanup_func_t)(void *vmi);
typedef int	(*vmi_get_register_t)(void *vmi, int vcpu, int num,
    uint64_t *retval);
typedef int	(*vmi_set_register_t)(void *vmi, int vcpu, int num,
    uint64_t val);
typedef int	(*vmi_get_desc_t)(void *vmi, int vcpu, int num,
    struct seg_desc *desc);
typedef int	(*vmi_set_desc_t)(void *vmi, int vcpu, int num,
    const struct seg_desc *desc);
typedef int	(*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int	(*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
typedef struct vlapic *(*vmi_vlapic_init)(void *vmi, int vcpu);
typedef void	(*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);
typedef void	(*vmi_savectx)(void *vmi, int vcpu);
typedef void	(*vmi_restorectx)(void *vmi, int vcpu);
typedef void	(*vmi_pause_t)(void *vmi, int vcpu);

typedef int	(*vmi_get_msr_t)(void *vmi, int vcpu, uint32_t msr,
    uint64_t *valp);
typedef int	(*vmi_set_msr_t)(void *vmi, int vcpu, uint32_t msr,
    uint64_t val);
typedef freqratio_res_t	(*vmi_freqratio_t)(uint64_t guest_hz,
    uint64_t host_hz, uint64_t *mult);

struct vmm_ops {
	vmm_init_func_t		init;		/* module wide initialization */
	vmm_cleanup_func_t	cleanup;
	vmm_resume_func_t	resume;

	vmi_init_func_t		vminit;		/* vm-specific initialization */
	vmi_run_func_t		vmrun;
	vmi_cleanup_func_t	vmcleanup;
	vmi_get_register_t	vmgetreg;
	vmi_set_register_t	vmsetreg;
	vmi_get_desc_t		vmgetdesc;
	vmi_set_desc_t		vmsetdesc;
	vmi_get_cap_t		vmgetcap;
	vmi_set_cap_t		vmsetcap;
	vmi_vlapic_init		vlapic_init;
	vmi_vlapic_cleanup	vlapic_cleanup;
	vmi_pause_t		vmpause;

	vmi_savectx		vmsavectx;
	vmi_restorectx		vmrestorectx;

	vmi_get_msr_t		vmgetmsr;
	vmi_set_msr_t		vmsetmsr;

	vmi_freqratio_t		vmfreqratio;
	uint32_t		fr_intsize;
	uint32_t		fr_fracsize;
};

extern struct vmm_ops vmm_ops_intel;
extern struct vmm_ops vmm_ops_amd;
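
/*
 * Illustrative sketch (not part of this interface): a backend satisfies the
 * interface above by exporting a filled-in vmm_ops table, and the generic
 * layer dispatches through whichever table matches the host CPU vendor
 * (vmm_ops_intel or vmm_ops_amd).  The 'example_*' names below are
 * hypothetical stand-ins for a backend's actual functions; a real backend
 * provides every member.
 *
 *	struct vmm_ops vmm_ops_example = {
 *		.init		= example_modinit,
 *		.cleanup	= example_modcleanup,
 *		.resume		= example_modresume,
 *		.vminit		= example_vminit,
 *		.vmrun		= example_vmrun,
 *		.vmcleanup	= example_vmcleanup,
 *		.vmgetreg	= example_getreg,
 *		.vmsetreg	= example_setreg,
 *	};
 */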

int vm_create(uint64_t flags, struct vm **retvm);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm, uint64_t);
uint16_t vm_get_maxcpus(struct vm *vm);
void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus);
int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus);

int vm_pause_instance(struct vm *);
int vm_resume_instance(struct vm *);
bool vm_is_paused(struct vm *);

/*
 * APIs that race against hardware.
 */
int vm_track_dirty_pages(struct vm *, uint64_t, size_t, uint8_t *);
int vm_npt_do_operation(struct vm *, uint64_t, size_t, uint32_t, uint8_t *,
    int *);

/*
 * APIs that modify the guest memory map require all vcpus to be frozen.
 */
int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot, int flags);
int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem);
void vm_free_memseg(struct vm *vm, int ident);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_assign_pptdev(struct vm *vm, int pptfd);
int vm_unassign_pptdev(struct vm *vm, int pptfd);

/*
 * APIs that inspect the guest memory map require only a *single* vcpu to
 * be frozen. This acts like a read lock on the guest memory map since any
 * modification requires *all* vcpus to be frozen.
 */
int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    struct vm_object **objptr);
vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
bool vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa);
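
/*
 * Illustrative sketch (assumed usage, not part of this interface): walking
 * the guest memory map with vm_mmap_getnext().  Passing in the last gpa seen
 * (starting from 0) yields the next mapping at or above it; advancing by the
 * returned length visits each mapping in turn.
 *
 *	static void
 *	example_walk_mmaps(struct vm *vm)
 *	{
 *		vm_paddr_t gpa = 0;
 *		vm_ooffset_t segoff;
 *		size_t len;
 *		int segid, prot, flags;
 *
 *		while (vm_mmap_getnext(vm, &gpa, &segid, &segoff, &len,
 *		    &prot, &flags) == 0) {
 *			cmn_err(CE_NOTE, "gpa=%lx len=%lx segid=%d",
 *			    (ulong_t)gpa, (ulong_t)len, segid);
 *			gpa += len;
 *		}
 *	}
 *
 * Per the comment above, the caller must hold at least one vcpu frozen for
 * the duration of the walk.
 */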

int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    const struct seg_desc *desc);
int vm_get_run_state(struct vm *vm, int vcpuid, uint32_t *state,
    uint8_t *sipi_vec);
int vm_set_run_state(struct vm *vm, int vcpuid, uint32_t state,
    uint8_t sipi_vec);
int vm_get_fpu(struct vm *vm, int vcpuid, void *buf, size_t len);
int vm_set_fpu(struct vm *vm, int vcpuid, void *buf, size_t len);
int vm_run(struct vm *vm, int vcpuid, const struct vm_entry *);
int vm_suspend(struct vm *, enum vm_suspend_how, int);
int vm_inject_nmi(struct vm *vm, int vcpu);
bool vm_nmi_pending(struct vm *vm, int vcpuid);
void vm_nmi_clear(struct vm *vm, int vcpuid);
int vm_inject_extint(struct vm *vm, int vcpu);
bool vm_extint_pending(struct vm *vm, int vcpuid);
void vm_extint_clear(struct vm *vm, int vcpuid);
int vm_inject_init(struct vm *vm, int vcpuid);
int vm_inject_sipi(struct vm *vm, int vcpuid, uint8_t vec);
struct vlapic *vm_lapic(struct vm *vm, int cpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
int vm_apicid2vcpuid(struct vm *vm, int apicid);
int vm_activate_cpu(struct vm *vm, int vcpu);
int vm_suspend_cpu(struct vm *vm, int vcpu);
int vm_resume_cpu(struct vm *vm, int vcpu);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
struct vie *vm_vie_ctx(struct vm *vm, int vcpuid);
void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_run_state(struct vm *vm, int vcpuid, uint64_t rip);
int vm_service_mmio_read(struct vm *vm, int cpuid, uint64_t gpa, uint64_t *rval,
    int rsize);
int vm_service_mmio_write(struct vm *vm, int cpuid, uint64_t gpa, uint64_t wval,
    int wsize);

#ifdef _SYS__CPUSET_H_
cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_debug_cpus(struct vm *vm);
#endif /* _SYS__CPUSET_H_ */

bool vcpu_entry_bailout_checks(struct vm *vm, int vcpuid, uint64_t rip);
bool vcpu_run_state_pending(struct vm *vm, int vcpuid);
int vcpu_arch_reset(struct vm *vm, int vcpuid, bool init_only);
int vm_vcpu_barrier(struct vm *, int);

/*
 * Return true if device indicated by bus/slot/func is supposed to be a
 * pci passthrough device.
 *
 * Return false otherwise.
 */
bool vmm_is_pptdev(int bus, int slot, int func);

void *vm_iommu_domain(struct vm *vm);

enum vcpu_state {
	VCPU_IDLE,
	VCPU_FROZEN,
	VCPU_RUNNING,
	VCPU_SLEEPING,
};

int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
    bool from_idle);
enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);
void vcpu_block_run(struct vm *, int);
void vcpu_unblock_run(struct vm *, int);

uint64_t vcpu_tsc_offset(struct vm *vm, int vcpuid, bool phys_adj);
hrtime_t vm_normalize_hrtime(struct vm *, hrtime_t);
hrtime_t vm_denormalize_hrtime(struct vm *, hrtime_t);
uint64_t vm_get_freq_multiplier(struct vm *);

static __inline bool
vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
{
	return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
}

#ifdef _SYS_THREAD_H
static __inline int
vcpu_should_yield(struct vm *vm, int vcpu)
{
	if (curthread->t_astflag)
		return (1);
	else if (CPU->cpu_runrun)
		return (1);
	else
		return (0);
}
#endif /* _SYS_THREAD_H */

typedef enum vcpu_notify {
	VCPU_NOTIFY_NONE,
	VCPU_NOTIFY_APIC,	/* Posted intr notification (if possible) */
	VCPU_NOTIFY_EXIT,	/* IPI to cause VM exit */
} vcpu_notify_t;

void *vcpu_stats(struct vm *vm, int vcpu);
void vcpu_notify_event(struct vm *vm, int vcpuid);
void vcpu_notify_event_type(struct vm *vm, int vcpuid, vcpu_notify_t);
struct vmspace *vm_get_vmspace(struct vm *vm);
struct vm_client *vm_get_vmclient(struct vm *vm, int vcpuid);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
struct vpmtmr *vm_pmtmr(struct vm *vm);
struct vrtc *vm_rtc(struct vm *vm);

/*
 * Inject exception 'vector' into the guest vcpu. This function returns 0 on
 * success and non-zero on failure.
 *
 * Wrapper functions like 'vm_inject_gp()' should be preferred to calling
 * this function directly because they enforce the trap-like or fault-like
 * behavior of an exception.
 *
 * This function should only be called in the context of the thread that is
 * executing this vcpu.
 */
int vm_inject_exception(struct vm *vm, int vcpuid, uint8_t vector,
    bool err_valid, uint32_t errcode, bool restart_instruction);
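
/*
 * Illustrative sketch (assumption): one plausible shape of a fault-like
 * wrapper such as vm_inject_ud() (declared below; the real wrappers are
 * implemented elsewhere in the module).  A fault has no error code and
 * restarts the faulting instruction so it re-executes once the fault has
 * been handled.
 *
 *	void
 *	vm_inject_ud(struct vm *vm, int vcpuid)
 *	{
 *		(void) vm_inject_exception(vm, vcpuid, IDT_UD,
 *		    false, 0, true);
 *	}
 *
 * IDT_UD comes from <x86/segments.h>, included above.
 */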

/*
 * This function is called after a VM-exit that occurred during exception or
 * interrupt delivery through the IDT. The format of 'intinfo' is described
 * in Figure 15-1, "EXITINTINFO for All Intercepts", APM, Vol 2.
 *
 * If a VM-exit handler completes the event delivery successfully then it
 * should call vm_exit_intinfo() to extinguish the pending event. For
 * example, if the task switch emulation is triggered via a task gate then
 * it should call this function with 'intinfo=0' to indicate that the
 * external event is not pending anymore.
 *
 * Return value is 0 on success and non-zero on failure.
 */
int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);

/*
 * This function is called before every VM-entry to retrieve a pending
 * event that should be injected into the guest. This function combines
 * nested events into a double or triple fault.
 *
 * Returns false if there are no events that need to be injected into the
 * guest.
 */
bool vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);

int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);

enum vm_reg_name vm_segment_name(int seg_encoding);

struct vm_copyinfo {
	uint64_t	gpa;
	size_t		len;
	int		prot;
	void		*hva;
	void		*cookie;
};

/*
 * Set up 'copyinfo[]' to copy to/from guest linear address space starting
 * at 'gla' and 'len' bytes long. The 'prot' should be set to PROT_READ for
 * a copyin or PROT_WRITE for a copyout.
 *
 * retval	is_fault	Interpretation
 *   0		   0		Success
 *   0		   1		An exception was injected into the guest
 * EFAULT	  N/A		Unrecoverable error
 *
 * The 'copyinfo[]' can be passed to 'vm_copyin()' or 'vm_copyout()' only if
 * the return value is 0. The 'copyinfo[]' resources should be freed by
 * calling 'vm_copy_teardown()' after the copy is done.
 */
int vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    uint_t num_copyinfo, int *is_fault);
void vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    uint_t num_copyinfo);
void vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    void *kaddr, size_t len);
void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len);
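
/*
 * Illustrative sketch (assumed usage): a copyin of 'len' bytes of guest
 * memory at linear address 'gla' into a kernel buffer 'buf'.  Two copyinfo
 * entries allow the region to straddle a page boundary.
 *
 *	struct vm_copyinfo copyinfo[2];
 *	int error, fault;
 *
 *	error = vm_copy_setup(vm, vcpuid, &paging, gla, len, PROT_READ,
 *	    copyinfo, 2, &fault);
 *	if (error == 0 && fault == 0) {
 *		vm_copyin(vm, vcpuid, copyinfo, buf, len);
 *		vm_copy_teardown(vm, vcpuid, copyinfo, 2);
 *	}
 *
 * Per the table above, 'fault != 0' means an exception was already injected
 * into the guest and no copy should be attempted; EFAULT is unrecoverable.
 */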

int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
int vcpu_trap_wbinvd(struct vm *vm, int vcpuid);

void vm_inject_ud(struct vm *vm, int vcpuid);
void vm_inject_gp(struct vm *vm, int vcpuid);
void vm_inject_ac(struct vm *vm, int vcpuid, uint32_t errcode);
void vm_inject_ss(struct vm *vm, int vcpuid, uint32_t errcode);
void vm_inject_pf(struct vm *vm, int vcpuid, uint32_t errcode, uint64_t cr2);

/*
 * Both SVM and VMX have complex logic for injecting events such as exceptions
 * or interrupts into the guest. Within those two backends, the progress of
 * event injection is tracked by event_inject_state, hopefully making it easier
 * to reason about.
 */
enum event_inject_state {
	EIS_CAN_INJECT	= 0,	/* exception/interrupt can be injected */
	EIS_EV_EXISTING	= 1,	/* blocked by existing event */
	EIS_EV_INJECTED	= 2,	/* blocked by injected event */
	EIS_GI_BLOCK	= 3,	/* blocked by guest interruptibility */

	/*
	 * Flag to request an immediate exit from VM context after event
	 * injection in order to perform more processing
	 */
	EIS_REQ_EXIT	= (1 << 15),
};
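
/*
 * Illustrative sketch (assumption): how a backend might act on these codes
 * when preparing a VM-entry.  'example_inject_events' is a hypothetical
 * stand-in for the backend's injection routine; EIS_REQ_EXIT is a flag
 * OR-ed onto the base state and must be masked off before comparison.
 *
 *	enum event_inject_state eis;
 *
 *	eis = example_inject_events(vmi, vcpu);
 *	if (eis & EIS_REQ_EXIT) {
 *		(arrange an immediate exit after VM-entry)
 *	}
 *	if ((eis & ~EIS_REQ_EXIT) != EIS_CAN_INJECT) {
 *		(injection is blocked: open an interrupt window so the
 *		pending event can be injected on a later entry)
 *	}
 */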

/* Possible result codes for MSR access emulation */
typedef enum vm_msr_result {
	VMR_OK		= 0,	/* successfully emulated */
	VMR_GP		= 1,	/* #GP should be injected */
	VMR_UNHANDLED	= 2,	/* handle in userspace, kernel cannot emulate */
} vm_msr_result_t;
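
/*
 * Illustrative sketch (assumption): how an rdmsr exit handler might map
 * these results onto guest-visible behavior.  'example_rdmsr' is a
 * hypothetical emulation helper returning a vm_msr_result_t.
 *
 *	uint64_t val;
 *
 *	switch (example_rdmsr(vm, vcpuid, msr, &val)) {
 *	case VMR_OK:
 *		(write 'val' back to the guest and advance %rip)
 *		break;
 *	case VMR_GP:
 *		vm_inject_gp(vm, vcpuid);
 *		break;
 *	case VMR_UNHANDLED:
 *		(forward the access to userspace for emulation)
 *		break;
 *	}
 */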

enum vm_cpuid_capability {
	VCC_NONE,
	VCC_NO_EXECUTE,
	VCC_FFXSR,
	VCC_TCE,
	VCC_LAST
};

/* Possible flags and entry count limit defined in sys/vmm.h */
typedef struct vcpu_cpuid_config {
	uint32_t		vcc_flags;
	uint32_t		vcc_nent;
	struct vcpu_cpuid_entry	*vcc_entries;
} vcpu_cpuid_config_t;

vcpu_cpuid_config_t *vm_cpuid_config(struct vm *, int);
int vm_get_cpuid(struct vm *, int, vcpu_cpuid_config_t *);
int vm_set_cpuid(struct vm *, int, const vcpu_cpuid_config_t *);
void vcpu_emulate_cpuid(struct vm *, int, uint64_t *, uint64_t *, uint64_t *,
    uint64_t *);
void legacy_emulate_cpuid(struct vm *, int, uint32_t *, uint32_t *, uint32_t *,
    uint32_t *);
void vcpu_cpuid_init(vcpu_cpuid_config_t *);
void vcpu_cpuid_cleanup(vcpu_cpuid_config_t *);

bool vm_cpuid_capability(struct vm *, int, enum vm_cpuid_capability);
bool validate_guest_xcr0(uint64_t, uint64_t);

void vmm_sol_glue_init(void);
void vmm_sol_glue_cleanup(void);

void *vmm_contig_alloc(size_t);
void vmm_contig_free(void *, size_t);

int vmm_mod_load(void);
int vmm_mod_unload(void);

bool vmm_check_iommu(void);

void vmm_call_trap(uint64_t);

uint64_t vmm_host_tsc_delta(void);

/*
 * Because of tangled headers, this is not exposed directly via the vmm_drv
 * interface, but rather mirrored as vmm_drv_iop_cb_t in vmm_drv.h.
 */
typedef int (*ioport_handler_t)(void *, bool, uint16_t, uint8_t, uint32_t *);

int vm_ioport_access(struct vm *vm, int vcpuid, bool in, uint16_t port,
    uint8_t bytes, uint32_t *val);

int vm_ioport_attach(struct vm *vm, uint16_t port, ioport_handler_t func,
    void *arg, void **cookie);
int vm_ioport_detach(struct vm *vm, void **cookie, ioport_handler_t *old_func,
    void **old_arg);

int vm_ioport_hook(struct vm *, uint16_t, ioport_handler_t, void *, void **);
void vm_ioport_unhook(struct vm *, void **);
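
/*
 * Illustrative sketch (assumption): hooking a port with a trivial handler.
 * The handler and names are hypothetical.  Matching the ioport_handler_t
 * signature, 'in' distinguishes reads from writes and 'val' carries the
 * data in either direction.
 *
 *	static int
 *	example_ioport(void *arg, bool in, uint16_t port, uint8_t bytes,
 *	    uint32_t *val)
 *	{
 *		if (in)
 *			*val = 0;
 *		(writes are silently ignored)
 *		return (0);
 *	}
 *
 *	void *cookie;
 *	if (vm_ioport_hook(vm, port, example_ioport, arg, &cookie) == 0) {
 *		(port is hooked until the cookie is released via)
 *		vm_ioport_unhook(vm, &cookie);
 *	}
 */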

enum vcpu_ustate {
	VU_INIT = 0,	/* initialized but has not yet attempted to run */
	VU_RUN,		/* running in guest context */
	VU_IDLE,	/* idle (HLTed, wait-for-SIPI, etc) */
	VU_EMU_KERN,	/* emulation performed in-kernel */
	VU_EMU_USER,	/* emulation performed in userspace */
	VU_SCHED,	/* off-cpu for interrupt, preempt, lock contention */
	VU_MAX
};

void vcpu_ustate_change(struct vm *, int, enum vcpu_ustate);

typedef struct vmm_kstats {
	kstat_named_t	vk_name;
} vmm_kstats_t;

typedef struct vmm_vcpu_kstats {
	kstat_named_t	vvk_vcpu;
	kstat_named_t	vvk_time_init;
	kstat_named_t	vvk_time_run;
	kstat_named_t	vvk_time_idle;
	kstat_named_t	vvk_time_emu_kern;
	kstat_named_t	vvk_time_emu_user;
	kstat_named_t	vvk_time_sched;
} vmm_vcpu_kstats_t;

#define	VMM_KSTAT_CLASS	"misc"

int vmm_kstat_update_vcpu(struct kstat *, int);

typedef struct vmm_data_req {
	uint16_t	vdr_class;
	uint16_t	vdr_version;
	uint32_t	vdr_flags;
	uint32_t	vdr_len;
	void		*vdr_data;
	uint32_t	*vdr_result_len;
	int		vdr_vcpuid;
} vmm_data_req_t;

typedef int (*vmm_data_writef_t)(void *, const vmm_data_req_t *);
typedef int (*vmm_data_readf_t)(void *, const vmm_data_req_t *);
typedef int (*vmm_data_vcpu_writef_t)(struct vm *, int, const vmm_data_req_t *);
typedef int (*vmm_data_vcpu_readf_t)(struct vm *, int, const vmm_data_req_t *);

typedef struct vmm_data_version_entry {
	uint16_t	vdve_class;
	uint16_t	vdve_version;

	/*
	 * If these handlers accept/emit a single item of a fixed length, it
	 * should be specified in vdve_len_expect. The vmm-data logic will then
	 * ensure that requests possess at least that specified length before
	 * calling into the defined handlers.
	 */
	uint16_t	vdve_len_expect;

	/*
	 * For handlers which deal with (potentially) multiple items of a fixed
	 * length, vdve_len_per_item is used to hint (via the VDC_VERSION class)
	 * to userspace what that item size is. Although not strictly mutually
	 * exclusive with vdve_len_expect, it is nonsensical to set them both.
	 */
	uint16_t	vdve_len_per_item;

	/*
	 * A vmm-data handler is expected to provide read/write functions which
	 * are either VM-wide (via vdve_readf and vdve_writef) or per-vCPU
	 * (via vdve_vcpu_readf and vdve_vcpu_writef). Providing both is not
	 * allowed (but is not currently checked at compile time).
	 */

	/* VM-wide handlers */
	vmm_data_readf_t	vdve_readf;
	vmm_data_writef_t	vdve_writef;

	/* Per-vCPU handlers */
	vmm_data_vcpu_readf_t	vdve_vcpu_readf;
	vmm_data_vcpu_writef_t	vdve_vcpu_writef;

	/*
	 * The vdve_vcpu_readf/writef handlers can rely on vcpuid to be within
	 * the [0, VM_MAXCPU) bounds. If they also can handle vcpuid == -1 (for
	 * VM-wide data), then they can opt into such cases by setting
	 * vdve_vcpu_wildcard to true.
	 *
	 * At a later time, it would make sense to improve the logic so a
	 * vmm-data class could define both the VM-wide and per-vCPU handlers,
	 * letting the incoming vcpuid determine which would be called. Until
	 * then, vdve_vcpu_wildcard is the stopgap.
	 */
	bool		vdve_vcpu_wildcard;
} vmm_data_version_entry_t;

#define	VMM_DATA_VERSION(sym)	SET_ENTRY(vmm_data_version_entries, sym)

int vmm_data_read(struct vm *, const vmm_data_req_t *);
int vmm_data_write(struct vm *, const vmm_data_req_t *);
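
/*
 * Illustrative sketch (assumption): defining and registering a VM-wide
 * vmm-data handler.  The class value, payload struct, and handler functions
 * here are hypothetical.
 *
 *	static const vmm_data_version_entry_t example_v1 = {
 *		.vdve_class = VDC_EXAMPLE,
 *		.vdve_version = 1,
 *		.vdve_len_expect = sizeof (struct example_data_v1),
 *		.vdve_readf = example_data_read,
 *		.vdve_writef = example_data_write,
 *	};
 *	VMM_DATA_VERSION(example_v1);
 *
 * Requests arriving via vmm_data_read()/vmm_data_write() with a matching
 * class and version, and a payload of at least vdve_len_expect bytes, are
 * then routed to the registered handlers.
 */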

/*
 * TSC Scaling
 */
uint64_t vmm_calc_freq_multiplier(uint64_t guest_hz, uint64_t host_hz,
    uint32_t frac);

/* represents a multiplier for a guest in which no scaling is required */
#define	VM_TSCM_NOSCALE	0

#endif /* _VMM_KERNEL_H_ */