/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2023 Oxide Computer Company
 * Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
 */
#ifndef _VMM_KERNEL_H_
#define	_VMM_KERNEL_H_

#include <sys/sdt.h>
#include <x86/segments.h>
#include <sys/vmm.h>
#include <sys/vmm_data.h>
#include <sys/linker_set.h>

SDT_PROVIDER_DECLARE(vmm);

struct vm;
struct vm_exception;
struct seg_desc;
struct vm_exit;
struct vie;
struct vm_run;
struct vhpet;
struct vioapic;
struct vlapic;
struct vmspace;
struct vm_client;
struct vm_object;
struct vm_guest_paging;
struct vmm_data_req;

/* Return values for architecture-specific calculation of the TSC multiplier */
typedef enum {
	FR_VALID,			/* valid multiplier, scaling needed */
	FR_SCALING_NOT_NEEDED,		/* scaling not required */
	FR_SCALING_NOT_SUPPORTED,	/* scaling not supported by platform */
	FR_OUT_OF_RANGE,		/* freq ratio out of supported range */
} freqratio_res_t;

typedef int	(*vmm_init_func_t)(void);
typedef int	(*vmm_cleanup_func_t)(void);
typedef void	(*vmm_resume_func_t)(void);
typedef void *	(*vmi_init_func_t)(struct vm *vm);
typedef int	(*vmi_run_func_t)(void *vmi, int vcpu, uint64_t rip);
typedef void	(*vmi_cleanup_func_t)(void *vmi);
typedef int	(*vmi_get_register_t)(void *vmi, int vcpu, int num,
    uint64_t *retval);
typedef int	(*vmi_set_register_t)(void *vmi, int vcpu, int num,
    uint64_t val);
typedef int	(*vmi_get_desc_t)(void *vmi, int vcpu, int num,
    struct seg_desc *desc);
typedef int	(*vmi_set_desc_t)(void *vmi, int vcpu, int num,
    const struct seg_desc *desc);
typedef int	(*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int	(*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
typedef struct vlapic *(*vmi_vlapic_init)(void *vmi, int vcpu);
typedef void	(*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);
typedef void	(*vmi_savectx)(void *vmi, int vcpu);
typedef void	(*vmi_restorectx)(void *vmi, int vcpu);
typedef void	(*vmi_pause_t)(void *vmi, int vcpu);

typedef int	(*vmi_get_msr_t)(void *vmi, int vcpu, uint32_t msr,
    uint64_t *valp);
typedef int	(*vmi_set_msr_t)(void *vmi, int vcpu, uint32_t msr,
    uint64_t val);
typedef freqratio_res_t	(*vmi_freqratio_t)(uint64_t guest_hz,
    uint64_t host_hz, uint64_t *mult);

struct vmm_ops {
	vmm_init_func_t		init;		/* module wide initialization */
	vmm_cleanup_func_t	cleanup;
	vmm_resume_func_t	resume;

	vmi_init_func_t		vminit;		/* vm-specific initialization */
	vmi_run_func_t		vmrun;
	vmi_cleanup_func_t	vmcleanup;
	vmi_get_register_t	vmgetreg;
	vmi_set_register_t	vmsetreg;
	vmi_get_desc_t		vmgetdesc;
	vmi_set_desc_t		vmsetdesc;
	vmi_get_cap_t		vmgetcap;
	vmi_set_cap_t		vmsetcap;
	vmi_vlapic_init		vlapic_init;
	vmi_vlapic_cleanup	vlapic_cleanup;
	vmi_pause_t		vmpause;

	vmi_savectx		vmsavectx;
	vmi_restorectx		vmrestorectx;

	vmi_get_msr_t		vmgetmsr;
	vmi_set_msr_t		vmsetmsr;

	vmi_freqratio_t		vmfreqratio;
	uint32_t		fr_intsize;
	uint32_t		fr_fracsize;
};

extern struct vmm_ops vmm_ops_intel;
extern struct vmm_ops vmm_ops_amd;

int vm_create(uint64_t flags, struct vm **retvm);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm, uint64_t);
uint16_t vm_get_maxcpus(struct vm *vm);
void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus);
int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus);
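
/*
 * Illustrative sketch (not part of the original interface): the generic
 * layer picks a backend ops vector based on CPU vendor and dispatches
 * through it.  The 'is_intel_cpu', 'vcpuid', and 'rip' locals below are
 * hypothetical; only vmm_ops_intel/vmm_ops_amd come from this header.
 *
 *	struct vmm_ops *ops = is_intel_cpu ? &vmm_ops_intel : &vmm_ops_amd;
 *	if (ops->init() == 0) {
 *		void *cookie = ops->vminit(vm);
 *		(void) ops->vmrun(cookie, vcpuid, rip);
 *	}
 */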

int vm_pause_instance(struct vm *);
int vm_resume_instance(struct vm *);
bool vm_is_paused(struct vm *);

/*
 * APIs that race against hardware.
 */
int vm_track_dirty_pages(struct vm *, uint64_t, size_t, uint8_t *);

/*
 * APIs that modify the guest memory map require all vcpus to be frozen.
 */
int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot, int flags);
int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem);
void vm_free_memseg(struct vm *vm, int ident);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_assign_pptdev(struct vm *vm, int pptfd);
int vm_unassign_pptdev(struct vm *vm, int pptfd);

/*
 * APIs that inspect the guest memory map require only a *single* vcpu to
 * be frozen.  This acts like a read lock on the guest memory map, since any
 * modification requires *all* vcpus to be frozen.
 */
int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    struct vm_object **objptr);
vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
bool vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa);

int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    const struct seg_desc *desc);
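
/*
 * Illustrative usage sketch (an assumption, not from the original header):
 * with the vcpu frozen, a caller might read and advance the guest %rip via
 * the accessors above.  VM_REG_GUEST_RIP comes from sys/vmm.h; 'instr_len'
 * is a hypothetical local.
 *
 *	uint64_t rip;
 *	if (vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip) == 0) {
 *		(void) vm_set_register(vm, vcpuid, VM_REG_GUEST_RIP,
 *		    rip + instr_len);
 *	}
 */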

int vm_get_run_state(struct vm *vm, int vcpuid, uint32_t *state,
    uint8_t *sipi_vec);
int vm_set_run_state(struct vm *vm, int vcpuid, uint32_t state,
    uint8_t sipi_vec);
int vm_get_fpu(struct vm *vm, int vcpuid, void *buf, size_t len);
int vm_set_fpu(struct vm *vm, int vcpuid, void *buf, size_t len);
int vm_run(struct vm *vm, int vcpuid, const struct vm_entry *);
int vm_suspend(struct vm *, enum vm_suspend_how, int);
int vm_inject_nmi(struct vm *vm, int vcpu);
bool vm_nmi_pending(struct vm *vm, int vcpuid);
void vm_nmi_clear(struct vm *vm, int vcpuid);
int vm_inject_extint(struct vm *vm, int vcpu);
bool vm_extint_pending(struct vm *vm, int vcpuid);
void vm_extint_clear(struct vm *vm, int vcpuid);
int vm_inject_init(struct vm *vm, int vcpuid);
int vm_inject_sipi(struct vm *vm, int vcpuid, uint8_t vec);
struct vlapic *vm_lapic(struct vm *vm, int cpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
int vm_apicid2vcpuid(struct vm *vm, int apicid);
int vm_activate_cpu(struct vm *vm, int vcpu);
int vm_suspend_cpu(struct vm *vm, int vcpu);
int vm_resume_cpu(struct vm *vm, int vcpu);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
struct vie *vm_vie_ctx(struct vm *vm, int vcpuid);
void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_run_state(struct vm *vm, int vcpuid, uint64_t rip);
int vm_service_mmio_read(struct vm *vm, int cpuid, uint64_t gpa, uint64_t *rval,
    int rsize);
int vm_service_mmio_write(struct vm *vm, int cpuid, uint64_t gpa, uint64_t wval,
    int wsize);

#ifdef _SYS__CPUSET_H_
cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_debug_cpus(struct vm *vm);
#endif /* _SYS__CPUSET_H_ */

bool vcpu_entry_bailout_checks(struct vm *vm, int vcpuid, uint64_t rip);
bool vcpu_run_state_pending(struct vm *vm, int vcpuid);
int vcpu_arch_reset(struct vm *vm, int vcpuid, bool init_only);
int vm_vcpu_barrier(struct vm *, int);

/*
 * Return true if the device indicated by bus/slot/func is supposed to be a
 * PCI passthrough device; return false otherwise.
 */
bool vmm_is_pptdev(int bus, int slot, int func);

void *vm_iommu_domain(struct vm *vm);

enum vcpu_state {
	VCPU_IDLE,
	VCPU_FROZEN,
	VCPU_RUNNING,
	VCPU_SLEEPING,
};

int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
    bool from_idle);
enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);
void vcpu_block_run(struct vm *, int);
void vcpu_unblock_run(struct vm *, int);

uint64_t vcpu_tsc_offset(struct vm *vm, int vcpuid, bool phys_adj);
hrtime_t vm_normalize_hrtime(struct vm *, hrtime_t);
hrtime_t vm_denormalize_hrtime(struct vm *, hrtime_t);
uint64_t vm_get_freq_multiplier(struct vm *);

static __inline bool
vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
{
	return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
}

#ifdef _SYS_THREAD_H
/*
 * A vcpu thread should yield if it has an AST pending or if the dispatcher
 * has requested preemption of the current CPU.
 */
static __inline int
vcpu_should_yield(struct vm *vm, int vcpu)
{
	if (curthread->t_astflag)
		return (1);
	else if (CPU->cpu_runrun)
		return (1);
	else
		return (0);
}
#endif /* _SYS_THREAD_H */

typedef enum vcpu_notify {
	VCPU_NOTIFY_NONE,
	VCPU_NOTIFY_APIC,	/* Posted intr notification (if possible) */
	VCPU_NOTIFY_EXIT,	/* IPI to cause VM exit */
} vcpu_notify_t;

void *vcpu_stats(struct vm *vm, int vcpu);
void vcpu_notify_event(struct vm *vm, int vcpuid);
void vcpu_notify_event_type(struct vm *vm, int vcpuid, vcpu_notify_t);
struct vmspace *vm_get_vmspace(struct vm *vm);
struct vm_client *vm_get_vmclient(struct vm *vm, int vcpuid);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
struct vpmtmr *vm_pmtmr(struct vm *vm);
struct vrtc *vm_rtc(struct vm *vm);

/*
 * Inject exception 'vector' into the guest vcpu.  This function returns 0 on
 * success and non-zero on failure.
 *
 * Wrapper functions like 'vm_inject_gp()' should be preferred to calling
 * this function directly because they enforce the trap-like or fault-like
 * behavior of an exception.
 *
 * This function should only be called in the context of the thread that is
 * executing this vcpu.
 */
int vm_inject_exception(struct vm *vm, int vcpuid, uint8_t vector,
    bool err_valid, uint32_t errcode, bool restart_instruction);
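
/*
 * Illustrative sketch (an assumption, not from the original header): when
 * emulation detects a protection violation, the preferred form is the
 * wrapper declared further below:
 *
 *	vm_inject_gp(vm, vcpuid);
 *
 * rather than the equivalent direct call (IDT_GP is assumed to come from
 * x86/segments.h):
 *
 *	(void) vm_inject_exception(vm, vcpuid, IDT_GP, true, 0, true);
 */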

/*
 * This function is called after a VM-exit that occurred during exception or
 * interrupt delivery through the IDT.  The format of 'intinfo' is described
 * in Figure 15-1, "EXITINTINFO for All Intercepts", APM, Vol 2.
 *
 * If a VM-exit handler completes the event delivery successfully then it
 * should call vm_exit_intinfo() to extinguish the pending event.  For
 * example, if the task switch emulation is triggered via a task gate then
 * it should call this function with 'intinfo=0' to indicate that the
 * external event is not pending anymore.
 *
 * Return value is 0 on success and non-zero on failure.
 */
int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);

/*
 * This function is called before every VM-entry to retrieve a pending
 * event that should be injected into the guest.  This function combines
 * nested events into a double or triple fault.
 *
 * Returns false if there are no events that need to be injected into the
 * guest.
 */
bool vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);

int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);

enum vm_reg_name vm_segment_name(int seg_encoding);

struct vm_copyinfo {
	uint64_t	gpa;
	size_t		len;
	int		prot;
	void		*hva;
	void		*cookie;
};

/*
 * Set up 'copyinfo[]' to copy to/from guest linear address space starting
 * at 'gla' and spanning 'len' bytes.  The 'prot' should be set to PROT_READ
 * for a copyin or PROT_WRITE for a copyout.
 *
 * retval	is_fault	Interpretation
 *   0		   0		Success
 *   0		   1		An exception was injected into the guest
 * EFAULT	  N/A		Unrecoverable error
 *
 * The 'copyinfo[]' can be passed to 'vm_copyin()' or 'vm_copyout()' only if
 * the return value is 0.  The 'copyinfo[]' resources should be freed by
 * calling 'vm_copy_teardown()' after the copy is done.
 */
int vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    uint_t num_copyinfo, int *is_fault);
void vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    uint_t num_copyinfo);
void vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    void *kaddr, size_t len);
void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len);
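
/*
 * Illustrative usage sketch (an assumption, not from the original header):
 * a copyin of 'len' bytes from guest linear address 'gla' follows the
 * setup/copy/teardown pattern documented above.  'paging', 'gla', 'buf',
 * and 'len' are hypothetical locals.
 *
 *	struct vm_copyinfo copyinfo[2];
 *	int error, is_fault;
 *
 *	error = vm_copy_setup(vm, vcpuid, &paging, gla, len, PROT_READ,
 *	    copyinfo, 2, &is_fault);
 *	if (error == 0 && !is_fault) {
 *		vm_copyin(vm, vcpuid, copyinfo, buf, len);
 *		vm_copy_teardown(vm, vcpuid, copyinfo, 2);
 *	}
 */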

int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
int vcpu_trap_wbinvd(struct vm *vm, int vcpuid);

void vm_inject_ud(struct vm *vm, int vcpuid);
void vm_inject_gp(struct vm *vm, int vcpuid);
void vm_inject_ac(struct vm *vm, int vcpuid, uint32_t errcode);
void vm_inject_ss(struct vm *vm, int vcpuid, uint32_t errcode);
void vm_inject_pf(struct vm *vm, int vcpuid, uint32_t errcode, uint64_t cr2);

/*
 * Both SVM and VMX have complex logic for injecting events such as exceptions
 * or interrupts into the guest.  Within those two backends, the progress of
 * event injection is tracked by event_inject_state, hopefully making it
 * easier to reason about.
 */
enum event_inject_state {
	EIS_CAN_INJECT	= 0,	/* exception/interrupt can be injected */
	EIS_EV_EXISTING	= 1,	/* blocked by existing event */
	EIS_EV_INJECTED	= 2,	/* blocked by injected event */
	EIS_GI_BLOCK	= 3,	/* blocked by guest interruptibility */

	/*
	 * Flag to request an immediate exit from VM context after event
	 * injection in order to perform more processing
	 */
	EIS_REQ_EXIT	= (1 << 15),
};

/* Possible result codes for MSR access emulation */
typedef enum vm_msr_result {
	VMR_OK		= 0,	/* successfully emulated */
	VMR_GP		= 1,	/* #GP should be injected */
	VMR_UNHANDLED	= 2,	/* handle in userspace, kernel cannot emulate */
} vm_msr_result_t;
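
/*
 * Illustrative sketch (an assumption, not from the original header): an MSR
 * read handler maps its outcome onto the result codes above, returning
 * VMR_OK for a value emulated in-kernel, VMR_GP to have a #GP injected, and
 * VMR_UNHANDLED to punt emulation to userspace.  The function and MSR names
 * below are hypothetical.
 *
 *	static vm_msr_result_t
 *	example_rdmsr(struct vm *vm, int vcpuid, uint32_t msr, uint64_t *valp)
 *	{
 *		switch (msr) {
 *		case MSR_EXAMPLE:
 *			*valp = 0;
 *			return (VMR_OK);
 *		case MSR_RESERVED_EXAMPLE:
 *			return (VMR_GP);
 *		default:
 *			return (VMR_UNHANDLED);
 *		}
 *	}
 */

enum vm_cpuid_capability {
	VCC_NONE,
	VCC_NO_EXECUTE,
	VCC_FFXSR,
	VCC_TCE,
	VCC_LAST
};

/* Possible flags and entry count limit defined in sys/vmm.h */
typedef struct vcpu_cpuid_config {
	uint32_t		vcc_flags;
	uint32_t		vcc_nent;
	struct vcpu_cpuid_entry	*vcc_entries;
} vcpu_cpuid_config_t;

vcpu_cpuid_config_t *vm_cpuid_config(struct vm *, int);
int vm_get_cpuid(struct vm *, int, vcpu_cpuid_config_t *);
int vm_set_cpuid(struct vm *, int, const vcpu_cpuid_config_t *);
void vcpu_emulate_cpuid(struct vm *, int, uint64_t *, uint64_t *, uint64_t *,
    uint64_t *);
void legacy_emulate_cpuid(struct vm *, int, uint32_t *, uint32_t *, uint32_t *,
    uint32_t *);
void vcpu_cpuid_init(vcpu_cpuid_config_t *);
void vcpu_cpuid_cleanup(vcpu_cpuid_config_t *);

bool vm_cpuid_capability(struct vm *, int, enum vm_cpuid_capability);
bool validate_guest_xcr0(uint64_t, uint64_t);

void vmm_sol_glue_init(void);
void vmm_sol_glue_cleanup(void);

void *vmm_contig_alloc(size_t);
void vmm_contig_free(void *, size_t);

int vmm_mod_load(void);
int vmm_mod_unload(void);

bool vmm_check_iommu(void);

void vmm_call_trap(uint64_t);

uint64_t vmm_host_tsc_delta(void);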

/*
 * Because of tangled headers, this is not exposed directly via the vmm_drv
 * interface, but rather mirrored as vmm_drv_iop_cb_t in vmm_drv.h.
 */
typedef int (*ioport_handler_t)(void *, bool, uint16_t, uint8_t, uint32_t *);

int vm_ioport_access(struct vm *vm, int vcpuid, bool in, uint16_t port,
    uint8_t bytes, uint32_t *val);

int vm_ioport_attach(struct vm *vm, uint16_t port, ioport_handler_t func,
    void *arg, void **cookie);
int vm_ioport_detach(struct vm *vm, void **cookie, ioport_handler_t *old_func,
    void **old_arg);

int vm_ioport_hook(struct vm *, uint16_t, ioport_handler_t, void *, void **);
void vm_ioport_unhook(struct vm *, void **);

enum vcpu_ustate {
	VU_INIT = 0,	/* initialized but has not yet attempted to run */
	VU_RUN,		/* running in guest context */
	VU_IDLE,	/* idle (HLTed, wait-for-SIPI, etc) */
	VU_EMU_KERN,	/* emulation performed in-kernel */
	VU_EMU_USER,	/* emulation performed in userspace */
	VU_SCHED,	/* off-cpu for interrupt, preempt, lock contention */
	VU_MAX
};

void vcpu_ustate_change(struct vm *, int, enum vcpu_ustate);

typedef struct vmm_kstats {
	kstat_named_t	vk_name;
} vmm_kstats_t;

typedef struct vmm_vcpu_kstats {
	kstat_named_t	vvk_vcpu;
	kstat_named_t	vvk_time_init;
	kstat_named_t	vvk_time_run;
	kstat_named_t	vvk_time_idle;
	kstat_named_t	vvk_time_emu_kern;
	kstat_named_t	vvk_time_emu_user;
	kstat_named_t	vvk_time_sched;
} vmm_vcpu_kstats_t;

#define	VMM_KSTAT_CLASS	"misc"

int vmm_kstat_update_vcpu(struct kstat *, int);

typedef struct vmm_data_req {
	uint16_t	vdr_class;
	uint16_t	vdr_version;
	uint32_t	vdr_flags;
	uint32_t	vdr_len;
	void		*vdr_data;
	uint32_t	*vdr_result_len;
} vmm_data_req_t;

typedef int (*vmm_data_writef_t)(void *, const vmm_data_req_t *);
typedef int (*vmm_data_readf_t)(void *, const vmm_data_req_t *);
typedef int (*vmm_data_vcpu_writef_t)(struct vm *, int, const vmm_data_req_t *);
typedef int (*vmm_data_vcpu_readf_t)(struct vm *, int, const vmm_data_req_t *);

typedef struct vmm_data_version_entry {
	uint16_t	vdve_class;
	uint16_t	vdve_version;

	/*
	 * If these handlers accept/emit a single item of a fixed length, it
	 * should be specified in vdve_len_expect.  The vmm-data logic will
	 * then ensure that requests possess at least that specified length
	 * before calling into the defined handlers.
	 */
	uint16_t	vdve_len_expect;

	/*
	 * For handlers which deal with (potentially) multiple items of a
	 * fixed length, vdve_len_per_item is used to hint (via the
	 * VDC_VERSION class) to userspace what that item size is.  Although
	 * not strictly mutually exclusive with vdve_len_expect, it is
	 * nonsensical to set them both.
	 */
	uint16_t	vdve_len_per_item;

	/*
	 * A vmm-data handler is expected to provide read/write functions
	 * which are either VM-wide (via vdve_readf and vdve_writef) or
	 * per-vCPU (via vdve_vcpu_readf and vdve_vcpu_writef).  Providing
	 * both is not allowed (but is not currently checked at compile time).
	 */

	/* VM-wide handlers */
	vmm_data_readf_t	vdve_readf;
	vmm_data_writef_t	vdve_writef;

	/* Per-vCPU handlers */
	vmm_data_vcpu_readf_t	vdve_vcpu_readf;
	vmm_data_vcpu_writef_t	vdve_vcpu_writef;

	/*
	 * The vdve_vcpu_readf/writef handlers can rely on vcpuid to be within
	 * the [0, VM_MAXCPU) bounds.  If they also can handle vcpuid == -1
	 * (for VM-wide data), then they can opt into such cases by setting
	 * vdve_vcpu_wildcard to true.
	 *
	 * At a later time, it would make sense to improve the logic so a
	 * vmm-data class could define both the VM-wide and per-vCPU handlers,
	 * letting the incoming vcpuid determine which would be called.  Until
	 * then, vdve_vcpu_wildcard is the stopgap.
	 */
	bool		vdve_vcpu_wildcard;
} vmm_data_version_entry_t;

#define	VMM_DATA_VERSION(sym)	SET_ENTRY(vmm_data_version_entries, sym)
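
/*
 * Illustrative sketch (an assumption, not from the original header): a
 * vmm-data class defines an entry and places it in the linker set via
 * VMM_DATA_VERSION().  The class constant, payload struct, and handler
 * functions named below are hypothetical.
 *
 *	static const vmm_data_version_entry_t example_v1 = {
 *		.vdve_class = VDC_EXAMPLE,
 *		.vdve_version = 1,
 *		.vdve_len_expect = sizeof (struct vdi_example_v1),
 *		.vdve_readf = vmm_data_read_example,
 *		.vdve_writef = vmm_data_write_example,
 *	};
 *	VMM_DATA_VERSION(example_v1);
 */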

int vmm_data_read(struct vm *, int, const vmm_data_req_t *);
int vmm_data_write(struct vm *, int, const vmm_data_req_t *);

/*
 * TSC Scaling
 */
uint64_t vmm_calc_freq_multiplier(uint64_t guest_hz, uint64_t host_hz,
    uint32_t frac);

/* Represents a multiplier for a guest in which no scaling is required */
#define	VM_TSCM_NOSCALE	0

#endif /* _VMM_KERNEL_H_ */