/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _VMMAPI_H_
#define _VMMAPI_H_

#include <sys/param.h>
#include <sys/cpuset.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include <stdbool.h>

/*
 * API version for out-of-tree consumers like grub-bhyve for making compile
 * time decisions.
 */
#define VMMAPI_VERSION  0200    /* 2 digit major followed by 2 digit minor */

struct iovec;
struct vcpu;
struct vmctx;
struct vm_snapshot_meta;
enum x2apic_state;

/*
 * Different styles of mapping the memory assigned to a VM into the address
 * space of the controlling process.
 */
enum vm_mmap_style {
    VM_MMAP_NONE,       /* no mapping */
    VM_MMAP_ALL,        /* fully and statically mapped */
    VM_MMAP_SPARSE,     /* mappings created on-demand */
};

/*
 * 'flags' value passed to 'vm_set_memflags()'.
 */
#define VM_MEM_F_INCORE 0x01    /* include guest memory in core file */
#define VM_MEM_F_WIRED  0x02    /* guest memory is wired */

/*
 * Identifiers for memory segments:
 * - vm_setup_memory() uses VM_SYSMEM for the system memory segment.
 * - the remaining identifiers can be used to create devmem segments.
 */
enum {
    VM_SYSMEM,
    VM_BOOTROM,
    VM_FRAMEBUFFER,
    VM_PCIROM,
};

__BEGIN_DECLS
/*
 * Get the length and name of the memory segment identified by 'segid'.
 * Note that system memory segments are identified with a nul name.
 *
 * Returns 0 on success and non-zero otherwise.
 */
int vm_get_memseg(struct vmctx *ctx, int ident, size_t *lenp, char *name,
        size_t namesiz);

/*
 * Iterate over the guest address space. This function finds an address range
 * that starts at an address >= *gpa.
 *
 * Returns 0 if the next address range was found and non-zero otherwise.
 */
int vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
        vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);

int vm_get_guestmem_from_ctx(struct vmctx *ctx, char **guest_baseaddr,
        size_t *lowmem_size, size_t *highmem_size);
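
/*
 * Usage sketch for the two iteration routines above: walk the guest
 * address space with vm_mmap_getnext() and look up the backing segment
 * of each mapping with vm_get_memseg().  Illustrative only; 'ctx' is an
 * already-opened context, the name buffer size is an arbitrary choice,
 * and error handling is omitted.
 *
 *      vm_paddr_t gpa = 0;
 *      vm_ooffset_t segoff;
 *      size_t maplen, seglen;
 *      int segid, prot, flags;
 *      char name[64];
 *
 *      while (vm_mmap_getnext(ctx, &gpa, &segid, &segoff, &maplen,
 *          &prot, &flags) == 0) {
 *              if (vm_get_memseg(ctx, segid, &seglen, name,
 *                  sizeof(name)) == 0)
 *                      printf("gpa %#lx len %#zx segid %d name '%s'\n",
 *                          (unsigned long)gpa, maplen, segid, name);
 *              gpa += maplen;
 *      }
 */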

/*
 * Create a device memory segment identified by 'segid'.
 *
 * Returns a pointer to the memory segment on success and MAP_FAILED otherwise.
 */
void *vm_create_devmem(struct vmctx *ctx, int segid, const char *name,
        size_t len);

/*
 * Map the memory segment identified by 'segid' into the guest address space
 * at [gpa,gpa+len) with protection 'prot'.
 */
int vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid,
        vm_ooffset_t segoff, size_t len, int prot);

int vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len);

int vm_create(const char *name);
struct vmctx *vm_open(const char *name);
void vm_close(struct vmctx *ctx);
void vm_destroy(struct vmctx *ctx);
int vm_limit_rights(struct vmctx *ctx);
struct vcpu *vm_vcpu_open(struct vmctx *ctx, int vcpuid);
void vm_vcpu_close(struct vcpu *vcpu);
int vcpu_id(struct vcpu *vcpu);
int vm_parse_memsize(const char *optarg, size_t *memsize);
int vm_setup_memory(struct vmctx *ctx, size_t len, enum vm_mmap_style s);
void *vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len);
/* inverse operation to vm_map_gpa - extract guest address from host pointer */
vm_paddr_t vm_rev_map_gpa(struct vmctx *ctx, void *addr);
#ifdef __amd64__
int vm_get_gpa_pmap(struct vmctx *, uint64_t gpa, uint64_t *pte, int *num);
int vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
        uint64_t gla, int prot, uint64_t *gpa, int *fault);
#endif
int vm_gla2gpa_nofault(struct vcpu *vcpu,
        struct vm_guest_paging *paging, uint64_t gla, int prot,
        uint64_t *gpa, int *fault);
uint32_t vm_get_lowmem_limit(struct vmctx *ctx);
void vm_set_memflags(struct vmctx *ctx, int flags);
int vm_get_memflags(struct vmctx *ctx);
const char *vm_get_name(struct vmctx *ctx);
size_t vm_get_lowmem_size(struct vmctx *ctx);
vm_paddr_t vm_get_highmem_base(struct vmctx *ctx);
size_t vm_get_highmem_size(struct vmctx *ctx);
#ifdef __amd64__
int vm_set_desc(struct vcpu *vcpu, int reg,
        uint64_t base, uint32_t limit, uint32_t access);
int vm_get_desc(struct vcpu *vcpu, int reg,
        uint64_t *base, uint32_t *limit, uint32_t *access);
int vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *seg_desc);
#endif
int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
int vm_set_register_set(struct vcpu *vcpu, unsigned int count,
        const int *regnums, uint64_t *regvals);
int vm_get_register_set(struct vcpu *vcpu, unsigned int count,
        const int *regnums, uint64_t *regvals);
int vm_run(struct vcpu *vcpu, struct vm_run *vmrun);
int vm_suspend(struct vmctx *ctx, enum vm_suspend_how how);
int vm_reinit(struct vmctx *ctx);
int vm_raise_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg,
        int bus, int slot, int func);
#ifdef __aarch64__
int vm_attach_vgic(struct vmctx *ctx, uint64_t dist_start, size_t dist_size,
        uint64_t redist_start, size_t redist_size);
int vm_assert_irq(struct vmctx *ctx, uint32_t irq);
int vm_deassert_irq(struct vmctx *ctx, uint32_t irq);
int vm_inject_exception(struct vcpu *vcpu, uint64_t esr, uint64_t far);
#endif
#ifdef __amd64__
int vm_apicid2vcpu(struct vmctx *ctx, int apicid);
int vm_inject_exception(struct vcpu *vcpu, int vector,
        int errcode_valid, uint32_t errcode, int restart_instruction);
int vm_lapic_irq(struct vcpu *vcpu, int vector);
int vm_lapic_local_irq(struct vcpu *vcpu, int vector);
int vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg);
int vm_ioapic_assert_irq(struct vmctx *ctx, int irq);
int vm_ioapic_deassert_irq(struct vmctx *ctx, int irq);
int vm_ioapic_pulse_irq(struct vmctx *ctx, int irq);
int vm_ioapic_pincount(struct vmctx *ctx, int *pincount);
int vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
int vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
int vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
int vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
        enum vm_intr_trigger trigger);
int vm_inject_nmi(struct vcpu *vcpu);
int vm_readwrite_kernemu_device(struct vcpu *vcpu,
        vm_paddr_t gpa, bool write, int size, uint64_t *value);
#endif
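
/*
 * Usage sketch tying the lifecycle calls above together: create and open
 * a VM, give it memory, open vcpu 0 and run it.  Illustrative only; the
 * VM name and memory size are arbitrary, the vm_run structure must be
 * initialized as described in <machine/vmm_dev.h>, and real consumers
 * such as bhyve(8) do far more per exit.
 *
 *      struct vmctx *ctx;
 *      struct vcpu *vcpu;
 *      struct vm_run vmrun;
 *      int error;
 *
 *      if (vm_create("demo-vm") != 0 && errno != EEXIST)
 *              err(1, "vm_create");
 *      if ((ctx = vm_open("demo-vm")) == NULL)
 *              err(1, "vm_open");
 *      if (vm_setup_memory(ctx, 256UL * 1024 * 1024, VM_MMAP_ALL) != 0)
 *              err(1, "vm_setup_memory");
 *      if ((vcpu = vm_vcpu_open(ctx, 0)) == NULL)
 *              err(1, "vm_vcpu_open");
 *
 *      ... set initial register state with vm_set_register() et al ...
 *      memset(&vmrun, 0, sizeof(vmrun));
 *      ... point vmrun at exit-information storage ...
 *      error = vm_run(vcpu, &vmrun);
 *      ... dispatch on the recorded exit reason ...
 *
 *      vm_vcpu_close(vcpu);
 *      vm_close(ctx);
 */
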
int vm_capability_name2type(const char *capname);
const char *vm_capability_type2name(int type);
int vm_get_capability(struct vcpu *vcpu, enum vm_cap_type cap,
        int *retval);
int vm_set_capability(struct vcpu *vcpu, enum vm_cap_type cap,
        int val);
int vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func);
int vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func);
int vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
        vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
        vm_paddr_t gpa, size_t len);
int vm_setup_pptdev_msi(struct vmctx *ctx, int bus, int slot,
        int func, uint64_t addr, uint64_t msg, int numvec);
int vm_setup_pptdev_msix(struct vmctx *ctx, int bus, int slot,
        int func, int idx, uint64_t addr, uint64_t msg,
        uint32_t vector_control);
int vm_disable_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func);

int vm_get_intinfo(struct vcpu *vcpu, uint64_t *i1, uint64_t *i2);
int vm_set_intinfo(struct vcpu *vcpu, uint64_t exit_intinfo);

/*
 * Return a pointer to the statistics buffer. Note that this is not MT-safe.
 */
uint64_t *vm_get_stats(struct vcpu *vcpu, struct timeval *ret_tv,
        int *ret_entries);
const char *vm_get_stat_desc(struct vmctx *ctx, int index);

#ifdef __amd64__
int vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *s);
int vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state s);

int vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities);

/*
 * Translate the GLA range [gla,gla+len) into GPA segments in 'iov'.
 * The 'iovcnt' should be big enough to accommodate all GPA segments.
 *
 * retval   fault   Interpretation
 *   0        0     Success
 *   0        1     An exception was injected into the guest
 * EFAULT    N/A    Error
 */
int vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *pg,
        uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
        int *fault);
#endif
void vm_copyin(struct iovec *guest_iov, void *host_dst, size_t len);
void vm_copyout(const void *host_src, struct iovec *guest_iov, size_t len);
void vm_copy_teardown(struct iovec *iov, int iovcnt);
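
/*
 * Usage sketch for the guest-copy helpers above (amd64): translate a
 * guest-linear range with vm_copy_setup() and read it with vm_copyin().
 * Illustrative only; 'vcpu', 'paging' and 'gla' are assumed to come from
 * the current exit context, PROT_READ is from <sys/mman.h>, and the
 * iovec count of 4 is an arbitrary choice.
 *
 *      struct iovec iov[4];
 *      char buf[128];
 *      int error, fault;
 *
 *      error = vm_copy_setup(vcpu, &paging, gla, sizeof(buf), PROT_READ,
 *          iov, nitems(iov), &fault);
 *      if (error == 0 && fault == 0) {
 *              vm_copyin(iov, buf, sizeof(buf));
 *              vm_copy_teardown(iov, nitems(iov));
 *      } else if (error == 0 && fault != 0) {
 *              ... an exception was injected; let the guest retry ...
 *      } else {
 *              ... EFAULT-style error ...
 *      }
 */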

#ifdef __amd64__
/* RTC */
int vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value);
int vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval);
int vm_rtc_settime(struct vmctx *ctx, time_t secs);
int vm_rtc_gettime(struct vmctx *ctx, time_t *secs);
#endif

/* Reset vcpu register state */
int vcpu_reset(struct vcpu *vcpu);

int vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus);
int vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus);
int vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus);
int vm_activate_cpu(struct vcpu *vcpu);
int vm_suspend_all_cpus(struct vmctx *ctx);
int vm_suspend_cpu(struct vcpu *vcpu);
int vm_resume_all_cpus(struct vmctx *ctx);
int vm_resume_cpu(struct vcpu *vcpu);
int vm_restart_instruction(struct vcpu *vcpu);

/* CPU topology */
int vm_set_topology(struct vmctx *ctx, uint16_t sockets, uint16_t cores,
        uint16_t threads, uint16_t maxcpus);
int vm_get_topology(struct vmctx *ctx, uint16_t *sockets, uint16_t *cores,
        uint16_t *threads, uint16_t *maxcpus);

/*
 * FreeBSD specific APIs
 */
int vm_setup_freebsd_registers(struct vcpu *vcpu,
        uint64_t rip, uint64_t cr3, uint64_t gdtbase,
        uint64_t rsp);
int vm_setup_freebsd_registers_i386(struct vcpu *vcpu,
        uint32_t eip, uint32_t gdtbase,
        uint32_t esp);
void vm_setup_freebsd_gdt(uint64_t *gdtr);

/*
 * Save and restore
 */
int vm_snapshot_req(struct vmctx *ctx, struct vm_snapshot_meta *meta);
int vm_restore_time(struct vmctx *ctx);

/*
 * Deprecated interfaces, do not use them in new code.
 */
int vm_get_device_fd(struct vmctx *ctx);
const cap_ioctl_t *vm_get_ioctls(size_t *len);

__END_DECLS

#endif  /* _VMMAPI_H_ */