Searched full:vcpu (Results 1 – 25 of 137) sorted by relevance

/freebsd/sys/amd64/vmm/
vmm.c
97 * (a) allocated when vcpu is created
98 * (i) initialized when vcpu is created and when it is reinitialized
99 * (o) initialized the first time the vcpu is created
102 struct vcpu { struct
104 enum vcpu_state state; /* (o) vcpu state */ argument
106 int hostcpu; /* (o) vcpu's host cpu */ argument
107 int reqidle; /* (i) request vcpu to idle */ argument
128 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN) argument
160 * [v] reads require one frozen vcpu, writes require freezing all vcpus
187 struct vcpu **vcpu; /* (o) guest vcpus */ member
[all …]
vmm_lapic.h
32 struct vcpu;
36 int lapic_rdmsr(struct vcpu *vcpu, u_int msr, uint64_t *rval, bool *retu);
37 int lapic_wrmsr(struct vcpu *vcpu, u_int msr, uint64_t wval, bool *retu);
39 int lapic_mmio_read(struct vcpu *vcpu, uint64_t gpa,
41 int lapic_mmio_write(struct vcpu *vcpu, uint64_t gpa,
48 int lapic_set_intr(struct vcpu *vcpu, int vector, bool trig);
53 lapic_intr_level(struct vcpu *vcpu, int vector) in lapic_intr_level() argument
56 return (lapic_set_intr(vcpu, vector, LAPIC_TRIG_LEVEL)); in lapic_intr_level()
60 lapic_intr_edge(struct vcpu *vcpu, int vector) in lapic_intr_edge() argument
63 return (lapic_set_intr(vcpu, vector, LAPIC_TRIG_EDGE)); in lapic_intr_edge()
[all …]
vmm_instruction_emul.c
289 vie_read_register(struct vcpu *vcpu, enum vm_reg_name reg, uint64_t *rval) in vie_read_register() argument
293 error = vm_get_register(vcpu, reg, rval); in vie_read_register()
325 vie_read_bytereg(struct vcpu *vcpu, struct vie *vie, uint8_t *rval) in vie_read_bytereg() argument
332 error = vm_get_register(vcpu, reg, &val); in vie_read_bytereg()
346 vie_write_bytereg(struct vcpu *vcpu, struct vie *vie, uint8_t byte) in vie_write_bytereg() argument
353 error = vm_get_register(vcpu, reg, &origval); in vie_write_bytereg()
366 error = vm_set_register(vcpu, reg, val); in vie_write_bytereg()
372 vie_update_register(struct vcpu *vcpu, enum vm_reg_name reg, in vie_update_register() argument
381 error = vie_read_register(vcpu, reg, &origval); in vie_update_register()
396 error = vm_set_register(vcpu, reg, val); in vie_update_register()
[all …]
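
The vie_update_register() fragment above follows the usual read-modify-write pattern for sub-64-bit destinations: read the original register value, merge in the new value according to the operand size, then write the result back with vm_set_register(). Below is a minimal standalone sketch of just the merge step, using the standard x86-64 conventions (1- and 2-byte writes preserve the untouched upper bits, 4-byte writes zero-extend); it is an illustration, not the kernel helper itself.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the merge step; 'size' is in bytes. */
static uint64_t
merge_register(uint64_t origval, uint64_t val, int size)
{
        switch (size) {
        case 1:
                return ((origval & ~(uint64_t)0xff) | (val & 0xff));
        case 2:
                return ((origval & ~(uint64_t)0xffff) | (val & 0xffff));
        case 4:
                /* 32-bit destinations zero-extend on x86-64. */
                return (val & 0xffffffffu);
        default:
                return (val);
        }
}

int
main(void)
{
        /* A 2-byte write of 0xaabb keeps bits 16..63 of the old value. */
        printf("%#" PRIx64 "\n",
            merge_register(0x1122334455667788ULL, 0xaabbULL, 2));
        return (0);
}
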
/freebsd/sys/riscv/vmm/
vmm.c
80 struct vcpu { struct
84 int hostcpu; /* host cpuid this vcpu last ran on */ argument
95 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN) argument
146 struct vcpu **vcpu; /* (i) guest vcpus */ member
163 static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
169 "IPI vector used for vcpu notifications");
177 static void vcpu_notify_event_locked(struct vcpu *vcpu);
191 vcpu_cleanup(struct vcpu *vcpu, bool destroy) in vcpu_cleanup() argument
193 vmmops_vcpu_cleanup(vcpu->cookie); in vcpu_cleanup()
194 vcpu->cookie = NULL; in vcpu_cleanup()
[all …]
/freebsd/sys/amd64/vmm/amd/
svm.c
294 svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset) in svm_set_tsc_offset() argument
298 ctrl = svm_get_vmcb_ctrl(vcpu); in svm_set_tsc_offset()
301 svm_set_dirty(vcpu, VMCB_CACHE_I); in svm_set_tsc_offset()
302 SVM_CTR1(vcpu, "tsc offset changed to %#lx", offset); in svm_set_tsc_offset()
304 vm_set_tsc_offset(vcpu->vcpu, offset); in svm_set_tsc_offset()
354 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
390 svm_get_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask) in svm_get_intercept() argument
396 ctrl = svm_get_vmcb_ctrl(vcpu); in svm_get_intercept()
401 svm_set_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask, int enabled) in svm_set_intercept() argument
408 ctrl = svm_get_vmcb_ctrl(vcpu); in svm_set_intercept()
[all …]
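
svm_get_intercept() and svm_set_intercept() above read and toggle bits in the intercept words of the VMCB control area, which decide whether a guest event exits into the hypervisor. The following is a standalone sketch of that bitmask bookkeeping; the array size and names are illustrative stand-ins, not the actual struct vmcb_ctrl layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NINTERCEPT      5       /* illustrative; not the real VMCB layout */

/* Stand-in for the intercept vectors kept in the VMCB control area. */
static uint32_t intercept[NINTERCEPT];

static bool
get_intercept(int idx, uint32_t bitmask)
{
        return ((intercept[idx] & bitmask) != 0);
}

static void
set_intercept(int idx, uint32_t bitmask, int enabled)
{
        if (enabled)
                intercept[idx] |= bitmask;      /* exit to the hypervisor */
        else
                intercept[idx] &= ~bitmask;     /* let the guest proceed */
        /*
         * The real svm_set_intercept() also marks the VMCB cache dirty
         * (compare svm_set_dirty() in the svm.c fragment above).
         */
}

int
main(void)
{
        set_intercept(0, 1u << 3, 1);   /* enable some illustrative exit */
        printf("%d\n", get_intercept(0, 1u << 3));
        return (0);
}
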
svm_softc.h
40 uint32_t rflags_tf; /* saved RFLAGS.TF value when single-stepping a vcpu */
52 struct vcpu *vcpu; member
53 struct vmcb *vmcb; /* hardware saved vcpu context */
54 struct svm_regctx swctx; /* software saved vcpu context */
57 int lastcpu; /* host cpu that the vcpu last ran on */
59 long eptgen; /* pmap->pm_eptgen when the vcpu last ran */
77 #define SVM_CTR0(vcpu, format) \ argument
78 VCPU_CTR0((vcpu)->sc->vm, (vcpu)->vcpuid, format)
80 #define SVM_CTR1(vcpu, format, p1) \ argument
81 VCPU_CTR1((vcpu)->sc->vm, (vcpu)->vcpuid, format, p1)
[all …]
vmcb.c
118 vmcb_access(struct svm_vcpu *vcpu, int write, int ident, uint64_t *val) in vmcb_access() argument
124 vmcb = svm_get_vmcb(vcpu); in vmcb_access()
147 SVM_CTR1(vcpu, "Invalid size %d for VMCB access: %d", bytes); in vmcb_access()
153 svm_set_dirty(vcpu, 0xffffffff); in vmcb_access()
162 vmcb_read(struct svm_vcpu *vcpu, int ident, uint64_t *retval) in vmcb_read() argument
169 vmcb = svm_get_vmcb(vcpu); in vmcb_read()
174 return (vmcb_access(vcpu, 0, ident, retval)); in vmcb_read()
248 *retval = vlapic_get_cr8(vm_lapic(vcpu->vcpu)); in vmcb_read()
268 vmcb_write(struct svm_vcpu *vcpu, int ident, uint64_t val) in vmcb_write() argument
275 vmcb = svm_get_vmcb(vcpu); in vmcb_write()
[all …]
/freebsd/sys/arm64/vmm/
vmm.c
76 struct vcpu { struct
80 int hostcpu; /* host cpuid this vcpu last ran on */ argument
91 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN) argument
151 struct vcpu **vcpu; /* (i) guest vcpus */ member
166 static int vm_handle_wfi(struct vcpu *vcpu,
172 static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
178 "IPI vector used for vcpu notifications");
240 static void vcpu_notify_event_locked(struct vcpu *vcpu);
290 vcpu_cleanup(struct vcpu *vcpu, bool destroy) in vcpu_cleanup() argument
292 vmmops_vcpu_cleanup(vcpu->cookie); in vcpu_cleanup()
[all …]
/freebsd/sys/amd64/include/
vmm.h
36 struct vcpu;
179 typedef void * (*vmi_vcpu_init_func_t)(void *vmi, struct vcpu *vcpu,
228 struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
258 * APIs that inspect the guest memory map require only a *single* vcpu to
267 void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len,
272 bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa);
274 int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
275 int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
276 int vm_get_seg_desc(struct vcpu *vcpu, int reg,
278 int vm_set_seg_desc(struct vcpu *vcpu, int reg,
[all …]
/freebsd/lib/libvmmapi/
vmmapi.h
46 struct vcpu;
126 struct vcpu *vm_vcpu_open(struct vmctx *ctx, int vcpuid);
127 void vm_vcpu_close(struct vcpu *vcpu);
128 int vcpu_id(struct vcpu *vcpu);
136 int vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
139 int vm_gla2gpa_nofault(struct vcpu *vcpu,
150 int vm_set_desc(struct vcpu *vcpu, int reg,
152 int vm_get_desc(struct vcpu *vcpu, int reg,
154 int vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *seg_desc);
156 int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
[all …]
vmmapi.c
210 struct vcpu *
213 struct vcpu *vcpu; in vm_vcpu_open() local
215 vcpu = malloc(sizeof(*vcpu)); in vm_vcpu_open()
216 vcpu->ctx = ctx; in vm_vcpu_open()
217 vcpu->vcpuid = vcpuid; in vm_vcpu_open()
218 return (vcpu); in vm_vcpu_open()
222 vm_vcpu_close(struct vcpu *vcpu) in vm_vcpu_close() argument
224 free(vcpu); in vm_vcpu_close()
228 vcpu_id(struct vcpu *vcpu) in vcpu_id() argument
230 return (vcpu->vcpuid); in vcpu_id()
[all …]
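
The vmmapi.h declarations and the vm_vcpu_open()/vm_vcpu_close() bodies above show the per-vCPU handle model libvmmapi gives userland: open a handle for a vcpuid, pass it to the register and memory accessors, close it when done. Below is a hedged usage sketch; vm_open()/vm_close() and the error handling are assumptions not shown in these search results, and the register constant comes from the amd64 <machine/vmm.h>.

#include <sys/param.h>

#include <machine/vmm.h>        /* VM_REG_GUEST_RIP (amd64) */
#include <vmmapi.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        struct vmctx *ctx;
        struct vcpu *vcpu;
        uint64_t rip;

        ctx = vm_open("testvm");        /* assumes the VM already exists */
        if (ctx == NULL)
                err(1, "vm_open");

        vcpu = vm_vcpu_open(ctx, 0);    /* handle for vCPU 0 */
        if (vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip) != 0)
                err(1, "vm_get_register");
        printf("vcpu %d rip=%#lx\n", vcpu_id(vcpu), (unsigned long)rip);

        vm_vcpu_close(vcpu);
        vm_close(ctx);                  /* assumption: present in newer libvmmapi */
        return (0);
}
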
/freebsd/sys/riscv/include/
vmm.h
45 struct vcpu;
126 struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
147 * APIs that inspect the guest memory map require only a *single* vcpu to
156 void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len,
161 bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa);
163 int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
171 int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
172 int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
173 int vm_run(struct vcpu *vcpu);
176 int vcpu_vcpuid(struct vcpu *vcpu);
[all …]
/freebsd/usr.sbin/bhyve/amd64/
task_switch.c
100 GETREG(struct vcpu *vcpu, int reg) in GETREG() argument
105 error = vm_get_register(vcpu, reg, &val); in GETREG()
111 SETREG(struct vcpu *vcpu, int reg, uint64_t val) in SETREG() argument
115 error = vm_set_register(vcpu, reg, val); in SETREG()
151 sel_exception(struct vcpu *vcpu, int vector, uint16_t sel, int ext) in sel_exception() argument
165 vm_inject_fault(vcpu, vector, 1, sel); in sel_exception()
173 desc_table_limit_check(struct vcpu *vcpu, uint16_t sel) in desc_table_limit_check() argument
180 error = vm_get_desc(vcpu, reg, &base, &limit, &access); in desc_table_limit_check()
203 desc_table_rw(struct vcpu *vcpu, struct vm_guest_paging *paging, in desc_table_rw() argument
213 error = vm_get_desc(vcpu, reg, &base, &limit, &access); in desc_table_rw()
[all …]
vmexit.c
60 vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid, in vm_inject_fault() argument
67 error = vm_inject_exception(vcpu, vector, errcode_valid, errcode, in vm_inject_fault()
73 vmexit_inout(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun) in vmexit_inout() argument
84 error = emulate_inout(ctx, vcpu, vme); in vmexit_inout()
97 vmexit_rdmsr(struct vmctx *ctx __unused, struct vcpu *vcpu, in vmexit_rdmsr() argument
108 error = emulate_rdmsr(vcpu, vme->u.msr.code, &val); in vmexit_rdmsr()
110 EPRINTLN("rdmsr to register %#x on vcpu %d", in vmexit_rdmsr()
111 vme->u.msr.code, vcpu_id(vcpu)); in vmexit_rdmsr()
113 vm_inject_gp(vcpu); in vmexit_rdmsr()
119 error = vm_set_register(vcpu, VM_REG_GUEST_RAX, eax); in vmexit_rdmsr()
[all …]
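
The vmexit_rdmsr() fragment shows the common emulation flow: call emulate_rdmsr(), inject #GP into the guest on failure, and otherwise hand the 64-bit result back in EDX:EAX. The split itself is simple; shown here as a standalone snippet for clarity, not as the bhyve code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t val = 0x1122334455667788ULL;   /* emulated MSR value */
        uint32_t eax = val & 0xffffffffu;       /* low half -> guest RAX */
        uint32_t edx = val >> 32;               /* high half -> guest RDX */

        printf("eax=%#" PRIx32 " edx=%#" PRIx32 "\n", eax, edx);
        return (0);
}
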
/freebsd/sys/arm64/include/
vmm.h
38 struct vcpu;
145 struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
166 * APIs that inspect the guest memory map require only a *single* vcpu to
175 void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len,
180 bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa);
182 int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
190 int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
191 int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
192 int vm_run(struct vcpu *vcpu);
195 int vcpu_vcpuid(struct vcpu *vcpu);
[all …]
/freebsd/sys/amd64/vmm/intel/
vmx.c
816 * bitmap is currently per-VM rather than per-vCPU while the in vmx_modinit()
818 * per-vCPU basis). in vmx_modinit()
1130 vmx_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) in vmx_vcpu_init()
1134 struct vmx_vcpu *vcpu; in vmx_vcpu_init() local
1141 vcpu = malloc(sizeof(*vcpu), M_VMX, M_WAITOK | M_ZERO); in vmx_vcpu_init()
1142 vcpu->vmx = vmx; in vmx_vcpu_init()
1143 vcpu->vcpu = vcpu1; in vmx_vcpu_init()
1144 vcpu->vcpuid = vcpuid; in vmx_vcpu_init()
1145 vcpu->vmcs = malloc_aligned(sizeof(*vmcs), PAGE_SIZE, M_VMX, in vmx_vcpu_init()
1147 vcpu->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX, in vmx_vcpu_init()
[all …]
vmx.h
98 int lastcpu; /* host cpu that this 'vcpu' last ran on */
129 struct vcpu *vcpu; member
152 #define VMX_CTR0(vcpu, format) \ argument
153 VCPU_CTR0((vcpu)->vmx->vm, (vcpu)->vcpuid, format)
155 #define VMX_CTR1(vcpu, format, p1) \ argument
156 VCPU_CTR1((vcpu)->vmx->vm, (vcpu)->vcpuid, format, p1)
158 #define VMX_CTR2(vcpu, format, p1, p2) \ argument
159 VCPU_CTR2((vcpu)->vmx->vm, (vcpu)->vcpuid, format, p1, p2)
161 #define VMX_CTR3(vcpu, format, p1, p2, p3) \ argument
162 VCPU_CTR3((vcpu)->vmx->vm, (vcpu)->vcpuid, format, p1, p2, p3)
[all …]
vmx_msr.c
312 vmx_msr_guest_init(struct vmx *vmx, struct vmx_vcpu *vcpu) in vmx_msr_guest_init() argument
318 if (vcpu->vcpuid == 0) { in vmx_msr_guest_init()
329 vcpu->guest_msrs[IDX_MSR_PAT] = PAT_VALUE(0, PAT_WRITE_BACK) | in vmx_msr_guest_init()
342 vmx_msr_guest_enter(struct vmx_vcpu *vcpu) in vmx_msr_guest_enter() argument
347 wrmsr(MSR_LSTAR, vcpu->guest_msrs[IDX_MSR_LSTAR]); in vmx_msr_guest_enter()
348 wrmsr(MSR_CSTAR, vcpu->guest_msrs[IDX_MSR_CSTAR]); in vmx_msr_guest_enter()
349 wrmsr(MSR_STAR, vcpu->guest_msrs[IDX_MSR_STAR]); in vmx_msr_guest_enter()
350 wrmsr(MSR_SF_MASK, vcpu->guest_msrs[IDX_MSR_SF_MASK]); in vmx_msr_guest_enter()
351 wrmsr(MSR_KGSBASE, vcpu->guest_msrs[IDX_MSR_KGSBASE]); in vmx_msr_guest_enter()
355 vmx_msr_guest_enter_tsc_aux(struct vmx *vmx, struct vmx_vcpu *vcpu) in vmx_msr_guest_enter_tsc_aux() argument
[all …]
/freebsd/sys/contrib/xen/
vcpu.h
2 * vcpu.h
4 * VCPU initialisation, query, and hotplug.
35 * @cmd == VCPUOP_??? (VCPU operation).
36 * @vcpuid == VCPU to operate on.
41 * Initialise a VCPU. Each VCPU can be initialised only once. A
42 * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
45 * structure containing the initial state for the VCPU. For x86
52 * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
53 * if the VCPU has not been initialised (VCPUOP_initialise).
58 * Bring down a VCPU (i.e., make it non-runnable).
[all …]
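
The vcpu.h comments above describe a strict lifecycle: a VCPU is initialised exactly once (VCPUOP_initialise), made runnable with VCPUOP_up, and taken offline with VCPUOP_down. Below is a sketch of that ordering for a guest bringing up a secondary VCPU; do_vcpu_op() is a hypothetical stand-in for the guest's hypercall wrapper (e.g. a HYPERVISOR_vcpu_op-style function) and is not defined in this header.

#include <xen/vcpu.h>   /* VCPUOP_*; exact include path depends on the tree */

/* Hypothetical hypercall wrapper; name and signature are assumptions. */
int do_vcpu_op(int cmd, int vcpuid, void *extra);

static int
bring_up_vcpu(int vcpuid, void *initial_state)
{
        int error;

        /* Each VCPU may be initialised only once. */
        error = do_vcpu_op(VCPUOP_initialise, vcpuid, initial_state);
        if (error != 0)
                return (error);

        /* Fails unless VCPUOP_initialise was issued first. */
        return (do_vcpu_op(VCPUOP_up, vcpuid, NULL));
}
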
/freebsd/usr.sbin/bhyve/aarch64/
vmexit.c
60 vmexit_inst_emul(struct vmctx *ctx __unused, struct vcpu *vcpu, in vmexit_inst_emul() argument
70 err = emulate_mem(vcpu, vme->u.inst_emul.gpa, vie, in vmexit_inst_emul()
89 vmexit_reg_emul(struct vmctx *ctx __unused, struct vcpu *vcpu __unused, in vmexit_reg_emul()
104 vmexit_suspend(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun) in vmexit_suspend() argument
108 int vcpuid = vcpu_id(vcpu); in vmexit_suspend()
132 vmexit_debug(struct vmctx *ctx __unused, struct vcpu *vcpu, in vmexit_debug() argument
135 gdb_cpu_suspend(vcpu); in vmexit_debug()
138 * window between activation of the vCPU thread and the STARTUP IPI. in vmexit_debug()
145 vmexit_bogus(struct vmctx *ctx __unused, struct vcpu *vcpu __unused, in vmexit_bogus()
173 for (int vcpu = 0; vcpu < guest_ncpus; vcpu++) { in smccc_affinity_info() local
[all …]
/freebsd/usr.sbin/bhyve/
mem.c
63 * Per-vCPU cache. Since most accesses from a vCPU will be to
141 typedef int (mem_cb_t)(struct vcpu *vcpu, uint64_t gpa, struct mem_range *mr,
145 mem_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg) in mem_read() argument
150 error = (*mr->handler)(vcpu, MEM_F_READ, gpa, size, rval, mr->arg1, in mem_read()
156 mem_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size, void *arg) in mem_write() argument
161 error = (*mr->handler)(vcpu, MEM_F_WRITE, gpa, size, &wval, mr->arg1, in mem_write()
167 access_memory(struct vcpu *vcpu, uint64_t paddr, mem_cb_t *cb, void *arg) in access_memory() argument
172 vcpuid = vcpu_id(vcpu); in access_memory()
175 * First check the per-vCPU cache in access_memory()
186 /* Update the per-vCPU cache */ in access_memory()
[all …]
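
The mem.c fragment above explains the per-vCPU cache in front of bhyve's MMIO range lookup: most accesses from a given vCPU land in the same range, so the last match is remembered per vCPU before falling back to the full search (an RB tree in the real code). A standalone sketch of that caching idea, with illustrative types rather than bhyve's struct mem_range:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; bhyve keeps struct mem_range nodes in an RB tree. */
struct range {
        uint64_t base;
        uint64_t size;
        const char *name;
};

#define MAXCPU  4

static struct range ranges[] = {
        { 0xd0000000, 0x1000, "dev0" },
        { 0xd0010000, 0x1000, "dev1" },
};
static struct range *cache[MAXCPU];     /* last range hit, per vCPU */

static struct range *
lookup(int vcpuid, uint64_t gpa)
{
        struct range *r;
        size_t i;

        /* Fast path: most accesses from a vCPU hit the same range. */
        r = cache[vcpuid];
        if (r != NULL && gpa >= r->base && gpa < r->base + r->size)
                return (r);

        /* Slow path: full search, then refresh the per-vCPU cache. */
        for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
                r = &ranges[i];
                if (gpa >= r->base && gpa < r->base + r->size) {
                        cache[vcpuid] = r;
                        return (r);
                }
        }
        return (NULL);
}

int
main(void)
{
        struct range *r = lookup(0, 0xd0010010);

        printf("%s\n", r != NULL ? r->name : "unmapped");
        return (0);
}
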
bhyverun.c
111 static void vm_loop(struct vmctx *ctx, struct vcpu *vcpu);
115 struct vcpu *vcpu; member
127 * manual page syntax specification, this results in a topology of 1 vCPU.
245 int vcpu, pcpu; in bhyve_pincpu_parse() local
247 if (sscanf(opt, "%d:%d", &vcpu, &pcpu) != 2) { in bhyve_pincpu_parse()
252 if (vcpu < 0) { in bhyve_pincpu_parse()
253 fprintf(stderr, "invalid vcpu '%d'\n", vcpu); in bhyve_pincpu_parse()
263 snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu); in bhyve_pincpu_parse()
278 parse_cpuset(int vcpu, const char *list, cpuset_t *set) in parse_cpuset() argument
289 errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list); in parse_cpuset()
[all …]
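
For context on the bhyverun.c fragment: bhyve_pincpu_parse() accepts pinning in vcpu:pcpu form and records it under a vcpu.N.cpuset configuration key; on the command line this corresponds to bhyve(8)'s -p vcpu:hostcpu option, e.g. -p 0:4 -p 1:5 to pin guest vCPUs 0 and 1 to host CPUs 4 and 5 (check the installed manual page for the exact spelling on a given release).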
gdb.c
132 * When a vCPU stops due to an event that should be reported to the
134 * The vCPU thread then sets 'stopped_vcpu' if it is not already set
137 * vCPU. When the debugger resumes execution via continue or step,
141 * An idle vCPU will have all of the boolean fields set to false.
143 * When a vCPU is stepped, 'stepping' is set to true when the vCPU is
144 * released to execute the stepped instruction. When the vCPU reports
147 * When a vCPU hits a breakpoint set by the debug server,
162 static struct vcpu **vcpus;
287 guest_paging_info(struct vcpu *vcpu, struct vm_guest_paging *paging) in guest_paging_info() argument
298 if (vm_get_register_set(vcpu, nitems(regset), regset, regs) == -1) in guest_paging_info()
[all …]
/freebsd/sys/arm64/vmm/io/
vtimer.c
77 eprintf("No active vcpu\n"); in vtimer_virtual_timer_intr()
93 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), in vtimer_virtual_timer_intr()
182 * Configure physical timer interrupts for the VCPU. in vtimer_cpuinit()
244 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), in vtimer_sync_hwstate()
247 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), in vtimer_sync_hwstate()
250 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), in vtimer_sync_hwstate()
262 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), in vtimer_inject_irq_callout_phys()
272 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), in vtimer_inject_irq_callout_virt()
292 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), in vtimer_schedule_irq()
307 vtimer_remove_irq(struct hypctx *hypctx, struct vcpu *vcpu) in vtimer_remove_irq() argument
[all …]
/freebsd/usr.sbin/bhyve/riscv/
vmexit.c
74 vmexit_inst_emul(struct vmctx *ctx __unused, struct vcpu *vcpu, in vmexit_inst_emul() argument
84 err = emulate_mem(vcpu, vme->u.inst_emul.gpa, vie, in vmexit_inst_emul()
103 vmexit_suspend(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun) in vmexit_suspend() argument
107 int vcpuid = vcpu_id(vcpu); in vmexit_suspend()
134 vmexit_debug(struct vmctx *ctx __unused, struct vcpu *vcpu __unused, in vmexit_debug()
142 vmexit_bogus(struct vmctx *ctx __unused, struct vcpu *vcpu __unused, in vmexit_bogus()
176 vmexit_ecall_hsm(struct vmctx *ctx __unused, struct vcpu *vcpu __unused, in vmexit_ecall_hsm()
179 struct vcpu *newvcpu; in vmexit_ecall_hsm()
230 error = vm_set_register(vcpu, VM_REG_GUEST_A0, ret); in vmexit_ecall_hsm()
235 vmexit_ecall_base(struct vmctx *ctx __unused, struct vcpu *vcpu, in vmexit_ecall_base() argument
[all …]
