
Searched refs:hypctx (Results 1 – 21 of 21) sorted by relevance

/freebsd/sys/arm64/vmm/
vmm_hyp.c
39 struct hypctx;
41 uint64_t VMM_HYP_FUNC(do_call_guest)(struct hypctx *);
44 vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest, in vmm_hyp_reg_store() argument
51 hypctx->vtimer_cpu.cntkctl_el1 = in vmm_hyp_reg_store()
53 hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 = in vmm_hyp_reg_store()
55 hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0 = in vmm_hyp_reg_store()
64 hypctx->vtimer_cpu.phys_timer.cntx_cval_el0 = in vmm_hyp_reg_store()
66 hypctx->vtimer_cpu.phys_timer.cntx_ctl_el0 = in vmm_hyp_reg_store()
72 hypctx->vgic_v3_regs.ich_eisr_el2 = in vmm_hyp_reg_store()
74 hypctx->vgic_v3_regs.ich_elrsr_el2 = in vmm_hyp_reg_store()
[all …]
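
The vmm_hyp.c hits above come from the EL2 world-switch path that snapshots the guest's timer and GIC state into struct hypctx on VM exit. A minimal sketch of the virtual-timer part, assuming READ_SPECIALREG from machine/armreg.h; the destination struct and function name are placeholders, not the actual vmm_hyp_reg_store() layout:

	/*
	 * Sketch only: save the guest virtual timer registers on exit so
	 * they can be restored before the next entry.
	 */
	#include <sys/types.h>
	#include <machine/armreg.h>

	struct virt_timer_state {
		uint64_t cntx_cval_el0;	/* compare value */
		uint64_t cntx_ctl_el0;	/* enable/mask/status bits */
	};

	static void
	save_virt_timer(struct virt_timer_state *ts)
	{
		ts->cntx_cval_el0 = READ_SPECIALREG(cntv_cval_el0);
		ts->cntx_ctl_el0 = READ_SPECIALREG(cntv_ctl_el0);
	}
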
vmm_arm64.c
107 DPCPU_DEFINE_STATIC(struct hypctx *, vcpu);
110 arm64_set_active_vcpu(struct hypctx *hypctx) in arm64_set_active_vcpu() argument
112 DPCPU_SET(vcpu, hypctx); in arm64_set_active_vcpu()
115 struct hypctx *
488 sizeof(struct hypctx *) * vm_get_maxcpus(vm))); in el2_hyp_size()
494 return (round_page(sizeof(struct hypctx))); in el2_hypctx_size()
561 struct hypctx *hypctx; in vmmops_vcpu_init() local
565 hypctx = malloc_aligned(size, PAGE_SIZE, M_HYP, M_WAITOK | M_ZERO); in vmmops_vcpu_init()
569 hyp->ctx[vcpuid] = hypctx; in vmmops_vcpu_init()
571 hypctx->hyp = hyp; in vmmops_vcpu_init()
[all …]
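
The vmm_arm64.c hits track which vCPU context is currently running on each physical CPU using FreeBSD's DPCPU per-CPU storage. A minimal sketch of that pattern; the function and variable names here are illustrative, only the DPCPU macros are taken from the hits:

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/pcpu.h>

	struct hypctx;

	/* One pointer slot per physical CPU for the vcpu context it is running. */
	DPCPU_DEFINE_STATIC(struct hypctx *, active_vcpu);

	static void
	set_active_vcpu(struct hypctx *ctx)
	{
		/* Record (or clear, with NULL) the context on the current CPU. */
		DPCPU_SET(active_vcpu, ctx);
	}

	static struct hypctx *
	get_active_vcpu(void)
	{
		/* Interrupt handlers read this to find the interrupted guest. */
		return (DPCPU_GET(active_vcpu));
	}
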
vmm_handlers.c
52 vmm_nvhe_enter_guest(struct hyp *hyp, struct hypctx *hypctx) in vmm_nvhe_enter_guest() argument
54 return (vmm_call_hyp(HYP_ENTER_GUEST, hyp->el2_addr, hypctx->el2_addr)); in vmm_nvhe_enter_guest()
58 (struct hyp *hyp, struct hypctx *hypctx))
vmm_handlers.h
34 struct hypctx;
37 uint64_t vmm_enter_guest(struct hyp *, struct hypctx *);
43 uint64_t vmm_vhe_enter_guest(struct hyp *, struct hypctx *);
arm64.h
45 struct hypctx { struct
154 struct hypctx *ctx[]; argument
165 struct hypctx *arm64_get_active_vcpu(void);
166 void raise_data_insn_abort(struct hypctx *, uint64_t, bool, int);
vmm.c
432 struct hypctx *hypctx; in vmm_write_oslar_el1() local
434 hypctx = vcpu_get_cookie(vcpu); in vmm_write_oslar_el1()
437 hypctx->dbg_oslock = (wval & OSLAR_OSLK) == OSLAR_OSLK; in vmm_write_oslar_el1()
444 struct hypctx *hypctx; in vmm_read_oslsr_el1() local
447 hypctx = vcpu_get_cookie(vcpu); in vmm_read_oslsr_el1()
449 if (hypctx->dbg_oslock) in vmm_read_oslsr_el1()
861 struct hypctx *hypctx; in vm_handle_smccc_call() local
864 hypctx = vcpu_get_cookie(vcpu); in vm_handle_smccc_call()
866 if ((hypctx->tf.tf_esr & ESR_ELx_ISS_MASK) != 0) in vm_handle_smccc_call()
870 vme->u.smccc_call.func_id = hypctx->tf.tf_x[0]; in vm_handle_smccc_call()
[all …]
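
The vmm.c hits implement a small software model of the debug OS Lock: a guest write to OSLAR_EL1 latches a per-vcpu flag, and reads of OSLSR_EL1 report it back. A sketch of that shape, with simplified bit definitions standing in for the armreg.h constants and a placeholder per-vcpu struct:

	#include <sys/types.h>

	#define	OSLAR_OSLK	0x1	/* lock-enable bit written via OSLAR_EL1 */
	#define	OSLSR_OSLK	0x2	/* lock-status bit reported via OSLSR_EL1 */

	struct vcpu_dbg {
		bool	dbg_oslock;	/* guest's view of the OS Lock */
	};

	static void
	write_oslar(struct vcpu_dbg *d, uint64_t wval)
	{
		d->dbg_oslock = (wval & OSLAR_OSLK) == OSLAR_OSLK;
	}

	static uint64_t
	read_oslsr(struct vcpu_dbg *d)
	{
		return (d->dbg_oslock ? OSLSR_OSLK : 0);
	}
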
vmm_nvhe.c
96 (struct hypctx *)x2)); in vmm_hyp_enter()
vmm_reset.c
52 struct hypctx *el2ctx; in reset_vm_el01_regs()
117 struct hypctx *el2ctx; in reset_vm_el2_regs()
/freebsd/sys/riscv/vmm/
vmm_riscv.c
78 DPCPU_DEFINE_STATIC(struct hypctx *, vcpu);
91 riscv_set_active_vcpu(struct hypctx *hypctx) in riscv_set_active_vcpu() argument
94 DPCPU_SET(vcpu, hypctx); in riscv_set_active_vcpu()
97 struct hypctx *
130 sizeof(struct hypctx *) * vm_get_maxcpus(vm)); in vmmops_init()
162 vmmops_vcpu_restore_csrs(struct hypctx *hypctx) in vmmops_vcpu_restore_csrs() argument
166 csrs = &hypctx->guest_csrs; in vmmops_vcpu_restore_csrs()
180 vmmops_vcpu_save_csrs(struct hypctx *hypctx) in vmmops_vcpu_save_csrs() argument
184 csrs = &hypctx->guest_csrs; in vmmops_vcpu_save_csrs()
200 struct hypctx *hypctx; in vmmops_vcpu_init() local
[all …]
vmm_sbi.c
45 vmm_sbi_handle_rfnc(struct vcpu *vcpu, struct hypctx *hypctx) in vmm_sbi_handle_rfnc() argument
57 func_id = hypctx->guest_regs.hyp_a[6]; in vmm_sbi_handle_rfnc()
58 hart_mask = hypctx->guest_regs.hyp_a[0]; in vmm_sbi_handle_rfnc()
59 hart_mask_base = hypctx->guest_regs.hyp_a[1]; in vmm_sbi_handle_rfnc()
63 fence.start = hypctx->guest_regs.hyp_a[2]; in vmm_sbi_handle_rfnc()
64 fence.size = hypctx->guest_regs.hyp_a[3]; in vmm_sbi_handle_rfnc()
65 fence.asid = hypctx->guest_regs.hyp_a[4]; in vmm_sbi_handle_rfnc()
83 hyp = hypctx->hyp; in vmm_sbi_handle_rfnc()
114 vmm_sbi_handle_time(struct vcpu *vcpu, struct hypctx *hypctx) in vmm_sbi_handle_time() argument
119 func_id = hypctx->guest_regs.hyp_a[6]; in vmm_sbi_handle_time()
[all …]
vmm_fence.c
50 vmm_fence_dequeue(struct hypctx *hypctx, struct vmm_fence *new_fence) in vmm_fence_dequeue() argument
55 mtx_lock_spin(&hypctx->fence_queue_mtx); in vmm_fence_dequeue()
56 queue = hypctx->fence_queue; in vmm_fence_dequeue()
57 fence = &queue[hypctx->fence_queue_head]; in vmm_fence_dequeue()
61 hypctx->fence_queue_head = in vmm_fence_dequeue()
62 (hypctx->fence_queue_head + 1) % VMM_FENCE_QUEUE_SIZE; in vmm_fence_dequeue()
64 mtx_unlock_spin(&hypctx->fence_queue_mtx); in vmm_fence_dequeue()
67 mtx_unlock_spin(&hypctx->fence_queue_mtx); in vmm_fence_dequeue()
73 vmm_fence_enqueue(struct hypctx *hypctx, struct vmm_fence *new_fence) in vmm_fence_enqueue() argument
78 mtx_lock_spin(&hypctx->fence_queue_mtx); in vmm_fence_enqueue()
[all …]
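
The vmm_fence.c hits show a fixed-size fence queue drained under a spin mutex, with the head index advancing modulo VMM_FENCE_QUEUE_SIZE. A sketch of that ring-buffer dequeue; the struct layout, field names, and the head/tail empty test are assumptions, not the actual FreeBSD definitions:

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>

	#define	FENCE_QUEUE_SIZE	128

	struct fence_entry {
		uint64_t start;
		uint64_t size;
	};

	struct fence_queue {
		struct fence_entry	entries[FENCE_QUEUE_SIZE];
		int			head;	/* next entry to dequeue */
		int			tail;	/* next free slot */
		struct mtx		mtx;	/* spin lock protecting head/tail */
	};

	static bool
	fence_dequeue(struct fence_queue *q, struct fence_entry *out)
	{
		bool found;

		mtx_lock_spin(&q->mtx);
		if (q->head != q->tail) {
			*out = q->entries[q->head];
			q->head = (q->head + 1) % FENCE_QUEUE_SIZE;
			found = true;
		} else
			found = false;
		mtx_unlock_spin(&q->mtx);

		return (found);
	}
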
vmm_vtimer.c
68 vtimer_cpuinit(struct hypctx *hypctx) in vtimer_cpuinit() argument
74 vtimer = &hypctx->vtimer; in vtimer_cpuinit()
88 struct hypctx *hypctx; in vtimer_inject_irq_callout() local
91 hypctx = arg; in vtimer_inject_irq_callout()
92 hyp = hypctx->hyp; in vtimer_inject_irq_callout()
94 atomic_set_32(&hypctx->interrupts_pending, HVIP_VSTIP); in vtimer_inject_irq_callout()
95 vcpu_notify_event(vm_vcpu(hyp->vm, hypctx->cpu_id)); in vtimer_inject_irq_callout()
99 vtimer_set_timer(struct hypctx *hypctx, uint64_t next_val) in vtimer_set_timer() argument
106 vtimer = &hypctx->vtimer; in vtimer_set_timer()
112 atomic_clear_32(&hypctx->interrupts_pending, HVIP_VSTIP); in vtimer_set_timer()
[all …]
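
The riscv vmm_vtimer.c hits arm a callout for the guest's timer deadline; when it fires, the handler marks the virtual supervisor timer interrupt pending and notifies the vcpu. A sketch of that shape, with placeholder names for the pending bit and the per-vcpu structure (the real code uses HVIP_VSTIP and vcpu_notify_event()):

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/callout.h>
	#include <machine/atomic.h>

	#define	PENDING_TIMER_IRQ	0x1	/* stand-in for HVIP_VSTIP */

	struct vcpu_timer {
		struct callout		callout;	/* callout_init()ed at vcpu setup */
		volatile uint32_t	interrupts_pending;
	};

	static void
	timer_fired(void *arg)
	{
		struct vcpu_timer *vt = arg;

		/* Latch the pending bit; the run loop injects it before re-entry. */
		atomic_set_32(&vt->interrupts_pending, PENDING_TIMER_IRQ);
		/* A full implementation would also wake the vcpu thread here. */
	}

	static void
	timer_arm(struct vcpu_timer *vt, sbintime_t delta)
	{
		callout_reset_sbt(&vt->callout, delta, 0, timer_fired, vt, 0);
	}
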
vmm_aplic.h
37 struct hypctx;
46 int aplic_check_pending(struct hypctx *hypctx);
48 void aplic_cpuinit(struct hypctx *hypctx);
49 void aplic_cpucleanup(struct hypctx *hypctx);
50 void aplic_flush_hwstate(struct hypctx *hypctx);
51 void aplic_sync_hwstate(struct hypctx *hypctx);
riscv.h
84 struct hypctx { struct
114 struct hypctx *ctx[]; argument
127 struct hypctx *riscv_get_active_vcpu(void);
128 void vmm_switch(struct hypctx *);
133 int riscv_check_ipi(struct hypctx *hypctx, bool clear);
134 bool riscv_check_interrupts_pending(struct hypctx *hypctx);
vmm_aplic.c
330 struct hypctx *hypctx; in mem_read() local
337 hypctx = vcpu_get_cookie(vcpu); in mem_read()
338 hyp = hypctx->hyp; in mem_read()
359 struct hypctx *hypctx; in mem_write() local
366 hypctx = vcpu_get_cookie(vcpu); in mem_write()
367 hyp = hypctx->hyp; in mem_write()
450 aplic_check_pending(struct hypctx *hypctx) in aplic_check_pending() argument
457 hyp = hypctx->hyp; in aplic_check_pending()
468 if (irq->target_hart != hypctx->cpu_id) in aplic_check_pending()
560 aplic_cpuinit(struct hypctx *hypctx) in aplic_cpuinit() argument
[all …]
vmm_vtimer.h
36 struct hypctx;
44 void vtimer_cpuinit(struct hypctx *hypctx);
45 int vtimer_set_timer(struct hypctx *hypctx, uint64_t next_val);
vmm_fence.h
36 struct hypctx;
40 void vmm_fence_process(struct hypctx *hypctx);
/freebsd/sys/arm64/vmm/io/
vtimer.c
72 static void vtimer_schedule_irq(struct hypctx *hypctx, bool phys);
77 struct hypctx *hypctx; in vtimer_virtual_timer_intr() local
81 hypctx = arm64_get_active_vcpu(); in vtimer_virtual_timer_intr()
84 if (!hypctx) { in vtimer_virtual_timer_intr()
100 hypctx->hyp->vtimer.cntvoff_el2; in vtimer_virtual_timer_intr()
101 if (hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 < cntpct_el0) in vtimer_virtual_timer_intr()
102 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), in vtimer_virtual_timer_intr()
105 cntv_ctl = hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0; in vtimer_virtual_timer_intr()
211 vtimer_cpuinit(struct hypctx *hypctx) in vtimer_cpuinit() argument
215 vtimer_cpu = &hypctx->vtimer_cpu; in vtimer_cpuinit()
[all …]
vgic_v3.c
158 typedef void (register_read)(struct hypctx *, u_int, uint64_t *, void *);
159 typedef void (register_write)(struct hypctx *, u_int, u_int, u_int,
429 struct hypctx *hypctx; in mpidr_to_vcpu() local
433 hypctx = hyp->ctx[i]; in mpidr_to_vcpu()
434 if (hypctx != NULL && (hypctx->vmpidr_el2 & GICD_AFF) == mpidr) in mpidr_to_vcpu()
465 vgic_v3_cpuinit(device_t dev, struct hypctx *hypctx) in vgic_v3_cpuinit() argument
471 hypctx->vgic_cpu = malloc(sizeof(*hypctx->vgic_cpu), in vgic_v3_cpuinit()
473 vgic_cpu = hypctx->vgic_cpu; in vgic_v3_cpuinit()
484 irq->mpidr = hypctx->vmpidr_el2 & GICD_AFF; in vgic_v3_cpuinit()
485 irq->target_vcpu = vcpu_vcpuid(hypctx->vcpu); in vgic_v3_cpuinit()
[all …]
vtimer.h
38 struct hypctx;
71 void vtimer_cpuinit(struct hypctx *);
72 void vtimer_cpucleanup(struct hypctx *);
75 void vtimer_sync_hwstate(struct hypctx *hypctx);
/freebsd/sys/riscv/riscv/
genassym.c
103 ASSYM(HYP_H_RA, offsetof(struct hypctx, host_regs.hyp_ra));
104 ASSYM(HYP_H_SP, offsetof(struct hypctx, host_regs.hyp_sp));
105 ASSYM(HYP_H_GP, offsetof(struct hypctx, host_regs.hyp_gp));
106 ASSYM(HYP_H_TP, offsetof(struct hypctx, host_regs.hyp_tp));
107 ASSYM(HYP_H_T, offsetof(struct hypctx, host_regs.hyp_t));
108 ASSYM(HYP_H_S, offsetof(struct hypctx, host_regs.hyp_s));
109 ASSYM(HYP_H_A, offsetof(struct hypctx, host_regs.hyp_a));
110 ASSYM(HYP_H_SEPC, offsetof(struct hypctx, host_regs.hyp_sepc));
111 ASSYM(HYP_H_SSTATUS, offsetof(struct hypctx, host_regs.hyp_sstatus));
112 ASSYM(HYP_H_HSTATUS, offsetof(struct hypctx, host_regs.hyp_hstatus));
[all …]
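
The genassym.c hits emit assembler-visible constants for struct hypctx field offsets, so the assembly world-switch code (vmm_switch) can store host registers at the right offsets without knowing the C layout. A sketch of the mechanism; the struct fields here are placeholders for the real layout in riscv.h:

	#include <sys/param.h>
	#include <sys/assym.h>

	struct hyp_regs {
		uint64_t hyp_ra;
		uint64_t hyp_sp;
	};

	struct hypctx_sketch {
		struct hyp_regs host_regs;
	};

	/*
	 * Each ASSYM() entry becomes a "#define HYP_H_RA <byte offset>" in the
	 * generated assym.inc, which the .S switch code includes.
	 */
	ASSYM(HYP_H_RA, offsetof(struct hypctx_sketch, host_regs.hyp_ra));
	ASSYM(HYP_H_SP, offsetof(struct hypctx_sketch, host_regs.hyp_sp));
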