| /linux/arch/arm64/kvm/hyp/nvhe/ |
| hyp-main.c |
     25  void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
     90  __fpsimd_restore_state(host_data_ptr(host_ctxt.fp_regs));    in fpsimd_sve_sync()
    165  static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)    in handle___pkvm_vcpu_load() argument
    167  DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);    in handle___pkvm_vcpu_load()
    168  DECLARE_REG(unsigned int, vcpu_idx, host_ctxt, 2);    in handle___pkvm_vcpu_load()
    169  DECLARE_REG(u64, hcr_el2, host_ctxt, 3);    in handle___pkvm_vcpu_load()
    186  static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt)    in handle___pkvm_vcpu_put() argument
    198  static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)    in handle___kvm_vcpu_run() argument
    200  DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);    in handle___kvm_vcpu_run()
    236  cpu_reg(host_ctxt, 1) = ret;    in handle___kvm_vcpu_run()
    [all …]
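The hyp-main.c hits above outline the host-HVC handler convention: each handler receives the host's saved kvm_cpu_context, pulls its arguments out of the saved general-purpose registers with DECLARE_REG(), and writes its result back with cpu_reg() (x1 in the handle___kvm_vcpu_run() case). Below is a minimal, self-contained sketch of that convention; the context struct, macros and handler are simplified stand-ins written for this note, not the kernel definitions.

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct kvm_cpu_context: only the saved GPR array matters here. */
struct kvm_cpu_context {
	uint64_t regs[31];
};

/*
 * Modeled on the hyp macros seen above: cpu_reg() names one saved GPR and
 * DECLARE_REG() pulls a typed argument out of it.  The real macros live in
 * the arm64 KVM headers and in hyp-main.c itself; these are simplified copies.
 */
#define cpu_reg(ctxt, r)	((ctxt)->regs[(r)])
#define DECLARE_REG(type, name, ctxt, reg)	\
	type name = (type)cpu_reg(ctxt, reg)

/*
 * Hypothetical handler with the same shape as handle___pkvm_vcpu_load():
 * arguments arrive in x1..x2 of the interrupted host, and the result goes
 * back in x1, mirroring "cpu_reg(host_ctxt, 1) = ret" above.
 */
static void handle___example_call(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(uint64_t, handle, host_ctxt, 1);
	DECLARE_REG(unsigned int, vcpu_idx, host_ctxt, 2);
	int ret = (handle && vcpu_idx < 16) ? 0 : -22;	/* -EINVAL stand-in */

	cpu_reg(host_ctxt, 1) = (uint64_t)ret;
}

int main(void)
{
	struct kvm_cpu_context host_ctxt = { .regs = { 0, 42, 3 } };

	handle___example_call(&host_ctxt);
	printf("return value in x1: %lld\n",
	       (long long)(int64_t)cpu_reg(&host_ctxt, 1));
	return 0;
}
```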
|
| psci-relay.c |
     20  void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
     72  static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)    in psci_forward() argument
     74  return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1),    in psci_forward()
     75                   cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));    in psci_forward()
    107  static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)    in psci_cpu_on() argument
    109  DECLARE_REG(u64, mpidr, host_ctxt, 1);    in psci_cpu_on()
    110  DECLARE_REG(unsigned long, pc, host_ctxt, 2);    in psci_cpu_on()
    111  DECLARE_REG(unsigned long, r0, host_ctxt, 3);    in psci_cpu_on()
    151  static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)    in psci_cpu_suspend() argument
    153  DECLARE_REG(u64, power_state, host_ctxt, 1);    in psci_cpu_suspend()
    [all …]
|
| switch.c |
    244  struct kvm_cpu_context *host_ctxt;    in __kvm_vcpu_run() local
    261  host_ctxt = host_data_ptr(host_ctxt);    in __kvm_vcpu_run()
    262  host_ctxt->__hyp_running_vcpu = vcpu;    in __kvm_vcpu_run()
    267  __sysreg_save_state_nvhe(host_ctxt);    in __kvm_vcpu_run()
    332  __sysreg_restore_state_nvhe(host_ctxt);    in __kvm_vcpu_run()
    351  host_ctxt->__hyp_running_vcpu = NULL;    in __kvm_vcpu_run()
    361  struct kvm_cpu_context *host_ctxt;    in hyp_panic() local
    364  host_ctxt = host_data_ptr(host_ctxt);    in hyp_panic()
    365  vcpu = host_ctxt->__hyp_running_vcpu;    in hyp_panic()
    371  __sysreg_restore_state_nvhe(host_ctxt);    in hyp_panic()
    [all …]
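Read together, the __kvm_vcpu_run() hits sketch the nVHE world-switch bracket around host_ctxt: look up the per-CPU host context, advertise the vcpu in __hyp_running_vcpu (which hyp_panic() reads back at lines 364-365), save the host's EL1 system registers, run the guest, then restore and clear. A paraphrased skeleton of that flow, with trap configuration, the exit loop and error handling omitted:

```c
/*
 * Paraphrase of the structure visible in the __kvm_vcpu_run() matches above;
 * not the complete function.
 */
static int example_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;

	host_ctxt = host_data_ptr(host_ctxt);	/* per-CPU host context (line 261) */
	host_ctxt->__hyp_running_vcpu = vcpu;	/* so hyp_panic() can find it (262) */

	__sysreg_save_state_nvhe(host_ctxt);	/* stash host EL1 sysregs (267) */
	/* ... activate traps, switch to the guest, handle exits ... */
	__sysreg_restore_state_nvhe(host_ctxt);	/* bring the host back (332) */

	host_ctxt->__hyp_running_vcpu = NULL;	/* no guest running any more (351) */
	return 0;
}
```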
|
| tlb.c |
     24  struct kvm_cpu_context *host_ctxt;    in enter_vmid_context() local
     27  host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;    in enter_vmid_context()
     28  vcpu = host_ctxt->__hyp_running_vcpu;    in enter_vmid_context()
    121  struct kvm_cpu_context *host_ctxt;    in exit_vmid_context() local
    124  host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;    in exit_vmid_context()
    125  vcpu = host_ctxt->__hyp_running_vcpu;    in exit_vmid_context()
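tlb.c still open-codes the lookup that the rest of this listing performs through host_data_ptr(): take this CPU's kvm_host_data and point at its host_ctxt member. To a first approximation (the real macro may carry extra hyp/per-CPU plumbing) the two spellings are equivalent:

```c
/* Assumed simplified form of the accessor used elsewhere in this listing. */
#define host_data_ptr(fld)	(&this_cpu_ptr(&kvm_host_data)->fld)

/* hyp-main.c / switch.c style ... */
struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);

/* ... versus the open-coded tlb.c style at lines 27 and 124 above. */
struct kvm_cpu_context *host_ctxt_tlb = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
```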
|
| ffa.c |
    865  bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)    in kvm_host_ffa_handler() argument
    893  if (!do_ffa_features(&res, host_ctxt))    in kvm_host_ffa_handler()
    898  do_ffa_rxtx_map(&res, host_ctxt);    in kvm_host_ffa_handler()
    901  do_ffa_rxtx_unmap(&res, host_ctxt);    in kvm_host_ffa_handler()
    905  do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);    in kvm_host_ffa_handler()
    908  do_ffa_mem_reclaim(&res, host_ctxt);    in kvm_host_ffa_handler()
    912  do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);    in kvm_host_ffa_handler()
    915  do_ffa_mem_frag_tx(&res, host_ctxt);    in kvm_host_ffa_handler()
    918  do_ffa_version(&res, host_ctxt);    in kvm_host_ffa_handler()
    921  do_ffa_part_get(&res, host_ctxt);    in kvm_host_ffa_handler()
    [all …]
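The kvm_host_ffa_handler() hits are the arms of one switch on func_id: every FF-A operation the hypervisor proxies gets a do_ffa_*() helper that decodes its arguments from host_ctxt and fills a result structure that is later written back to the host's registers. A trimmed sketch of that dispatch shape; the case labels and the "res" type are illustrative assumptions, and the real function covers more function IDs:

```c
	struct arm_smccc_res res;	/* assumed result type; the tree may differ */

	switch (func_id) {
	case FFA_RXTX_UNMAP:				/* line 901 */
		do_ffa_rxtx_unmap(&res, host_ctxt);
		break;
	case FFA_MEM_SHARE:				/* line 905 */
		do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
		break;
	case FFA_MEM_RECLAIM:				/* line 908 */
		do_ffa_mem_reclaim(&res, host_ctxt);
		break;
	default:
		return false;	/* not proxied: let the host issue the call itself */
	}
```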
|
| mem_protect.c |
    606  void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)    in handle_host_mem_abort() argument
|
| /linux/arch/arm64/kvm/hyp/include/hyp/ |
| debug-sr.h |
    136  struct kvm_cpu_context *host_ctxt;    in __debug_switch_to_guest_common() local
    144  host_ctxt = host_data_ptr(host_ctxt);    in __debug_switch_to_guest_common()
    149  __debug_save_state(host_dbg, host_ctxt);    in __debug_switch_to_guest_common()
    155  struct kvm_cpu_context *host_ctxt;    in __debug_switch_to_host_common() local
    163  host_ctxt = host_data_ptr(host_ctxt);    in __debug_switch_to_host_common()
    169  __debug_restore_state(host_dbg, host_ctxt);    in __debug_switch_to_host_common()
|
| switch.h |
    214  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);    in __activate_traps_hfgxtr()
    246  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);    in __deactivate_traps_hfgxtr()
    301  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);    in __activate_traps_common()
    338  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);    in __deactivate_traps_common()
    536  __fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));    in kvm_hyp_save_fpsimd_host()
|
| sysreg-sr.h |
     33  return host_data_ptr(host_ctxt) != ctxt;    in ctxt_is_guest()
|
| /linux/arch/arm64/kvm/hyp/vhe/ |
| switch.c |
    218  host_data_ptr(host_ctxt)->__hyp_running_vcpu = vcpu;    in kvm_vcpu_load_vhe()
    230  host_data_ptr(host_ctxt)->__hyp_running_vcpu = NULL;    in kvm_vcpu_put_vhe()
    574  struct kvm_cpu_context *host_ctxt;    in __kvm_vcpu_run_vhe() local
    578  host_ctxt = host_data_ptr(host_ctxt);    in __kvm_vcpu_run_vhe()
    583  sysreg_save_host_state_vhe(host_ctxt);    in __kvm_vcpu_run_vhe()
    609  sysreg_restore_host_state_vhe(host_ctxt);    in __kvm_vcpu_run_vhe()
    660  struct kvm_cpu_context *host_ctxt;    in __hyp_call_panic() local
    663  host_ctxt = host_data_ptr(host_ctxt);    in __hyp_call_panic()
    664  vcpu = host_ctxt->__hyp_running_vcpu;    in __hyp_call_panic()
    667  sysreg_restore_host_state_vhe(host_ctxt);    in __hyp_call_panic()
|
| sysreg-sr.c |
    200  struct kvm_cpu_context *host_ctxt;    in __vcpu_load_switch_sysregs() local
    203  host_ctxt = host_data_ptr(host_ctxt);    in __vcpu_load_switch_sysregs()
    204  __sysreg_save_user_state(host_ctxt);    in __vcpu_load_switch_sysregs()
    261  struct kvm_cpu_context *host_ctxt;    in __vcpu_put_switch_sysregs() local
    263  host_ctxt = host_data_ptr(host_ctxt);    in __vcpu_put_switch_sysregs()
    274  __sysreg_restore_user_state(host_ctxt);    in __vcpu_put_switch_sysregs()
|
| /linux/arch/arm64/include/asm/ |
| kvm_hyp.h |
    122  bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
    125  void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
    134  void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
|
| kvm_asm.h |
    293  void handle_trap(struct kvm_cpu_context *host_ctxt);
|
| kvm_host.h |
    720  struct kvm_cpu_context host_ctxt;    member
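kvm_host.h is where host_ctxt actually lives: it is a member of the per-CPU struct kvm_host_data that the accessors above resolve. A rough sketch of the container follows; the field set is abbreviated and approximate, not the actual definition:

```c
/*
 * Rough shape only: each physical CPU owns one of these, and its host_ctxt
 * holds the host's register state (and, via host_ctxt.fp_regs, FP/SIMD state)
 * while hyp code runs on that CPU.
 */
struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;	/* the member matched at line 720 */
	/* ... debug state, flags and other per-CPU host state elided ... */
};

/* Approximation: the tree declares the per-CPU instance (seen as
 * this_cpu_ptr(&kvm_host_data) in tlb.c) with a hyp-aware per-CPU macro. */
DECLARE_PER_CPU(struct kvm_host_data, kvm_host_data);
```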
|
| /linux/arch/arm64/kvm/hyp/include/nvhe/ |
| ffa.h |
     15  bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
|
| mem_protect.h |
     55  void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
|
| /linux/arch/arm64/kernel/ |
| asm-offsets.c |
    118  DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));    in main()
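asm-offsets.c exists because hand-written hypervisor assembly cannot use offsetof(): the DEFINE() above turns the byte offset of host_ctxt within struct kvm_host_data into the assembler constant HOST_DATA_CONTEXT. A hypothetical consumer is sketched below; it is illustrative only, not the actual host entry/exit assembly:

```c
/* Generated on the C side (the match above): */
DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));

/*
 * Hypothetical use on the assembly side, once the per-CPU base address of
 * kvm_host_data is already in x0:
 *
 *	add	x0, x0, #HOST_DATA_CONTEXT	// x0 = &kvm_host_data.host_ctxt
 *
 * so the low-level entry code can save the host's registers straight into
 * host_ctxt without knowing the C layout of kvm_host_data.
 */
```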
|
| /linux/arch/arm64/kvm/ |
| arm.c |
   2131  kvm_init_host_cpu_context(host_data_ptr(host_ctxt));    in cpu_hyp_init_context()
|