| /linux/arch/arm64/kvm/ |
| reset.c | 124 | vcpu->arch.sve_state = buf; | in kvm_vcpu_finalize_sve() |
|         | 155 | void *sve_state = vcpu->arch.sve_state; | in kvm_arm_vcpu_destroy() (local) |
|         | 158 | if (sve_state) | in kvm_arm_vcpu_destroy() |
|         | 159 | kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu)); | in kvm_arm_vcpu_destroy() |
|         | 160 | kfree(sve_state); | in kvm_arm_vcpu_destroy() |
|         | 169 | memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu)); | in kvm_vcpu_reset_sve() |
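The reset.c hits above trace the lifecycle of the per-vCPU buffer: it is allocated once the vector length is finalized (kvm_vcpu_finalize_sve), zeroed on vCPU reset, and unshared from the hypervisor and freed on destroy. Below is a minimal userspace sketch of that lifecycle; the demo_* names, the size formula, and the use of calloc()/free() in place of the kernel allocators are illustrative assumptions, and the kvm_unshare_hyp() step is omitted.

```c
/*
 * Standalone model of the per-vCPU SVE buffer lifecycle traced by the
 * reset.c hits: allocate at finalize time, zero on reset, free on destroy.
 * demo_* names are illustrative, not kernel API.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SVE_VQ_BYTES 16			/* one quadword = 128 bits */

struct demo_vcpu {
	unsigned int sve_max_vq;	/* configured vector length, in quadwords */
	void *sve_state;		/* Z/P/FFR storage, NULL until finalized */
};

/* 32 Z regs (vq*16 bytes each) + 16 P regs + FFR (vq*2 bytes each):
 * roughly how the buffer size scales with the vector length. */
static size_t demo_sve_state_size(const struct demo_vcpu *vcpu)
{
	size_t vq = vcpu->sve_max_vq;

	return 32 * vq * SVE_VQ_BYTES + 17 * vq * 2;
}

/* Mirrors kvm_vcpu_finalize_sve(): allocate once the VL can no longer change. */
static int demo_finalize_sve(struct demo_vcpu *vcpu)
{
	void *buf = calloc(1, demo_sve_state_size(vcpu));

	if (!buf)
		return -1;
	vcpu->sve_state = buf;
	return 0;
}

/* Mirrors kvm_vcpu_reset_sve(): wipe the registers, keep the allocation. */
static void demo_reset_sve(struct demo_vcpu *vcpu)
{
	memset(vcpu->sve_state, 0, demo_sve_state_size(vcpu));
}

/* Mirrors the kvm_arm_vcpu_destroy() tail: release the buffer at teardown. */
static void demo_destroy(struct demo_vcpu *vcpu)
{
	free(vcpu->sve_state);
	vcpu->sve_state = NULL;
}

int main(void)
{
	struct demo_vcpu vcpu = { .sve_max_vq = 4 };	/* 512-bit vectors */

	if (demo_finalize_sve(&vcpu))
		return 1;
	demo_reset_sve(&vcpu);
	printf("SVE buffer size: %zu bytes\n", demo_sve_state_size(&vcpu));
	demo_destroy(&vcpu);
	return 0;
}
```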
|
| fpsimd.c | 77 | fp_state.sve_state = vcpu->arch.sve_state; | in kvm_arch_vcpu_ctxsync_fp() |
|
| arm.c | 2395 | struct cpu_sve_state *sve_state; | in teardown_hyp_mode() (local) |
|       | 2397 | sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; | in teardown_hyp_mode() |
|       | 2398 | free_pages((unsigned long) sve_state, pkvm_host_sve_state_order()); | in teardown_hyp_mode() |
|       | 2519 | per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page); | in init_pkvm_host_sve_state() |
|       | 2540 | struct cpu_sve_state *sve_state; | in finalize_init_hyp_mode() (local) |
|       | 2542 | sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; | in finalize_init_hyp_mode() |
|       | 2543 | per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = | in finalize_init_hyp_mode() |
|       | 2544 | kern_hyp_va(sve_state); | in finalize_init_hyp_mode() |
|
| guest.c | 348 | if (WARN_ON(vcpu->arch.sve_state)) | in set_sve_vls() |
|         | 503 | if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset, | in get_sve_reg() |
|         | 529 | if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr, | in set_sve_reg() |
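The guest.c hits show the KVM_GET_ONE_REG/KVM_SET_ONE_REG path copying a single SVE register slice at region.koffset inside sve_state to or from userspace. A rough standalone model of that region-based copy, with hypothetical demo_* names and memcpy() standing in for the copy_to_user()/copy_from_user() calls seen above:

```c
/*
 * Model of the region-based copy in get_sve_reg()/set_sve_reg(): translate
 * a register into an (offset, length) pair inside sve_state and copy only
 * that slice. demo_* names are illustrative.
 */
#include <stddef.h>
#include <string.h>

struct demo_sve_reg_region {
	size_t koffset;		/* offset of the register inside sve_state */
	size_t klen;		/* bytes backed by kernel storage */
};

/* Hypothetical layout helper: Z register n for a vq-quadword vector length. */
static void demo_zreg_region(struct demo_sve_reg_region *r,
			     unsigned int vq, unsigned int n)
{
	r->klen = (size_t)vq * 16;		/* one Z register is vq * 128 bits */
	r->koffset = (size_t)n * r->klen;
}

/* "get": copy one register slice out of the in-kernel buffer. */
static void demo_get_sve_reg(void *uptr, const char *sve_state,
			     const struct demo_sve_reg_region *r)
{
	memcpy(uptr, sve_state + r->koffset, r->klen);
}

/* "set": overwrite the same slice from a user-supplied buffer. */
static void demo_set_sve_reg(char *sve_state,
			     const struct demo_sve_reg_region *r,
			     const void *uptr)
{
	memcpy(sve_state + r->koffset, uptr, r->klen);
}
```

The sketch only covers the Z-register case; the kernel code additionally validates the register ID and covers the predicate registers and FFR.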
|
| /linux/arch/arm64/kernel/ |
| fpsimd.c | 504 | sve_save_state((char *)last->sve_state + | in fpsimd_save_user_state() |
|          | 617 | #define ZREG(sve_state, vq, n) ((char *)(sve_state) + \ | (argument) |
|          | 664 | void *sst = task->thread.sve_state; | in fpsimd_to_sve() |
|          | 688 | void const *sst = task->thread.sve_state; | in sve_to_fpsimd() |
|          | 735 | kfree(task->thread.sve_state); | in sve_free() |
|          | 736 | task->thread.sve_state = NULL; | in sve_free() |
|          | 751 | if (task->thread.sve_state) { | in sve_alloc() |
|          | 753 | memset(task->thread.sve_state, 0, | in sve_alloc() |
|          | 759 | task->thread.sve_state = | in sve_alloc() |
|          | 787 | void *sst = task->thread.sve_state; | in fpsimd_sync_to_effective_state_zeropad() |
|          | [all …] |
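The kernel fpsimd.c hits revolve around the layout of the task's sve_state buffer: the ZREG() macro computes where register Zn lives for a given vector length, and fpsimd_to_sve()/sve_to_fpsimd() convert between the 128-bit FPSIMD view and the SVE view by copying the low 128 bits of each Z register. A simplified sketch of that arithmetic, assuming the Z registers are stored back to back at vq * 16 bytes each (demo_* names are illustrative; predicate registers and FFR are left out):

```c
/*
 * Simplified sketch of the layout arithmetic behind ZREG() and of the
 * fpsimd_to_sve()/sve_to_fpsimd() conversions: the buffer stores Z0..Z31
 * back to back, vq * 16 bytes each, and the FPSIMD V registers are the
 * low 128 bits of the corresponding Z registers.
 */
#include <stdint.h>
#include <string.h>

#define DEMO_VQ_BYTES 16	/* one quadword = 128 bits */

struct demo_fpsimd_state {
	uint8_t vregs[32][16];	/* V0..V31, 128 bits each */
};

/* Counterpart of ZREG(sve_state, vq, n): address of Zn inside the buffer. */
static inline uint8_t *demo_zreg(void *sve_state, unsigned int vq, unsigned int n)
{
	return (uint8_t *)sve_state + (size_t)n * vq * DEMO_VQ_BYTES;
}

/* fpsimd_to_sve(): seed the low 128 bits of each Zn from Vn. */
static void demo_fpsimd_to_sve(void *sve_state, unsigned int vq,
			       const struct demo_fpsimd_state *fp)
{
	for (unsigned int n = 0; n < 32; n++)
		memcpy(demo_zreg(sve_state, vq, n), fp->vregs[n], DEMO_VQ_BYTES);
}

/* sve_to_fpsimd(): the reverse direction, truncating each Zn to 128 bits. */
static void demo_sve_to_fpsimd(struct demo_fpsimd_state *fp,
			       void *sve_state, unsigned int vq)
{
	for (unsigned int n = 0; n < 32; n++)
		memcpy(fp->vregs[n], demo_zreg(sve_state, vq, n), DEMO_VQ_BYTES);
}
```

The kernel versions also account for the task's effective vector length; the fpsimd_sync_to_effective_state_zeropad() hit above suggests the bits beyond it are zero-padded rather than left stale.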
|
| process.c | 364 | dst->thread.sve_state = NULL; | in arch_dup_task_struct() |
|           | 389 | dst->thread.sve_state = kzalloc(sve_state_size(src), | in copy_thread_za() |
|           | 391 | if (!dst->thread.sve_state) | in copy_thread_za() |
|           | 398 | kfree(dst->thread.sve_state); | in copy_thread_za() |
|           | 399 | dst->thread.sve_state = NULL; | in copy_thread_za() |
|
| ptrace.c | 849 | membuf_write(&to, target->thread.sve_state, end - start); | in sve_get_common() |
|          | 959 | if (!target->thread.sve_state) | in sve_set_common() |
|          | 1016 | target->thread.sve_state, | in sve_set_common() |
|          | 1164 | if (!target->thread.sve_state) { | in za_set() |
|          | 1166 | if (!target->thread.sve_state) { | in za_set() |
|          | 1246 | if (!target->thread.sve_state) | in zt_set() |
|
| signal.c | 391 | current->thread.sve_state, | in preserve_sve_context() |
|          | 459 | if (!current->thread.sve_state) { | in restore_sve_fpsimd_context() |
|          | 474 | err = __copy_from_user(current->thread.sve_state, | in restore_sve_fpsimd_context() |
|          | 590 | if (!current->thread.sve_state) | in restore_za_context() |
|
| /linux/arch/arm64/kvm/hyp/nvhe/ |
| pkvm.c | 389 | void *sve_state; | in unpin_host_sve_state() (local) |
|        | 394 | sve_state = kern_hyp_va(hyp_vcpu->vcpu.arch.sve_state); | in unpin_host_sve_state() |
|        | 395 | hyp_unpin_shared_mem(sve_state, | in unpin_host_sve_state() |
|        | 396 | sve_state + vcpu_sve_state_size(&hyp_vcpu->vcpu)); | in unpin_host_sve_state() |
|        | 443 | void *sve_state; | in pkvm_vcpu_init_sve() (local) |
|        | 454 | sve_state = kern_hyp_va(READ_ONCE(host_vcpu->arch.sve_state)); | in pkvm_vcpu_init_sve() |
|        | 456 | if (!sve_state || !sve_state_size) { | in pkvm_vcpu_init_sve() |
|        | 461 | ret = hyp_pin_shared_mem(sve_state, sve_state + sve_state_size); | in pkvm_vcpu_init_sve() |
|        | 465 | vcpu->arch.sve_state = sve_state; | in pkvm_vcpu_init_sve() |
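In the protected-KVM hits from pkvm.c, the hypervisor converts the host-provided pointer with kern_hyp_va(), validates it, pins the shared range with hyp_pin_shared_mem(), and only then records it in its own vCPU; unpin_host_sve_state() later unpins exactly the same range. A sketch of that ordering with stubbed pin/unpin calls and illustrative demo_* names:

```c
/*
 * Sketch of the pin/publish/unpin ordering from pkvm_vcpu_init_sve() and
 * unpin_host_sve_state(): validate the host-provided range, pin it, and
 * only then record the pointer; teardown unpins exactly the same range.
 */
#include <errno.h>
#include <stddef.h>

struct demo_hyp_vcpu {
	void *sve_state;	/* set only after the range has been pinned */
	size_t sve_state_size;
};

/* Stand-ins for hyp_pin_shared_mem()/hyp_unpin_shared_mem(). */
static int demo_pin_shared_mem(void *from, void *to)
{
	(void)from; (void)to;
	return 0;
}

static void demo_unpin_shared_mem(void *from, void *to)
{
	(void)from; (void)to;
}

static int demo_vcpu_init_sve(struct demo_hyp_vcpu *vcpu,
			      void *host_sve_state, size_t size)
{
	int ret;

	/* Reject a missing buffer or a zero size before touching anything. */
	if (!host_sve_state || !size)
		return -EINVAL;

	/* Pin the shared range so the host cannot yank it away mid-use. */
	ret = demo_pin_shared_mem(host_sve_state, (char *)host_sve_state + size);
	if (ret)
		return ret;

	/* Publish the pointer only after pinning succeeded. */
	vcpu->sve_state = host_sve_state;
	vcpu->sve_state_size = size;
	return 0;
}

static void demo_vcpu_teardown_sve(struct demo_hyp_vcpu *vcpu)
{
	if (!vcpu->sve_state)
		return;

	/* Unpin exactly the range pinned at init time, then forget it. */
	demo_unpin_shared_mem(vcpu->sve_state,
			      (char *)vcpu->sve_state + vcpu->sve_state_size);
	vcpu->sve_state = NULL;
}
```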
|
| hyp-main.c | 42 | struct cpu_sve_state *sve_state = *host_data_ptr(sve_state); | in __hyp_sve_restore_host() (local) |
|            | 54 | __sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl), | in __hyp_sve_restore_host() |
|            | 55 | &sve_state->fpsr, | in __hyp_sve_restore_host() |
|            | 57 | write_sysreg_el1(sve_state->zcr_el1, SYS_ZCR); | in __hyp_sve_restore_host() |
|
| /linux/arch/arm64/kvm/hyp/include/hyp/ |
| switch.h | 458 | struct cpu_sve_state *sve_state = *host_data_ptr(sve_state); | in __hyp_sve_save_host() (local) |
|          | 460 | sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR); | in __hyp_sve_save_host() |
|          | 462 | __sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl), | in __hyp_sve_save_host() |
|          | 463 | &sve_state->fpsr, | in __hyp_sve_save_host() |
|
| /linux/arch/arm64/include/asm/ |
| processor.h | 167 | void *sve_state; /* SVE registers, if any */ | (member) |
|
| kvm_host.h | 726 | struct cpu_sve_state *sve_state; | (member) |
|            | 810 | void *sve_state; | (member) |
|            | 1052 | #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \ | |
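kvm_host.h shows two distinct sve_state members: a per-physical-CPU struct cpu_sve_state * used to save the host's registers, and a per-vCPU void * blob addressed by macros such as vcpu_sve_pffr(), which points at the FFR inside that blob. A simplified stand-in for both, with the FFR offset computed under the assumption that the blob holds Z0..Z31, then P0..P15, then FFR (the real macro also converts the pointer to a hypervisor VA with kern_hyp_va()):

```c
/*
 * Simplified stand-ins for the two sve_state members seen in kvm_host.h:
 * a per-physical-CPU save area for the host's registers (typed) and a
 * per-vCPU opaque blob for the guest's registers. Both structs and the
 * offset math are illustrative, not the kernel's definitions.
 */
#include <stddef.h>
#include <stdint.h>

struct demo_cpu_sve_state;			/* host save area, per CPU (cf. line 726) */

struct demo_kvm_host_data {
	struct demo_cpu_sve_state *sve_state;
};

struct demo_kvm_vcpu_arch {
	void *sve_state;			/* guest Z/P/FFR blob (cf. line 810) */
	unsigned int sve_max_vq;		/* guest vector length, in quadwords */
};

/* In the spirit of vcpu_sve_pffr(): address of FFR inside the vCPU blob,
 * assuming the layout Z0..Z31 (vq*16 bytes each), then P0..P15 (vq*2 bytes
 * each), then FFR. */
static void *demo_vcpu_sve_pffr(struct demo_kvm_vcpu_arch *arch)
{
	size_t vq = arch->sve_max_vq;

	return (uint8_t *)arch->sve_state + 32 * vq * 16 + 16 * vq * 2;
}
```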
|