/linux/arch/x86/kernel/fpu/

xstate.c
    370   xstate_init_xcomp_bv(&init_fpstate.regs.xsave, init_fpstate.xfeatures);  in setup_init_fpu_buf()
    375   os_xrstor_booting(&init_fpstate.regs.xsave);  in setup_init_fpu_buf()
    936   static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr)  in __raw_xsave_addr() argument
    938   u64 xcomp_bv = xsave->header.xcomp_bv;  in __raw_xsave_addr()
    948   return (void *)xsave + xfeature_get_offset(xcomp_bv, xfeature_nr);  in __raw_xsave_addr()
    969   void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)  in get_xsave_addr() argument
    995   if (!(xsave->header.xfeatures & BIT_ULL(xfeature_nr)))  in get_xsave_addr()
    998   return __raw_xsave_addr(xsave, xfeature_nr);  in get_xsave_addr()
   1007   void __user *get_xsave_addr_user(struct xregs_state __user *xsave, int xfeature_nr)  in get_xsave_addr_user() argument
   1012   return (void __user *)xsave + xstate_offsets[xfeature_nr];  in get_xsave_addr_user()
    [all …]

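A minimal sketch of how the helpers indexed above are used by call sites elsewhere in this listing (core.c, regset.c). It is an in-kernel fragment, not a standalone program, and the surrounding context (the fpu pointer, the choice of XFEATURE_CET_USER) is illustrative rather than copied from one specific caller.

	/*
	 * Illustrative fragment: read the user shadow-stack pointer out of a
	 * task's compacted xsave image.  get_xsave_addr() returns NULL when
	 * the component's bit is clear in header.xfeatures, i.e. the
	 * component is in its init state and holds the architectural reset
	 * value.
	 */
	struct cet_user_state *cet;
	u64 ssp = 0;

	cet = get_xsave_addr(&fpu->fpstate->regs.xsave, XFEATURE_CET_USER);
	if (cet)
		ssp = cet->user_ssp;
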
xstate.h
     13   static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask)  in xstate_init_xcomp_bv() argument
     20   xsave->header.xcomp_bv = mask | XCOMP_BV_COMPACTED_FORMAT;  in xstate_init_xcomp_bv()
     57   extern void __user *get_xsave_addr_user(struct xregs_state __user *xsave, int xfeature_nr);
    212   XSTATE_XSAVE(&fpstate->regs.xsave, lmask, hmask, err);  in os_xsave()
    229   XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);  in os_xrstor()
    239   XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);  in os_xrstor_supervisor()
    337   struct xregs_state *xstate = &fpstate->regs.xsave;  in os_xrstor_safe()

core.c
     97   if (fpu->fpstate->regs.xsave.header.xfeatures & AVX512_TRACKING_MASK)  in update_avx_timestamp()
    385   ustate->xsave.header.xfeatures = XFEATURE_MASK_FPSSE;  in fpu_copy_guest_fpstate_to_uabi()
    397   if (ustate->xsave.header.xfeatures & ~XFEATURE_MASK_FPSSE)  in fpu_copy_uabi_to_guest_fpstate()
    405   if (ustate->xsave.header.xfeatures & ~xcr0)  in fpu_copy_uabi_to_guest_fpstate()
    413   if (!(ustate->xsave.header.xfeatures & XFEATURE_MASK_PKRU))  in fpu_copy_uabi_to_guest_fpstate()
    479   return sizeof(init_fpstate.regs.xsave);  in init_fpstate_copy_size()
    511   xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures);  in fpstate_init_user()
    566   xstate = get_xsave_addr(&dst->thread.fpu.fpstate->regs.xsave,  in update_fpu_shstk()
    643   dst_fpu->fpstate->regs.xsave.header.xfeatures &= ~XFEATURE_MASK_PASID;  in fpu_clone()

regset.c
    125   fpu->fpstate->regs.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;  in xfpregs_set()
    197   cetregs = get_xsave_addr(&fpu->fpstate->regs.xsave, XFEATURE_CET_USER);  in ssp_get()
    217   struct xregs_state *xsave = &fpu->fpstate->regs.xsave;  in ssp_set() local
    242   cetregs = get_xsave_addr(xsave, XFEATURE_CET_USER);  in ssp_set()
    462   fpu->fpstate->regs.xsave.header.xfeatures |= XFEATURE_MASK_FP;  in fpregs_set()

signal.c
     72   struct xregs_state *xsave = &tsk->thread.fpu.fpstate->regs.xsave;  in save_fsave_header() local
     84   __put_user(xsave->i387.swd, &fp->status) ||  in save_fsave_header()
    415   fpregs->xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;  in __fpu_restore_sig()
    434   fpregs->xsave.header.xfeatures &= mask;  in __fpu_restore_sig()

/linux/tools/testing/selftests/kvm/x86_64/

sev_smoke_test.c
     46   "xsave (%rdi)\n"
     72   struct kvm_xsave __attribute__((aligned(64))) xsave = { 0 };  in test_sync_vmsa() local
     84   "xsave (%2)\n"  in test_sync_vmsa()
     86   : "=m"(xsave)  in test_sync_vmsa()
     87   : "A"(XFEATURE_MASK_X87_AVX), "r"(&xsave), "m" (x87val)  in test_sync_vmsa()
     89   vcpu_xsave_set(vcpu, &xsave);  in test_sync_vmsa()
    105   compare_xsave((u8 *)&xsave, (u8 *)hva);  in test_sync_vmsa()

state_test.c
    299   xstate_bv = (void *)&((uint8_t *)state->xsave->region)[512];  in main()
    303   vcpu_xsave_set(vcpuN, state->xsave);  in main()
    305   vcpu_xsave_set(vcpuN, state->xsave);  in main()
    308   vcpu_xsave_set(vcpuN, state->xsave);  in main()
    310   vcpu_xsave_set(vcpuN, state->xsave);  in main()

amx_test.c
    271   /* Compacted mode, get amx offset by xsave area  in main()
    276   void *amx_start = (void *)state->xsave + amx_offset;  in main()

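The truncated comment at line 271 refers to locating the XTILEDATA component inside a compacted (XSAVES-format) image. Below is a hedged, self-contained sketch of the general offset calculation using CPUID leaf 0xD; the helper names and the example xcomp_bv are mine, not from amx_test.c, and the test itself may use a simpler shortcut since XTILEDATA is the last component.

	#include <stdio.h>
	#include <stdint.h>
	#include <cpuid.h>

	/* CPUID.(EAX=0xD,ECX=nr): EAX = size of component nr, ECX bit 1 =
	 * component must be 64-byte aligned when the compacted format is used. */
	static uint32_t component_size(int nr, int *align64)
	{
		unsigned int eax, ebx, ecx, edx;

		__cpuid_count(0xD, nr, eax, ebx, ecx, edx);
		*align64 = !!(ecx & 2);
		return eax;
	}

	/* Offset of xfeature 'nr' in a compacted image whose header carries xcomp_bv. */
	static uint32_t compacted_offset(uint64_t xcomp_bv, int nr)
	{
		uint32_t offset = 512 + 64;	/* legacy FXSAVE area + xstate header */
		uint32_t size;
		int align64, i;

		for (i = 2; i < nr; i++) {
			if (!(xcomp_bv & (1ULL << i)))
				continue;
			size = component_size(i, &align64);
			if (align64)
				offset = (offset + 63) & ~63u;
			offset += size;
		}
		(void)component_size(nr, &align64);
		if (align64)
			offset = (offset + 63) & ~63u;
		return offset;
	}

	int main(void)
	{
		/* Example xcomp_bv: x87, SSE, AVX, XTILECFG (17) and XTILEDATA (18). */
		uint64_t xcomp_bv = (1ULL << 0) | (1ULL << 1) | (1ULL << 2) |
				    (1ULL << 17) | (1ULL << 18);

		printf("XTILEDATA offset: %u\n", compacted_offset(xcomp_bv, 18));
		return 0;
	}
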
/linux/arch/x86/include/asm/fpu/

xstate.h
    111   void xsaves(struct xregs_state *xsave, u64 mask);
    112   void xrstors(struct xregs_state *xsave, u64 mask);

api.h
    146   extern void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);

types.h
    354   struct xregs_state xsave;  member

/linux/arch/x86/include/asm/trace/

fpu.h
     25   __entry->xfeatures = fpu->fpstate->regs.xsave.header.xfeatures;
     26   __entry->xcomp_bv = fpu->fpstate->regs.xsave.header.xcomp_bv;

/linux/tools/testing/selftests/mm/

protection_keys.c
   1552   void *xsave;  in test_ptrace_modifies_pkru() local
   1592   xsave = (void *)malloc(xsave_size);  in test_ptrace_modifies_pkru()
   1593   pkey_assert(xsave > 0);  in test_ptrace_modifies_pkru()
   1596   iov.iov_base = xsave;  in test_ptrace_modifies_pkru()
   1601   pkey_register = (u32 *)(xsave + pkey_offset);  in test_ptrace_modifies_pkru()
   1610   memset(xsave, 0xCC, xsave_size);  in test_ptrace_modifies_pkru()
   1625   memset(xsave, 0xCC, xsave_size);  in test_ptrace_modifies_pkru()
   1631   xstate_bv = (u64 *)(xsave + 512);  in test_ptrace_modifies_pkru()
   1638   memset(xsave, 0xCC, xsave_size);  in test_ptrace_modifies_pkru()
   1652   memset(xsave, 0xCC, xsave_size);  in test_ptrace_modifies_pkru()
    [all …]

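A trimmed sketch of the ptrace flow test_ptrace_modifies_pkru() exercises: fetch the tracee's xstate image with PTRACE_GETREGSET/NT_X86_XSTATE, then read PKRU at the offset CPUID reports for xfeature 9 in the standard format. The helper names are mine, error handling is omitted, and the buffer size is assumed to come from CPUID.(EAX=0xD,ECX=0) as the test does.

	#include <stdint.h>
	#include <string.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>
	#include <elf.h>
	#include <cpuid.h>

	/* Offset of the PKRU component (xfeature 9) in the standard-format area. */
	static uint32_t pkru_xstate_offset(void)
	{
		unsigned int eax, ebx, ecx, edx;

		__cpuid_count(0xD, 9, eax, ebx, ecx, edx);
		return ebx;
	}

	static uint32_t read_tracee_pkru(pid_t pid, void *xbuf, size_t xsave_size)
	{
		struct iovec iov = { .iov_base = xbuf, .iov_len = xsave_size };
		uint32_t pkru;

		ptrace(PTRACE_GETREGSET, pid, (void *)NT_X86_XSTATE, &iov);
		memcpy(&pkru, (char *)xbuf + pkru_xstate_offset(), sizeof(pkru));
		return pkru;
	}
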
/linux/arch/x86/events/intel/

lbr.c
    414   xrstors(&task_ctx->xsave, XFEATURE_MASK_LBR);  in intel_pmu_arch_lbr_xrstors()
    499   xsaves(&task_ctx->xsave, XFEATURE_MASK_LBR);  in intel_pmu_arch_lbr_xsaves()
    991   struct x86_perf_task_context_arch_lbr_xsave *xsave = cpuc->lbr_xsave;  in intel_pmu_arch_lbr_read_xsave() local
    993   if (!xsave) {  in intel_pmu_arch_lbr_read_xsave()
    997   xsaves(&xsave->xsave, XFEATURE_MASK_LBR);  in intel_pmu_arch_lbr_read_xsave()
    999   intel_pmu_store_lbr(cpuc, xsave->lbr.entries);  in intel_pmu_arch_lbr_read_xsave()

/linux/tools/testing/selftests/kvm/include/x86_64/

processor.h
    407   struct kvm_xsave *xsave;  member
    888   struct kvm_xsave *xsave)  in vcpu_xsave_get() argument
    890   vcpu_ioctl(vcpu, KVM_GET_XSAVE, xsave);  in vcpu_xsave_get()
    893   struct kvm_xsave *xsave)  in vcpu_xsave2_get() argument
    895   vcpu_ioctl(vcpu, KVM_GET_XSAVE2, xsave);  in vcpu_xsave2_get()
    898   struct kvm_xsave *xsave)  in vcpu_xsave_set() argument
    900   vcpu_ioctl(vcpu, KVM_SET_XSAVE, xsave);  in vcpu_xsave_set()

/linux/tools/testing/selftests/kvm/lib/x86_64/

processor.c
   1059   state->xsave = malloc(size);  in vcpu_save_state()
   1060   vcpu_xsave2_get(vcpu, state->xsave);  in vcpu_save_state()
   1062   state->xsave = malloc(sizeof(struct kvm_xsave));  in vcpu_save_state()
   1063   vcpu_xsave_get(vcpu, state->xsave);  in vcpu_save_state()
   1132   vcpu_xsave_set(vcpu, state->xsave);  in vcpu_load_state()
   1144   free(state->xsave);  in kvm_x86_state_cleanup()

/linux/include/video/

newport.h
    162   npireg_t xsave; /* copy of xstart integer value for BLOCk addressing MODE */  member
    312   unsigned int xsave;  member

/linux/lib/zstd/common/

cpu.h
    122   C(xsave, 26)

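The C(xsave, 26) table entry corresponds to CPUID.(EAX=1):ECX bit 26, which reports XSAVE support. A minimal check equivalent in effect to what the feature table records (the helper name is mine):

	#include <cpuid.h>

	/* Returns non-zero when the CPU implements the XSAVE instruction family. */
	static int cpu_has_xsave(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
			return 0;
		return (ecx >> 26) & 1;
	}
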
/linux/tools/testing/selftests/x86/

amx.c
     42   static inline void xsave(struct xsave_buffer *xbuf, uint64_t rfbm)  in xsave() function
    580   xsave(xbuf2, XFEATURE_MASK_XTILEDATA);  in __validate_tiledata_regs()

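A hedged reconstruction of what a helper with the signature at line 42 typically looks like: XSAVE stores the components selected by the requested-feature bitmap (passed in EDX:EAX) into a 64-byte-aligned buffer. The struct xsave_buffer layout is defined elsewhere in amx.c and is left opaque here.

	#include <stdint.h>

	struct xsave_buffer;	/* 64-byte aligned, large enough for the enabled features */

	static inline void xsave(struct xsave_buffer *xbuf, uint64_t rfbm)
	{
		uint32_t rfbm_lo = rfbm;
		uint32_t rfbm_hi = rfbm >> 32;

		/* XSAVE takes the instruction mask in EDX:EAX and the buffer address in RDI. */
		asm volatile("xsave (%%rdi)"
			     : : "D" (xbuf), "a" (rfbm_lo), "d" (rfbm_hi)
			     : "memory");
	}
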
/linux/arch/x86/kvm/

x86.c
   5843   struct kvm_xsave *xsave;  in kvm_arch_vcpu_ioctl() member
   6051   u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);  in kvm_arch_vcpu_ioctl()
   6053   if (!u.xsave)  in kvm_arch_vcpu_ioctl()
   6056   r = kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);  in kvm_arch_vcpu_ioctl()
   6061   if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))  in kvm_arch_vcpu_ioctl()
   6069   u.xsave = memdup_user(argp, size);  in kvm_arch_vcpu_ioctl()
   6070   if (IS_ERR(u.xsave)) {  in kvm_arch_vcpu_ioctl()
   6071   r = PTR_ERR(u.xsave);  in kvm_arch_vcpu_ioctl()
   6075   r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);  in kvm_arch_vcpu_ioctl()
   6082   u.xsave = kzalloc(size, GFP_KERNEL);  in kvm_arch_vcpu_ioctl()
    [all …]

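A hedged userspace sketch of the uAPI these handlers implement, assuming a kernel new enough to define KVM_CAP_XSAVE2/KVM_GET_XSAVE2: the buffer size is discovered with KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) on the VM fd, and KVM_SET_XSAVE accepts the same, possibly larger-than-4K, buffer, matching the memdup_user(argp, size) path above. vm_fd/vcpu_fd are assumed to exist and error handling is trimmed.

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static struct kvm_xsave *vcpu_get_xsave2(int vm_fd, int vcpu_fd)
	{
		int size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
		struct kvm_xsave *xs;

		if (size < (int)sizeof(*xs))
			size = sizeof(*xs);	/* older kernels: plain 4K kvm_xsave */
		xs = calloc(1, size);
		ioctl(vcpu_fd, KVM_GET_XSAVE2, xs);
		return xs;
	}

	static void vcpu_set_xsave(int vcpu_fd, const struct kvm_xsave *xs)
	{
		ioctl(vcpu_fd, KVM_SET_XSAVE, xs);
	}
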
/linux/tools/arch/x86/kcpuid/

cpuid.csv
     58   … 1, 0, ecx, 26, xsave , XSAVE (and related instructions) support
    372   … 1, compacted_xsave_64byte_aligned, When compacted, subleaf-N feature xsave area is 64-byte al…

/linux/Documentation/admin-guide/

kernel-parameters.txt
   4147   and restore using xsave. The kernel will fallback to
   4152   xsave to save the states. By using this parameter,
   4154   xsave doesn't support modified optimization while
   4159   form of xsave area. The kernel will fall back to use
   4161   in standard form of xsave area. By using this
   4162   parameter, xsave area per process might occupy more