// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/export.h>
#include <linux/stackprotector.h>
#include <asm/fpu.h>
#include <asm/ptrace.h>

/* Slab cache backing the per-task extended (FPU) state areas. */
struct kmem_cache *task_xstate_cachep = NULL;
/* Size of one xstate area; 0 when this configuration carries no FPU state. */
unsigned int xstate_size;

#ifdef CONFIG_STACKPROTECTOR
/* Canary value compared on function return by -fstack-protector code. */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 *
 * Returns 0 on success, -ENOMEM if the child's xstate area cannot be
 * allocated.  The parent's xstate (if any) is deep-copied so parent and
 * child never share an FPU save area.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#ifdef CONFIG_SUPERH32
	/*
	 * Flush any lazily-held FPU registers into src's save area first,
	 * so the memcpy below sees up-to-date state.
	 */
	unlazy_fpu(src, task_pt_regs(src));
#endif
	*dst = *src;

	if (src->thread.xstate) {
		/* The struct copy above aliased the parent's area; replace it. */
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}

	return 0;
}

/* Release @tsk's xstate area, if any, and clear the pointer. */
void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}

/* Arch hook run when a task_struct is finally freed. */
void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}

/*
 * Create the xstate slab cache at boot.  Skipped entirely when
 * init_thread_xstate() determined there is no FPU state to save.
 */
void arch_task_cache_init(void)
{
	if (!xstate_size)
		return;

	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       __alignof__(union thread_xstate),
					       SLAB_PANIC, NULL);
}

/* Nonzero when soft-float emulation provides FPU state to save/restore. */
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP 1
#else
# define HAVE_SOFTFP 0
#endif

714603f53aSPaul Gortmaker void init_thread_xstate(void) 720ea820cfSPaul Mundt { 730ea820cfSPaul Mundt if (boot_cpu_data.flags & CPU_HAS_FPU) 740ea820cfSPaul Mundt xstate_size = sizeof(struct sh_fpu_hard_struct); 750ea820cfSPaul Mundt else if (HAVE_SOFTFP) 760ea820cfSPaul Mundt xstate_size = sizeof(struct sh_fpu_soft_struct); 770ea820cfSPaul Mundt else 780ea820cfSPaul Mundt xstate_size = 0; 790ea820cfSPaul Mundt } 80