#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/stackprotector.h>
#include <asm/fpu.h>
#include <asm/ptrace.h>

/* Slab cache for per-task extended (FPU) state; stays NULL when xstate_size == 0. */
struct kmem_cache *task_xstate_cachep = NULL;
/* Size in bytes of the per-task extended state; 0 means no FPU state at all. */
unsigned int xstate_size;

#ifdef CONFIG_CC_STACKPROTECTOR
/* Canary read by -fstack-protector prologue/epilogue code; exported for modules. */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#ifdef CONFIG_SUPERH32
	/*
	 * Flush any lazily-held FPU state back into src *before* the
	 * struct copy below, so dst inherits an up-to-date image.
	 */
	unlazy_fpu(src, task_pt_regs(src));
#endif
	*dst = *src;

	if (src->thread.xstate) {
		/* Child gets its own xstate buffer, seeded from the parent's. */
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}

	return 0;
}

/* Free a task's extended-state buffer, if it has one. */
void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;	/* guard against double free */
	}
}

/* Arch hook invoked when a task_struct is released. */
void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}

/*
 * Create the xstate slab cache. Skipped entirely when init_thread_xstate()
 * determined there is no FPU state to store (xstate_size == 0).
 */
void arch_task_cache_init(void)
{
	if (!xstate_size)
		return;

	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       __alignof__(union thread_xstate),
					       SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/* Soft-FP emulation provides an xstate layout even without FPU hardware. */
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP	1
#else
# define HAVE_SOFTFP	0
#endif
Gortmaker void init_thread_xstate(void) 700ea820cfSPaul Mundt { 710ea820cfSPaul Mundt if (boot_cpu_data.flags & CPU_HAS_FPU) 720ea820cfSPaul Mundt xstate_size = sizeof(struct sh_fpu_hard_struct); 730ea820cfSPaul Mundt else if (HAVE_SOFTFP) 740ea820cfSPaul Mundt xstate_size = sizeof(struct sh_fpu_soft_struct); 750ea820cfSPaul Mundt else 760ea820cfSPaul Mundt xstate_size = 0; 770ea820cfSPaul Mundt } 78