xref: /linux/arch/sh/kernel/process.c (revision 4b4193256c8d3bc3a5397b5cd9494c2ad386317d)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2cbf6b1baSPaul Mundt #include <linux/mm.h>
3cbf6b1baSPaul Mundt #include <linux/kernel.h>
45a0e3ad6STejun Heo #include <linux/slab.h>
5174cd4b1SIngo Molnar #include <linux/sched/signal.h>
668db0cf1SIngo Molnar #include <linux/sched/task_stack.h>
75d920bb9SFilippo Arcidiacono #include <linux/export.h>
85d920bb9SFilippo Arcidiacono #include <linux/stackprotector.h>
9936c163aSPaul Mundt #include <asm/fpu.h>
104cf421e5SIngo Molnar #include <asm/ptrace.h>
11cbf6b1baSPaul Mundt 
/* Slab cache backing per-task FPU extended state; created lazily in
 * arch_task_cache_init() and left NULL when xstate_size is 0. */
struct kmem_cache *task_xstate_cachep = NULL;
/* Size in bytes of one xstate area; 0 when the CPU has neither a hardware
 * FPU nor the soft-float emulator (see init_thread_xstate()). */
unsigned int xstate_size;

#ifdef CONFIG_STACKPROTECTOR
/* Canary value compared by compiler-emitted stack-protector epilogues;
 * exported so modules built with stack protection can link against it. */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
195d920bb9SFilippo Arcidiacono 
/*
 * This gets called at fork time so that we can flush any lazy FPU state
 * out to memory and copy the current task into the new thread.
 */
/*
 * Duplicate @src's arch-specific task state into @dst for fork/clone.
 * Returns 0 on success, -ENOMEM if the xstate buffer cannot be allocated.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Flush src's live FPU registers into src->thread.xstate *before*
	 * the struct copy below, so dst inherits up-to-date state.
	 */
	unlazy_fpu(src, task_pt_regs(src));
	*dst = *src;

	/*
	 * The struct copy above duplicated the xstate pointer; give dst
	 * its own buffer so parent and child do not share FPU state.
	 */
	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}

	return 0;
}
390ea820cfSPaul Mundt 
free_thread_xstate(struct task_struct * tsk)400ea820cfSPaul Mundt void free_thread_xstate(struct task_struct *tsk)
410ea820cfSPaul Mundt {
420ea820cfSPaul Mundt 	if (tsk->thread.xstate) {
430ea820cfSPaul Mundt 		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
440ea820cfSPaul Mundt 		tsk->thread.xstate = NULL;
450ea820cfSPaul Mundt 	}
460ea820cfSPaul Mundt }
470ea820cfSPaul Mundt 
/* Arch hook run when a task_struct is finally freed: drop its xstate. */
void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}
52cbf6b1baSPaul Mundt 
arch_task_cache_init(void)530ea820cfSPaul Mundt void arch_task_cache_init(void)
540ea820cfSPaul Mundt {
550ea820cfSPaul Mundt 	if (!xstate_size)
560ea820cfSPaul Mundt 		return;
570ea820cfSPaul Mundt 
580ea820cfSPaul Mundt 	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
590ea820cfSPaul Mundt 					       __alignof__(union thread_xstate),
6075f296d9SLevin, Alexander (Sasha Levin) 					       SLAB_PANIC, NULL);
610ea820cfSPaul Mundt }
620ea820cfSPaul Mundt 
/* HAVE_SOFTFP: non-zero when the kernel FPU emulator supplies soft-float
 * state for CPUs without a hardware FPU. */
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP	1
#else
# define HAVE_SOFTFP	0
#endif
680ea820cfSPaul Mundt 
init_thread_xstate(void)694603f53aSPaul Gortmaker void init_thread_xstate(void)
700ea820cfSPaul Mundt {
710ea820cfSPaul Mundt 	if (boot_cpu_data.flags & CPU_HAS_FPU)
720ea820cfSPaul Mundt 		xstate_size = sizeof(struct sh_fpu_hard_struct);
730ea820cfSPaul Mundt 	else if (HAVE_SOFTFP)
740ea820cfSPaul Mundt 		xstate_size = sizeof(struct sh_fpu_soft_struct);
750ea820cfSPaul Mundt 	else
760ea820cfSPaul Mundt 		xstate_size = 0;
770ea820cfSPaul Mundt }
78