/*
 * arch/sh/kernel/process.c
 * (revision 5d920bb929a99446062a48cf90867bbca57b8e77)
 */
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/stackprotector.h>

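/*
 * Extended per-task state (FPU context). task_xstate_cachep backs the
 * per-task allocations; xstate_size is set at boot by
 * init_thread_xstate() and stays 0 when the CPU carries no FPU state.
 */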
struct kmem_cache *task_xstate_cachep = NULL;
unsigned int xstate_size;

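/*
 * Stack protector canary. Code built with -fstack-protector references
 * __stack_chk_guard directly, so the symbol is exported for modules
 * built with the same option.
 */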
#ifdef CONFIG_CC_STACKPROTECTOR
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

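/*
 * Arch-specific fork helper: after the plain struct copy, give the
 * child its own xstate object so parent and child never share FPU
 * context.
 */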
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;

	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}

	return 0;
}

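/* Release a task's xstate object, if one was ever allocated. */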
void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}

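/*
 * Thread stack / thread_info allocation. When THREAD_SIZE is smaller
 * than a page, stacks are packed into a dedicated slab cache;
 * otherwise whole pages come straight from the page allocator.
 */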
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

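/*
 * Slab-backed variant. With CONFIG_DEBUG_STACK_USAGE the stack is
 * zeroed on allocation so the stack-depth checker can tell used bytes
 * from never-touched ones.
 */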
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ti;
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif

	ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
	return ti;
}

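/* Tear down the xstate first, then return the stack to the cache. */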
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	kmem_cache_free(thread_info_cache, ti);
}

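/*
 * The THREAD_SIZE alignment matters here: current_thread_info() finds
 * the thread_info by masking the stack pointer, which only works if
 * every stack is naturally aligned to its size.
 */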
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
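/* Page-backed variant: THREAD_SIZE covers one page or more. */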
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */

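/*
 * Create the xstate cache once the boot CPU has been probed; a zero
 * xstate_size (no FPU and no emulator) means there is nothing to
 * cache.
 */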
void arch_task_cache_init(void)
{
	if (!xstate_size)
		return;

	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       __alignof__(union thread_xstate),
					       SLAB_PANIC | SLAB_NOTRACK, NULL);
}

#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP	1
#else
# define HAVE_SOFTFP	0
#endif

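/*
 * Size the xstate for this CPU: hardware FPU context, software
 * emulation context, or nothing at all.
 */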
void __cpuinit init_thread_xstate(void)
{
	if (boot_cpu_data.flags & CPU_HAS_FPU)
		xstate_size = sizeof(struct sh_fpu_hard_struct);
	else if (HAVE_SOFTFP)
		xstate_size = sizeof(struct sh_fpu_soft_struct);
	else
		xstate_size = 0;
}