xref: /linux/arch/sh/kernel/process.c (revision 93df8a1ed6231727c5db94a80b1a6bd5ee67cec3)
1 #include <linux/mm.h>
2 #include <linux/kernel.h>
3 #include <linux/slab.h>
4 #include <linux/sched.h>
5 #include <linux/export.h>
6 #include <linux/stackprotector.h>
7 #include <asm/fpu.h>
8 
/* Slab cache backing each task's extended (FPU) state; NULL until
 * arch_task_cache_init() creates it, and left NULL when xstate_size is 0. */
struct kmem_cache *task_xstate_cachep = NULL;
/* Bytes of extended state per task; set once by init_thread_xstate(),
 * 0 when this CPU has neither hardware FPU nor soft-FP emulation. */
unsigned int xstate_size;

#ifdef CONFIG_CC_STACKPROTECTOR
/* Canary compared by GCC's -fstack-protector function epilogues. */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
16 
17 /*
18  * this gets called so that we can store lazy state into memory and copy the
19  * current task into the new thread.
20  */
21 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
22 {
23 #ifdef CONFIG_SUPERH32
24 	unlazy_fpu(src, task_pt_regs(src));
25 #endif
26 	*dst = *src;
27 
28 	if (src->thread.xstate) {
29 		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
30 						      GFP_KERNEL);
31 		if (!dst->thread.xstate)
32 			return -ENOMEM;
33 		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
34 	}
35 
36 	return 0;
37 }
38 
39 void free_thread_xstate(struct task_struct *tsk)
40 {
41 	if (tsk->thread.xstate) {
42 		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
43 		tsk->thread.xstate = NULL;
44 	}
45 }
46 
/*
 * Arch hook run when a task_struct is finally freed: drop the per-task
 * extended-state buffer allocated in arch_dup_task_struct().
 */
void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}
51 
52 void arch_task_cache_init(void)
53 {
54 	if (!xstate_size)
55 		return;
56 
57 	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
58 					       __alignof__(union thread_xstate),
59 					       SLAB_PANIC | SLAB_NOTRACK, NULL);
60 }
61 
62 #ifdef CONFIG_SH_FPU_EMU
63 # define HAVE_SOFTFP	1
64 #else
65 # define HAVE_SOFTFP	0
66 #endif
67 
68 void init_thread_xstate(void)
69 {
70 	if (boot_cpu_data.flags & CPU_HAS_FPU)
71 		xstate_size = sizeof(struct sh_fpu_hard_struct);
72 	else if (HAVE_SOFTFP)
73 		xstate_size = sizeof(struct sh_fpu_soft_struct);
74 	else
75 		xstate_size = 0;
76 }
77