/* SPDX-License-Identifier: GPL-2.0 */
/* thread_info.h: PowerPC low-level thread information
 * adapted from the i386 version by Paul Mackerras
 *
 * Copyright (C) 2002  David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */

#ifndef _ASM_POWERPC_THREAD_INFO_H
#define _ASM_POWERPC_THREAD_INFO_H

#include <asm/asm-const.h>
#include <asm/page.h>

#ifdef __KERNEL__

#if defined(CONFIG_KASAN) && CONFIG_THREAD_SHIFT < 15
#define MIN_THREAD_SHIFT	(CONFIG_THREAD_SHIFT + 1)
#else
#define MIN_THREAD_SHIFT	CONFIG_THREAD_SHIFT
#endif

#if defined(CONFIG_VMAP_STACK) && MIN_THREAD_SHIFT < PAGE_SHIFT
#define THREAD_SHIFT		PAGE_SHIFT
#else
#define THREAD_SHIFT		MIN_THREAD_SHIFT
#endif

#define THREAD_SIZE		(1 << THREAD_SHIFT)
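
/*
 * Worked example (illustrative only, not a statement about any particular
 * defconfig): with CONFIG_THREAD_SHIFT=14 and KASAN enabled,
 * MIN_THREAD_SHIFT becomes 15, i.e. 32KB stacks.  If CONFIG_VMAP_STACK is
 * also selected on a 64KB-page kernel (PAGE_SHIFT=16), THREAD_SHIFT is
 * rounded up to PAGE_SHIFT so the stack occupies whole pages, giving
 * THREAD_SIZE = 64KB.
 */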

/*
 * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
 * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
 * assembly.
 */
#ifdef CONFIG_VMAP_STACK
#define THREAD_ALIGN_SHIFT	(THREAD_SHIFT + 1)
#else
#define THREAD_ALIGN_SHIFT	THREAD_SHIFT
#endif

#define THREAD_ALIGN		(1 << THREAD_ALIGN_SHIFT)
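
/*
 * Illustration of the overflow check above (an inference from the alignment,
 * not a description of the exact entry code): with the stack base aligned to
 * 2 * THREAD_SIZE, bit THREAD_SHIFT is clear for every address within the
 * THREAD_SIZE-sized stack, and set for an sp that has run off the low end
 * into the adjacent guard area, so sp & (1 << THREAD_SHIFT) != 0 signals
 * overflow with a single mask test.
 */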

#ifndef __ASSEMBLY__
#include <linux/cache.h>
#include <asm/processor.h>
#include <asm/accounting.h>
#include <asm/ppc_asm.h>

#define SLB_PRELOAD_NR	16U
/*
 * low level task data.
 */
struct thread_info {
	int		preempt_count;		/* 0 => preemptable,
						   <0 => BUG */
#ifdef CONFIG_SMP
	unsigned int	cpu;
#endif
	unsigned long	local_flags;		/* private flags for thread */
#ifdef CONFIG_LIVEPATCH_64
	unsigned long *livepatch_sp;
#endif
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC32)
	struct cpu_accounting_data accounting;
#endif
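	/*
	 * SLB preload cache: a small ring buffer of effective segment IDs,
	 * preloaded into the SLB when switching to this thread on 64-bit
	 * hash-MMU kernels.  (Summary of the apparent intent of the
	 * slb_preload_* fields below.)
	 */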
	unsigned char slb_preload_nr;
	unsigned char slb_preload_tail;
	u32 slb_preload_esid[SLB_PRELOAD_NR];

	/* low level flags - has atomic operations done on it */
	unsigned long	flags ____cacheline_aligned_in_smp;
};

/*
 * macros/functions for gaining access to the thread information structure
 */
#define INIT_THREAD_INFO(tsk)			\
{						\
	.preempt_count = INIT_PREEMPT_COUNT,	\
	.flags =	0,			\
}

#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
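
/*
 * THREAD_SIZE_ORDER is the page allocation order of a kernel stack: for
 * example, 16KB stacks (THREAD_SHIFT=14) on a 4KB-page kernel
 * (PAGE_SHIFT=12) are order-2 allocations.
 */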

/* how to get the thread information struct from C */
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);

void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec

#endif /* __ASSEMBLY__ */

/*
 * thread information flag bit numbers
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_SIGPENDING		1	/* signal pending */
#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
#define TIF_NOTIFY_SIGNAL	3	/* signal notifications exist */
#define TIF_SYSCALL_EMU		4	/* syscall emulation active */
#define TIF_RESTORE_TM		5	/* need to restore TM FP/VEC/VSX */
#define TIF_PATCH_PENDING	6	/* pending live patching update */
#define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
#define TIF_SINGLESTEP		8	/* singlestepping active */
#define TIF_SECCOMP		10	/* secure computing */
#define TIF_RESTOREALL		11	/* Restore all regs (implies NOERROR) */
#define TIF_NOERROR		12	/* Force successful syscall return */
#define TIF_NOTIFY_RESUME	13	/* callback before returning to user */
#define TIF_UPROBE		14	/* breakpointed or single-stepping */
#define TIF_SYSCALL_TRACEPOINT	15	/* syscall tracepoint instrumentation */
#define TIF_EMULATE_STACK_STORE	16	/* Is an instruction emulation
						for stack store? */
#define TIF_MEMDIE		17	/* is terminating due to OOM killer */
#if defined(CONFIG_PPC64)
#define TIF_ELF2ABI		18	/* function descriptors must die! */
#endif
#define TIF_POLLING_NRFLAG	19	/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_32BIT		20	/* 32 bit binary */

/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
#define _TIF_NOTIFY_SIGNAL	(1<<TIF_NOTIFY_SIGNAL)
#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
#define _TIF_32BIT		(1<<TIF_32BIT)
#define _TIF_RESTORE_TM		(1<<TIF_RESTORE_TM)
#define _TIF_PATCH_PENDING	(1<<TIF_PATCH_PENDING)
#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
#define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
#define _TIF_RESTOREALL		(1<<TIF_RESTOREALL)
#define _TIF_NOERROR		(1<<TIF_NOERROR)
#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
#define _TIF_UPROBE		(1<<TIF_UPROBE)
#define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE	(1<<TIF_EMULATE_STACK_STORE)
#define _TIF_SYSCALL_EMU	(1<<TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_DOTRACE	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
				 _TIF_SYSCALL_EMU)

#define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
				 _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
				 _TIF_NOTIFY_SIGNAL)
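
/*
 * _TIF_USER_WORK_MASK is, as the name suggests, the set of flags examined on
 * the return-to-userspace path: while any of them is set, the exit code keeps
 * looping to deliver signals, reschedule, run resume callbacks and so on
 * before finally dropping back to user mode.
 */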
#define _TIF_PERSYSCALL_MASK	(_TIF_RESTOREALL|_TIF_NOERROR)

/* Bits in local_flags */
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
#define TLF_NAPPING		0	/* idle thread enabled NAP mode */
#define TLF_SLEEPING		1	/* suspend code enabled SLEEP mode */
#define TLF_LAZY_MMU		3	/* tlb_batch is active */
#define TLF_RUNLATCH		4	/* Is the runlatch enabled? */

#define _TLF_NAPPING		(1 << TLF_NAPPING)
#define _TLF_SLEEPING		(1 << TLF_SLEEPING)
#define _TLF_LAZY_MMU		(1 << TLF_LAZY_MMU)
#define _TLF_RUNLATCH		(1 << TLF_RUNLATCH)

#ifndef __ASSEMBLY__

static inline void clear_thread_local_flags(unsigned int flags)
{
	struct thread_info *ti = current_thread_info();
	ti->local_flags &= ~flags;
}

static inline bool test_thread_local_flags(unsigned int flags)
{
	struct thread_info *ti = current_thread_info();
	return (ti->local_flags & flags) != 0;
}
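
/*
 * local_flags holds per-thread private flags (see the struct above), which is
 * why these helpers use plain, non-atomic updates on current.  A hypothetical
 * caller, sketched only to show the pattern (the helper name below is
 * illustrative, not a real call site):
 *
 *	current_thread_info()->local_flags |= _TLF_NAPPING;
 *	enter_nap();
 *	...
 *	if (test_thread_local_flags(_TLF_NAPPING))
 *		clear_thread_local_flags(_TLF_NAPPING);
 */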

#ifdef CONFIG_COMPAT
#define is_32bit_task()	(test_thread_flag(TIF_32BIT))
#define is_tsk_32bit_task(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT))
#define clear_tsk_compat_task(tsk) (clear_tsk_thread_flag(tsk, TIF_32BIT))
#else
#define is_32bit_task()	(IS_ENABLED(CONFIG_PPC32))
#define is_tsk_32bit_task(tsk)	(IS_ENABLED(CONFIG_PPC32))
#define clear_tsk_compat_task(tsk) do { } while (0)
#endif

#if defined(CONFIG_PPC64)
#define is_elf2_task() (test_thread_flag(TIF_ELF2ABI))
#else
#define is_elf2_task() (0)
#endif
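
/*
 * is_elf2_task() is presumably consulted wherever the two 64-bit ELF ABIs
 * diverge, the classic case being signal delivery: an ELFv1 handler address
 * is a function descriptor to dereference, while an ELFv2 handler address is
 * the entry point itself.
 */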

/*
 * Walks up the stack frames to make sure that the specified object is
 * entirely contained by a single stack frame.
 *
 * Returns:
 *	GOOD_FRAME	if within a frame
 *	BAD_STACK	if placed across a frame boundary (or outside stack)
 */
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
	const void *params;
	const void *frame;

	params = *(const void * const *)current_stack_pointer + STACK_FRAME_PARAMS;
	frame = **(const void * const * const *)current_stack_pointer;

	/*
	 * low -----------------------------------------------------------> high
	 * [backchain][metadata][params][local vars][saved registers][backchain]
	 *                      ^------------------------------------^
	 *                      |  allows copies only in this region |
	 *                      |                                    |
	 *                    params                               frame
	 * The metadata region contains the saved LR, CR etc.
	 */
	while (stack <= frame && frame < stackend) {
		if (obj + len <= frame)
			return obj >= params ? GOOD_FRAME : BAD_STACK;
		params = frame + STACK_FRAME_PARAMS;
		frame = *(const void * const *)frame;
	}

	return BAD_STACK;
}
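
/*
 * This helper exists for the hardened usercopy checks
 * (CONFIG_HARDENED_USERCOPY), which reject copies to or from a stack object
 * that is not wholly contained within a single frame.
 */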

#ifdef CONFIG_PPC32
extern void *emergency_ctx[];
#endif

#endif	/* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_THREAD_INFO_H */