/*
 *  arch/arm/include/asm/processor.h
 *
 *  Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H

#ifdef __KERNEL__

#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>

#ifdef __KERNEL__
#define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
			 TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX	TASK_SIZE
#endif
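
/*
 * Illustrative (not part of this header): generic ELF loading code places
 * the initial user stack below STACK_TOP, roughly as in fs/binfmt_elf.c:
 *
 *	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
 *				 executable_stack);
 */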

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
#endif
};

struct thread_struct {
							/* fault info	  */
	unsigned long		address;
	unsigned long		trap_no;
	unsigned long		error_code;
							/* debugging	  */
	struct debug_info	debug;
};
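
/*
 * Illustrative (not part of this header): the fault fields above are filled
 * in by the ARM fault handler before a signal is delivered, roughly as in
 * arch/arm/mm/fault.c:
 *
 *	tsk->thread.address = addr;
 *	tsk->thread.error_code = fsr;
 *	tsk->thread.trap_no = 14;
 */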

/*
 * Everything usercopied to/from thread_struct is statically-sized, so
 * no hardened usercopy whitelist is needed.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = *size = 0;
}
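
/*
 * Illustrative (not part of this header): an architecture that does copy a
 * dynamically-sized region of thread_struct to/from user space whitelists
 * it here instead, e.g. x86 exposes its FPU state roughly as:
 *
 *	*offset = offsetof(struct thread_struct, fpu.state);
 *	*size = fpu_kernel_xstate_size;
 */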

#define INIT_THREAD  {	}

#define start_thread(regs,pc,sp)					\
({									\
	unsigned long r7, r8, r9;					\
									\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) {			\
		r7 = regs->ARM_r7;					\
		r8 = regs->ARM_r8;					\
		r9 = regs->ARM_r9;					\
	}								\
	memset(regs->uregs, 0, sizeof(regs->uregs));			\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&			\
	    current->personality & FDPIC_FUNCPTRS) {			\
		regs->ARM_r7 = r7;					\
		regs->ARM_r8 = r8;					\
		regs->ARM_r9 = r9;					\
		regs->ARM_r10 = current->mm->start_data;		\
	} else if (!IS_ENABLED(CONFIG_MMU))				\
		regs->ARM_r10 = current->mm->start_data;		\
	if (current->personality & ADDR_LIMIT_32BIT)			\
		regs->ARM_cpsr = USR_MODE;				\
	else								\
		regs->ARM_cpsr = USR26_MODE;				\
	if (elf_hwcap & HWCAP_THUMB && pc & 1)				\
		regs->ARM_cpsr |= PSR_T_BIT;				\
	regs->ARM_cpsr |= PSR_ENDSTATE;					\
	regs->ARM_pc = pc & ~1;		/* pc */			\
	regs->ARM_sp = sp;		/* sp */			\
})
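
/*
 * Illustrative (not part of this header): binfmt loaders call this once the
 * new image is mapped, to point the registers at its entry point and initial
 * stack, roughly as in fs/binfmt_elf.c:
 *
 *	start_thread(regs, elf_entry, bprm->p);
 */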

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
#define cpu_relax()			smp_mb()
#else
#define cpu_relax()			barrier()
#endif
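
/*
 * Illustrative (not part of this header): cpu_relax() is meant for
 * busy-wait loops such as:
 *
 *	while (!READ_ONCE(ready))
 *		cpu_relax();
 */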

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp
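
/*
 * Illustrative (not part of this header): generic code uses these to report
 * a task's user-space pc/sp, e.g. fs/proc/array.c fills /proc/<pid>/stat
 * with KSTK_EIP(task) and KSTK_ESP(task) (when permitted).
 */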

#ifdef CONFIG_SMP
#define __ALT_SMP_ASM(smp, up)						\
	"9998:	" smp "\n"						\
	"	.pushsection \".alt.smp.init\", \"a\"\n"		\
	"	.long	9998b\n"					\
	"	" up "\n"						\
	"	.popsection\n"
#else
#define __ALT_SMP_ASM(smp, up)	up
#endif
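
/*
 * Illustrative (not part of this header): on an SMP kernel the "smp"
 * instruction is emitted inline and its address plus the "up" replacement
 * are recorded in .alt.smp.init, so __ALT_SMP_ASM("pldw [r0]", "pld [r0]")
 * expands roughly to:
 *
 *	9998:	pldw	[r0]
 *		.pushsection ".alt.smp.init", "a"
 *		.long	9998b
 *		pld	[r0]
 *		.popsection
 *
 * If the kernel then boots on a uniprocessor system, the recorded sites are
 * patched over with the "up" variant.
 */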

/*
 * Prefetching support - ARMv5 and later only.
 */
#if __LINUX_ARM_ARCH__ >= 5

#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	__asm__ __volatile__(
		"pld\t%a0"
		:: "p" (ptr));
}
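
/*
 * Illustrative (not part of this header): prefetch() is typically issued a
 * few iterations ahead of the data actually being touched, e.g.:
 *
 *	for (i = 0; i < n; i++) {
 *		prefetch(&buf[i + 1]);
 *		process(&buf[i]);	// hypothetical buffer and consumer
 *	}
 */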

#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	__asm__ __volatile__(
		".arch_extension	mp\n"
		__ALT_SMP_ASM(
			WASM(pldw)		"\t%a0",
			WASM(pld)		"\t%a0"
		)
		:: "p" (ptr));
}
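
/*
 * Illustrative (not part of this header): prefetchw() hints that the cache
 * line will be written, so it can be fetched in an exclusive state; useful
 * just before a read-modify-write of shared data, e.g.:
 *
 *	prefetchw(&node->refcount);	// hypothetical field
 *	...
 *	atomic_inc(&node->refcount);
 */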
#endif
#endif

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif

#endif /* __ASM_ARM_PROCESSOR_H */