/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/ptrace.h
 *
 *  Copyright (C) 1996-2003 Russell King
 */
#ifndef __ASM_ARM_PTRACE_H
#define __ASM_ARM_PTRACE_H

#include <uapi/asm/ptrace.h>

#ifndef __ASSEMBLY__
#include <linux/bitfield.h>
#include <linux/types.h>

struct pt_regs {
	unsigned long uregs[18];
};

struct svc_pt_regs {
	struct pt_regs regs;
	u32 dacr;
	u32 ttbcr;
};

#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)

#define user_mode(regs)	\
	(((regs)->ARM_cpsr & 0xf) == 0)
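
/*
 * Note: the low nibble of the CPSR mode field is zero only for USR_MODE
 * (0x10) and USR26_MODE (0x00), so masking with 0xf and comparing against
 * zero covers both 32-bit and 26-bit user modes.
 */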

#ifdef CONFIG_ARM_THUMB
#define thumb_mode(regs) \
	(((regs)->ARM_cpsr & PSR_T_BIT))
#else
#define thumb_mode(regs) (0)
#endif

#ifndef CONFIG_CPU_V7M
#define isa_mode(regs) \
	(FIELD_GET(PSR_J_BIT, (regs)->ARM_cpsr) << 1 | \
	 FIELD_GET(PSR_T_BIT, (regs)->ARM_cpsr))
#else
#define isa_mode(regs) 1 /* Thumb */
#endif
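
/*
 * isa_mode() thus yields 0 for ARM (J=0, T=0), 1 for Thumb (J=0, T=1),
 * 2 for Jazelle (J=1, T=0) and 3 for ThumbEE (J=1, T=1), following the
 * CPSR J:T instruction set state encoding.
 */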

#define processor_mode(regs) \
	((regs)->ARM_cpsr & MODE_MASK)

#define interrupts_enabled(regs) \
	(!((regs)->ARM_cpsr & PSR_I_BIT))

#define fast_interrupts_enabled(regs) \
	(!((regs)->ARM_cpsr & PSR_F_BIT))

/* Are the current registers suitable for user mode?
 * (used to maintain security in signal handlers)
 */
static inline int valid_user_regs(struct pt_regs *regs)
{
#ifndef CONFIG_CPU_V7M
	unsigned long mode = regs->ARM_cpsr & MODE_MASK;

	/*
	 * Always clear the F (FIQ) and A (delayed abort) bits
	 */
	regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);

	if ((regs->ARM_cpsr & PSR_I_BIT) == 0) {
		if (mode == USR_MODE)
			return 1;
		if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE)
			return 1;
	}

	/*
	 * Force CPSR to something logical...
	 */
	regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
	if (!(elf_hwcap & HWCAP_26BIT))
		regs->ARM_cpsr |= USR_MODE;

	return 0;
#else /* ifndef CONFIG_CPU_V7M */
	return 1;
#endif
}

static inline long regs_return_value(struct pt_regs *regs)
{
	return regs->ARM_r0;
}

#define instruction_pointer(regs)	(regs)->ARM_pc

#ifdef CONFIG_THUMB2_KERNEL
#define frame_pointer(regs) (regs)->ARM_r7
#else
#define frame_pointer(regs) (regs)->ARM_fp
#endif
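
/*
 * Thumb-2 kernels conventionally use r7 as the frame pointer, while
 * ARM-mode kernels use r11 (fp).
 */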

static inline void instruction_pointer_set(struct pt_regs *regs,
					   unsigned long val)
{
	instruction_pointer(regs) = val;
}

#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif

#define predicate(x)		((x) & 0xf0000000)
#define PREDICATE_ALWAYS	0xe0000000
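
/*
 * For example, predicate(0xe1a00000) ("mov r0, r0") is PREDICATE_ALWAYS,
 * while predicate(0x0a000000) (a "beq") is 0x00000000, i.e. condition EQ.
 */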

/*
 * True if instr is a 32-bit Thumb instruction. This works if instr
 * is the first or only half-word of a Thumb instruction. It also works
 * when instr holds all 32 bits of a wide Thumb instruction, stored
 * in the form (first_half << 16) | (second_half).
 */
#define is_wide_instruction(instr)	((unsigned)(instr) >= 0xe800)
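
/*
 * The first half-word of a 32-bit Thumb instruction always has its top
 * five bits set to 0b11101, 0b11110 or 0b11111, i.e. it is >= 0xe800.
 * For example, 0xf000 (first half of a 32-bit bl) is wide, while
 * 0x4770 (bx lr) is not.
 */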

/*
 * kprobe-based event tracer support
 */
#include <linux/compiler.h>
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0))

extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
					       unsigned int n);

/**
 * regs_get_register() - get register value from its offset
 * @regs:	pt_regs from which the register value is read
 * @offset:	byte offset of the register within struct pt_regs
 *
 * regs_get_register() returns the value of the register located at
 * @offset bytes from the start of @regs.  If @offset is bigger than
 * MAX_REG_OFFSET, this returns 0.
 */
static inline unsigned long regs_get_register(struct pt_regs *regs,
					      unsigned int offset)
{
	if (unlikely(offset > MAX_REG_OFFSET))
		return 0;
	return *(unsigned long *)((unsigned long)regs + offset);
}
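
/*
 * Typical usage (e.g. from a kprobe handler) reads a register by its
 * struct pt_regs offset, for instance:
 *
 *	unsigned long r1 = regs_get_register(regs,
 *					     offsetof(struct pt_regs, ARM_r1));
 */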

/* Valid only for Kernel mode traps. */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	return regs->ARM_sp;
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->ARM_sp;
}

#define current_pt_regs(void) ({ (struct pt_regs *)			\
		((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1;	\
})
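
/*
 * The saved pt_regs of the current thread sit at the top of its kernel
 * stack, 8 bytes below the stack's end: ORing the stack pointer with
 * THREAD_SIZE - 1 gives the address of the stack's last byte, subtracting
 * 7 lands on that 8-byte boundary, and the final "- 1" steps back over
 * one struct pt_regs.
 */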

static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
	regs->ARM_r0 = rc;
}

/*
 * Update ITSTATE after normal execution of an IT block instruction.
 *
 * The 8 IT state bits are split into two parts in CPSR:
 *	ITSTATE<1:0> are in CPSR<26:25>
 *	ITSTATE<7:2> are in CPSR<15:10>
 */
static inline unsigned long it_advance(unsigned long cpsr)
{
	if ((cpsr & 0x06000400) == 0) {
		/* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
		cpsr &= ~PSR_IT_MASK;
	} else {
		/* We need to shift left ITSTATE<4:0> */
		const unsigned long mask = 0x06001c00;  /* Mask ITSTATE<4:0> */
		unsigned long it = cpsr & mask;
		it <<= 1;
		it |= it >> (27 - 10);  /* Carry ITSTATE<2> to correct place */
		it &= mask;
		cpsr &= ~mask;
		cpsr |= it;
	}
	return cpsr;
}
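
/*
 * For example, with ITSTATE<4:0> = 0b01010, it_advance() shifts the field
 * to 0b10100; once ITSTATE<2:0> is all zero (e.g. ITSTATE<4:0> = 0b01000),
 * the IT block is finished and the whole IT state is cleared.
 */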

int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);

#endif /* __ASSEMBLY__ */
#endif