xref: /linux/arch/arm/include/asm/ptrace.h (revision 45d8b572fac3aa8b49d53c946b3685eaf78a2824)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  *  arch/arm/include/asm/ptrace.h
4  *
5  *  Copyright (C) 1996-2003 Russell King
6  */
7 #ifndef __ASM_ARM_PTRACE_H
8 #define __ASM_ARM_PTRACE_H
9 
10 #include <uapi/asm/ptrace.h>
11 
12 #ifndef __ASSEMBLY__
13 #include <linux/bitfield.h>
14 #include <linux/types.h>
15 
/*
 * Kernel view of the saved register file: 18 word-sized slots.  Individual
 * slots (r0-r15, CPSR, ORIG_r0) are accessed via the ARM_* accessor macros
 * from <uapi/asm/ptrace.h> used throughout this file.
 */
struct pt_regs {
	unsigned long uregs[18];
};
19 
/*
 * Extended register frame for traps taken in SVC mode: wraps the normal
 * pt_regs and additionally saves DACR (domain access control register).
 */
struct svc_pt_regs {
	struct pt_regs regs;
	u32 dacr;
};

/* Recover the enclosing svc_pt_regs from a pointer to its pt_regs member. */
#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
26 
/*
 * Did this trap come from user space?  Only the user modes have the low
 * four bits of the CPSR mode field clear.
 */
#define user_mode(regs)	\
	(((regs)->ARM_cpsr & 0xf) == 0)

#ifdef CONFIG_ARM_THUMB
/* Non-zero when the CPU was executing Thumb code at the time of the trap. */
#define thumb_mode(regs) \
	(((regs)->ARM_cpsr & PSR_T_BIT))
#else
/* Thumb support compiled out: never in Thumb mode. */
#define thumb_mode(regs) (0)
#endif
36 
#ifndef CONFIG_CPU_V7M
/*
 * Instruction-set state as a two-bit value assembled from the CPSR J and
 * T bits: {J,T} = 00 ARM, 01 Thumb, 10 Jazelle, 11 ThumbEE.
 */
#define isa_mode(regs) \
	(FIELD_GET(PSR_J_BIT, (regs)->ARM_cpsr) << 1 | \
	 FIELD_GET(PSR_T_BIT, (regs)->ARM_cpsr))
#else
/* M-profile cores execute Thumb only. */
#define isa_mode(regs) 1 /* Thumb */
#endif

/* Saved processor mode (USR/FIQ/IRQ/SVC/...) from the CPSR mode field. */
#define processor_mode(regs) \
	((regs)->ARM_cpsr & MODE_MASK)

/* IRQs were enabled (CPSR I bit clear) when the trap was taken. */
#define interrupts_enabled(regs) \
	(!((regs)->ARM_cpsr & PSR_I_BIT))

/* FIQs were enabled (CPSR F bit clear) when the trap was taken. */
#define fast_interrupts_enabled(regs) \
	(!((regs)->ARM_cpsr & PSR_F_BIT))
53 
/* Are the current registers suitable for user mode?
 * (used to maintain security in signal handlers)
 *
 * Returns 1 if @regs may safely be restored on return to user space,
 * 0 otherwise.  Side effect: the saved CPSR is sanitised - F and A are
 * always cleared, and when the frame is rejected it is forced into an
 * unprivileged state before returning 0.
 */
static inline int valid_user_regs(struct pt_regs *regs)
{
#ifndef CONFIG_CPU_V7M
	unsigned long mode = regs->ARM_cpsr & MODE_MASK;

	/*
	 * Always clear the F (FIQ) and A (delayed abort) bits
	 */
	regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);

	/* Acceptable only with IRQs unmasked and a genuine user mode. */
	if ((regs->ARM_cpsr & PSR_I_BIT) == 0) {
		if (mode == USR_MODE)
			return 1;
		/* 26-bit user mode is valid only on CPUs that support it. */
		if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE)
			return 1;
	}

	/*
	 * Force CPSR to something logical: keep only the flags/status/
	 * extension bytes plus the Thumb and 32-bit-mode bits, then (on
	 * 32-bit-only CPUs) drop into USR_MODE.
	 */
	regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
	if (!(elf_hwcap & HWCAP_26BIT))
		regs->ARM_cpsr |= USR_MODE;

	return 0;
#else /* ifndef CONFIG_CPU_V7M */
	/* No privileged CPSR mode field to validate on v7-M. */
	return 1;
#endif
}
86 
87 static inline long regs_return_value(struct pt_regs *regs)
88 {
89 	return regs->ARM_r0;
90 }
91 
/* Saved program counter; expands to an lvalue so it may also be assigned. */
#define instruction_pointer(regs)	(regs)->ARM_pc

#ifdef CONFIG_THUMB2_KERNEL
/* Thumb-2 kernels use r7 as the frame pointer. */
#define frame_pointer(regs) (regs)->ARM_r7
#else
/* ARM-mode kernels use the conventional fp register. */
#define frame_pointer(regs) (regs)->ARM_fp
#endif
99 
/* Set the saved program counter, i.e. where the trap will resume. */
static inline void instruction_pointer_set(struct pt_regs *regs,
					   unsigned long val)
{
	instruction_pointer(regs) = val;
}
105 
#ifdef CONFIG_SMP
/* Out of line on SMP; otherwise just the saved PC. */
extern unsigned long profile_pc(struct pt_regs *regs);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif

/* Condition-code field (bits 31:28) of an ARM instruction word. */
#define predicate(x)		((x) & 0xf0000000)
#define PREDICATE_ALWAYS	0xe0000000	/* condition code AL */

/*
 * True if instr is a 32-bit thumb instruction. This works if instr
 * is the first or only half-word of a thumb instruction. It also works
 * when instr holds all 32-bits of a wide thumb instruction if stored
 * in the form (first_half<<16)|(second_half)
 */
#define is_wide_instruction(instr)	((unsigned)(instr) >= 0xe800)
122 
/*
 * kprobe-based event tracer support
 */
#include <linux/compiler.h>
/* Largest byte offset into pt_regs that regs_get_register() will honour. */
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0))

/* Map register name <-> byte offset within struct pt_regs. */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
/* Kernel-stack inspection helpers for the kprobe fetch-arg code. */
extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
					       unsigned int n);
134 
135 /**
136  * regs_get_register() - get register value from its offset
137  * @regs:	   pt_regs from which register value is gotten
138  * @offset:    offset number of the register.
139  *
140  * regs_get_register returns the value of a register whose offset from @regs.
141  * The @offset is the offset of the register in struct pt_regs.
142  * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
143  */
144 static inline unsigned long regs_get_register(struct pt_regs *regs,
145 					      unsigned int offset)
146 {
147 	if (unlikely(offset > MAX_REG_OFFSET))
148 		return 0;
149 	return *(unsigned long *)((unsigned long)regs + offset);
150 }
151 
152 /* Valid only for Kernel mode traps. */
153 static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
154 {
155 	return regs->ARM_sp;
156 }
157 
158 static inline unsigned long user_stack_pointer(struct pt_regs *regs)
159 {
160 	return regs->ARM_sp;
161 }
162 
/*
 * Locate the pt_regs of the current thread: round the stack pointer up to
 * the top of the THREAD_SIZE region, skip the 8 bytes reserved at the very
 * top (the "- 7" lands on the last 8-byte-aligned address), then step back
 * one full pt_regs.
 */
#define current_pt_regs(void) ({ (struct pt_regs *)			\
		((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1;	\
})
166 
167 static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
168 {
169 	regs->ARM_r0 = rc;
170 }
171 
172 /*
173  * Update ITSTATE after normal execution of an IT block instruction.
174  *
175  * The 8 IT state bits are split into two parts in CPSR:
176  *	ITSTATE<1:0> are in CPSR<26:25>
177  *	ITSTATE<7:2> are in CPSR<15:10>
178  */
179 static inline unsigned long it_advance(unsigned long cpsr)
180 {
181 	if ((cpsr & 0x06000400) == 0) {
182 		/* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
183 		cpsr &= ~PSR_IT_MASK;
184 	} else {
185 		/* We need to shift left ITSTATE<4:0> */
186 		const unsigned long mask = 0x06001c00;  /* Mask ITSTATE<4:0> */
187 		unsigned long it = cpsr & mask;
188 		it <<= 1;
189 		it |= it >> (27 - 10);  /* Carry ITSTATE<2> to correct place */
190 		it &= mask;
191 		cpsr &= ~mask;
192 		cpsr |= it;
193 	}
194 	return cpsr;
195 }
196 
/* Syscall entry/exit tracing hooks invoked from the kernel entry code. */
int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);
199 
200 #endif /* __ASSEMBLY__ */
201 #endif
202