/* xref: /linux/arch/arc/kernel/unaligned.c (revision 7ec462100ef9142344ddbf86f2c3008b97acddbe) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
 *
 * vineetg : May 2011
 *  -Adapted (from .26 to .35)
 *  -original contribution by Tim.yao@amlogic.com
 */
9 
10 #include <linux/types.h>
11 #include <linux/perf_event.h>
12 #include <linux/ptrace.h>
13 #include <linux/uaccess.h>
14 #include <asm/disasm.h>
15 #include "unaligned.h"
16 
#ifdef CONFIG_CPU_BIG_ENDIAN
#define BE		1
/*
 * On big-endian, byte-reverse the register before the byte-at-a-time
 * stores below so bytes land in memory in the correct order.
 */
#define FIRST_BYTE_16	"swap %1, %1\n swape %1, %1\n"
#define FIRST_BYTE_32	"swape %1, %1\n"
#else
#define BE		0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#endif
26 
/*
 * Load one byte from user address @addr into @val, post-incrementing @addr.
 * If the access faults, the .fixup stub (label 3:) sets @err to 1 and
 * resumes at label 2:; the __ex_table entry maps 1b -> 3b.  @err is
 * read-modify (constraint "0"), so callers can accumulate errors across
 * multiple invocations.
 */
#define __get8_unaligned_check(val, addr, err)		\
	__asm__(					\
	"1:	ldb.ab	%1, [%2, 1]\n"			\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.align	4\n"				\
	"3:	mov	%0, 1\n"			\
	"	j	2b\n"				\
	"	.previous\n"				\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	4\n"				\
	"	.long	1b, 3b\n"			\
	"	.previous\n"				\
	: "=r" (err), "=&r" (val), "=r" (addr)		\
	: "0" (err), "2" (addr))
42 
/*
 * Read a 16-bit value from unaligned @addr, one byte at a time, assembling
 * it in host byte order (BE selects which byte is the high one).
 * NOTE: expects a local "fault:" label in the enclosing function; jumps
 * there if either byte access faulted.
 */
#define get16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(v, a, err);	\
		val =  v << ((BE) ? 8 : 0);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 0 : 8);		\
		if (err)				\
			goto fault;			\
	} while (0)
53 
/*
 * Read a 32-bit value from unaligned @addr via four byte loads, assembling
 * it in host byte order.  Like get16_unaligned_check(), requires a local
 * "fault:" label in the caller for the error path.
 */
#define get32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(v, a, err);	\
		val =  v << ((BE) ? 24 : 0);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 16 : 8);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 8 : 16);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 0 : 24);		\
		if (err)				\
			goto fault;			\
	} while (0)
68 
/*
 * Store a 16-bit @val to unaligned @addr with two byte stores (LSB first;
 * FIRST_BYTE_16 pre-swaps the register on big-endian).  Both store sites
 * have __ex_table entries pointing at the shared .fixup stub, which sets
 * err and resumes after the sequence.  Jumps to the caller's "fault:"
 * label on error.
 */
#define put16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
							\
		__asm__(				\
		FIRST_BYTE_16				\
		"1:	stb.ab	%1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"2:	stb	%1, [%2]\n"		\
		"3:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"	.align	4\n"			\
		"4:	mov	%0, 1\n"		\
		"	j	3b\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.align	4\n"			\
		"	.long	1b, 4b\n"		\
		"	.long	2b, 4b\n"		\
		"	.previous\n"			\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
							\
		if (err)				\
			goto fault;			\
	} while (0)
95 
/*
 * Store a 32-bit @val to unaligned @addr with four byte stores (LSB first;
 * FIRST_BYTE_32 pre-swaps the register on big-endian).  Each of the four
 * store sites has an __ex_table entry mapping to the common .fixup stub.
 * Jumps to the caller's "fault:" label on error.
 */
#define put32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
							\
		__asm__(				\
		FIRST_BYTE_32				\
		"1:	stb.ab	%1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"2:	stb.ab	%1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"3:	stb.ab	%1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"4:	stb	%1, [%2]\n"		\
		"5:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"	.align	4\n"			\
		"6:	mov	%0, 1\n"		\
		"	j	5b\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.align	4\n"			\
		"	.long	1b, 6b\n"		\
		"	.long	2b, 6b\n"		\
		"	.long	3b, 6b\n"		\
		"	.long	4b, 6b\n"		\
		"	.previous\n"			\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
							\
		if (err)				\
			goto fault;			\
	} while (0)
128 
/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
/*
 * When set, only a single pr_warn_once() is emitted for unaligned accesses;
 * cleared via /proc/sys/kernel/ignore-unaligned-usertrap (see the message
 * in misaligned_fixup()) to log every instance.
 */
int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */
132 
/*
 * Emulate a misaligned load described by the decoded @state.
 * Updates user registers via set_reg() (into @regs/@cregs); on an access
 * fault from the get*_unaligned_check() macros, sets state->fault instead.
 */
static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
			struct callee_regs *cregs)
{
	int val;

	/* register write back */
	if ((state->aa == 1) || (state->aa == 2)) {
		set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);

		/*
		 * aa == 2 appears to be post-update addressing: the offset
		 * updates the base register (above) but is not part of the
		 * effective address, so drop it -- TODO confirm vs ARC ISA.
		 */
		if (state->aa == 2)
			state->src2 = 0;
	}

	/* zz == 0 -> 32-bit load, otherwise 16-bit */
	if (state->zz == 0) {
		get32_unaligned_check(val, state->src1 + state->src2);
	} else {
		get16_unaligned_check(val, state->src1 + state->src2);

		/* x flag: sign-extend the 16-bit value */
		if (state->x)
			val = (val << 16) >> 16;
	}

	/* pref set presumably means a prefetch encoding: no dest write */
	if (state->pref == 0)
		set_reg(state->dest, val, regs, cregs);

	return;

fault:	state->fault = 1;
}
162 
/*
 * Emulate a misaligned store described by the decoded @state.
 * Performs any register write-back, then stores src1 byte-wise via the
 * put*_unaligned_check() macros; sets state->fault on an access fault.
 */
static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
			struct callee_regs *cregs)
{
	/* register write back */
	if ((state->aa == 1) || (state->aa == 2)) {
		set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);

		/*
		 * NOTE(review): this condition can never be true inside the
		 * (aa == 1 || aa == 2) branch, so the offset is never zeroed
		 * here.  fixup_load() zeroes the offset for aa == 2 at the
		 * matching spot -- looks like a typo for "aa == 2"; verify
		 * against the ARC ISA post-update store semantics before
		 * changing behavior.
		 */
		if (state->aa == 3)
			state->src3 = 0;
	} else if (state->aa == 3) {
		/* aa == 3: offset scaled by access size (zz == 2 -> halfword,
		 * zz == 0 -> word); any other zz is invalid here. */
		if (state->zz == 2) {
			set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
				regs, cregs);
		} else if (!state->zz) {
			set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
				regs, cregs);
		} else {
			goto fault;
		}
	}

	/* write fix-up: zz == 0 -> 32-bit store, otherwise 16-bit */
	if (!state->zz)
		put32_unaligned_check(state->src1, state->src2 + state->src3);
	else
		put16_unaligned_check(state->src1, state->src2 + state->src3);

	return;

fault:	state->fault = 1;
}
194 
195 /*
196  * Handle an unaligned access
197  * Returns 0 if successfully handled, 1 if some error happened
198  */
misaligned_fixup(unsigned long address,struct pt_regs * regs,struct callee_regs * cregs)199 int misaligned_fixup(unsigned long address, struct pt_regs *regs,
200 		     struct callee_regs *cregs)
201 {
202 	struct disasm_state state;
203 	char buf[TASK_COMM_LEN];
204 
205 	/* handle user mode only and only if enabled by sysadmin */
206 	if (!user_mode(regs) || !unaligned_enabled)
207 		return 1;
208 
209 	if (no_unaligned_warning) {
210 		pr_warn_once("%s(%d) made unaligned access which was emulated"
211 			     " by kernel assist\n. This can degrade application"
212 			     " performance significantly\n. To enable further"
213 			     " logging of such instances, please \n"
214 			     " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
215 			     get_task_comm(buf, current), task_pid_nr(current));
216 	} else {
217 		/* Add rate limiting if it gets down to it */
218 		pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
219 			get_task_comm(buf, current), task_pid_nr(current),
220 			address, regs->ret);
221 
222 	}
223 
224 	disasm_instr(regs->ret, &state, 1, regs, cregs);
225 
226 	if (state.fault)
227 		goto fault;
228 
229 	/* ldb/stb should not have unaligned exception */
230 	if ((state.zz == 1) || (state.di))
231 		goto fault;
232 
233 	if (!state.write)
234 		fixup_load(&state, regs, cregs);
235 	else
236 		fixup_store(&state, regs, cregs);
237 
238 	if (state.fault)
239 		goto fault;
240 
241 	/* clear any remnants of delay slot */
242 	if (delay_mode(regs)) {
243 		regs->ret = regs->bta & ~1U;
244 		regs->status32 &= ~STATUS_DE_MASK;
245 	} else {
246 		regs->ret += state.instr_len;
247 
248 		/* handle zero-overhead-loop */
249 		if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
250 			regs->ret = regs->lp_start;
251 			regs->lp_count--;
252 		}
253 	}
254 
255 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
256 	return 0;
257 
258 fault:
259 	pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
260 		state.words[0], address);
261 
262 	return 1;
263 }
264