xref: /linux/arch/arm64/lib/insn.c (revision 627277ba7c2398dc4f95cc9be8222bb2d9477800)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Huawei Ltd.
4  * Author: Jiang Liu <liuj97@gmail.com>
5  *
6  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
7  */
8 #include <linux/bitfield.h>
9 #include <linux/bitops.h>
10 #include <linux/bug.h>
11 #include <linux/printk.h>
12 #include <linux/sizes.h>
13 #include <linux/types.h>
14 
15 #include <asm/debug-monitors.h>
16 #include <asm/errno.h>
17 #include <asm/insn.h>
18 #include <asm/kprobes.h>
19 
20 #define AARCH64_INSN_SF_BIT	BIT(31)
21 #define AARCH64_INSN_N_BIT	BIT(22)
22 #define AARCH64_INSN_LSL_12	BIT(22)
23 
24 static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
25 						u32 *maskp, int *shiftp)
26 {
27 	u32 mask;
28 	int shift;
29 
30 	switch (type) {
31 	case AARCH64_INSN_IMM_26:
32 		mask = BIT(26) - 1;
33 		shift = 0;
34 		break;
35 	case AARCH64_INSN_IMM_19:
36 		mask = BIT(19) - 1;
37 		shift = 5;
38 		break;
39 	case AARCH64_INSN_IMM_16:
40 		mask = BIT(16) - 1;
41 		shift = 5;
42 		break;
43 	case AARCH64_INSN_IMM_14:
44 		mask = BIT(14) - 1;
45 		shift = 5;
46 		break;
47 	case AARCH64_INSN_IMM_12:
48 		mask = BIT(12) - 1;
49 		shift = 10;
50 		break;
51 	case AARCH64_INSN_IMM_9:
52 		mask = BIT(9) - 1;
53 		shift = 12;
54 		break;
55 	case AARCH64_INSN_IMM_7:
56 		mask = BIT(7) - 1;
57 		shift = 15;
58 		break;
59 	case AARCH64_INSN_IMM_6:
60 	case AARCH64_INSN_IMM_S:
61 		mask = BIT(6) - 1;
62 		shift = 10;
63 		break;
64 	case AARCH64_INSN_IMM_R:
65 		mask = BIT(6) - 1;
66 		shift = 16;
67 		break;
68 	case AARCH64_INSN_IMM_N:
69 		mask = 1;
70 		shift = 22;
71 		break;
72 	default:
73 		return -EINVAL;
74 	}
75 
76 	*maskp = mask;
77 	*shiftp = shift;
78 
79 	return 0;
80 }
81 
82 #define ADR_IMM_HILOSPLIT	2
83 #define ADR_IMM_SIZE		SZ_2M
84 #define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
85 #define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
86 #define ADR_IMM_LOSHIFT		29
87 #define ADR_IMM_HISHIFT		5
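
/*
 * ADR/ADRP split their 21-bit immediate across two fields: immlo in
 * bits [30:29] and immhi in bits [23:5]. The helpers below therefore
 * split the value at ADR_IMM_HILOSPLIT (2 bits) and use the two shift
 * constants above. As a worked example, an offset of 0x1001 has
 * immlo = 0b01 and immhi = 0x400, and reassembles as (0x400 << 2) | 1.
 */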
88 
89 u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
90 {
91 	u32 immlo, immhi, mask;
92 	int shift;
93 
94 	switch (type) {
95 	case AARCH64_INSN_IMM_ADR:
96 		shift = 0;
97 		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
98 		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
99 		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
100 		mask = ADR_IMM_SIZE - 1;
101 		break;
102 	default:
103 		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
104 			pr_err("%s: unknown immediate encoding %d\n", __func__,
105 			       type);
106 			return 0;
107 		}
108 	}
109 
110 	return (insn >> shift) & mask;
111 }
112 
113 u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
114 				  u32 insn, u64 imm)
115 {
116 	u32 immlo, immhi, mask;
117 	int shift;
118 
119 	if (insn == AARCH64_BREAK_FAULT)
120 		return AARCH64_BREAK_FAULT;
121 
122 	switch (type) {
123 	case AARCH64_INSN_IMM_ADR:
124 		shift = 0;
125 		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
126 		imm >>= ADR_IMM_HILOSPLIT;
127 		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
128 		imm = immlo | immhi;
129 		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
130 			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
131 		break;
132 	default:
133 		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
134 			pr_err("%s: unknown immediate encoding %d\n", __func__,
135 			       type);
136 			return AARCH64_BREAK_FAULT;
137 		}
138 	}
139 
140 	/* Update the immediate field. */
141 	insn &= ~(mask << shift);
142 	insn |= (imm & mask) << shift;
143 
144 	return insn;
145 }
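
/*
 * Usage sketch: to hand-assemble an unconditional branch, take the opcode
 * template and patch the scaled offset into the imm26 field:
 *
 *	insn = aarch64_insn_get_b_value();
 *	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
 *					     (addr - pc) >> 2);
 *
 * This is what aarch64_insn_gen_branch_imm() below does, with alignment
 * and range checks added.
 */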
146 
147 u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
148 					u32 insn)
149 {
150 	int shift;
151 
152 	switch (type) {
153 	case AARCH64_INSN_REGTYPE_RT:
154 	case AARCH64_INSN_REGTYPE_RD:
155 		shift = 0;
156 		break;
157 	case AARCH64_INSN_REGTYPE_RN:
158 		shift = 5;
159 		break;
160 	case AARCH64_INSN_REGTYPE_RT2:
161 	case AARCH64_INSN_REGTYPE_RA:
162 		shift = 10;
163 		break;
164 	case AARCH64_INSN_REGTYPE_RM:
165 		shift = 16;
166 		break;
167 	default:
168 		pr_err("%s: unknown register type encoding %d\n", __func__,
169 		       type);
170 		return 0;
171 	}
172 
173 	return (insn >> shift) & GENMASK(4, 0);
174 }
175 
176 static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
177 					u32 insn,
178 					enum aarch64_insn_register reg)
179 {
180 	int shift;
181 
182 	if (insn == AARCH64_BREAK_FAULT)
183 		return AARCH64_BREAK_FAULT;
184 
185 	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
186 		pr_err("%s: unknown register encoding %d\n", __func__, reg);
187 		return AARCH64_BREAK_FAULT;
188 	}
189 
190 	switch (type) {
191 	case AARCH64_INSN_REGTYPE_RT:
192 	case AARCH64_INSN_REGTYPE_RD:
193 		shift = 0;
194 		break;
195 	case AARCH64_INSN_REGTYPE_RN:
196 		shift = 5;
197 		break;
198 	case AARCH64_INSN_REGTYPE_RT2:
199 	case AARCH64_INSN_REGTYPE_RA:
200 		shift = 10;
201 		break;
202 	case AARCH64_INSN_REGTYPE_RM:
203 	case AARCH64_INSN_REGTYPE_RS:
204 		shift = 16;
205 		break;
206 	default:
207 		pr_err("%s: unknown register type encoding %d\n", __func__,
208 		       type);
209 		return AARCH64_BREAK_FAULT;
210 	}
211 
212 	insn &= ~(GENMASK(4, 0) << shift);
213 	insn |= reg << shift;
214 
215 	return insn;
216 }
217 
218 static const u32 aarch64_insn_ldst_size[] = {
219 	[AARCH64_INSN_SIZE_8] = 0,
220 	[AARCH64_INSN_SIZE_16] = 1,
221 	[AARCH64_INSN_SIZE_32] = 2,
222 	[AARCH64_INSN_SIZE_64] = 3,
223 };
224 
225 static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
226 					 u32 insn)
227 {
228 	u32 size;
229 
230 	if (type < AARCH64_INSN_SIZE_8 || type > AARCH64_INSN_SIZE_64) {
231 		pr_err("%s: unknown size encoding %d\n", __func__, type);
232 		return AARCH64_BREAK_FAULT;
233 	}
234 
235 	size = aarch64_insn_ldst_size[type];
236 	insn &= ~GENMASK(31, 30);
237 	insn |= size << 30;
238 
239 	return insn;
240 }
241 
242 static inline long label_imm_common(unsigned long pc, unsigned long addr,
243 				     long range)
244 {
245 	long offset;
246 
247 	if ((pc & 0x3) || (addr & 0x3)) {
248 		pr_err("%s: A64 instructions must be word aligned\n", __func__);
249 		return range;
250 	}
251 
252 	offset = ((long)addr - (long)pc);
253 
254 	if (offset < -range || offset >= range) {
255 		pr_err("%s: offset out of range\n", __func__);
256 		return range;
257 	}
258 
259 	return offset;
260 }
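
/*
 * label_imm_common() signals failure by returning the (positive) range
 * itself, which can never be a valid result since valid offsets are
 * strictly less than the range; callers such as
 * aarch64_insn_gen_branch_imm() check for "offset >= range" and bail
 * out with AARCH64_BREAK_FAULT.
 */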
261 
262 u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
263 					  enum aarch64_insn_branch_type type)
264 {
265 	u32 insn;
266 	long offset;
267 
268 	/*
269 	 * B/BL support [-128M, 128M) offset
270 	 * ARM64 virtual address arrangement guarantees all kernel and module
271 	 * texts are within +/-128M.
272 	 */
273 	offset = label_imm_common(pc, addr, SZ_128M);
274 	if (offset >= SZ_128M)
275 		return AARCH64_BREAK_FAULT;
276 
277 	switch (type) {
278 	case AARCH64_INSN_BRANCH_LINK:
279 		insn = aarch64_insn_get_bl_value();
280 		break;
281 	case AARCH64_INSN_BRANCH_NOLINK:
282 		insn = aarch64_insn_get_b_value();
283 		break;
284 	default:
285 		pr_err("%s: unknown branch encoding %d\n", __func__, type);
286 		return AARCH64_BREAK_FAULT;
287 	}
288 
289 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
290 					     offset >> 2);
291 }
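
/*
 * For illustration, redirecting a patchable call site (hypothetical
 * pc/target values, not taken from this file):
 *
 *	u32 bl = aarch64_insn_gen_branch_imm(pc, target,
 *					     AARCH64_INSN_BRANCH_LINK);
 *	if (bl == AARCH64_BREAK_FAULT)
 *		return -ERANGE;
 *
 * The result is a BL whose imm26 field holds (target - pc) >> 2.
 */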
292 
293 u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
294 				     enum aarch64_insn_register reg,
295 				     enum aarch64_insn_variant variant,
296 				     enum aarch64_insn_branch_type type)
297 {
298 	u32 insn;
299 	long offset;
300 
301 	offset = label_imm_common(pc, addr, SZ_1M);
302 	if (offset >= SZ_1M)
303 		return AARCH64_BREAK_FAULT;
304 
305 	switch (type) {
306 	case AARCH64_INSN_BRANCH_COMP_ZERO:
307 		insn = aarch64_insn_get_cbz_value();
308 		break;
309 	case AARCH64_INSN_BRANCH_COMP_NONZERO:
310 		insn = aarch64_insn_get_cbnz_value();
311 		break;
312 	default:
313 		pr_err("%s: unknown branch encoding %d\n", __func__, type);
314 		return AARCH64_BREAK_FAULT;
315 	}
316 
317 	switch (variant) {
318 	case AARCH64_INSN_VARIANT_32BIT:
319 		break;
320 	case AARCH64_INSN_VARIANT_64BIT:
321 		insn |= AARCH64_INSN_SF_BIT;
322 		break;
323 	default:
324 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
325 		return AARCH64_BREAK_FAULT;
326 	}
327 
328 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
329 
330 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
331 					     offset >> 2);
332 }
333 
334 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
335 				     enum aarch64_insn_condition cond)
336 {
337 	u32 insn;
338 	long offset;
339 
340 	offset = label_imm_common(pc, addr, SZ_1M);
341 
342 	insn = aarch64_insn_get_bcond_value();
343 
344 	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
345 		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
346 		return AARCH64_BREAK_FAULT;
347 	}
348 	insn |= cond;
349 
350 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
351 					     offset >> 2);
352 }
353 
354 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
355 				enum aarch64_insn_branch_type type)
356 {
357 	u32 insn;
358 
359 	switch (type) {
360 	case AARCH64_INSN_BRANCH_NOLINK:
361 		insn = aarch64_insn_get_br_value();
362 		break;
363 	case AARCH64_INSN_BRANCH_LINK:
364 		insn = aarch64_insn_get_blr_value();
365 		break;
366 	case AARCH64_INSN_BRANCH_RETURN:
367 		insn = aarch64_insn_get_ret_value();
368 		break;
369 	default:
370 		pr_err("%s: unknown branch encoding %d\n", __func__, type);
371 		return AARCH64_BREAK_FAULT;
372 	}
373 
374 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
375 }
376 
377 u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
378 				    enum aarch64_insn_register base,
379 				    enum aarch64_insn_register offset,
380 				    enum aarch64_insn_size_type size,
381 				    enum aarch64_insn_ldst_type type)
382 {
383 	u32 insn;
384 
385 	switch (type) {
386 	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
387 		insn = aarch64_insn_get_ldr_reg_value();
388 		break;
389 	case AARCH64_INSN_LDST_SIGNED_LOAD_REG_OFFSET:
390 		insn = aarch64_insn_get_signed_ldr_reg_value();
391 		break;
392 	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
393 		insn = aarch64_insn_get_str_reg_value();
394 		break;
395 	default:
396 		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
397 		return AARCH64_BREAK_FAULT;
398 	}
399 
400 	insn = aarch64_insn_encode_ldst_size(size, insn);
401 
402 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
403 
404 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
405 					    base);
406 
407 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
408 					    offset);
409 }
410 
411 u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg,
412 				    enum aarch64_insn_register base,
413 				    unsigned int imm,
414 				    enum aarch64_insn_size_type size,
415 				    enum aarch64_insn_ldst_type type)
416 {
417 	u32 insn;
418 	u32 shift;
419 
420 	if (size < AARCH64_INSN_SIZE_8 || size > AARCH64_INSN_SIZE_64) {
421 		pr_err("%s: unknown size encoding %d\n", __func__, size);
422 		return AARCH64_BREAK_FAULT;
423 	}
424 
425 	shift = aarch64_insn_ldst_size[size];
426 	if (imm & ~(BIT(12 + shift) - BIT(shift))) {
427 		pr_err("%s: invalid imm: %d\n", __func__, imm);
428 		return AARCH64_BREAK_FAULT;
429 	}
430 
431 	imm >>= shift;
432 
433 	switch (type) {
434 	case AARCH64_INSN_LDST_LOAD_IMM_OFFSET:
435 		insn = aarch64_insn_get_ldr_imm_value();
436 		break;
437 	case AARCH64_INSN_LDST_SIGNED_LOAD_IMM_OFFSET:
438 		insn = aarch64_insn_get_signed_load_imm_value();
439 		break;
440 	case AARCH64_INSN_LDST_STORE_IMM_OFFSET:
441 		insn = aarch64_insn_get_str_imm_value();
442 		break;
443 	default:
444 		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
445 		return AARCH64_BREAK_FAULT;
446 	}
447 
448 	insn = aarch64_insn_encode_ldst_size(size, insn);
449 
450 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
451 
452 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
453 					    base);
454 
455 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
456 }
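
/*
 * The 12-bit immediate above is an unsigned, size-scaled offset: the byte
 * offset must be a multiple of the access size and is stored divided by
 * it, so e.g. a 64-bit LDR can address [base, base + 32760] in steps of 8.
 * A minimal sketch (register choices are just for the example):
 *
 *	insn = aarch64_insn_gen_load_store_imm(AARCH64_INSN_REG_0,
 *					       AARCH64_INSN_REG_1, 16,
 *					       AARCH64_INSN_SIZE_64,
 *					       AARCH64_INSN_LDST_LOAD_IMM_OFFSET);
 *
 * which encodes "ldr x0, [x1, #16]".
 */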
457 
458 u32 aarch64_insn_gen_load_literal(unsigned long pc, unsigned long addr,
459 				  enum aarch64_insn_register reg,
460 				  bool is64bit)
461 {
462 	u32 insn;
463 	long offset;
464 
465 	offset = label_imm_common(pc, addr, SZ_1M);
466 	if (offset >= SZ_1M)
467 		return AARCH64_BREAK_FAULT;
468 
469 	insn = aarch64_insn_get_ldr_lit_value();
470 
471 	if (is64bit)
472 		insn |= BIT(30);
473 
474 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
475 
476 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
477 					     offset >> 2);
478 }
479 
480 u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
481 				     enum aarch64_insn_register reg2,
482 				     enum aarch64_insn_register base,
483 				     int offset,
484 				     enum aarch64_insn_variant variant,
485 				     enum aarch64_insn_ldst_type type)
486 {
487 	u32 insn;
488 	int shift;
489 
490 	switch (type) {
491 	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
492 		insn = aarch64_insn_get_ldp_pre_value();
493 		break;
494 	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
495 		insn = aarch64_insn_get_stp_pre_value();
496 		break;
497 	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
498 		insn = aarch64_insn_get_ldp_post_value();
499 		break;
500 	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
501 		insn = aarch64_insn_get_stp_post_value();
502 		break;
503 	default:
504 		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
505 		return AARCH64_BREAK_FAULT;
506 	}
507 
508 	switch (variant) {
509 	case AARCH64_INSN_VARIANT_32BIT:
510 		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
511 			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
512 			       __func__, offset);
513 			return AARCH64_BREAK_FAULT;
514 		}
515 		shift = 2;
516 		break;
517 	case AARCH64_INSN_VARIANT_64BIT:
518 		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
519 			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
520 			       __func__, offset);
521 			return AARCH64_BREAK_FAULT;
522 		}
523 		shift = 3;
524 		insn |= AARCH64_INSN_SF_BIT;
525 		break;
526 	default:
527 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
528 		return AARCH64_BREAK_FAULT;
529 	}
530 
531 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
532 					    reg1);
533 
534 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
535 					    reg2);
536 
537 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
538 					    base);
539 
540 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
541 					     offset >> shift);
542 }
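
/*
 * Usage sketch: a JIT-style frame push, i.e. the equivalent of
 * "stp x29, x30, [sp, #-16]!" (assuming the usual AARCH64_INSN_REG_FP/
 * AARCH64_INSN_REG_LR/AARCH64_INSN_REG_SP aliases from <asm/insn.h>):
 *
 *	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_FP,
 *						AARCH64_INSN_REG_LR,
 *						AARCH64_INSN_REG_SP, -16,
 *						AARCH64_INSN_VARIANT_64BIT,
 *						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
 *
 * The offset is stored as a scaled 7-bit signed immediate (offset >> 3
 * for the 64-bit variant), hence the [-512, 504] limit checked above.
 */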
543 
544 u32 aarch64_insn_gen_load_acq_store_rel(enum aarch64_insn_register reg,
545 					enum aarch64_insn_register base,
546 					enum aarch64_insn_size_type size,
547 					enum aarch64_insn_ldst_type type)
548 {
549 	u32 insn;
550 
551 	switch (type) {
552 	case AARCH64_INSN_LDST_LOAD_ACQ:
553 		insn = aarch64_insn_get_load_acq_value();
554 		break;
555 	case AARCH64_INSN_LDST_STORE_REL:
556 		insn = aarch64_insn_get_store_rel_value();
557 		break;
558 	default:
559 		pr_err("%s: unknown load-acquire/store-release encoding %d\n",
560 		       __func__, type);
561 		return AARCH64_BREAK_FAULT;
562 	}
563 
564 	insn = aarch64_insn_encode_ldst_size(size, insn);
565 
566 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
567 					    reg);
568 
569 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
570 					    base);
571 }
572 
573 u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
574 				   enum aarch64_insn_register base,
575 				   enum aarch64_insn_register state,
576 				   enum aarch64_insn_size_type size,
577 				   enum aarch64_insn_ldst_type type)
578 {
579 	u32 insn;
580 
581 	switch (type) {
582 	case AARCH64_INSN_LDST_LOAD_EX:
583 	case AARCH64_INSN_LDST_LOAD_ACQ_EX:
584 		insn = aarch64_insn_get_load_ex_value();
585 		if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX)
586 			insn |= BIT(15);
587 		break;
588 	case AARCH64_INSN_LDST_STORE_EX:
589 	case AARCH64_INSN_LDST_STORE_REL_EX:
590 		insn = aarch64_insn_get_store_ex_value();
591 		if (type == AARCH64_INSN_LDST_STORE_REL_EX)
592 			insn |= BIT(15);
593 		break;
594 	default:
595 		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
596 		return AARCH64_BREAK_FAULT;
597 	}
598 
599 	insn = aarch64_insn_encode_ldst_size(size, insn);
600 
601 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
602 					    reg);
603 
604 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
605 					    base);
606 
607 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
608 					    AARCH64_INSN_REG_ZR);
609 
610 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
611 					    state);
612 }
613 
614 #ifdef CONFIG_ARM64_LSE_ATOMICS
615 static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type,
616 					  u32 insn)
617 {
618 	u32 order;
619 
620 	switch (type) {
621 	case AARCH64_INSN_MEM_ORDER_NONE:
622 		order = 0;
623 		break;
624 	case AARCH64_INSN_MEM_ORDER_ACQ:
625 		order = 2;
626 		break;
627 	case AARCH64_INSN_MEM_ORDER_REL:
628 		order = 1;
629 		break;
630 	case AARCH64_INSN_MEM_ORDER_ACQREL:
631 		order = 3;
632 		break;
633 	default:
634 		pr_err("%s: unknown mem order %d\n", __func__, type);
635 		return AARCH64_BREAK_FAULT;
636 	}
637 
638 	insn &= ~GENMASK(23, 22);
639 	insn |= order << 22;
640 
641 	return insn;
642 }
643 
644 u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
645 				  enum aarch64_insn_register address,
646 				  enum aarch64_insn_register value,
647 				  enum aarch64_insn_size_type size,
648 				  enum aarch64_insn_mem_atomic_op op,
649 				  enum aarch64_insn_mem_order_type order)
650 {
651 	u32 insn;
652 
653 	switch (op) {
654 	case AARCH64_INSN_MEM_ATOMIC_ADD:
655 		insn = aarch64_insn_get_ldadd_value();
656 		break;
657 	case AARCH64_INSN_MEM_ATOMIC_CLR:
658 		insn = aarch64_insn_get_ldclr_value();
659 		break;
660 	case AARCH64_INSN_MEM_ATOMIC_EOR:
661 		insn = aarch64_insn_get_ldeor_value();
662 		break;
663 	case AARCH64_INSN_MEM_ATOMIC_SET:
664 		insn = aarch64_insn_get_ldset_value();
665 		break;
666 	case AARCH64_INSN_MEM_ATOMIC_SWP:
667 		insn = aarch64_insn_get_swp_value();
668 		break;
669 	default:
670 		pr_err("%s: unimplemented mem atomic op %d\n", __func__, op);
671 		return AARCH64_BREAK_FAULT;
672 	}
673 
674 	switch (size) {
675 	case AARCH64_INSN_SIZE_32:
676 	case AARCH64_INSN_SIZE_64:
677 		break;
678 	default:
679 		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
680 		return AARCH64_BREAK_FAULT;
681 	}
682 
683 	insn = aarch64_insn_encode_ldst_size(size, insn);
684 
685 	insn = aarch64_insn_encode_ldst_order(order, insn);
686 
687 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
688 					    result);
689 
690 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
691 					    address);
692 
693 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
694 					    value);
695 }
696 
697 static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type,
698 					 u32 insn)
699 {
700 	u32 order;
701 
702 	switch (type) {
703 	case AARCH64_INSN_MEM_ORDER_NONE:
704 		order = 0;
705 		break;
706 	case AARCH64_INSN_MEM_ORDER_ACQ:
707 		order = BIT(22);
708 		break;
709 	case AARCH64_INSN_MEM_ORDER_REL:
710 		order = BIT(15);
711 		break;
712 	case AARCH64_INSN_MEM_ORDER_ACQREL:
713 		order = BIT(15) | BIT(22);
714 		break;
715 	default:
716 		pr_err("%s: unknown mem order %d\n", __func__, type);
717 		return AARCH64_BREAK_FAULT;
718 	}
719 
720 	insn &= ~(BIT(15) | BIT(22));
721 	insn |= order;
722 
723 	return insn;
724 }
725 
726 u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
727 			 enum aarch64_insn_register address,
728 			 enum aarch64_insn_register value,
729 			 enum aarch64_insn_size_type size,
730 			 enum aarch64_insn_mem_order_type order)
731 {
732 	u32 insn;
733 
734 	switch (size) {
735 	case AARCH64_INSN_SIZE_32:
736 	case AARCH64_INSN_SIZE_64:
737 		break;
738 	default:
739 		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
740 		return AARCH64_BREAK_FAULT;
741 	}
742 
743 	insn = aarch64_insn_get_cas_value();
744 
745 	insn = aarch64_insn_encode_ldst_size(size, insn);
746 
747 	insn = aarch64_insn_encode_cas_order(order, insn);
748 
749 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
750 					    result);
751 
752 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
753 					    address);
754 
755 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
756 					    value);
757 }
758 #endif
759 
760 u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
761 				 enum aarch64_insn_register src,
762 				 int imm, enum aarch64_insn_variant variant,
763 				 enum aarch64_insn_adsb_type type)
764 {
765 	u32 insn;
766 
767 	switch (type) {
768 	case AARCH64_INSN_ADSB_ADD:
769 		insn = aarch64_insn_get_add_imm_value();
770 		break;
771 	case AARCH64_INSN_ADSB_SUB:
772 		insn = aarch64_insn_get_sub_imm_value();
773 		break;
774 	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
775 		insn = aarch64_insn_get_adds_imm_value();
776 		break;
777 	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
778 		insn = aarch64_insn_get_subs_imm_value();
779 		break;
780 	default:
781 		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
782 		return AARCH64_BREAK_FAULT;
783 	}
784 
785 	switch (variant) {
786 	case AARCH64_INSN_VARIANT_32BIT:
787 		break;
788 	case AARCH64_INSN_VARIANT_64BIT:
789 		insn |= AARCH64_INSN_SF_BIT;
790 		break;
791 	default:
792 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
793 		return AARCH64_BREAK_FAULT;
794 	}
795 
796 	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
797 	if (imm & ~(BIT(24) - 1))
798 		goto out;
799 
800 	/* If we have something in the top 12 bits... */
801 	if (imm & ~(SZ_4K - 1)) {
802 		/* ... and in the low 12 bits -> error */
803 		if (imm & (SZ_4K - 1))
804 			goto out;
805 
806 		imm >>= 12;
807 		insn |= AARCH64_INSN_LSL_12;
808 	}
809 
810 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
811 
812 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
813 
814 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
815 
816 out:
817 	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
818 	return AARCH64_BREAK_FAULT;
819 }
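
/*
 * The immediate is the usual A64 "12-bit, optionally shifted by 12" form:
 * a value with only bits [11:0] set is encoded directly, a value with only
 * bits [23:12] set is encoded with LSL #12, and anything needing bits in
 * both halves (e.g. 0x1001) is rejected. So, for example,
 *
 *	aarch64_insn_gen_add_sub_imm(AARCH64_INSN_REG_0, AARCH64_INSN_REG_1,
 *				     0x5000, AARCH64_INSN_VARIANT_64BIT,
 *				     AARCH64_INSN_ADSB_ADD);
 *
 * encodes "add x0, x1, #0x5000" (i.e. #5, LSL #12).
 */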
820 
821 u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
822 			      enum aarch64_insn_register src,
823 			      int immr, int imms,
824 			      enum aarch64_insn_variant variant,
825 			      enum aarch64_insn_bitfield_type type)
826 {
827 	u32 insn;
828 	u32 mask;
829 
830 	switch (type) {
831 	case AARCH64_INSN_BITFIELD_MOVE:
832 		insn = aarch64_insn_get_bfm_value();
833 		break;
834 	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
835 		insn = aarch64_insn_get_ubfm_value();
836 		break;
837 	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
838 		insn = aarch64_insn_get_sbfm_value();
839 		break;
840 	default:
841 		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
842 		return AARCH64_BREAK_FAULT;
843 	}
844 
845 	switch (variant) {
846 	case AARCH64_INSN_VARIANT_32BIT:
847 		mask = GENMASK(4, 0);
848 		break;
849 	case AARCH64_INSN_VARIANT_64BIT:
850 		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
851 		mask = GENMASK(5, 0);
852 		break;
853 	default:
854 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
855 		return AARCH64_BREAK_FAULT;
856 	}
857 
858 	if (immr & ~mask) {
859 		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
860 		return AARCH64_BREAK_FAULT;
861 	}
862 	if (imms & ~mask) {
863 		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
864 		return AARCH64_BREAK_FAULT;
865 	}
866 
867 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
868 
869 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
870 
871 	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
872 
873 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
874 }
875 
876 u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
877 			      int imm, int shift,
878 			      enum aarch64_insn_variant variant,
879 			      enum aarch64_insn_movewide_type type)
880 {
881 	u32 insn;
882 
883 	switch (type) {
884 	case AARCH64_INSN_MOVEWIDE_ZERO:
885 		insn = aarch64_insn_get_movz_value();
886 		break;
887 	case AARCH64_INSN_MOVEWIDE_KEEP:
888 		insn = aarch64_insn_get_movk_value();
889 		break;
890 	case AARCH64_INSN_MOVEWIDE_INVERSE:
891 		insn = aarch64_insn_get_movn_value();
892 		break;
893 	default:
894 		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
895 		return AARCH64_BREAK_FAULT;
896 	}
897 
898 	if (imm & ~(SZ_64K - 1)) {
899 		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
900 		return AARCH64_BREAK_FAULT;
901 	}
902 
903 	switch (variant) {
904 	case AARCH64_INSN_VARIANT_32BIT:
905 		if (shift != 0 && shift != 16) {
906 			pr_err("%s: invalid shift encoding %d\n", __func__,
907 			       shift);
908 			return AARCH64_BREAK_FAULT;
909 		}
910 		break;
911 	case AARCH64_INSN_VARIANT_64BIT:
912 		insn |= AARCH64_INSN_SF_BIT;
913 		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
914 			pr_err("%s: invalid shift encoding %d\n", __func__,
915 			       shift);
916 			return AARCH64_BREAK_FAULT;
917 		}
918 		break;
919 	default:
920 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
921 		return AARCH64_BREAK_FAULT;
922 	}
923 
924 	insn |= (shift >> 4) << 21;
925 
926 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
927 
928 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
929 }
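
/*
 * A sketch of the common use: an arbitrary 64-bit constant can be
 * materialised 16 bits at a time with one MOVZ followed by up to three
 * MOVKs (loading a hypothetical value "val" into x0):
 *
 *	insn[0] = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
 *					    val & 0xffff, 0,
 *					    AARCH64_INSN_VARIANT_64BIT,
 *					    AARCH64_INSN_MOVEWIDE_ZERO);
 *	insn[1] = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
 *					    (val >> 16) & 0xffff, 16,
 *					    AARCH64_INSN_VARIANT_64BIT,
 *					    AARCH64_INSN_MOVEWIDE_KEEP);
 *
 * and likewise for shifts 32 and 48.
 */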
930 
931 u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
932 					 enum aarch64_insn_register src,
933 					 enum aarch64_insn_register reg,
934 					 int shift,
935 					 enum aarch64_insn_variant variant,
936 					 enum aarch64_insn_adsb_type type)
937 {
938 	u32 insn;
939 
940 	switch (type) {
941 	case AARCH64_INSN_ADSB_ADD:
942 		insn = aarch64_insn_get_add_value();
943 		break;
944 	case AARCH64_INSN_ADSB_SUB:
945 		insn = aarch64_insn_get_sub_value();
946 		break;
947 	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
948 		insn = aarch64_insn_get_adds_value();
949 		break;
950 	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
951 		insn = aarch64_insn_get_subs_value();
952 		break;
953 	default:
954 		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
955 		return AARCH64_BREAK_FAULT;
956 	}
957 
958 	switch (variant) {
959 	case AARCH64_INSN_VARIANT_32BIT:
960 		if (shift & ~(SZ_32 - 1)) {
961 			pr_err("%s: invalid shift encoding %d\n", __func__,
962 			       shift);
963 			return AARCH64_BREAK_FAULT;
964 		}
965 		break;
966 	case AARCH64_INSN_VARIANT_64BIT:
967 		insn |= AARCH64_INSN_SF_BIT;
968 		if (shift & ~(SZ_64 - 1)) {
969 			pr_err("%s: invalid shift encoding %d\n", __func__,
970 			       shift);
971 			return AARCH64_BREAK_FAULT;
972 		}
973 		break;
974 	default:
975 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
976 		return AARCH64_BREAK_FAULT;
977 	}
978 
979 
980 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
981 
982 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
983 
984 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
985 
986 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
987 }
988 
989 u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
990 			   enum aarch64_insn_register src,
991 			   enum aarch64_insn_variant variant,
992 			   enum aarch64_insn_data1_type type)
993 {
994 	u32 insn;
995 
996 	switch (type) {
997 	case AARCH64_INSN_DATA1_REVERSE_16:
998 		insn = aarch64_insn_get_rev16_value();
999 		break;
1000 	case AARCH64_INSN_DATA1_REVERSE_32:
1001 		insn = aarch64_insn_get_rev32_value();
1002 		break;
1003 	case AARCH64_INSN_DATA1_REVERSE_64:
1004 		if (variant != AARCH64_INSN_VARIANT_64BIT) {
1005 			pr_err("%s: invalid variant for reverse64 %d\n",
1006 			       __func__, variant);
1007 			return AARCH64_BREAK_FAULT;
1008 		}
1009 		insn = aarch64_insn_get_rev64_value();
1010 		break;
1011 	default:
1012 		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
1013 		return AARCH64_BREAK_FAULT;
1014 	}
1015 
1016 	switch (variant) {
1017 	case AARCH64_INSN_VARIANT_32BIT:
1018 		break;
1019 	case AARCH64_INSN_VARIANT_64BIT:
1020 		insn |= AARCH64_INSN_SF_BIT;
1021 		break;
1022 	default:
1023 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1024 		return AARCH64_BREAK_FAULT;
1025 	}
1026 
1027 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1028 
1029 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1030 }
1031 
1032 u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
1033 			   enum aarch64_insn_register src,
1034 			   enum aarch64_insn_register reg,
1035 			   enum aarch64_insn_variant variant,
1036 			   enum aarch64_insn_data2_type type)
1037 {
1038 	u32 insn;
1039 
1040 	switch (type) {
1041 	case AARCH64_INSN_DATA2_UDIV:
1042 		insn = aarch64_insn_get_udiv_value();
1043 		break;
1044 	case AARCH64_INSN_DATA2_SDIV:
1045 		insn = aarch64_insn_get_sdiv_value();
1046 		break;
1047 	case AARCH64_INSN_DATA2_LSLV:
1048 		insn = aarch64_insn_get_lslv_value();
1049 		break;
1050 	case AARCH64_INSN_DATA2_LSRV:
1051 		insn = aarch64_insn_get_lsrv_value();
1052 		break;
1053 	case AARCH64_INSN_DATA2_ASRV:
1054 		insn = aarch64_insn_get_asrv_value();
1055 		break;
1056 	case AARCH64_INSN_DATA2_RORV:
1057 		insn = aarch64_insn_get_rorv_value();
1058 		break;
1059 	default:
1060 		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
1061 		return AARCH64_BREAK_FAULT;
1062 	}
1063 
1064 	switch (variant) {
1065 	case AARCH64_INSN_VARIANT_32BIT:
1066 		break;
1067 	case AARCH64_INSN_VARIANT_64BIT:
1068 		insn |= AARCH64_INSN_SF_BIT;
1069 		break;
1070 	default:
1071 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1072 		return AARCH64_BREAK_FAULT;
1073 	}
1074 
1075 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1076 
1077 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1078 
1079 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1080 }
1081 
1082 u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
1083 			   enum aarch64_insn_register src,
1084 			   enum aarch64_insn_register reg1,
1085 			   enum aarch64_insn_register reg2,
1086 			   enum aarch64_insn_variant variant,
1087 			   enum aarch64_insn_data3_type type)
1088 {
1089 	u32 insn;
1090 
1091 	switch (type) {
1092 	case AARCH64_INSN_DATA3_MADD:
1093 		insn = aarch64_insn_get_madd_value();
1094 		break;
1095 	case AARCH64_INSN_DATA3_MSUB:
1096 		insn = aarch64_insn_get_msub_value();
1097 		break;
1098 	default:
1099 		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
1100 		return AARCH64_BREAK_FAULT;
1101 	}
1102 
1103 	switch (variant) {
1104 	case AARCH64_INSN_VARIANT_32BIT:
1105 		break;
1106 	case AARCH64_INSN_VARIANT_64BIT:
1107 		insn |= AARCH64_INSN_SF_BIT;
1108 		break;
1109 	default:
1110 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1111 		return AARCH64_BREAK_FAULT;
1112 	}
1113 
1114 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1115 
1116 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
1117 
1118 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
1119 					    reg1);
1120 
1121 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
1122 					    reg2);
1123 }
1124 
1125 u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
1126 					 enum aarch64_insn_register src,
1127 					 enum aarch64_insn_register reg,
1128 					 int shift,
1129 					 enum aarch64_insn_variant variant,
1130 					 enum aarch64_insn_logic_type type)
1131 {
1132 	u32 insn;
1133 
1134 	switch (type) {
1135 	case AARCH64_INSN_LOGIC_AND:
1136 		insn = aarch64_insn_get_and_value();
1137 		break;
1138 	case AARCH64_INSN_LOGIC_BIC:
1139 		insn = aarch64_insn_get_bic_value();
1140 		break;
1141 	case AARCH64_INSN_LOGIC_ORR:
1142 		insn = aarch64_insn_get_orr_value();
1143 		break;
1144 	case AARCH64_INSN_LOGIC_ORN:
1145 		insn = aarch64_insn_get_orn_value();
1146 		break;
1147 	case AARCH64_INSN_LOGIC_EOR:
1148 		insn = aarch64_insn_get_eor_value();
1149 		break;
1150 	case AARCH64_INSN_LOGIC_EON:
1151 		insn = aarch64_insn_get_eon_value();
1152 		break;
1153 	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1154 		insn = aarch64_insn_get_ands_value();
1155 		break;
1156 	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
1157 		insn = aarch64_insn_get_bics_value();
1158 		break;
1159 	default:
1160 		pr_err("%s: unknown logical encoding %d\n", __func__, type);
1161 		return AARCH64_BREAK_FAULT;
1162 	}
1163 
1164 	switch (variant) {
1165 	case AARCH64_INSN_VARIANT_32BIT:
1166 		if (shift & ~(SZ_32 - 1)) {
1167 			pr_err("%s: invalid shift encoding %d\n", __func__,
1168 			       shift);
1169 			return AARCH64_BREAK_FAULT;
1170 		}
1171 		break;
1172 	case AARCH64_INSN_VARIANT_64BIT:
1173 		insn |= AARCH64_INSN_SF_BIT;
1174 		if (shift & ~(SZ_64 - 1)) {
1175 			pr_err("%s: invalid shift encoding %d\n", __func__,
1176 			       shift);
1177 			return AARCH64_BREAK_FAULT;
1178 		}
1179 		break;
1180 	default:
1181 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1182 		return AARCH64_BREAK_FAULT;
1183 	}
1184 
1185 
1186 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1187 
1188 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1189 
1190 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1191 
1192 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1193 }
1194 
1195 /*
1196  * MOV (register) is architecturally an alias of ORR (shifted register) where
1197  * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
1198  */
1199 u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
1200 			      enum aarch64_insn_register src,
1201 			      enum aarch64_insn_variant variant)
1202 {
1203 	return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
1204 						    src, 0, variant,
1205 						    AARCH64_INSN_LOGIC_ORR);
1206 }
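
/*
 * For example, aarch64_insn_gen_move_reg(AARCH64_INSN_REG_0,
 * AARCH64_INSN_REG_1, AARCH64_INSN_VARIANT_64BIT) should produce
 * 0xaa0103e0, i.e. "orr x0, xzr, x1", which disassembles as "mov x0, x1".
 */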
1207 
1208 u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
1209 			 enum aarch64_insn_register reg,
1210 			 enum aarch64_insn_adr_type type)
1211 {
1212 	u32 insn;
1213 	s32 offset;
1214 
1215 	switch (type) {
1216 	case AARCH64_INSN_ADR_TYPE_ADR:
1217 		insn = aarch64_insn_get_adr_value();
1218 		offset = addr - pc;
1219 		break;
1220 	case AARCH64_INSN_ADR_TYPE_ADRP:
1221 		insn = aarch64_insn_get_adrp_value();
1222 		offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
1223 		break;
1224 	default:
1225 		pr_err("%s: unknown adr encoding %d\n", __func__, type);
1226 		return AARCH64_BREAK_FAULT;
1227 	}
1228 
1229 	if (offset < -SZ_1M || offset >= SZ_1M)
1230 		return AARCH64_BREAK_FAULT;
1231 
1232 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);
1233 
1234 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
1235 }
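
/*
 * Usage sketch: the classic ADRP + ADD pair for reaching an arbitrary
 * address (hypothetical pc/addr/reg values for the example):
 *
 *	insn[0] = aarch64_insn_gen_adr(pc, addr, reg,
 *				       AARCH64_INSN_ADR_TYPE_ADRP);
 *	insn[1] = aarch64_insn_gen_add_sub_imm(reg, reg, addr & (SZ_4K - 1),
 *					       AARCH64_INSN_VARIANT_64BIT,
 *					       AARCH64_INSN_ADSB_ADD);
 *
 * ADRP resolves the 4K page of addr relative to pc, and the ADD supplies
 * the low 12 bits.
 */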
1236 
1237 /*
1238  * Decode the imm field of a branch, and return the byte offset as a
1239  * signed value (so it can be used when computing a new branch
1240  * target).
1241  */
1242 s32 aarch64_get_branch_offset(u32 insn)
1243 {
1244 	s32 imm;
1245 
1246 	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
1247 		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
1248 		return (imm << 6) >> 4;
1249 	}
1250 
1251 	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1252 	    aarch64_insn_is_bcond(insn)) {
1253 		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
1254 		return (imm << 13) >> 11;
1255 	}
1256 
1257 	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
1258 		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
1259 		return (imm << 18) >> 16;
1260 	}
1261 
1262 	/* Unhandled instruction */
1263 	BUG();
1264 }
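
/*
 * The shift pairs above both sign-extend the raw field and convert it to
 * a byte offset in one go: for B/BL the 26-bit immediate sits in an s32,
 * so "imm << 6" moves its sign bit to bit 31 and the arithmetic ">> 4"
 * brings it back down while keeping the *4 scaling. E.g. imm = 0x2000000
 * (bit 25 set) yields (0x2000000 << 6) >> 4 = 0xf8000000 = -128M.
 */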
1265 
1266 /*
1267  * Encode the displacement of a branch in the imm field and return the
1268  * updated instruction.
1269  */
1270 u32 aarch64_set_branch_offset(u32 insn, s32 offset)
1271 {
1272 	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
1273 		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
1274 						     offset >> 2);
1275 
1276 	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1277 	    aarch64_insn_is_bcond(insn))
1278 		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
1279 						     offset >> 2);
1280 
1281 	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
1282 		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
1283 						     offset >> 2);
1284 
1285 	/* Unhandled instruction */
1286 	BUG();
1287 }
1288 
1289 s32 aarch64_insn_adrp_get_offset(u32 insn)
1290 {
1291 	BUG_ON(!aarch64_insn_is_adrp(insn));
1292 	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
1293 }
1294 
1295 u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
1296 {
1297 	BUG_ON(!aarch64_insn_is_adrp(insn));
1298 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
1299 						offset >> 12);
1300 }
1301 
1302 /*
1303  * Extract the Op/CR data from a msr/mrs instruction.
1304  */
1305 u32 aarch64_insn_extract_system_reg(u32 insn)
1306 {
1307 	return (insn & 0x1FFFE0) >> 5;
1308 }
1309 
1310 bool aarch32_insn_is_wide(u32 insn)
1311 {
1312 	return insn >= 0xe800;
1313 }
1314 
1315 /*
1316  * Macros/defines for extracting register numbers from instruction.
1317  */
1318 u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1319 {
1320 	return (insn & (0xf << offset)) >> offset;
1321 }
1322 
1323 #define OPC2_MASK	0x7
1324 #define OPC2_OFFSET	5
1325 u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1326 {
1327 	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1328 }
1329 
1330 #define CRM_MASK	0xf
1331 u32 aarch32_insn_mcr_extract_crm(u32 insn)
1332 {
1333 	return insn & CRM_MASK;
1334 }
1335 
1336 static bool range_of_ones(u64 val)
1337 {
1338 	/* Doesn't handle full ones or full zeroes */
1339 	u64 sval = val >> __ffs64(val);
1340 
1341 	/* One of Sean Eron Anderson's bithack tricks */
1342 	return ((sval + 1) & (sval)) == 0;
1343 }
1344 
1345 static u32 aarch64_encode_immediate(u64 imm,
1346 				    enum aarch64_insn_variant variant,
1347 				    u32 insn)
1348 {
1349 	unsigned int immr, imms, n, ones, ror, esz, tmp;
1350 	u64 mask;
1351 
1352 	switch (variant) {
1353 	case AARCH64_INSN_VARIANT_32BIT:
1354 		esz = 32;
1355 		break;
1356 	case AARCH64_INSN_VARIANT_64BIT:
1357 		insn |= AARCH64_INSN_SF_BIT;
1358 		esz = 64;
1359 		break;
1360 	default:
1361 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1362 		return AARCH64_BREAK_FAULT;
1363 	}
1364 
1365 	mask = GENMASK(esz - 1, 0);
1366 
1367 	/* Can't encode full zeroes, full ones, or value wider than the mask */
1368 	if (!imm || imm == mask || imm & ~mask)
1369 		return AARCH64_BREAK_FAULT;
1370 
1371 	/*
1372 	 * Inverse of Replicate(). Try to spot a repeating pattern
1373 	 * with a pow2 stride.
1374 	 */
1375 	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
1376 		u64 emask = BIT(tmp) - 1;
1377 
1378 		if ((imm & emask) != ((imm >> tmp) & emask))
1379 			break;
1380 
1381 		esz = tmp;
1382 		mask = emask;
1383 	}
1384 
1385 	/* N is only set if we're encoding a 64bit value */
1386 	n = esz == 64;
1387 
1388 	/* Trim imm to the element size */
1389 	imm &= mask;
1390 
1391 	/* That's how many ones we need to encode */
1392 	ones = hweight64(imm);
1393 
1394 	/*
1395 	 * imms is set to (ones - 1), prefixed with a string of ones
1396 	 * and a zero if they fit. Cap it to 6 bits.
1397 	 */
1398 	imms  = ones - 1;
1399 	imms |= 0xf << ffs(esz);
1400 	imms &= BIT(6) - 1;
1401 
1402 	/* Compute the rotation */
1403 	if (range_of_ones(imm)) {
1404 		/*
1405 		 * Pattern: 0..01..10..0
1406 		 *
1407 		 * Compute how many rotations we need to align it to the right
1408 		 */
1409 		ror = __ffs64(imm);
1410 	} else {
1411 		/*
1412 		 * Pattern: 0..01..10..01..1
1413 		 *
1414 		 * Fill the unused top bits with ones, and check if
1415 		 * the result is a valid immediate (all ones with a
1416 		 * contiguous range of zeroes).
1417 		 */
1418 		imm |= ~mask;
1419 		if (!range_of_ones(~imm))
1420 			return AARCH64_BREAK_FAULT;
1421 
1422 		/*
1423 		 * Compute the rotation to get a continuous set of
1424 		 * ones, with the first bit set at position 0
1425 		 */
1426 		ror = fls64(~imm);
1427 	}
1428 
1429 	/*
1430 	 * immr is the number of bits we need to rotate back to the
1431 	 * original set of ones. Note that this is relative to the
1432 	 * element size...
1433 	 */
1434 	immr = (esz - ror) % esz;
1435 
1436 	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
1437 	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
1438 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
1439 }
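
/*
 * Worked example: imm = 0xff with the 64-bit variant. No sub-pattern
 * repeats, so esz stays 64 and N = 1; eight ones give imms = 0b000111
 * after masking, and since 0xff is already a right-aligned run of ones
 * the rotation is zero, so immr = 0. That matches the architectural
 * N:immr:imms encoding of #0xff as a 64-bit logical immediate.
 */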
1440 
1441 u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
1442 				       enum aarch64_insn_variant variant,
1443 				       enum aarch64_insn_register Rn,
1444 				       enum aarch64_insn_register Rd,
1445 				       u64 imm)
1446 {
1447 	u32 insn;
1448 
1449 	switch (type) {
1450 	case AARCH64_INSN_LOGIC_AND:
1451 		insn = aarch64_insn_get_and_imm_value();
1452 		break;
1453 	case AARCH64_INSN_LOGIC_ORR:
1454 		insn = aarch64_insn_get_orr_imm_value();
1455 		break;
1456 	case AARCH64_INSN_LOGIC_EOR:
1457 		insn = aarch64_insn_get_eor_imm_value();
1458 		break;
1459 	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1460 		insn = aarch64_insn_get_ands_imm_value();
1461 		break;
1462 	default:
1463 		pr_err("%s: unknown logical encoding %d\n", __func__, type);
1464 		return AARCH64_BREAK_FAULT;
1465 	}
1466 
1467 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1468 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1469 	return aarch64_encode_immediate(imm, variant, insn);
1470 }
1471 
1472 u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
1473 			  enum aarch64_insn_register Rm,
1474 			  enum aarch64_insn_register Rn,
1475 			  enum aarch64_insn_register Rd,
1476 			  u8 lsb)
1477 {
1478 	u32 insn;
1479 
1480 	insn = aarch64_insn_get_extr_value();
1481 
1482 	switch (variant) {
1483 	case AARCH64_INSN_VARIANT_32BIT:
1484 		if (lsb > 31)
1485 			return AARCH64_BREAK_FAULT;
1486 		break;
1487 	case AARCH64_INSN_VARIANT_64BIT:
1488 		if (lsb > 63)
1489 			return AARCH64_BREAK_FAULT;
1490 		insn |= AARCH64_INSN_SF_BIT;
1491 		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
1492 		break;
1493 	default:
1494 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1495 		return AARCH64_BREAK_FAULT;
1496 	}
1497 
1498 	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
1499 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1500 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1501 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
1502 }
1503 
1504 static u32 __get_barrier_crm_val(enum aarch64_insn_mb_type type)
1505 {
1506 	switch (type) {
1507 	case AARCH64_INSN_MB_SY:
1508 		return 0xf;
1509 	case AARCH64_INSN_MB_ST:
1510 		return 0xe;
1511 	case AARCH64_INSN_MB_LD:
1512 		return 0xd;
1513 	case AARCH64_INSN_MB_ISH:
1514 		return 0xb;
1515 	case AARCH64_INSN_MB_ISHST:
1516 		return 0xa;
1517 	case AARCH64_INSN_MB_ISHLD:
1518 		return 0x9;
1519 	case AARCH64_INSN_MB_NSH:
1520 		return 0x7;
1521 	case AARCH64_INSN_MB_NSHST:
1522 		return 0x6;
1523 	case AARCH64_INSN_MB_NSHLD:
1524 		return 0x5;
1525 	default:
1526 		pr_err("%s: unknown barrier type %d\n", __func__, type);
1527 		return AARCH64_BREAK_FAULT;
1528 	}
1529 }
1530 
1531 u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
1532 {
1533 	u32 opt;
1534 	u32 insn;
1535 
1536 	opt = __get_barrier_crm_val(type);
1537 	if (opt == AARCH64_BREAK_FAULT)
1538 		return AARCH64_BREAK_FAULT;
1539 
1540 	insn = aarch64_insn_get_dmb_value();
1541 	insn &= ~GENMASK(11, 8);
1542 	insn |= (opt << 8);
1543 
1544 	return insn;
1545 }
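
/*
 * The CRm values returned by __get_barrier_crm_val() are the architectural
 * DMB/DSB option encodings (SY = 0b1111, ISH = 0b1011, ...), placed in
 * bits [11:8] of the barrier instruction. For example,
 *
 *	insn = aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH);
 *
 * should yield the usual "dmb ish" encoding (0xd5033bbf).
 */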
1546 
1547 u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type)
1548 {
1549 	u32 opt, insn;
1550 
1551 	opt = __get_barrier_crm_val(type);
1552 	if (opt == AARCH64_BREAK_FAULT)
1553 		return AARCH64_BREAK_FAULT;
1554 
1555 	insn = aarch64_insn_get_dsb_base_value();
1556 	insn &= ~GENMASK(11, 8);
1557 	insn |= (opt << 8);
1558 
1559 	return insn;
1560 }
1561 
1562 u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
1563 			 enum aarch64_insn_system_register sysreg)
1564 {
1565 	u32 insn = aarch64_insn_get_mrs_value();
1566 
1567 	insn &= ~GENMASK(19, 0);
1568 	insn |= sysreg << 5;
1569 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT,
1570 					    insn, result);
1571 }
1572 }