xref: /linux/arch/arm64/lib/insn.c (revision f694f30e81c4ade358eb8c75273bac1a48f0cb8f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Huawei Ltd.
4  * Author: Jiang Liu <liuj97@gmail.com>
5  *
6  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
7  */
8 #include <linux/bitops.h>
9 #include <linux/bug.h>
10 #include <linux/printk.h>
11 #include <linux/sizes.h>
12 #include <linux/types.h>
13 
14 #include <asm/debug-monitors.h>
15 #include <asm/errno.h>
16 #include <asm/insn.h>
17 #include <asm/kprobes.h>
18 
19 #define AARCH64_INSN_SF_BIT	BIT(31)
20 #define AARCH64_INSN_N_BIT	BIT(22)
21 #define AARCH64_INSN_LSL_12	BIT(22)
22 
23 static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
24 						u32 *maskp, int *shiftp)
25 {
26 	u32 mask;
27 	int shift;
28 
29 	switch (type) {
30 	case AARCH64_INSN_IMM_26:
31 		mask = BIT(26) - 1;
32 		shift = 0;
33 		break;
34 	case AARCH64_INSN_IMM_19:
35 		mask = BIT(19) - 1;
36 		shift = 5;
37 		break;
38 	case AARCH64_INSN_IMM_16:
39 		mask = BIT(16) - 1;
40 		shift = 5;
41 		break;
42 	case AARCH64_INSN_IMM_14:
43 		mask = BIT(14) - 1;
44 		shift = 5;
45 		break;
46 	case AARCH64_INSN_IMM_12:
47 		mask = BIT(12) - 1;
48 		shift = 10;
49 		break;
50 	case AARCH64_INSN_IMM_9:
51 		mask = BIT(9) - 1;
52 		shift = 12;
53 		break;
54 	case AARCH64_INSN_IMM_7:
55 		mask = BIT(7) - 1;
56 		shift = 15;
57 		break;
58 	case AARCH64_INSN_IMM_6:
59 	case AARCH64_INSN_IMM_S:
60 		mask = BIT(6) - 1;
61 		shift = 10;
62 		break;
63 	case AARCH64_INSN_IMM_R:
64 		mask = BIT(6) - 1;
65 		shift = 16;
66 		break;
67 	case AARCH64_INSN_IMM_N:
68 		mask = 1;
69 		shift = 22;
70 		break;
71 	default:
72 		return -EINVAL;
73 	}
74 
75 	*maskp = mask;
76 	*shiftp = shift;
77 
78 	return 0;
79 }
80 
81 #define ADR_IMM_HILOSPLIT	2
82 #define ADR_IMM_SIZE		SZ_2M
83 #define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
84 #define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
85 #define ADR_IMM_LOSHIFT		29
86 #define ADR_IMM_HISHIFT		5
87 
/*
 * Extract the immediate field of @type from @insn.
 *
 * ADR/ADRP immediates are split across two fields (immlo and immhi) and
 * are recombined here into one 21-bit value (ADR_IMM_SIZE == SZ_2M);
 * every other type is a single contiguous field described by
 * aarch64_get_imm_shift_mask(). Returns 0 (and logs an error) for an
 * unknown immediate type.
 */
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		/* Reassemble the split ADR immediate as immhi:immlo. */
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("%s: unknown immediate encoding %d\n", __func__,
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
111 
/*
 * Insert immediate @imm of @type into instruction word @insn.
 *
 * An incoming AARCH64_BREAK_FAULT is propagated unchanged so that
 * encoding steps can be chained without checking every intermediate
 * result. Returns AARCH64_BREAK_FAULT for an unknown immediate type.
 */
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		/* Split the ADR immediate into its immlo/immhi fields. */
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("%s: unknown immediate encoding %d\n", __func__,
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
145 
146 u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
147 					u32 insn)
148 {
149 	int shift;
150 
151 	switch (type) {
152 	case AARCH64_INSN_REGTYPE_RT:
153 	case AARCH64_INSN_REGTYPE_RD:
154 		shift = 0;
155 		break;
156 	case AARCH64_INSN_REGTYPE_RN:
157 		shift = 5;
158 		break;
159 	case AARCH64_INSN_REGTYPE_RT2:
160 	case AARCH64_INSN_REGTYPE_RA:
161 		shift = 10;
162 		break;
163 	case AARCH64_INSN_REGTYPE_RM:
164 		shift = 16;
165 		break;
166 	default:
167 		pr_err("%s: unknown register type encoding %d\n", __func__,
168 		       type);
169 		return 0;
170 	}
171 
172 	return (insn >> shift) & GENMASK(4, 0);
173 }
174 
/*
 * Insert register @reg into the @type operand field of @insn.
 *
 * Propagates an incoming AARCH64_BREAK_FAULT unchanged so encoders can
 * be chained; returns AARCH64_BREAK_FAULT for a register outside
 * [AARCH64_INSN_REG_0, AARCH64_INSN_REG_SP] or an unknown operand type.
 */
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	/* Bit position of the 5-bit register field for this operand. */
	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
216 
/* Map a generic access size to the load/store "size" field (log2 bytes). */
static const u32 aarch64_insn_ldst_size[] = {
	[AARCH64_INSN_SIZE_8] = 0,
	[AARCH64_INSN_SIZE_16] = 1,
	[AARCH64_INSN_SIZE_32] = 2,
	[AARCH64_INSN_SIZE_64] = 3,
};
223 
224 static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
225 					 u32 insn)
226 {
227 	u32 size;
228 
229 	if (type < AARCH64_INSN_SIZE_8 || type > AARCH64_INSN_SIZE_64) {
230 		pr_err("%s: unknown size encoding %d\n", __func__, type);
231 		return AARCH64_BREAK_FAULT;
232 	}
233 
234 	size = aarch64_insn_ldst_size[type];
235 	insn &= ~GENMASK(31, 30);
236 	insn |= size << 30;
237 
238 	return insn;
239 }
240 
/*
 * Compute the byte offset from @pc to @addr for a PC-relative
 * instruction whose reach is [-@range, @range) bytes.
 *
 * Error convention: on misalignment or an out-of-range target this
 * returns @range itself, which callers must reject with an
 * "offset >= range" check before encoding the offset.
 */
static inline long label_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		/* Both ends must be 4-byte-aligned A64 instruction addresses. */
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}
260 
/*
 * Generate an unconditional immediate branch (B or BL) from @pc to
 * @addr. Returns AARCH64_BREAK_FAULT for an unreachable target or an
 * unknown branch type.
 */
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = label_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* imm26 holds the offset in words (4-byte units). */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
291 
/*
 * Generate a compare-and-branch (CBZ/CBNZ) on @reg from @pc to @addr.
 * The 19-bit word offset limits the reach to [-1M, 1M) bytes. Returns
 * AARCH64_BREAK_FAULT on an unreachable target or invalid arguments.
 */
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = label_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	/* imm19 holds the offset in words (4-byte units). */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
332 
333 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
334 				     enum aarch64_insn_condition cond)
335 {
336 	u32 insn;
337 	long offset;
338 
339 	offset = label_imm_common(pc, addr, SZ_1M);
340 
341 	insn = aarch64_insn_get_bcond_value();
342 
343 	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
344 		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
345 		return AARCH64_BREAK_FAULT;
346 	}
347 	insn |= cond;
348 
349 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
350 					     offset >> 2);
351 }
352 
353 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
354 				enum aarch64_insn_branch_type type)
355 {
356 	u32 insn;
357 
358 	switch (type) {
359 	case AARCH64_INSN_BRANCH_NOLINK:
360 		insn = aarch64_insn_get_br_value();
361 		break;
362 	case AARCH64_INSN_BRANCH_LINK:
363 		insn = aarch64_insn_get_blr_value();
364 		break;
365 	case AARCH64_INSN_BRANCH_RETURN:
366 		insn = aarch64_insn_get_ret_value();
367 		break;
368 	default:
369 		pr_err("%s: unknown branch encoding %d\n", __func__, type);
370 		return AARCH64_BREAK_FAULT;
371 	}
372 
373 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
374 }
375 
/*
 * Generate a load/store with register-offset addressing:
 * LDR/STR <reg>, [<base>, <offset>] (or the sign-extending load
 * variant), with access width selected by @size. Returns
 * AARCH64_BREAK_FAULT on any invalid argument.
 */
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_SIGNED_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_signed_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
409 
410 u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg,
411 				    enum aarch64_insn_register base,
412 				    unsigned int imm,
413 				    enum aarch64_insn_size_type size,
414 				    enum aarch64_insn_ldst_type type)
415 {
416 	u32 insn;
417 	u32 shift;
418 
419 	if (size < AARCH64_INSN_SIZE_8 || size > AARCH64_INSN_SIZE_64) {
420 		pr_err("%s: unknown size encoding %d\n", __func__, type);
421 		return AARCH64_BREAK_FAULT;
422 	}
423 
424 	shift = aarch64_insn_ldst_size[size];
425 	if (imm & ~(BIT(12 + shift) - BIT(shift))) {
426 		pr_err("%s: invalid imm: %d\n", __func__, imm);
427 		return AARCH64_BREAK_FAULT;
428 	}
429 
430 	imm >>= shift;
431 
432 	switch (type) {
433 	case AARCH64_INSN_LDST_LOAD_IMM_OFFSET:
434 		insn = aarch64_insn_get_ldr_imm_value();
435 		break;
436 	case AARCH64_INSN_LDST_SIGNED_LOAD_IMM_OFFSET:
437 		insn = aarch64_insn_get_signed_load_imm_value();
438 		break;
439 	case AARCH64_INSN_LDST_STORE_IMM_OFFSET:
440 		insn = aarch64_insn_get_str_imm_value();
441 		break;
442 	default:
443 		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
444 		return AARCH64_BREAK_FAULT;
445 	}
446 
447 	insn = aarch64_insn_encode_ldst_size(size, insn);
448 
449 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
450 
451 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
452 					    base);
453 
454 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
455 }
456 
/*
 * Generate an LDR (literal) loading @reg from PC-relative @addr.
 * @is64bit selects the 64-bit variant (bit 30 of the opcode). The
 * 19-bit word offset limits the reach to [-1M, 1M) bytes. Returns
 * AARCH64_BREAK_FAULT for an unreachable or misaligned target.
 */
u32 aarch64_insn_gen_load_literal(unsigned long pc, unsigned long addr,
				  enum aarch64_insn_register reg,
				  bool is64bit)
{
	u32 insn;
	long offset;

	offset = label_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_get_ldr_lit_value();

	if (is64bit)
		insn |= BIT(30);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	/* imm19 holds the offset in words (4-byte units). */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
478 
/*
 * Generate a load/store pair with pre- or post-index writeback:
 * LDP/STP <reg1>, <reg2>, [<base>, #offset]! or [<base>], #offset.
 *
 * @offset is a byte offset; it must be a size-aligned multiple within
 * the signed 7-bit scaled range of the chosen variant. Returns
 * AARCH64_BREAK_FAULT on any invalid argument.
 */
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* Validate the offset range and pick the per-variant scale. */
	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	/* imm7 holds the offset scaled by the access size. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
542 
/*
 * Generate a load-acquire or store-release instruction of @reg at
 * [<base>], with access width selected by @size. Returns
 * AARCH64_BREAK_FAULT on any invalid argument.
 */
u32 aarch64_insn_gen_load_acq_store_rel(enum aarch64_insn_register reg,
					enum aarch64_insn_register base,
					enum aarch64_insn_size_type size,
					enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_ACQ:
		insn = aarch64_insn_get_load_acq_value();
		break;
	case AARCH64_INSN_LDST_STORE_REL:
		insn = aarch64_insn_get_store_rel_value();
		break;
	default:
		pr_err("%s: unknown load-acquire/store-release encoding %d\n",
		       __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);
}
571 
/*
 * Generate an exclusive load/store (LDXR/STXR family) of @reg at
 * [<base>]. The acquire/release variants set bit 15 on top of the base
 * opcode. @state is encoded into the Rs field and Rt2 is fixed to XZR.
 * Returns AARCH64_BREAK_FAULT on any invalid argument.
 */
u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
	case AARCH64_INSN_LDST_LOAD_ACQ_EX:
		insn = aarch64_insn_get_load_ex_value();
		if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX)
			insn |= BIT(15);
		break;
	case AARCH64_INSN_LDST_STORE_EX:
	case AARCH64_INSN_LDST_STORE_REL_EX:
		insn = aarch64_insn_get_store_ex_value();
		if (type == AARCH64_INSN_LDST_STORE_REL_EX)
			insn |= BIT(15);
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}
612 
613 #ifdef CONFIG_ARM64_LSE_ATOMICS
614 static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type,
615 					  u32 insn)
616 {
617 	u32 order;
618 
619 	switch (type) {
620 	case AARCH64_INSN_MEM_ORDER_NONE:
621 		order = 0;
622 		break;
623 	case AARCH64_INSN_MEM_ORDER_ACQ:
624 		order = 2;
625 		break;
626 	case AARCH64_INSN_MEM_ORDER_REL:
627 		order = 1;
628 		break;
629 	case AARCH64_INSN_MEM_ORDER_ACQREL:
630 		order = 3;
631 		break;
632 	default:
633 		pr_err("%s: unknown mem order %d\n", __func__, type);
634 		return AARCH64_BREAK_FAULT;
635 	}
636 
637 	insn &= ~GENMASK(23, 22);
638 	insn |= order << 22;
639 
640 	return insn;
641 }
642 
/*
 * Generate an LSE atomic load-op (LDADD/LDCLR/LDEOR/LDSET or SWP):
 * atomically apply @value at [<address>] and return the old value in
 * @result. Only 32- and 64-bit access sizes are supported. Returns
 * AARCH64_BREAK_FAULT on any invalid argument.
 */
u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
				  enum aarch64_insn_register address,
				  enum aarch64_insn_register value,
				  enum aarch64_insn_size_type size,
				  enum aarch64_insn_mem_atomic_op op,
				  enum aarch64_insn_mem_order_type order)
{
	u32 insn;

	switch (op) {
	case AARCH64_INSN_MEM_ATOMIC_ADD:
		insn = aarch64_insn_get_ldadd_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_CLR:
		insn = aarch64_insn_get_ldclr_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_EOR:
		insn = aarch64_insn_get_ldeor_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_SET:
		insn = aarch64_insn_get_ldset_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_SWP:
		insn = aarch64_insn_get_swp_value();
		break;
	default:
		pr_err("%s: unimplemented mem atomic op %d\n", __func__, op);
		return AARCH64_BREAK_FAULT;
	}

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_ldst_order(order, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}
695 
696 static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type,
697 					 u32 insn)
698 {
699 	u32 order;
700 
701 	switch (type) {
702 	case AARCH64_INSN_MEM_ORDER_NONE:
703 		order = 0;
704 		break;
705 	case AARCH64_INSN_MEM_ORDER_ACQ:
706 		order = BIT(22);
707 		break;
708 	case AARCH64_INSN_MEM_ORDER_REL:
709 		order = BIT(15);
710 		break;
711 	case AARCH64_INSN_MEM_ORDER_ACQREL:
712 		order = BIT(15) | BIT(22);
713 		break;
714 	default:
715 		pr_err("%s: unknown mem order %d\n", __func__, type);
716 		return AARCH64_BREAK_FAULT;
717 	}
718 
719 	insn &= ~(BIT(15) | BIT(22));
720 	insn |= order;
721 
722 	return insn;
723 }
724 
/*
 * Generate a compare-and-swap (CAS) instruction: compare @result with
 * [<address>] and, on match, store @value; the old memory value is
 * returned in @result. Only 32- and 64-bit access sizes are supported.
 * Returns AARCH64_BREAK_FAULT on any invalid argument.
 */
u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
			 enum aarch64_insn_register address,
			 enum aarch64_insn_register value,
			 enum aarch64_insn_size_type size,
			 enum aarch64_insn_mem_order_type order)
{
	u32 insn;

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_get_cas_value();

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_cas_order(order, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}
757 #endif
758 
/*
 * Generate an ADD/SUB (immediate) instruction, optionally setting the
 * condition flags. @imm must be either a plain 12-bit value or a 12-bit
 * value shifted left by 12 (encoded via the LSL #12 flag); anything
 * else is rejected with AARCH64_BREAK_FAULT.
 */
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}
819 
/*
 * Generate a bitfield-move instruction (BFM/UBFM/SBFM) with rotate
 * amount @immr and width marker @imms. The 64-bit variant sets both
 * the SF and N bits and widens the valid immr/imms range from 5 to 6
 * bits. Returns AARCH64_BREAK_FAULT on any invalid argument.
 */
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
874 
/*
 * Generate a move-wide instruction (MOVZ/MOVK/MOVN) placing the 16-bit
 * @imm at bit position @shift, which must be 0 or 16 for the 32-bit
 * variant and additionally 32 or 48 for the 64-bit variant. Returns
 * AARCH64_BREAK_FAULT on any invalid argument.
 */
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* The hw field at bit 21 is the shift expressed in 16-bit units. */
	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
929 
/*
 * Generate an ADD/SUB (shifted register) instruction, optionally
 * setting the condition flags: <dst> = <src> op (<reg> LSL #shift).
 * The shift amount is limited to 0-31 (32-bit) or 0-63 (64-bit).
 * Returns AARCH64_BREAK_FAULT on any invalid argument.
 */
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
987 
/*
 * Generate a one-source data-processing instruction (REV16/REV32/REV64).
 * REV64 requires the 64-bit variant. Returns AARCH64_BREAK_FAULT on
 * any invalid argument.
 */
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}
1030 
/*
 * Generate a two-source data-processing instruction
 * (UDIV/SDIV/LSLV/LSRV/ASRV/RORV): <dst> = <src> op <reg>. Returns
 * AARCH64_BREAK_FAULT on any invalid argument.
 */
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
1080 
/*
 * Generate a three-source data-processing instruction (MADD/MSUB):
 * <dst> = <src> +/- (<reg1> * <reg2>), where @src is encoded in the
 * accumulator (Ra) field. Returns AARCH64_BREAK_FAULT on any invalid
 * argument.
 */
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}
1123 
1124 u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
1125 					 enum aarch64_insn_register src,
1126 					 enum aarch64_insn_register reg,
1127 					 int shift,
1128 					 enum aarch64_insn_variant variant,
1129 					 enum aarch64_insn_logic_type type)
1130 {
1131 	u32 insn;
1132 
1133 	switch (type) {
1134 	case AARCH64_INSN_LOGIC_AND:
1135 		insn = aarch64_insn_get_and_value();
1136 		break;
1137 	case AARCH64_INSN_LOGIC_BIC:
1138 		insn = aarch64_insn_get_bic_value();
1139 		break;
1140 	case AARCH64_INSN_LOGIC_ORR:
1141 		insn = aarch64_insn_get_orr_value();
1142 		break;
1143 	case AARCH64_INSN_LOGIC_ORN:
1144 		insn = aarch64_insn_get_orn_value();
1145 		break;
1146 	case AARCH64_INSN_LOGIC_EOR:
1147 		insn = aarch64_insn_get_eor_value();
1148 		break;
1149 	case AARCH64_INSN_LOGIC_EON:
1150 		insn = aarch64_insn_get_eon_value();
1151 		break;
1152 	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1153 		insn = aarch64_insn_get_ands_value();
1154 		break;
1155 	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
1156 		insn = aarch64_insn_get_bics_value();
1157 		break;
1158 	default:
1159 		pr_err("%s: unknown logical encoding %d\n", __func__, type);
1160 		return AARCH64_BREAK_FAULT;
1161 	}
1162 
1163 	switch (variant) {
1164 	case AARCH64_INSN_VARIANT_32BIT:
1165 		if (shift & ~(SZ_32 - 1)) {
1166 			pr_err("%s: invalid shift encoding %d\n", __func__,
1167 			       shift);
1168 			return AARCH64_BREAK_FAULT;
1169 		}
1170 		break;
1171 	case AARCH64_INSN_VARIANT_64BIT:
1172 		insn |= AARCH64_INSN_SF_BIT;
1173 		if (shift & ~(SZ_64 - 1)) {
1174 			pr_err("%s: invalid shift encoding %d\n", __func__,
1175 			       shift);
1176 			return AARCH64_BREAK_FAULT;
1177 		}
1178 		break;
1179 	default:
1180 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1181 		return AARCH64_BREAK_FAULT;
1182 	}
1183 
1184 
1185 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1186 
1187 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1188 
1189 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1190 
1191 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1192 }
1193 
1194 /*
1195  * MOV (register) is architecturally an alias of ORR (shifted register) where
1196  * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
1197  */
1198 u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
1199 			      enum aarch64_insn_register src,
1200 			      enum aarch64_insn_variant variant)
1201 {
1202 	return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
1203 						    src, 0, variant,
1204 						    AARCH64_INSN_LOGIC_ORR);
1205 }
1206 
/*
 * Generate an ADR or ADRP instruction materialising the PC-relative
 * address of @addr into @reg.
 *
 * ADR encodes a byte offset from @pc; ADRP encodes a 4K-page offset
 * from the page containing @pc. In both cases the signed 21-bit
 * immediate limits the (unit-scaled) offset to [-SZ_1M, SZ_1M).
 */
u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
			 enum aarch64_insn_register reg,
			 enum aarch64_insn_adr_type type)
{
	u32 insn;
	s32 offset;

	switch (type) {
	case AARCH64_INSN_ADR_TYPE_ADR:
		insn = aarch64_insn_get_adr_value();
		offset = addr - pc;
		break;
	case AARCH64_INSN_ADR_TYPE_ADRP:
		/* ADRP is page granular: offset counted in 4K pages. */
		insn = aarch64_insn_get_adrp_value();
		offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
		break;
	default:
		pr_err("%s: unknown adr encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* Reject offsets that do not fit the signed 21-bit immediate. */
	if (offset < -SZ_1M || offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
}
1235 
1236 /*
1237  * Decode the imm field of a branch, and return the byte offset as a
1238  * signed value (so it can be used when computing a new branch
1239  * target).
1240  */
1241 s32 aarch64_get_branch_offset(u32 insn)
1242 {
1243 	s32 imm;
1244 
1245 	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
1246 		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
1247 		return (imm << 6) >> 4;
1248 	}
1249 
1250 	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1251 	    aarch64_insn_is_bcond(insn)) {
1252 		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
1253 		return (imm << 13) >> 11;
1254 	}
1255 
1256 	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
1257 		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
1258 		return (imm << 18) >> 16;
1259 	}
1260 
1261 	/* Unhandled instruction */
1262 	BUG();
1263 }
1264 
1265 /*
1266  * Encode the displacement of a branch in the imm field and return the
1267  * updated instruction.
1268  */
1269 u32 aarch64_set_branch_offset(u32 insn, s32 offset)
1270 {
1271 	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
1272 		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
1273 						     offset >> 2);
1274 
1275 	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1276 	    aarch64_insn_is_bcond(insn))
1277 		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
1278 						     offset >> 2);
1279 
1280 	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
1281 		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
1282 						     offset >> 2);
1283 
1284 	/* Unhandled instruction */
1285 	BUG();
1286 }
1287 
/*
 * Recover the byte offset encoded in an ADRP instruction. The decoded
 * immediate is a 4K-page count, hence the << 12 back to bytes.
 */
s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}
1293 
/*
 * Re-encode an ADRP instruction with a new byte @offset. ADRP is page
 * granular, so the low 12 bits of @offset are discarded by the >> 12.
 */
u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
						offset >> 12);
}
1300 
1301 /*
1302  * Extract the Op/CR data from a msr/mrs instruction.
1303  */
1304 u32 aarch64_insn_extract_system_reg(u32 insn)
1305 {
1306 	return (insn & 0x1FFFE0) >> 5;
1307 }
1308 
1309 bool aarch32_insn_is_wide(u32 insn)
1310 {
1311 	return insn >= 0xe800;
1312 }
1313 
1314 /*
1315  * Macros/defines for extracting register numbers from instruction.
1316  */
1317 u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1318 {
1319 	return (insn & (0xf << offset)) >> offset;
1320 }
1321 
1322 #define OPC2_MASK	0x7
1323 #define OPC2_OFFSET	5
1324 u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1325 {
1326 	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1327 }
1328 
1329 #define CRM_MASK	0xf
1330 u32 aarch32_insn_mcr_extract_crm(u32 insn)
1331 {
1332 	return insn & CRM_MASK;
1333 }
1334 
1335 static bool range_of_ones(u64 val)
1336 {
1337 	/* Doesn't handle full ones or full zeroes */
1338 	u64 sval = val >> __ffs64(val);
1339 
1340 	/* One of Sean Eron Anderson's bithack tricks */
1341 	return ((sval + 1) & (sval)) == 0;
1342 }
1343 
/*
 * Encode @imm into @insn as an AArch64 "bitmask immediate" (the
 * N:immr:imms form used by logical-immediate instructions).
 *
 * A bitmask immediate is the replication, across the register width,
 * of a power-of-two-sized element containing a rotated contiguous run
 * of ones. Values that cannot be represented (all zeroes, all ones,
 * or any non-conforming pattern) yield AARCH64_BREAK_FAULT.
 */
static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	mask = GENMASK(esz - 1, 0);

	/* Can't encode full zeroes, full ones, or value wider than the mask */
	if (!imm || imm == mask || imm & ~mask)
		return AARCH64_BREAK_FAULT;

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride. Each successful halving shrinks the
	 * element size esz (and mask) to the repeated unit.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits. The 0xf << ffs(esz)
	 * term lays down the element-size marker bits above the count.
	 */
	imms  = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotate we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous ranges of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls64(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size...
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
1439 
1440 u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
1441 				       enum aarch64_insn_variant variant,
1442 				       enum aarch64_insn_register Rn,
1443 				       enum aarch64_insn_register Rd,
1444 				       u64 imm)
1445 {
1446 	u32 insn;
1447 
1448 	switch (type) {
1449 	case AARCH64_INSN_LOGIC_AND:
1450 		insn = aarch64_insn_get_and_imm_value();
1451 		break;
1452 	case AARCH64_INSN_LOGIC_ORR:
1453 		insn = aarch64_insn_get_orr_imm_value();
1454 		break;
1455 	case AARCH64_INSN_LOGIC_EOR:
1456 		insn = aarch64_insn_get_eor_imm_value();
1457 		break;
1458 	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1459 		insn = aarch64_insn_get_ands_imm_value();
1460 		break;
1461 	default:
1462 		pr_err("%s: unknown logical encoding %d\n", __func__, type);
1463 		return AARCH64_BREAK_FAULT;
1464 	}
1465 
1466 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1467 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1468 	return aarch64_encode_immediate(imm, variant, insn);
1469 }
1470 
1471 u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
1472 			  enum aarch64_insn_register Rm,
1473 			  enum aarch64_insn_register Rn,
1474 			  enum aarch64_insn_register Rd,
1475 			  u8 lsb)
1476 {
1477 	u32 insn;
1478 
1479 	insn = aarch64_insn_get_extr_value();
1480 
1481 	switch (variant) {
1482 	case AARCH64_INSN_VARIANT_32BIT:
1483 		if (lsb > 31)
1484 			return AARCH64_BREAK_FAULT;
1485 		break;
1486 	case AARCH64_INSN_VARIANT_64BIT:
1487 		if (lsb > 63)
1488 			return AARCH64_BREAK_FAULT;
1489 		insn |= AARCH64_INSN_SF_BIT;
1490 		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
1491 		break;
1492 	default:
1493 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1494 		return AARCH64_BREAK_FAULT;
1495 	}
1496 
1497 	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
1498 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1499 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1500 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
1501 }
1502 
1503 u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
1504 {
1505 	u32 opt;
1506 	u32 insn;
1507 
1508 	switch (type) {
1509 	case AARCH64_INSN_MB_SY:
1510 		opt = 0xf;
1511 		break;
1512 	case AARCH64_INSN_MB_ST:
1513 		opt = 0xe;
1514 		break;
1515 	case AARCH64_INSN_MB_LD:
1516 		opt = 0xd;
1517 		break;
1518 	case AARCH64_INSN_MB_ISH:
1519 		opt = 0xb;
1520 		break;
1521 	case AARCH64_INSN_MB_ISHST:
1522 		opt = 0xa;
1523 		break;
1524 	case AARCH64_INSN_MB_ISHLD:
1525 		opt = 0x9;
1526 		break;
1527 	case AARCH64_INSN_MB_NSH:
1528 		opt = 0x7;
1529 		break;
1530 	case AARCH64_INSN_MB_NSHST:
1531 		opt = 0x6;
1532 		break;
1533 	case AARCH64_INSN_MB_NSHLD:
1534 		opt = 0x5;
1535 		break;
1536 	default:
1537 		pr_err("%s: unknown dmb type %d\n", __func__, type);
1538 		return AARCH64_BREAK_FAULT;
1539 	}
1540 
1541 	insn = aarch64_insn_get_dmb_value();
1542 	insn &= ~GENMASK(11, 8);
1543 	insn |= (opt << 8);
1544 
1545 	return insn;
1546 }
1547 
1548 u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
1549 			 enum aarch64_insn_system_register sysreg)
1550 {
1551 	u32 insn = aarch64_insn_get_mrs_value();
1552 
1553 	insn &= ~GENMASK(19, 0);
1554 	insn |= sysreg << 5;
1555 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT,
1556 					    insn, result);
1557 }
1558