1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2013 Huawei Ltd. 4 * Author: Jiang Liu <liuj97@gmail.com> 5 * 6 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com> 7 */ 8 #include <linux/bitfield.h> 9 #include <linux/bitops.h> 10 #include <linux/bug.h> 11 #include <linux/printk.h> 12 #include <linux/sizes.h> 13 #include <linux/types.h> 14 15 #include <asm/debug-monitors.h> 16 #include <asm/errno.h> 17 #include <asm/insn.h> 18 #include <asm/kprobes.h> 19 20 #define AARCH64_INSN_SF_BIT BIT(31) 21 #define AARCH64_INSN_N_BIT BIT(22) 22 #define AARCH64_INSN_LSL_12 BIT(22) 23 24 static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type, 25 u32 *maskp, int *shiftp) 26 { 27 u32 mask; 28 int shift; 29 30 switch (type) { 31 case AARCH64_INSN_IMM_26: 32 mask = BIT(26) - 1; 33 shift = 0; 34 break; 35 case AARCH64_INSN_IMM_19: 36 mask = BIT(19) - 1; 37 shift = 5; 38 break; 39 case AARCH64_INSN_IMM_16: 40 mask = BIT(16) - 1; 41 shift = 5; 42 break; 43 case AARCH64_INSN_IMM_14: 44 mask = BIT(14) - 1; 45 shift = 5; 46 break; 47 case AARCH64_INSN_IMM_12: 48 mask = BIT(12) - 1; 49 shift = 10; 50 break; 51 case AARCH64_INSN_IMM_9: 52 mask = BIT(9) - 1; 53 shift = 12; 54 break; 55 case AARCH64_INSN_IMM_7: 56 mask = BIT(7) - 1; 57 shift = 15; 58 break; 59 case AARCH64_INSN_IMM_6: 60 case AARCH64_INSN_IMM_S: 61 mask = BIT(6) - 1; 62 shift = 10; 63 break; 64 case AARCH64_INSN_IMM_R: 65 mask = BIT(6) - 1; 66 shift = 16; 67 break; 68 case AARCH64_INSN_IMM_N: 69 mask = 1; 70 shift = 22; 71 break; 72 default: 73 return -EINVAL; 74 } 75 76 *maskp = mask; 77 *shiftp = shift; 78 79 return 0; 80 } 81 82 #define ADR_IMM_HILOSPLIT 2 83 #define ADR_IMM_SIZE SZ_2M 84 #define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1) 85 #define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1) 86 #define ADR_IMM_LOSHIFT 29 87 #define ADR_IMM_HISHIFT 5 88 89 u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn) 90 { 91 u32 immlo, immhi, 
mask; 92 int shift; 93 94 switch (type) { 95 case AARCH64_INSN_IMM_ADR: 96 shift = 0; 97 immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK; 98 immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK; 99 insn = (immhi << ADR_IMM_HILOSPLIT) | immlo; 100 mask = ADR_IMM_SIZE - 1; 101 break; 102 default: 103 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { 104 pr_err("%s: unknown immediate encoding %d\n", __func__, 105 type); 106 return 0; 107 } 108 } 109 110 return (insn >> shift) & mask; 111 } 112 113 u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, 114 u32 insn, u64 imm) 115 { 116 u32 immlo, immhi, mask; 117 int shift; 118 119 if (insn == AARCH64_BREAK_FAULT) 120 return AARCH64_BREAK_FAULT; 121 122 switch (type) { 123 case AARCH64_INSN_IMM_ADR: 124 shift = 0; 125 immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT; 126 imm >>= ADR_IMM_HILOSPLIT; 127 immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT; 128 imm = immlo | immhi; 129 mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) | 130 (ADR_IMM_HIMASK << ADR_IMM_HISHIFT)); 131 break; 132 default: 133 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { 134 pr_err("%s: unknown immediate encoding %d\n", __func__, 135 type); 136 return AARCH64_BREAK_FAULT; 137 } 138 } 139 140 /* Update the immediate field. 
*/ 141 insn &= ~(mask << shift); 142 insn |= (imm & mask) << shift; 143 144 return insn; 145 } 146 147 u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type, 148 u32 insn) 149 { 150 int shift; 151 152 switch (type) { 153 case AARCH64_INSN_REGTYPE_RT: 154 case AARCH64_INSN_REGTYPE_RD: 155 shift = 0; 156 break; 157 case AARCH64_INSN_REGTYPE_RN: 158 shift = 5; 159 break; 160 case AARCH64_INSN_REGTYPE_RT2: 161 case AARCH64_INSN_REGTYPE_RA: 162 shift = 10; 163 break; 164 case AARCH64_INSN_REGTYPE_RM: 165 shift = 16; 166 break; 167 default: 168 pr_err("%s: unknown register type encoding %d\n", __func__, 169 type); 170 return 0; 171 } 172 173 return (insn >> shift) & GENMASK(4, 0); 174 } 175 176 static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type, 177 u32 insn, 178 enum aarch64_insn_register reg) 179 { 180 int shift; 181 182 if (insn == AARCH64_BREAK_FAULT) 183 return AARCH64_BREAK_FAULT; 184 185 if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) { 186 pr_err("%s: unknown register encoding %d\n", __func__, reg); 187 return AARCH64_BREAK_FAULT; 188 } 189 190 switch (type) { 191 case AARCH64_INSN_REGTYPE_RT: 192 case AARCH64_INSN_REGTYPE_RD: 193 shift = 0; 194 break; 195 case AARCH64_INSN_REGTYPE_RN: 196 shift = 5; 197 break; 198 case AARCH64_INSN_REGTYPE_RT2: 199 case AARCH64_INSN_REGTYPE_RA: 200 shift = 10; 201 break; 202 case AARCH64_INSN_REGTYPE_RM: 203 case AARCH64_INSN_REGTYPE_RS: 204 shift = 16; 205 break; 206 default: 207 pr_err("%s: unknown register type encoding %d\n", __func__, 208 type); 209 return AARCH64_BREAK_FAULT; 210 } 211 212 insn &= ~(GENMASK(4, 0) << shift); 213 insn |= reg << shift; 214 215 return insn; 216 } 217 218 static const u32 aarch64_insn_ldst_size[] = { 219 [AARCH64_INSN_SIZE_8] = 0, 220 [AARCH64_INSN_SIZE_16] = 1, 221 [AARCH64_INSN_SIZE_32] = 2, 222 [AARCH64_INSN_SIZE_64] = 3, 223 }; 224 225 static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type, 226 u32 insn) 227 { 228 u32 
size; 229 230 if (type < AARCH64_INSN_SIZE_8 || type > AARCH64_INSN_SIZE_64) { 231 pr_err("%s: unknown size encoding %d\n", __func__, type); 232 return AARCH64_BREAK_FAULT; 233 } 234 235 size = aarch64_insn_ldst_size[type]; 236 insn &= ~GENMASK(31, 30); 237 insn |= size << 30; 238 239 return insn; 240 } 241 242 static inline long label_imm_common(unsigned long pc, unsigned long addr, 243 long range) 244 { 245 long offset; 246 247 if ((pc & 0x3) || (addr & 0x3)) { 248 pr_err("%s: A64 instructions must be word aligned\n", __func__); 249 return range; 250 } 251 252 offset = ((long)addr - (long)pc); 253 254 if (offset < -range || offset >= range) { 255 pr_err("%s: offset out of range\n", __func__); 256 return range; 257 } 258 259 return offset; 260 } 261 262 u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, 263 enum aarch64_insn_branch_type type) 264 { 265 u32 insn; 266 long offset; 267 268 /* 269 * B/BL support [-128M, 128M) offset 270 * ARM64 virtual address arrangement guarantees all kernel and module 271 * texts are within +/-128M. 
272 */ 273 offset = label_imm_common(pc, addr, SZ_128M); 274 if (offset >= SZ_128M) 275 return AARCH64_BREAK_FAULT; 276 277 switch (type) { 278 case AARCH64_INSN_BRANCH_LINK: 279 insn = aarch64_insn_get_bl_value(); 280 break; 281 case AARCH64_INSN_BRANCH_NOLINK: 282 insn = aarch64_insn_get_b_value(); 283 break; 284 default: 285 pr_err("%s: unknown branch encoding %d\n", __func__, type); 286 return AARCH64_BREAK_FAULT; 287 } 288 289 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, 290 offset >> 2); 291 } 292 293 u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr, 294 enum aarch64_insn_register reg, 295 enum aarch64_insn_variant variant, 296 enum aarch64_insn_branch_type type) 297 { 298 u32 insn; 299 long offset; 300 301 offset = label_imm_common(pc, addr, SZ_1M); 302 if (offset >= SZ_1M) 303 return AARCH64_BREAK_FAULT; 304 305 switch (type) { 306 case AARCH64_INSN_BRANCH_COMP_ZERO: 307 insn = aarch64_insn_get_cbz_value(); 308 break; 309 case AARCH64_INSN_BRANCH_COMP_NONZERO: 310 insn = aarch64_insn_get_cbnz_value(); 311 break; 312 default: 313 pr_err("%s: unknown branch encoding %d\n", __func__, type); 314 return AARCH64_BREAK_FAULT; 315 } 316 317 switch (variant) { 318 case AARCH64_INSN_VARIANT_32BIT: 319 break; 320 case AARCH64_INSN_VARIANT_64BIT: 321 insn |= AARCH64_INSN_SF_BIT; 322 break; 323 default: 324 pr_err("%s: unknown variant encoding %d\n", __func__, variant); 325 return AARCH64_BREAK_FAULT; 326 } 327 328 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); 329 330 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, 331 offset >> 2); 332 } 333 334 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr, 335 enum aarch64_insn_condition cond) 336 { 337 u32 insn; 338 long offset; 339 340 offset = label_imm_common(pc, addr, SZ_1M); 341 342 insn = aarch64_insn_get_bcond_value(); 343 344 if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) { 345 pr_err("%s: 
unknown condition encoding %d\n", __func__, cond); 346 return AARCH64_BREAK_FAULT; 347 } 348 insn |= cond; 349 350 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, 351 offset >> 2); 352 } 353 354 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, 355 enum aarch64_insn_branch_type type) 356 { 357 u32 insn; 358 359 switch (type) { 360 case AARCH64_INSN_BRANCH_NOLINK: 361 insn = aarch64_insn_get_br_value(); 362 break; 363 case AARCH64_INSN_BRANCH_LINK: 364 insn = aarch64_insn_get_blr_value(); 365 break; 366 case AARCH64_INSN_BRANCH_RETURN: 367 insn = aarch64_insn_get_ret_value(); 368 break; 369 default: 370 pr_err("%s: unknown branch encoding %d\n", __func__, type); 371 return AARCH64_BREAK_FAULT; 372 } 373 374 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg); 375 } 376 377 u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg, 378 enum aarch64_insn_register base, 379 enum aarch64_insn_register offset, 380 enum aarch64_insn_size_type size, 381 enum aarch64_insn_ldst_type type) 382 { 383 u32 insn; 384 385 switch (type) { 386 case AARCH64_INSN_LDST_LOAD_REG_OFFSET: 387 insn = aarch64_insn_get_ldr_reg_value(); 388 break; 389 case AARCH64_INSN_LDST_SIGNED_LOAD_REG_OFFSET: 390 insn = aarch64_insn_get_signed_ldr_reg_value(); 391 break; 392 case AARCH64_INSN_LDST_STORE_REG_OFFSET: 393 insn = aarch64_insn_get_str_reg_value(); 394 break; 395 default: 396 pr_err("%s: unknown load/store encoding %d\n", __func__, type); 397 return AARCH64_BREAK_FAULT; 398 } 399 400 insn = aarch64_insn_encode_ldst_size(size, insn); 401 402 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); 403 404 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, 405 base); 406 407 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, 408 offset); 409 } 410 411 u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg, 412 enum aarch64_insn_register base, 413 unsigned int imm, 414 enum 
aarch64_insn_size_type size, 415 enum aarch64_insn_ldst_type type) 416 { 417 u32 insn; 418 u32 shift; 419 420 if (size < AARCH64_INSN_SIZE_8 || size > AARCH64_INSN_SIZE_64) { 421 pr_err("%s: unknown size encoding %d\n", __func__, type); 422 return AARCH64_BREAK_FAULT; 423 } 424 425 shift = aarch64_insn_ldst_size[size]; 426 if (imm & ~(BIT(12 + shift) - BIT(shift))) { 427 pr_err("%s: invalid imm: %d\n", __func__, imm); 428 return AARCH64_BREAK_FAULT; 429 } 430 431 imm >>= shift; 432 433 switch (type) { 434 case AARCH64_INSN_LDST_LOAD_IMM_OFFSET: 435 insn = aarch64_insn_get_ldr_imm_value(); 436 break; 437 case AARCH64_INSN_LDST_SIGNED_LOAD_IMM_OFFSET: 438 insn = aarch64_insn_get_signed_load_imm_value(); 439 break; 440 case AARCH64_INSN_LDST_STORE_IMM_OFFSET: 441 insn = aarch64_insn_get_str_imm_value(); 442 break; 443 default: 444 pr_err("%s: unknown load/store encoding %d\n", __func__, type); 445 return AARCH64_BREAK_FAULT; 446 } 447 448 insn = aarch64_insn_encode_ldst_size(size, insn); 449 450 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); 451 452 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, 453 base); 454 455 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm); 456 } 457 458 u32 aarch64_insn_gen_load_literal(unsigned long pc, unsigned long addr, 459 enum aarch64_insn_register reg, 460 bool is64bit) 461 { 462 u32 insn; 463 long offset; 464 465 offset = label_imm_common(pc, addr, SZ_1M); 466 if (offset >= SZ_1M) 467 return AARCH64_BREAK_FAULT; 468 469 insn = aarch64_insn_get_ldr_lit_value(); 470 471 if (is64bit) 472 insn |= BIT(30); 473 474 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); 475 476 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, 477 offset >> 2); 478 } 479 480 u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1, 481 enum aarch64_insn_register reg2, 482 enum aarch64_insn_register base, 483 int offset, 484 enum aarch64_insn_variant 
variant, 485 enum aarch64_insn_ldst_type type) 486 { 487 u32 insn; 488 int shift; 489 490 switch (type) { 491 case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX: 492 insn = aarch64_insn_get_ldp_pre_value(); 493 break; 494 case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX: 495 insn = aarch64_insn_get_stp_pre_value(); 496 break; 497 case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX: 498 insn = aarch64_insn_get_ldp_post_value(); 499 break; 500 case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX: 501 insn = aarch64_insn_get_stp_post_value(); 502 break; 503 default: 504 pr_err("%s: unknown load/store encoding %d\n", __func__, type); 505 return AARCH64_BREAK_FAULT; 506 } 507 508 switch (variant) { 509 case AARCH64_INSN_VARIANT_32BIT: 510 if ((offset & 0x3) || (offset < -256) || (offset > 252)) { 511 pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n", 512 __func__, offset); 513 return AARCH64_BREAK_FAULT; 514 } 515 shift = 2; 516 break; 517 case AARCH64_INSN_VARIANT_64BIT: 518 if ((offset & 0x7) || (offset < -512) || (offset > 504)) { 519 pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n", 520 __func__, offset); 521 return AARCH64_BREAK_FAULT; 522 } 523 shift = 3; 524 insn |= AARCH64_INSN_SF_BIT; 525 break; 526 default: 527 pr_err("%s: unknown variant encoding %d\n", __func__, variant); 528 return AARCH64_BREAK_FAULT; 529 } 530 531 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, 532 reg1); 533 534 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn, 535 reg2); 536 537 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, 538 base); 539 540 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn, 541 offset >> shift); 542 } 543 544 u32 aarch64_insn_gen_load_acq_store_rel(enum aarch64_insn_register reg, 545 enum aarch64_insn_register base, 546 enum aarch64_insn_size_type size, 547 enum aarch64_insn_ldst_type type) 548 { 549 u32 insn; 550 551 switch (type) { 552 case AARCH64_INSN_LDST_LOAD_ACQ: 553 
insn = aarch64_insn_get_load_acq_value(); 554 break; 555 case AARCH64_INSN_LDST_STORE_REL: 556 insn = aarch64_insn_get_store_rel_value(); 557 break; 558 default: 559 pr_err("%s: unknown load-acquire/store-release encoding %d\n", 560 __func__, type); 561 return AARCH64_BREAK_FAULT; 562 } 563 564 insn = aarch64_insn_encode_ldst_size(size, insn); 565 566 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, 567 reg); 568 569 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, 570 base); 571 } 572 573 u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg, 574 enum aarch64_insn_register base, 575 enum aarch64_insn_register state, 576 enum aarch64_insn_size_type size, 577 enum aarch64_insn_ldst_type type) 578 { 579 u32 insn; 580 581 switch (type) { 582 case AARCH64_INSN_LDST_LOAD_EX: 583 case AARCH64_INSN_LDST_LOAD_ACQ_EX: 584 insn = aarch64_insn_get_load_ex_value(); 585 if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX) 586 insn |= BIT(15); 587 break; 588 case AARCH64_INSN_LDST_STORE_EX: 589 case AARCH64_INSN_LDST_STORE_REL_EX: 590 insn = aarch64_insn_get_store_ex_value(); 591 if (type == AARCH64_INSN_LDST_STORE_REL_EX) 592 insn |= BIT(15); 593 break; 594 default: 595 pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type); 596 return AARCH64_BREAK_FAULT; 597 } 598 599 insn = aarch64_insn_encode_ldst_size(size, insn); 600 601 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, 602 reg); 603 604 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, 605 base); 606 607 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn, 608 AARCH64_INSN_REG_ZR); 609 610 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn, 611 state); 612 } 613 614 static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type, 615 u32 insn) 616 { 617 u32 order; 618 619 switch (type) { 620 case AARCH64_INSN_MEM_ORDER_NONE: 621 order = 0; 622 break; 623 case AARCH64_INSN_MEM_ORDER_ACQ: 
624 order = 2; 625 break; 626 case AARCH64_INSN_MEM_ORDER_REL: 627 order = 1; 628 break; 629 case AARCH64_INSN_MEM_ORDER_ACQREL: 630 order = 3; 631 break; 632 default: 633 pr_err("%s: unknown mem order %d\n", __func__, type); 634 return AARCH64_BREAK_FAULT; 635 } 636 637 insn &= ~GENMASK(23, 22); 638 insn |= order << 22; 639 640 return insn; 641 } 642 643 u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result, 644 enum aarch64_insn_register address, 645 enum aarch64_insn_register value, 646 enum aarch64_insn_size_type size, 647 enum aarch64_insn_mem_atomic_op op, 648 enum aarch64_insn_mem_order_type order) 649 { 650 u32 insn; 651 652 switch (op) { 653 case AARCH64_INSN_MEM_ATOMIC_ADD: 654 insn = aarch64_insn_get_ldadd_value(); 655 break; 656 case AARCH64_INSN_MEM_ATOMIC_CLR: 657 insn = aarch64_insn_get_ldclr_value(); 658 break; 659 case AARCH64_INSN_MEM_ATOMIC_EOR: 660 insn = aarch64_insn_get_ldeor_value(); 661 break; 662 case AARCH64_INSN_MEM_ATOMIC_SET: 663 insn = aarch64_insn_get_ldset_value(); 664 break; 665 case AARCH64_INSN_MEM_ATOMIC_SWP: 666 insn = aarch64_insn_get_swp_value(); 667 break; 668 default: 669 pr_err("%s: unimplemented mem atomic op %d\n", __func__, op); 670 return AARCH64_BREAK_FAULT; 671 } 672 673 switch (size) { 674 case AARCH64_INSN_SIZE_32: 675 case AARCH64_INSN_SIZE_64: 676 break; 677 default: 678 pr_err("%s: unimplemented size encoding %d\n", __func__, size); 679 return AARCH64_BREAK_FAULT; 680 } 681 682 insn = aarch64_insn_encode_ldst_size(size, insn); 683 684 insn = aarch64_insn_encode_ldst_order(order, insn); 685 686 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, 687 result); 688 689 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, 690 address); 691 692 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn, 693 value); 694 } 695 696 static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type, 697 u32 insn) 698 { 699 u32 order; 700 701 switch (type) { 702 
case AARCH64_INSN_MEM_ORDER_NONE: 703 order = 0; 704 break; 705 case AARCH64_INSN_MEM_ORDER_ACQ: 706 order = BIT(22); 707 break; 708 case AARCH64_INSN_MEM_ORDER_REL: 709 order = BIT(15); 710 break; 711 case AARCH64_INSN_MEM_ORDER_ACQREL: 712 order = BIT(15) | BIT(22); 713 break; 714 default: 715 pr_err("%s: unknown mem order %d\n", __func__, type); 716 return AARCH64_BREAK_FAULT; 717 } 718 719 insn &= ~(BIT(15) | BIT(22)); 720 insn |= order; 721 722 return insn; 723 } 724 725 u32 aarch64_insn_gen_cas(enum aarch64_insn_register result, 726 enum aarch64_insn_register address, 727 enum aarch64_insn_register value, 728 enum aarch64_insn_size_type size, 729 enum aarch64_insn_mem_order_type order) 730 { 731 u32 insn; 732 733 switch (size) { 734 case AARCH64_INSN_SIZE_32: 735 case AARCH64_INSN_SIZE_64: 736 break; 737 default: 738 pr_err("%s: unimplemented size encoding %d\n", __func__, size); 739 return AARCH64_BREAK_FAULT; 740 } 741 742 insn = aarch64_insn_get_cas_value(); 743 744 insn = aarch64_insn_encode_ldst_size(size, insn); 745 746 insn = aarch64_insn_encode_cas_order(order, insn); 747 748 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, 749 result); 750 751 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, 752 address); 753 754 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn, 755 value); 756 } 757 758 u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, 759 enum aarch64_insn_register src, 760 int imm, enum aarch64_insn_variant variant, 761 enum aarch64_insn_adsb_type type) 762 { 763 u32 insn; 764 765 switch (type) { 766 case AARCH64_INSN_ADSB_ADD: 767 insn = aarch64_insn_get_add_imm_value(); 768 break; 769 case AARCH64_INSN_ADSB_SUB: 770 insn = aarch64_insn_get_sub_imm_value(); 771 break; 772 case AARCH64_INSN_ADSB_ADD_SETFLAGS: 773 insn = aarch64_insn_get_adds_imm_value(); 774 break; 775 case AARCH64_INSN_ADSB_SUB_SETFLAGS: 776 insn = aarch64_insn_get_subs_imm_value(); 777 break; 778 default: 779 
pr_err("%s: unknown add/sub encoding %d\n", __func__, type); 780 return AARCH64_BREAK_FAULT; 781 } 782 783 switch (variant) { 784 case AARCH64_INSN_VARIANT_32BIT: 785 break; 786 case AARCH64_INSN_VARIANT_64BIT: 787 insn |= AARCH64_INSN_SF_BIT; 788 break; 789 default: 790 pr_err("%s: unknown variant encoding %d\n", __func__, variant); 791 return AARCH64_BREAK_FAULT; 792 } 793 794 /* We can't encode more than a 24bit value (12bit + 12bit shift) */ 795 if (imm & ~(BIT(24) - 1)) 796 goto out; 797 798 /* If we have something in the top 12 bits... */ 799 if (imm & ~(SZ_4K - 1)) { 800 /* ... and in the low 12 bits -> error */ 801 if (imm & (SZ_4K - 1)) 802 goto out; 803 804 imm >>= 12; 805 insn |= AARCH64_INSN_LSL_12; 806 } 807 808 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); 809 810 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); 811 812 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm); 813 814 out: 815 pr_err("%s: invalid immediate encoding %d\n", __func__, imm); 816 return AARCH64_BREAK_FAULT; 817 } 818 819 u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst, 820 enum aarch64_insn_register src, 821 int immr, int imms, 822 enum aarch64_insn_variant variant, 823 enum aarch64_insn_bitfield_type type) 824 { 825 u32 insn; 826 u32 mask; 827 828 switch (type) { 829 case AARCH64_INSN_BITFIELD_MOVE: 830 insn = aarch64_insn_get_bfm_value(); 831 break; 832 case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED: 833 insn = aarch64_insn_get_ubfm_value(); 834 break; 835 case AARCH64_INSN_BITFIELD_MOVE_SIGNED: 836 insn = aarch64_insn_get_sbfm_value(); 837 break; 838 default: 839 pr_err("%s: unknown bitfield encoding %d\n", __func__, type); 840 return AARCH64_BREAK_FAULT; 841 } 842 843 switch (variant) { 844 case AARCH64_INSN_VARIANT_32BIT: 845 mask = GENMASK(4, 0); 846 break; 847 case AARCH64_INSN_VARIANT_64BIT: 848 insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT; 849 mask = GENMASK(5, 0); 850 break; 851 default: 
852 pr_err("%s: unknown variant encoding %d\n", __func__, variant); 853 return AARCH64_BREAK_FAULT; 854 } 855 856 if (immr & ~mask) { 857 pr_err("%s: invalid immr encoding %d\n", __func__, immr); 858 return AARCH64_BREAK_FAULT; 859 } 860 if (imms & ~mask) { 861 pr_err("%s: invalid imms encoding %d\n", __func__, imms); 862 return AARCH64_BREAK_FAULT; 863 } 864 865 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); 866 867 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); 868 869 insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr); 870 871 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms); 872 } 873 874 u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst, 875 int imm, int shift, 876 enum aarch64_insn_variant variant, 877 enum aarch64_insn_movewide_type type) 878 { 879 u32 insn; 880 881 switch (type) { 882 case AARCH64_INSN_MOVEWIDE_ZERO: 883 insn = aarch64_insn_get_movz_value(); 884 break; 885 case AARCH64_INSN_MOVEWIDE_KEEP: 886 insn = aarch64_insn_get_movk_value(); 887 break; 888 case AARCH64_INSN_MOVEWIDE_INVERSE: 889 insn = aarch64_insn_get_movn_value(); 890 break; 891 default: 892 pr_err("%s: unknown movewide encoding %d\n", __func__, type); 893 return AARCH64_BREAK_FAULT; 894 } 895 896 if (imm & ~(SZ_64K - 1)) { 897 pr_err("%s: invalid immediate encoding %d\n", __func__, imm); 898 return AARCH64_BREAK_FAULT; 899 } 900 901 switch (variant) { 902 case AARCH64_INSN_VARIANT_32BIT: 903 if (shift != 0 && shift != 16) { 904 pr_err("%s: invalid shift encoding %d\n", __func__, 905 shift); 906 return AARCH64_BREAK_FAULT; 907 } 908 break; 909 case AARCH64_INSN_VARIANT_64BIT: 910 insn |= AARCH64_INSN_SF_BIT; 911 if (shift != 0 && shift != 16 && shift != 32 && shift != 48) { 912 pr_err("%s: invalid shift encoding %d\n", __func__, 913 shift); 914 return AARCH64_BREAK_FAULT; 915 } 916 break; 917 default: 918 pr_err("%s: unknown variant encoding %d\n", __func__, variant); 919 return 
AARCH64_BREAK_FAULT; 920 } 921 922 insn |= (shift >> 4) << 21; 923 924 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); 925 926 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm); 927 } 928 929 u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst, 930 enum aarch64_insn_register src, 931 enum aarch64_insn_register reg, 932 int shift, 933 enum aarch64_insn_variant variant, 934 enum aarch64_insn_adsb_type type) 935 { 936 u32 insn; 937 938 switch (type) { 939 case AARCH64_INSN_ADSB_ADD: 940 insn = aarch64_insn_get_add_value(); 941 break; 942 case AARCH64_INSN_ADSB_SUB: 943 insn = aarch64_insn_get_sub_value(); 944 break; 945 case AARCH64_INSN_ADSB_ADD_SETFLAGS: 946 insn = aarch64_insn_get_adds_value(); 947 break; 948 case AARCH64_INSN_ADSB_SUB_SETFLAGS: 949 insn = aarch64_insn_get_subs_value(); 950 break; 951 default: 952 pr_err("%s: unknown add/sub encoding %d\n", __func__, type); 953 return AARCH64_BREAK_FAULT; 954 } 955 956 switch (variant) { 957 case AARCH64_INSN_VARIANT_32BIT: 958 if (shift & ~(SZ_32 - 1)) { 959 pr_err("%s: invalid shift encoding %d\n", __func__, 960 shift); 961 return AARCH64_BREAK_FAULT; 962 } 963 break; 964 case AARCH64_INSN_VARIANT_64BIT: 965 insn |= AARCH64_INSN_SF_BIT; 966 if (shift & ~(SZ_64 - 1)) { 967 pr_err("%s: invalid shift encoding %d\n", __func__, 968 shift); 969 return AARCH64_BREAK_FAULT; 970 } 971 break; 972 default: 973 pr_err("%s: unknown variant encoding %d\n", __func__, variant); 974 return AARCH64_BREAK_FAULT; 975 } 976 977 978 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); 979 980 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); 981 982 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); 983 984 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); 985 } 986 987 u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst, 988 enum aarch64_insn_register src, 989 enum 
aarch64_insn_variant variant, 990 enum aarch64_insn_data1_type type) 991 { 992 u32 insn; 993 994 switch (type) { 995 case AARCH64_INSN_DATA1_REVERSE_16: 996 insn = aarch64_insn_get_rev16_value(); 997 break; 998 case AARCH64_INSN_DATA1_REVERSE_32: 999 insn = aarch64_insn_get_rev32_value(); 1000 break; 1001 case AARCH64_INSN_DATA1_REVERSE_64: 1002 if (variant != AARCH64_INSN_VARIANT_64BIT) { 1003 pr_err("%s: invalid variant for reverse64 %d\n", 1004 __func__, variant); 1005 return AARCH64_BREAK_FAULT; 1006 } 1007 insn = aarch64_insn_get_rev64_value(); 1008 break; 1009 default: 1010 pr_err("%s: unknown data1 encoding %d\n", __func__, type); 1011 return AARCH64_BREAK_FAULT; 1012 } 1013 1014 switch (variant) { 1015 case AARCH64_INSN_VARIANT_32BIT: 1016 break; 1017 case AARCH64_INSN_VARIANT_64BIT: 1018 insn |= AARCH64_INSN_SF_BIT; 1019 break; 1020 default: 1021 pr_err("%s: unknown variant encoding %d\n", __func__, variant); 1022 return AARCH64_BREAK_FAULT; 1023 } 1024 1025 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); 1026 1027 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); 1028 } 1029 1030 u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst, 1031 enum aarch64_insn_register src, 1032 enum aarch64_insn_register reg, 1033 enum aarch64_insn_variant variant, 1034 enum aarch64_insn_data2_type type) 1035 { 1036 u32 insn; 1037 1038 switch (type) { 1039 case AARCH64_INSN_DATA2_UDIV: 1040 insn = aarch64_insn_get_udiv_value(); 1041 break; 1042 case AARCH64_INSN_DATA2_SDIV: 1043 insn = aarch64_insn_get_sdiv_value(); 1044 break; 1045 case AARCH64_INSN_DATA2_LSLV: 1046 insn = aarch64_insn_get_lslv_value(); 1047 break; 1048 case AARCH64_INSN_DATA2_LSRV: 1049 insn = aarch64_insn_get_lsrv_value(); 1050 break; 1051 case AARCH64_INSN_DATA2_ASRV: 1052 insn = aarch64_insn_get_asrv_value(); 1053 break; 1054 case AARCH64_INSN_DATA2_RORV: 1055 insn = aarch64_insn_get_rorv_value(); 1056 break; 1057 default: 1058 pr_err("%s: unknown 
data2 encoding %d\n", __func__, type); 1059 return AARCH64_BREAK_FAULT; 1060 } 1061 1062 switch (variant) { 1063 case AARCH64_INSN_VARIANT_32BIT: 1064 break; 1065 case AARCH64_INSN_VARIANT_64BIT: 1066 insn |= AARCH64_INSN_SF_BIT; 1067 break; 1068 default: 1069 pr_err("%s: unknown variant encoding %d\n", __func__, variant); 1070 return AARCH64_BREAK_FAULT; 1071 } 1072 1073 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); 1074 1075 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); 1076 1077 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); 1078 } 1079 1080 u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst, 1081 enum aarch64_insn_register src, 1082 enum aarch64_insn_register reg1, 1083 enum aarch64_insn_register reg2, 1084 enum aarch64_insn_variant variant, 1085 enum aarch64_insn_data3_type type) 1086 { 1087 u32 insn; 1088 1089 switch (type) { 1090 case AARCH64_INSN_DATA3_MADD: 1091 insn = aarch64_insn_get_madd_value(); 1092 break; 1093 case AARCH64_INSN_DATA3_MSUB: 1094 insn = aarch64_insn_get_msub_value(); 1095 break; 1096 default: 1097 pr_err("%s: unknown data3 encoding %d\n", __func__, type); 1098 return AARCH64_BREAK_FAULT; 1099 } 1100 1101 switch (variant) { 1102 case AARCH64_INSN_VARIANT_32BIT: 1103 break; 1104 case AARCH64_INSN_VARIANT_64BIT: 1105 insn |= AARCH64_INSN_SF_BIT; 1106 break; 1107 default: 1108 pr_err("%s: unknown variant encoding %d\n", __func__, variant); 1109 return AARCH64_BREAK_FAULT; 1110 } 1111 1112 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); 1113 1114 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src); 1115 1116 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, 1117 reg1); 1118 1119 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, 1120 reg2); 1121 } 1122 1123 u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, 1124 enum aarch64_insn_register src, 1125 enum 
aarch64_insn_register reg, 1126 int shift, 1127 enum aarch64_insn_variant variant, 1128 enum aarch64_insn_logic_type type) 1129 { 1130 u32 insn; 1131 1132 switch (type) { 1133 case AARCH64_INSN_LOGIC_AND: 1134 insn = aarch64_insn_get_and_value(); 1135 break; 1136 case AARCH64_INSN_LOGIC_BIC: 1137 insn = aarch64_insn_get_bic_value(); 1138 break; 1139 case AARCH64_INSN_LOGIC_ORR: 1140 insn = aarch64_insn_get_orr_value(); 1141 break; 1142 case AARCH64_INSN_LOGIC_ORN: 1143 insn = aarch64_insn_get_orn_value(); 1144 break; 1145 case AARCH64_INSN_LOGIC_EOR: 1146 insn = aarch64_insn_get_eor_value(); 1147 break; 1148 case AARCH64_INSN_LOGIC_EON: 1149 insn = aarch64_insn_get_eon_value(); 1150 break; 1151 case AARCH64_INSN_LOGIC_AND_SETFLAGS: 1152 insn = aarch64_insn_get_ands_value(); 1153 break; 1154 case AARCH64_INSN_LOGIC_BIC_SETFLAGS: 1155 insn = aarch64_insn_get_bics_value(); 1156 break; 1157 default: 1158 pr_err("%s: unknown logical encoding %d\n", __func__, type); 1159 return AARCH64_BREAK_FAULT; 1160 } 1161 1162 switch (variant) { 1163 case AARCH64_INSN_VARIANT_32BIT: 1164 if (shift & ~(SZ_32 - 1)) { 1165 pr_err("%s: invalid shift encoding %d\n", __func__, 1166 shift); 1167 return AARCH64_BREAK_FAULT; 1168 } 1169 break; 1170 case AARCH64_INSN_VARIANT_64BIT: 1171 insn |= AARCH64_INSN_SF_BIT; 1172 if (shift & ~(SZ_64 - 1)) { 1173 pr_err("%s: invalid shift encoding %d\n", __func__, 1174 shift); 1175 return AARCH64_BREAK_FAULT; 1176 } 1177 break; 1178 default: 1179 pr_err("%s: unknown variant encoding %d\n", __func__, variant); 1180 return AARCH64_BREAK_FAULT; 1181 } 1182 1183 1184 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); 1185 1186 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); 1187 1188 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); 1189 1190 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); 1191 } 1192 1193 /* 1194 * MOV (register) is architecturally an alias of ORR 
(shifted register) where 1195 * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m> 1196 */ 1197 u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst, 1198 enum aarch64_insn_register src, 1199 enum aarch64_insn_variant variant) 1200 { 1201 return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR, 1202 src, 0, variant, 1203 AARCH64_INSN_LOGIC_ORR); 1204 } 1205 1206 u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr, 1207 enum aarch64_insn_register reg, 1208 enum aarch64_insn_adr_type type) 1209 { 1210 u32 insn; 1211 s32 offset; 1212 1213 switch (type) { 1214 case AARCH64_INSN_ADR_TYPE_ADR: 1215 insn = aarch64_insn_get_adr_value(); 1216 offset = addr - pc; 1217 break; 1218 case AARCH64_INSN_ADR_TYPE_ADRP: 1219 insn = aarch64_insn_get_adrp_value(); 1220 offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12; 1221 break; 1222 default: 1223 pr_err("%s: unknown adr encoding %d\n", __func__, type); 1224 return AARCH64_BREAK_FAULT; 1225 } 1226 1227 if (offset < -SZ_1M || offset >= SZ_1M) 1228 return AARCH64_BREAK_FAULT; 1229 1230 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg); 1231 1232 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset); 1233 } 1234 1235 /* 1236 * Decode the imm field of a branch, and return the byte offset as a 1237 * signed value (so it can be used when computing a new branch 1238 * target). 
1239 */ 1240 s32 aarch64_get_branch_offset(u32 insn) 1241 { 1242 s32 imm; 1243 1244 if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) { 1245 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn); 1246 return (imm << 6) >> 4; 1247 } 1248 1249 if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) || 1250 aarch64_insn_is_bcond(insn)) { 1251 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn); 1252 return (imm << 13) >> 11; 1253 } 1254 1255 if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) { 1256 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn); 1257 return (imm << 18) >> 16; 1258 } 1259 1260 /* Unhandled instruction */ 1261 BUG(); 1262 } 1263 1264 /* 1265 * Encode the displacement of a branch in the imm field and return the 1266 * updated instruction. 1267 */ 1268 u32 aarch64_set_branch_offset(u32 insn, s32 offset) 1269 { 1270 if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) 1271 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, 1272 offset >> 2); 1273 1274 if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) || 1275 aarch64_insn_is_bcond(insn)) 1276 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, 1277 offset >> 2); 1278 1279 if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) 1280 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn, 1281 offset >> 2); 1282 1283 /* Unhandled instruction */ 1284 BUG(); 1285 } 1286 1287 s32 aarch64_insn_adrp_get_offset(u32 insn) 1288 { 1289 BUG_ON(!aarch64_insn_is_adrp(insn)); 1290 return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12; 1291 } 1292 1293 u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset) 1294 { 1295 BUG_ON(!aarch64_insn_is_adrp(insn)); 1296 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, 1297 offset >> 12); 1298 } 1299 1300 /* 1301 * Extract the Op/CR data from a msr/mrs instruction. 
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	/* The Op0/Op1/CRn/CRm/Op2 fields occupy bits [20:5]. */
	return (insn & 0x1FFFE0) >> 5;
}

/* NOTE(review): presumably tests for a 32-bit (wide) Thumb encoding — confirm. */
bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	/* Extract the 4-bit register field starting at bit @offset. */
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	/* opc2 is the 3-bit field at bits [7:5]. */
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	/* CRm is the low nibble. */
	return insn & CRM_MASK;
}

/*
 * True iff @val consists of a single contiguous run of ones
 * (i.e. 0..01..10..0). Doesn't handle full ones or full zeroes.
 */
static bool range_of_ones(u64 val)
{
	/* Shift out the trailing zeroes... */
	u64 sval = val >> __ffs64(val);

	/* One of Sean Eron Anderson's bithack tricks */
	return ((sval + 1) & (sval)) == 0;
}

/*
 * Encode @imm as an A64 "logical immediate" (the N:immr:imms fields)
 * into @insn, setting SF for the 64-bit variant. Returns
 * AARCH64_BREAK_FAULT if @imm has no such encoding.
 */
static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	mask = GENMASK(esz - 1, 0);

	/* Can't encode full zeroes, full ones, or value wider than the mask */
	if (!imm || imm == mask || imm & ~mask)
		return AARCH64_BREAK_FAULT;

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride. Each successful iteration halves the
	 * element size @esz.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 */
	imms  = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotate we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous ranges of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls64(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size...
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

/* Generate AND/ORR/EOR/ANDS (immediate) with Rd, Rn and a logical immediate. */
u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
				       enum aarch64_insn_variant variant,
				       enum aarch64_insn_register Rn,
				       enum aarch64_insn_register Rd,
				       u64 imm)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_imm_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_imm_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_imm_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_imm_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_encode_immediate(imm, variant, insn);
}

/* Generate EXTR Rd, Rn, Rm, #lsb; @lsb must fit the variant's register width. */
u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
			  enum aarch64_insn_register Rm,
			  enum aarch64_insn_register Rn,
			  enum aarch64_insn_register Rd,
			  u8 lsb)
{
	u32 insn;

	insn = aarch64_insn_get_extr_value();

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (lsb > 31)
			return AARCH64_BREAK_FAULT;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if (lsb > 63)
			return AARCH64_BREAK_FAULT;
		insn |= AARCH64_INSN_SF_BIT;
		/* The 64-bit form also requires N == 1. */
		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}

/*
 * Map a barrier type to the CRm value used by DMB/DSB, or
 * AARCH64_BREAK_FAULT for an unknown type.
 */
static u32 __get_barrier_crm_val(enum aarch64_insn_mb_type type)
{
	switch (type) {
	case AARCH64_INSN_MB_SY:
		return 0xf;
	case AARCH64_INSN_MB_ST:
		return 0xe;
	case AARCH64_INSN_MB_LD:
		return 0xd;
	case AARCH64_INSN_MB_ISH:
		return 0xb;
	case AARCH64_INSN_MB_ISHST:
		return 0xa;
	case AARCH64_INSN_MB_ISHLD:
		return 0x9;
	case AARCH64_INSN_MB_NSH:
		return 0x7;
	case AARCH64_INSN_MB_NSHST:
		return 0x6;
	case AARCH64_INSN_MB_NSHLD:
		return 0x5;
	default:
		pr_err("%s: unknown barrier type %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}
}

/* Generate DMB <option> for the given barrier type. */
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
{
	u32 opt;
	u32 insn;

	opt = __get_barrier_crm_val(type);
	if (opt == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_get_dmb_value();
	/* The barrier option (CRm) lives in bits [11:8]. */
	insn &= ~GENMASK(11, 8);
	insn |= (opt << 8);

	return insn;
}

/* Generate DSB <option> for the given barrier type. */
u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type)
{
	u32 opt, insn;

	opt = __get_barrier_crm_val(type);
	if (opt == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_get_dsb_base_value();
	/* The barrier option (CRm) lives in bits [11:8]. */
	insn &= ~GENMASK(11, 8);
	insn |= (opt << 8);

	return insn;
}

/* Generate MRS <result>, <sysreg>. */
u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
			 enum aarch64_insn_system_register sysreg)
{
	u32 insn = aarch64_insn_get_mrs_value();

	/* Clear the sysreg field ([19:5]) and Rt ([4:0]), then refill them. */
	insn &= ~GENMASK(19, 0);
	insn |= sysreg << 5;
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT,
					    insn, result);
}