Lines Matching full:shift

Each entry below gives the line number in the source file, the matching source line, and the enclosing function; declaration sites are additionally tagged local or argument.

28 int shift; in aarch64_get_imm_shift_mask() local
33 shift = 0; in aarch64_get_imm_shift_mask()
37 shift = 5; in aarch64_get_imm_shift_mask()
41 shift = 5; in aarch64_get_imm_shift_mask()
45 shift = 5; in aarch64_get_imm_shift_mask()
49 shift = 10; in aarch64_get_imm_shift_mask()
53 shift = 12; in aarch64_get_imm_shift_mask()
57 shift = 15; in aarch64_get_imm_shift_mask()
62 shift = 10; in aarch64_get_imm_shift_mask()
66 shift = 16; in aarch64_get_imm_shift_mask()
70 shift = 22; in aarch64_get_imm_shift_mask()
77 *shiftp = shift; in aarch64_get_imm_shift_mask()
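
The block above is a lookup that maps an immediate-field type to the position (shift) and width (mask) of that field inside the 32-bit instruction word. A minimal standalone sketch of the pattern, using plain stdint types instead of the kernel's and covering only a few of the cases visible above (the field widths are assumptions from the A64 encoding, not shown in the listing):

#include <stdint.h>

/* Reduced, illustrative stand-ins for the kernel's immediate-field types. */
enum imm_type { IMM_26, IMM_19, IMM_12, IMM_N };

static int get_imm_shift_mask(enum imm_type type, uint32_t *maskp, int *shiftp)
{
	uint32_t mask;
	int shift;

	switch (type) {
	case IMM_26:			/* e.g. branch offset: bits [25:0] */
		mask = (1u << 26) - 1;
		shift = 0;
		break;
	case IMM_19:			/* e.g. conditional-branch offset: bits [23:5] */
		mask = (1u << 19) - 1;
		shift = 5;
		break;
	case IMM_12:			/* e.g. add/sub immediate: bits [21:10] */
		mask = (1u << 12) - 1;
		shift = 10;
		break;
	case IMM_N:			/* N bit of logical immediates: bit 22 */
		mask = 1;
		shift = 22;
		break;
	default:
		return -1;		/* the kernel version returns -EINVAL here */
	}

	*maskp = mask;
	*shiftp = shift;
	return 0;
}

The callers below (lines 103 and 133) bail out when this lookup fails, which is why they check its return value before using shift.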
92 int shift; in aarch64_insn_decode_immediate() local
96 shift = 0; in aarch64_insn_decode_immediate()
103 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { in aarch64_insn_decode_immediate()
110 return (insn >> shift) & mask; in aarch64_insn_decode_immediate()
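
Decoding an immediate then reduces to shifting the field down and masking it off; the early shift = 0 at line 96 belongs to a special case handled before the table lookup (ADR-style split immediates in the kernel source). A sketch of the common path, reusing the helper above with illustrative names:

static uint32_t decode_immediate(enum imm_type type, uint32_t insn)
{
	uint32_t mask;
	int shift;

	if (get_imm_shift_mask(type, &mask, &shift) < 0)
		return 0;		/* the kernel logs the unknown type and returns 0 */

	return (insn >> shift) & mask;	/* bring the field down to bit 0 */
}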
117 int shift; in aarch64_insn_encode_immediate() local
124 shift = 0; in aarch64_insn_encode_immediate()
133 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { in aarch64_insn_encode_immediate()
141 insn &= ~(mask << shift); in aarch64_insn_encode_immediate()
142 insn |= (imm & mask) << shift; in aarch64_insn_encode_immediate()
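
Encoding is the inverse: clear the field in the existing instruction word, then OR in the masked new value at the same position. A sketch under the same assumptions:

static uint32_t encode_immediate(enum imm_type type, uint32_t insn, uint64_t imm)
{
	uint32_t mask;
	int shift;

	if (get_imm_shift_mask(type, &mask, &shift) < 0)
		return insn;		/* the kernel returns a BRK (fault) encoding instead */

	insn &= ~(mask << shift);			/* clear the old field */
	insn |= ((uint32_t)imm & mask) << shift;	/* insert the new value */
	return insn;
}

This read-modify-write of a single field is the core of instruction patching.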
150 int shift; in aarch64_insn_decode_register() local
155 shift = 0; in aarch64_insn_decode_register()
158 shift = 5; in aarch64_insn_decode_register()
162 shift = 10; in aarch64_insn_decode_register()
165 shift = 16; in aarch64_insn_decode_register()
173 return (insn >> shift) & GENMASK(4, 0); in aarch64_insn_decode_register()
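
Register operands are uniform 5-bit fields; only the position depends on the operand slot (Rd at bit 0, Rn at bit 5, Rt2/Ra at bit 10, Rm at bit 16, matching the four shifts above). A standalone sketch with an illustrative slot enum:

#include <stdint.h>

/* Illustrative operand slots; the values are the bit positions from the listing. */
enum reg_slot { REG_RD = 0, REG_RN = 5, REG_RT2 = 10, REG_RM = 16 };

#define REG_MASK 0x1fu		/* stand-in for GENMASK(4, 0): a 5-bit field */

static uint32_t decode_register(enum reg_slot slot, uint32_t insn)
{
	return (insn >> slot) & REG_MASK;	/* 5-bit register number */
}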
180 int shift; in aarch64_insn_encode_register() local
193 shift = 0; in aarch64_insn_encode_register()
196 shift = 5; in aarch64_insn_encode_register()
200 shift = 10; in aarch64_insn_encode_register()
204 shift = 16; in aarch64_insn_encode_register()
212 insn &= ~(GENMASK(4, 0) << shift); in aarch64_insn_encode_register()
213 insn |= reg << shift; in aarch64_insn_encode_register()
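
The encode side mirrors it: mask out the old field, OR in the new register number. A sketch reusing the slot enum above:

static uint32_t encode_register(enum reg_slot slot, uint32_t insn, uint32_t reg)
{
	if (reg > 31)			/* valid register numbers are 0..31 */
		return insn;		/* the kernel reports an error here instead */

	insn &= ~(REG_MASK << slot);	/* clear the old 5-bit field */
	insn |= reg << slot;		/* insert the new register number */
	return insn;
}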
418 u32 shift; in aarch64_insn_gen_load_store_imm() local
425 shift = aarch64_insn_ldst_size[size]; in aarch64_insn_gen_load_store_imm()
426 if (imm & ~(BIT(12 + shift) - BIT(shift))) { in aarch64_insn_gen_load_store_imm()
431 imm >>= shift; in aarch64_insn_gen_load_store_imm()
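
Here the 12-bit unsigned load/store offset is scaled by the access size: shift is log2 of the size in bytes (taken from the aarch64_insn_ldst_size[] table), the offset must be size-aligned and fit in 12 bits after scaling, and the scaled value (imm >> shift) is what gets encoded. A sketch of that range check, with an assumed table layout:

#include <stdbool.h>
#include <stdint.h>

/* log2 of the access size in bytes; the index order is an assumption. */
static const uint32_t ldst_size_log2[] = { 0, 1, 2, 3 };	/* 8/16/32/64-bit */

static bool ldst_imm_ok(int size_idx, uint64_t imm)
{
	uint32_t shift = ldst_size_log2[size_idx];
	uint64_t field = (1ull << (12 + shift)) - (1ull << shift);

	/* Anything outside bits [12+shift-1 : shift] cannot be encoded. */
	return !(imm & ~field);
}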
488 int shift; in aarch64_insn_gen_load_store_pair() local
515 shift = 2; in aarch64_insn_gen_load_store_pair()
523 shift = 3; in aarch64_insn_gen_load_store_pair()
541 offset >> shift); in aarch64_insn_gen_load_store_pair()
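
Load/store pair offsets are scaled the same way, but by the register size (4-byte units for 32-bit registers, 8-byte units for 64-bit), and the scaled value must fit the signed 7-bit pair-immediate field (IMM_7, the shift = 15 entry in the table at the top). A sketch of the encodable range:

#include <stdbool.h>
#include <stdint.h>

static bool ldst_pair_offset_ok(bool is64, int64_t offset)
{
	int shift = is64 ? 3 : 2;		/* 8-byte or 4-byte units */

	if (offset & ((1 << shift) - 1))	/* must be size-aligned */
		return false;

	offset >>= shift;			/* this is what gets encoded */
	return offset >= -64 && offset <= 63;	/* signed imm7 range */
}

So 64-bit pairs reach byte offsets -512..504 and 32-bit pairs -256..252.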
796 /* We can't encode more than a 24bit value (12bit + 12bit shift) */ in aarch64_insn_gen_add_sub_imm()
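
The comment refers to the ADD/SUB (immediate) format: the raw field is 12 bits wide and an optional LSL #12 flag shifts it, so a single instruction can carry either a plain 12-bit value or a 12-bit value occupying the upper half of a 24-bit range, never both halves at once. A sketch of that acceptance test (the helper name and flag output are hypothetical):

#include <stdbool.h>
#include <stdint.h>

static bool add_sub_imm_ok(uint32_t imm, uint32_t *field, bool *lsl12)
{
	if (imm & ~((1u << 24) - 1))	/* more than 12bit + 12bit shift */
		return false;

	if (imm & ~0xfffu) {		/* something in the top 12 bits... */
		if (imm & 0xfffu)	/* ...and in the low 12 bits: not encodable */
			return false;
		*field = imm >> 12;
		*lsl12 = true;		/* needs the LSL #12 form */
	} else {
		*field = imm;
		*lsl12 = false;
	}
	return true;
}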
877 int imm, int shift, in aarch64_insn_gen_movewide() argument
905 if (shift != 0 && shift != 16) { in aarch64_insn_gen_movewide()
906 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_movewide()
907 shift); in aarch64_insn_gen_movewide()
913 if (shift != 0 && shift != 16 && shift != 32 && shift != 48) { in aarch64_insn_gen_movewide()
914 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_movewide()
915 shift); in aarch64_insn_gen_movewide()
924 insn |= (shift >> 4) << 21; in aarch64_insn_gen_movewide()
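
MOVZ/MOVK/MOVN place a 16-bit immediate into one of the 16-bit lanes of the register; the lane is given as a bit-shift that must be a multiple of 16 (0 or 16 for the 32-bit variant, 0/16/32/48 for 64-bit) and is encoded as shift/16 in the two-bit hw field starting at bit 21, which is what the shift >> 4 above computes. A sketch of the validation and packing:

#include <stdbool.h>
#include <stdint.h>

static bool movewide_encode_shift(bool is64, int shift, uint32_t *insn)
{
	int max = is64 ? 48 : 16;

	if (shift < 0 || shift > max || (shift & 15))
		return false;			/* invalid shift encoding */

	*insn |= (uint32_t)(shift >> 4) << 21;	/* hw = shift / 16, bits [22:21] */
	return true;
}

Materialising a full 64-bit constant this way takes a MOVZ plus up to three MOVKs, at shifts 0, 16, 32 and 48.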
934 int shift, in aarch64_insn_gen_add_sub_shifted_reg() argument
960 if (shift & ~(SZ_32 - 1)) { in aarch64_insn_gen_add_sub_shifted_reg()
961 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_add_sub_shifted_reg()
962 shift); in aarch64_insn_gen_add_sub_shifted_reg()
968 if (shift & ~(SZ_64 - 1)) { in aarch64_insn_gen_add_sub_shifted_reg()
969 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_add_sub_shifted_reg()
970 shift); in aarch64_insn_gen_add_sub_shifted_reg()
986 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); in aarch64_insn_gen_add_sub_shifted_reg()
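
For the shifted-register ADD/SUB forms the shift amount only has to be smaller than the register width (SZ_32 and SZ_64 are 32 and 64), and the amount itself is stored via the 6-bit IMM_6 field, the shift = 10 entry in the table at the top (bits [15:10]). A sketch of the width check:

#include <stdbool.h>

/* 0..31 is legal for 32-bit operands, 0..63 for 64-bit ones. */
static bool shifted_reg_amount_ok(bool is64, int shift)
{
	int width = is64 ? 64 : 32;	/* SZ_64 / SZ_32 in the kernel */

	/* Negative amounts also fail: their sign bits fall outside the mask. */
	return !(shift & ~(width - 1));
}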
1128 int shift, in aarch64_insn_gen_logical_shifted_reg() argument
1166 if (shift & ~(SZ_32 - 1)) { in aarch64_insn_gen_logical_shifted_reg()
1167 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_logical_shifted_reg()
1168 shift); in aarch64_insn_gen_logical_shifted_reg()
1174 if (shift & ~(SZ_64 - 1)) { in aarch64_insn_gen_logical_shifted_reg()
1175 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_logical_shifted_reg()
1176 shift); in aarch64_insn_gen_logical_shifted_reg()
1192 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); in aarch64_insn_gen_logical_shifted_reg()
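
The logical shifted-register generator ends the same way: after the same width-dependent check, the amount is written into the imm6 field. A sketch of that final step, reusing the check above (the field position is taken from the mask table, bits [15:10]):

#include <stdbool.h>
#include <stdint.h>

static uint32_t encode_shift_amount(uint32_t insn, bool is64, int shift)
{
	if (!shifted_reg_amount_ok(is64, shift))
		return insn;		/* the kernel returns an error encoding instead */

	insn &= ~(0x3fu << 10);		/* clear imm6 */
	insn |= (uint32_t)shift << 10;	/* imm6 = shift amount */
	return insn;
}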