// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */

#include <linux/bpf.h>

#include "disasm.h"

#define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x)
static const char * const func_id_str[] = {
	__BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN)
};
#undef __BPF_FUNC_STR_FN

static const char *__func_get_name(const struct bpf_insn_cbs *cbs,
				   const struct bpf_insn *insn,
				   char *buff, size_t len)
{
	BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);

	if (!insn->src_reg &&
	    insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID &&
	    func_id_str[insn->imm])
		return func_id_str[insn->imm];

	if (cbs && cbs->cb_call) {
		const char *res;

		res = cbs->cb_call(cbs->private_data, insn);
		if (res)
			return res;
	}

	if (insn->src_reg == BPF_PSEUDO_CALL)
		snprintf(buff, len, "%+d", insn->imm);
	else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
		snprintf(buff, len, "kernel-function");

	return buff;
}

static const char *__func_imm_name(const struct bpf_insn_cbs *cbs,
				   const struct bpf_insn *insn,
				   u64 full_imm, char *buff, size_t len)
{
	if (cbs && cbs->cb_imm)
		return cbs->cb_imm(cbs->private_data, insn, full_imm);

	snprintf(buff, len, "0x%llx", (unsigned long long)full_imm);
	return buff;
}

const char *func_id_name(int id)
{
	if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
		return func_id_str[id];
	else
		return "unknown";
}

const char *const bpf_class_string[8] = {
	[BPF_LD]    = "ld",
	[BPF_LDX]   = "ldx",
	[BPF_ST]    = "st",
	[BPF_STX]   = "stx",
	[BPF_ALU]   = "alu",
	[BPF_JMP]   = "jmp",
	[BPF_JMP32] = "jmp32",
	[BPF_ALU64] = "alu64",
};

const char *const bpf_alu_string[16] = {
	[BPF_ADD >> 4]  = "+=",
	[BPF_SUB >> 4]  = "-=",
	[BPF_MUL >> 4]  = "*=",
	[BPF_DIV >> 4]  = "/=",
	[BPF_OR  >> 4]  = "|=",
	[BPF_AND >> 4]  = "&=",
	[BPF_LSH >> 4]  = "<<=",
	[BPF_RSH >> 4]  = ">>=",
	[BPF_NEG >> 4]  = "neg",
	[BPF_MOD >> 4]  = "%=",
	[BPF_XOR >> 4]  = "^=",
	[BPF_MOV >> 4]  = "=",
	[BPF_ARSH >> 4] = "s>>=",
	[BPF_END >> 4]  = "endian",
};

static const char *const bpf_alu_sign_string[16] = {
	[BPF_DIV >> 4]  = "s/=",
	[BPF_MOD >> 4]  = "s%=",
};

static const char *const bpf_movsx_string[4] = {
	[0] = "(s8)",
	[1] = "(s16)",
	[3] = "(s32)",
};

static const char *const bpf_atomic_alu_string[16] = {
	[BPF_ADD >> 4]  = "add",
	[BPF_AND >> 4]  = "and",
	[BPF_OR >> 4]   = "or",
	[BPF_XOR >> 4]  = "xor",
};

static const char *const bpf_ldst_string[] = {
	[BPF_W >> 3]  = "u32",
	[BPF_H >> 3]  = "u16",
	[BPF_B >> 3]  = "u8",
	[BPF_DW >> 3] = "u64",
};

static const char *const bpf_ldsx_string[] = {
	[BPF_W >> 3]  = "s32",
	[BPF_H >> 3]  = "s16",
	[BPF_B >> 3]  = "s8",
};

static const char *const bpf_jmp_string[16] = {
	[BPF_JA >> 4]   = "jmp",
	[BPF_JEQ >> 4]  = "==",
	[BPF_JGT >> 4]  = ">",
	[BPF_JLT >> 4]  = "<",
	[BPF_JGE >> 4]  = ">=",
	[BPF_JLE >> 4]  = "<=",
	[BPF_JSET >> 4] = "&",
	[BPF_JNE >> 4]  = "!=",
	[BPF_JSGT >> 4] = "s>",
	[BPF_JSLT >> 4] = "s<",
	[BPF_JSGE >> 4] = "s>=",
	[BPF_JSLE >> 4] = "s<=",
	[BPF_CALL >> 4] = "call",
	[BPF_EXIT >> 4] = "exit",
};

static void print_bpf_end_insn(bpf_insn_print_t verbose,
			       void *private_data,
			       const struct bpf_insn *insn)
{
	verbose(private_data, "(%02x) r%d = %s%d r%d\n",
		insn->code, insn->dst_reg,
		BPF_SRC(insn->code) == BPF_TO_BE ? "be" : "le",
		insn->imm, insn->dst_reg);
}

static void print_bpf_bswap_insn(bpf_insn_print_t verbose,
				 void *private_data,
				 const struct bpf_insn *insn)
{
	verbose(private_data, "(%02x) r%d = bswap%d r%d\n",
		insn->code, insn->dst_reg,
		insn->imm, insn->dst_reg);
}

static bool is_sdiv_smod(const struct bpf_insn *insn)
{
	return (BPF_OP(insn->code) == BPF_DIV || BPF_OP(insn->code) == BPF_MOD) &&
	       insn->off == 1;
}

static bool is_movsx(const struct bpf_insn *insn)
{
	return BPF_OP(insn->code) == BPF_MOV &&
	       (insn->off == 8 || insn->off == 16 || insn->off == 32);
}

void print_bpf_insn(const struct bpf_insn_cbs *cbs,
		    const struct bpf_insn *insn,
		    bool allow_ptr_leaks)
{
	const bpf_insn_print_t verbose = cbs->cb_print;
	u8 class = BPF_CLASS(insn->code);

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (BPF_OP(insn->code) == BPF_END) {
			if (class == BPF_ALU64)
				print_bpf_bswap_insn(verbose, cbs->private_data, insn);
			else
				print_bpf_end_insn(verbose, cbs->private_data, insn);
		} else if (BPF_OP(insn->code) == BPF_NEG) {
			verbose(cbs->private_data, "(%02x) %c%d = -%c%d\n",
				insn->code, class == BPF_ALU ? 'w' : 'r',
				insn->dst_reg, class == BPF_ALU ? 'w' : 'r',
				insn->dst_reg);
		} else if (BPF_SRC(insn->code) == BPF_X) {
			verbose(cbs->private_data, "(%02x) %c%d %s %s%c%d\n",
				insn->code, class == BPF_ALU ? 'w' : 'r',
				insn->dst_reg,
				is_sdiv_smod(insn) ? bpf_alu_sign_string[BPF_OP(insn->code) >> 4]
						   : bpf_alu_string[BPF_OP(insn->code) >> 4],
				is_movsx(insn) ? bpf_movsx_string[(insn->off >> 3) - 1] : "",
				class == BPF_ALU ? 'w' : 'r',
				insn->src_reg);
		} else {
			verbose(cbs->private_data, "(%02x) %c%d %s %d\n",
				insn->code, class == BPF_ALU ? 'w' : 'r',
				insn->dst_reg,
				is_sdiv_smod(insn) ? bpf_alu_sign_string[BPF_OP(insn->code) >> 4]
						   : bpf_alu_string[BPF_OP(insn->code) >> 4],
				insn->imm);
		}
	} else if (class == BPF_STX) {
		if (BPF_MODE(insn->code) == BPF_MEM)
			verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg,
				insn->off, insn->src_reg);
		else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
			 (insn->imm == BPF_ADD || insn->imm == BPF_AND ||
			  insn->imm == BPF_OR || insn->imm == BPF_XOR)) {
			verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) %s r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off,
				bpf_alu_string[BPF_OP(insn->imm) >> 4],
				insn->src_reg);
		} else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
			   (insn->imm == (BPF_ADD | BPF_FETCH) ||
			    insn->imm == (BPF_AND | BPF_FETCH) ||
			    insn->imm == (BPF_OR | BPF_FETCH) ||
			    insn->imm == (BPF_XOR | BPF_FETCH))) {
			verbose(cbs->private_data, "(%02x) r%d = atomic%s_fetch_%s((%s *)(r%d %+d), r%d)\n",
				insn->code, insn->src_reg,
				BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
				bpf_atomic_alu_string[BPF_OP(insn->imm) >> 4],
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off, insn->src_reg);
		} else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
			   insn->imm == BPF_CMPXCHG) {
			verbose(cbs->private_data, "(%02x) r0 = atomic%s_cmpxchg((%s *)(r%d %+d), r0, r%d)\n",
				insn->code,
				BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off,
				insn->src_reg);
		} else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
			   insn->imm == BPF_XCHG) {
			verbose(cbs->private_data, "(%02x) r%d = atomic%s_xchg((%s *)(r%d %+d), r%d)\n",
				insn->code, insn->src_reg,
				BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off, insn->src_reg);
		} else {
			verbose(cbs->private_data, "BUG_%02x\n", insn->code);
		}
	} else if (class == BPF_ST) {
		if (BPF_MODE(insn->code) == BPF_MEM) {
			verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg,
				insn->off, insn->imm);
		} else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) {
			verbose(cbs->private_data, "(%02x) nospec\n", insn->code);
		} else {
			verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
		}
	} else if (class == BPF_LDX) {
		if (BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) {
			verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
			return;
		}
		verbose(cbs->private_data, "(%02x) r%d = *(%s *)(r%d %+d)\n",
			insn->code, insn->dst_reg,
			BPF_MODE(insn->code) == BPF_MEM ?
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3] :
				bpf_ldsx_string[BPF_SIZE(insn->code) >> 3],
			insn->src_reg, insn->off);
	} else if (class == BPF_LD) {
		if (BPF_MODE(insn->code) == BPF_ABS) {
			verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[%d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IND) {
			verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[r%d + %d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->src_reg, insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IMM &&
			   BPF_SIZE(insn->code) == BPF_DW) {
			/* At this point, we already made sure that the second
			 * part of the ldimm64 insn is accessible.
			 */
			u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
			bool is_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD ||
				      insn->src_reg == BPF_PSEUDO_MAP_VALUE;
			char tmp[64];

			if (is_ptr && !allow_ptr_leaks)
				imm = 0;

			verbose(cbs->private_data, "(%02x) r%d = %s\n",
				insn->code, insn->dst_reg,
				__func_imm_name(cbs, insn, imm,
						tmp, sizeof(tmp)));
		} else {
			verbose(cbs->private_data, "BUG_ld_%02x\n", insn->code);
			return;
		}
	} else if (class == BPF_JMP32 || class == BPF_JMP) {
		u8 opcode = BPF_OP(insn->code);

		if (opcode == BPF_CALL) {
			char tmp[64];

			if (insn->src_reg == BPF_PSEUDO_CALL) {
				verbose(cbs->private_data, "(%02x) call pc%s\n",
					insn->code,
					__func_get_name(cbs, insn,
							tmp, sizeof(tmp)));
			} else {
				strcpy(tmp, "unknown");
				verbose(cbs->private_data, "(%02x) call %s#%d\n", insn->code,
					__func_get_name(cbs, insn,
							tmp, sizeof(tmp)),
					insn->imm);
			}
		} else if (insn->code == (BPF_JMP | BPF_JA)) {
			verbose(cbs->private_data, "(%02x) goto pc%+d\n",
				insn->code, insn->off);
		} else if (insn->code == (BPF_JMP32 | BPF_JA)) {
			verbose(cbs->private_data, "(%02x) gotol pc%+d\n",
				insn->code, insn->imm);
		} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
			verbose(cbs->private_data, "(%02x) exit\n", insn->code);
		} else if (BPF_SRC(insn->code) == BPF_X) {
			verbose(cbs->private_data,
				"(%02x) if %c%d %s %c%d goto pc%+d\n",
				insn->code, class == BPF_JMP32 ? 'w' : 'r',
				insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				class == BPF_JMP32 ? 'w' : 'r',
				insn->src_reg, insn->off);
		} else {
			verbose(cbs->private_data,
				"(%02x) if %c%d %s 0x%x goto pc%+d\n",
				insn->code, class == BPF_JMP32 ? 'w' : 'r',
				insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				insn->imm, insn->off);
		}
	} else {
		verbose(cbs->private_data, "(%02x) %s\n",
			insn->code, bpf_class_string[class]);
	}
}