1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Testsuite for eBPF verifier 4 * 5 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com 6 * Copyright (c) 2017 Facebook 7 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io 8 */ 9 10 #include <endian.h> 11 #include <asm/types.h> 12 #include <linux/types.h> 13 #include <stdint.h> 14 #include <stdio.h> 15 #include <stdlib.h> 16 #include <unistd.h> 17 #include <errno.h> 18 #include <string.h> 19 #include <stddef.h> 20 #include <stdbool.h> 21 #include <sched.h> 22 #include <limits.h> 23 #include <assert.h> 24 25 #include <sys/capability.h> 26 27 #include <linux/unistd.h> 28 #include <linux/filter.h> 29 #include <linux/bpf_perf_event.h> 30 #include <linux/bpf.h> 31 #include <linux/if_ether.h> 32 #include <linux/btf.h> 33 34 #include <bpf/btf.h> 35 #include <bpf/bpf.h> 36 #include <bpf/libbpf.h> 37 38 #ifdef HAVE_GENHDR 39 # include "autoconf.h" 40 #else 41 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__) 42 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1 43 # endif 44 #endif 45 #include "bpf_rand.h" 46 #include "bpf_util.h" 47 #include "test_btf.h" 48 #include "../../../include/linux/filter.h" 49 50 #ifndef ENOTSUPP 51 #define ENOTSUPP 524 52 #endif 53 54 #define MAX_INSNS BPF_MAXINSNS 55 #define MAX_TEST_INSNS 1000000 56 #define MAX_FIXUPS 8 57 #define MAX_NR_MAPS 22 58 #define MAX_TEST_RUNS 8 59 #define POINTER_VALUE 0xcafe4all 60 #define TEST_DATA_LEN 64 61 62 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0) 63 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1) 64 65 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled" 66 static bool unpriv_disabled = false; 67 static int skips; 68 static bool verbose = false; 69 70 struct kfunc_btf_id_pair { 71 const char *kfunc; 72 int insn_idx; 73 }; 74 75 struct bpf_test { 76 const char *descr; 77 struct bpf_insn insns[MAX_INSNS]; 78 struct bpf_insn *fill_insns; 79 int fixup_map_hash_8b[MAX_FIXUPS]; 80 int fixup_map_hash_48b[MAX_FIXUPS]; 81 int fixup_map_hash_16b[MAX_FIXUPS]; 82 int fixup_map_array_48b[MAX_FIXUPS]; 83 int fixup_map_sockmap[MAX_FIXUPS]; 84 int fixup_map_sockhash[MAX_FIXUPS]; 85 int fixup_map_xskmap[MAX_FIXUPS]; 86 int fixup_map_stacktrace[MAX_FIXUPS]; 87 int fixup_prog1[MAX_FIXUPS]; 88 int fixup_prog2[MAX_FIXUPS]; 89 int fixup_map_in_map[MAX_FIXUPS]; 90 int fixup_cgroup_storage[MAX_FIXUPS]; 91 int fixup_percpu_cgroup_storage[MAX_FIXUPS]; 92 int fixup_map_spin_lock[MAX_FIXUPS]; 93 int fixup_map_array_ro[MAX_FIXUPS]; 94 int fixup_map_array_wo[MAX_FIXUPS]; 95 int fixup_map_array_small[MAX_FIXUPS]; 96 int fixup_sk_storage_map[MAX_FIXUPS]; 97 int fixup_map_event_output[MAX_FIXUPS]; 98 int fixup_map_reuseport_array[MAX_FIXUPS]; 99 int fixup_map_ringbuf[MAX_FIXUPS]; 100 int fixup_map_timer[MAX_FIXUPS]; 101 struct kfunc_btf_id_pair fixup_kfunc_btf_id[MAX_FIXUPS]; 102 /* Expected verifier log output for result REJECT or VERBOSE_ACCEPT. 103 * Can be a tab-separated sequence of expected strings. An empty string 104 * means no log verification. 105 */ 106 const char *errstr; 107 const char *errstr_unpriv; 108 uint32_t insn_processed; 109 int prog_len; 110 enum { 111 UNDEF, 112 ACCEPT, 113 REJECT, 114 VERBOSE_ACCEPT, 115 } result, result_unpriv; 116 enum bpf_prog_type prog_type; 117 uint8_t flags; 118 void (*fill_helper)(struct bpf_test *self); 119 int runs; 120 #define bpf_testdata_struct_t \ 121 struct { \ 122 uint32_t retval, retval_unpriv; \ 123 union { \ 124 __u8 data[TEST_DATA_LEN]; \ 125 __u64 data64[TEST_DATA_LEN / 8]; \ 126 }; \ 127 } 128 union { 129 bpf_testdata_struct_t; 130 bpf_testdata_struct_t retvals[MAX_TEST_RUNS]; 131 }; 132 enum bpf_attach_type expected_attach_type; 133 const char *kfunc; 134 }; 135 136 /* Note we want this to be 64 bit aligned so that the end of our array is 137 * actually the end of the structure. 138 */ 139 #define MAX_ENTRIES 11 140 141 struct test_val { 142 unsigned int index; 143 int foo[MAX_ENTRIES]; 144 }; 145 146 struct other_val { 147 long long foo; 148 long long bar; 149 }; 150 151 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self) 152 { 153 /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */ 154 #define PUSH_CNT 51 155 /* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */ 156 unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6; 157 struct bpf_insn *insn = self->fill_insns; 158 int i = 0, j, k = 0; 159 160 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); 161 loop: 162 for (j = 0; j < PUSH_CNT; j++) { 163 insn[i++] = BPF_LD_ABS(BPF_B, 0); 164 /* jump to error label */ 165 insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3); 166 i++; 167 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); 168 insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1); 169 insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2); 170 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 171 BPF_FUNC_skb_vlan_push), 172 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3); 173 i++; 174 } 175 176 for (j = 0; j < PUSH_CNT; j++) { 177 insn[i++] = BPF_LD_ABS(BPF_B, 0); 178 insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3); 179 i++; 180 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); 181 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 182 BPF_FUNC_skb_vlan_pop), 183 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3); 184 i++; 185 } 186 if (++k < 5) 187 goto loop; 188 189 for (; i < len - 3; i++) 190 insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef); 191 insn[len - 3] = BPF_JMP_A(1); 192 /* error label */ 193 insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0); 194 insn[len - 1] = BPF_EXIT_INSN(); 195 self->prog_len = len; 196 } 197 198 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self) 199 { 200 struct bpf_insn *insn = self->fill_insns; 201 /* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns, 202 * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted 203 * to extend the error value of the inlined ld_abs sequence which then 204 * contains 7 insns. so, set the dividend to 7 so the testcase could 205 * work on all arches. 206 */ 207 unsigned int len = (1 << 15) / 7; 208 int i = 0; 209 210 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); 211 insn[i++] = BPF_LD_ABS(BPF_B, 0); 212 insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2); 213 i++; 214 while (i < len - 1) 215 insn[i++] = BPF_LD_ABS(BPF_B, 1); 216 insn[i] = BPF_EXIT_INSN(); 217 self->prog_len = i + 1; 218 } 219 220 static void bpf_fill_rand_ld_dw(struct bpf_test *self) 221 { 222 struct bpf_insn *insn = self->fill_insns; 223 uint64_t res = 0; 224 int i = 0; 225 226 insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0); 227 while (i < self->retval) { 228 uint64_t val = bpf_semi_rand_get(); 229 struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) }; 230 231 res ^= val; 232 insn[i++] = tmp[0]; 233 insn[i++] = tmp[1]; 234 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1); 235 } 236 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0); 237 insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32); 238 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1); 239 insn[i] = BPF_EXIT_INSN(); 240 self->prog_len = i + 1; 241 res ^= (res >> 32); 242 self->retval = (uint32_t)res; 243 } 244 245 #define MAX_JMP_SEQ 8192 246 247 /* test the sequence of 8k jumps */ 248 static void bpf_fill_scale1(struct bpf_test *self) 249 { 250 struct bpf_insn *insn = self->fill_insns; 251 int i = 0, k = 0; 252 253 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); 254 /* test to check that the long sequence of jumps is acceptable */ 255 while (k++ < MAX_JMP_SEQ) { 256 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 257 BPF_FUNC_get_prandom_u32); 258 insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2); 259 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10); 260 insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 261 -8 * (k % 64 + 1)); 262 } 263 /* is_state_visited() doesn't allocate state for pruning for every jump. 264 * Hence multiply jmps by 4 to accommodate that heuristic 265 */ 266 while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4) 267 insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42); 268 insn[i] = BPF_EXIT_INSN(); 269 self->prog_len = i + 1; 270 self->retval = 42; 271 } 272 273 /* test the sequence of 8k jumps in inner most function (function depth 8)*/ 274 static void bpf_fill_scale2(struct bpf_test *self) 275 { 276 struct bpf_insn *insn = self->fill_insns; 277 int i = 0, k = 0; 278 279 #define FUNC_NEST 7 280 for (k = 0; k < FUNC_NEST; k++) { 281 insn[i++] = BPF_CALL_REL(1); 282 insn[i++] = BPF_EXIT_INSN(); 283 } 284 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); 285 /* test to check that the long sequence of jumps is acceptable */ 286 k = 0; 287 while (k++ < MAX_JMP_SEQ) { 288 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 289 BPF_FUNC_get_prandom_u32); 290 insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2); 291 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10); 292 insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 293 -8 * (k % (64 - 4 * FUNC_NEST) + 1)); 294 } 295 while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4) 296 insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42); 297 insn[i] = BPF_EXIT_INSN(); 298 self->prog_len = i + 1; 299 self->retval = 42; 300 } 301 302 static void bpf_fill_scale(struct bpf_test *self) 303 { 304 switch (self->retval) { 305 case 1: 306 return bpf_fill_scale1(self); 307 case 2: 308 return bpf_fill_scale2(self); 309 default: 310 self->prog_len = 0; 311 break; 312 } 313 } 314 315 static int bpf_fill_torturous_jumps_insn_1(struct bpf_insn *insn) 316 { 317 unsigned int len = 259, hlen = 128; 318 int i; 319 320 insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32); 321 for (i = 1; i <= hlen; i++) { 322 insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, hlen); 323 insn[i + hlen] = BPF_JMP_A(hlen - i); 324 } 325 insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 1); 326 insn[len - 1] = BPF_EXIT_INSN(); 327 328 return len; 329 } 330 331 static int bpf_fill_torturous_jumps_insn_2(struct bpf_insn *insn) 332 { 333 unsigned int len = 4100, jmp_off = 2048; 334 int i, j; 335 336 insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32); 337 for (i = 1; i <= jmp_off; i++) { 338 insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, jmp_off); 339 } 340 insn[i++] = BPF_JMP_A(jmp_off); 341 for (; i <= jmp_off * 2 + 1; i+=16) { 342 for (j = 0; j < 16; j++) { 343 insn[i + j] = BPF_JMP_A(16 - j - 1); 344 } 345 } 346 347 insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 2); 348 insn[len - 1] = BPF_EXIT_INSN(); 349 350 return len; 351 } 352 353 static void bpf_fill_torturous_jumps(struct bpf_test *self) 354 { 355 struct bpf_insn *insn = self->fill_insns; 356 int i = 0; 357 358 switch (self->retval) { 359 case 1: 360 self->prog_len = bpf_fill_torturous_jumps_insn_1(insn); 361 return; 362 case 2: 363 self->prog_len = bpf_fill_torturous_jumps_insn_2(insn); 364 return; 365 case 3: 366 /* main */ 367 insn[i++] = BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4); 368 insn[i++] = BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 262); 369 insn[i++] = BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0); 370 insn[i++] = BPF_MOV64_IMM(BPF_REG_0, 3); 371 insn[i++] = BPF_EXIT_INSN(); 372 373 /* subprog 1 */ 374 i += bpf_fill_torturous_jumps_insn_1(insn + i); 375 376 /* subprog 2 */ 377 i += bpf_fill_torturous_jumps_insn_2(insn + i); 378 379 self->prog_len = i; 380 return; 381 default: 382 self->prog_len = 0; 383 break; 384 } 385 } 386 387 /* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */ 388 #define BPF_SK_LOOKUP(func) \ 389 /* struct bpf_sock_tuple tuple = {} */ \ 390 BPF_MOV64_IMM(BPF_REG_2, 0), \ 391 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \ 392 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \ 393 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \ 394 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \ 395 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \ 396 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \ 397 /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \ 398 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \ 399 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \ 400 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \ 401 BPF_MOV64_IMM(BPF_REG_4, 0), \ 402 BPF_MOV64_IMM(BPF_REG_5, 0), \ 403 BPF_EMIT_CALL(BPF_FUNC_ ## func) 404 405 /* BPF_DIRECT_PKT_R2 contains 7 instructions, it initializes default return 406 * value into 0 and does necessary preparation for direct packet access 407 * through r2. The allowed access range is 8 bytes. 408 */ 409 #define BPF_DIRECT_PKT_R2 \ 410 BPF_MOV64_IMM(BPF_REG_0, 0), \ 411 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \ 412 offsetof(struct __sk_buff, data)), \ 413 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \ 414 offsetof(struct __sk_buff, data_end)), \ 415 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \ 416 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \ 417 BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \ 418 BPF_EXIT_INSN() 419 420 /* BPF_RAND_UEXT_R7 contains 4 instructions, it initializes R7 into a random 421 * positive u32, and zero-extend it into 64-bit. 422 */ 423 #define BPF_RAND_UEXT_R7 \ 424 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \ 425 BPF_FUNC_get_prandom_u32), \ 426 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \ 427 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \ 428 BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33) 429 430 /* BPF_RAND_SEXT_R7 contains 5 instructions, it initializes R7 into a random 431 * negative u32, and sign-extend it into 64-bit. 432 */ 433 #define BPF_RAND_SEXT_R7 \ 434 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \ 435 BPF_FUNC_get_prandom_u32), \ 436 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \ 437 BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \ 438 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \ 439 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32) 440 441 static struct bpf_test tests[] = { 442 #define FILL_ARRAY 443 #include <verifier/tests.h> 444 #undef FILL_ARRAY 445 }; 446 447 static int probe_filter_length(const struct bpf_insn *fp) 448 { 449 int len; 450 451 for (len = MAX_INSNS - 1; len > 0; --len) 452 if (fp[len].code != 0 || fp[len].imm != 0) 453 break; 454 return len + 1; 455 } 456 457 static bool skip_unsupported_map(enum bpf_map_type map_type) 458 { 459 if (!libbpf_probe_bpf_map_type(map_type, NULL)) { 460 printf("SKIP (unsupported map type %d)\n", map_type); 461 skips++; 462 return true; 463 } 464 return false; 465 } 466 467 static int __create_map(uint32_t type, uint32_t size_key, 468 uint32_t size_value, uint32_t max_elem, 469 uint32_t extra_flags) 470 { 471 LIBBPF_OPTS(bpf_map_create_opts, opts); 472 int fd; 473 474 opts.map_flags = (type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0) | extra_flags; 475 fd = bpf_map_create(type, NULL, size_key, size_value, max_elem, &opts); 476 if (fd < 0) { 477 if (skip_unsupported_map(type)) 478 return -1; 479 printf("Failed to create hash map '%s'!\n", strerror(errno)); 480 } 481 482 return fd; 483 } 484 485 static int create_map(uint32_t type, uint32_t size_key, 486 uint32_t size_value, uint32_t max_elem) 487 { 488 return __create_map(type, size_key, size_value, max_elem, 0); 489 } 490 491 static void update_map(int fd, int index) 492 { 493 struct test_val value = { 494 .index = (6 + 1) * sizeof(int), 495 .foo[6] = 0xabcdef12, 496 }; 497 498 assert(!bpf_map_update_elem(fd, &index, &value, 0)); 499 } 500 501 static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret) 502 { 503 struct bpf_insn prog[] = { 504 BPF_MOV64_IMM(BPF_REG_0, ret), 505 BPF_EXIT_INSN(), 506 }; 507 508 return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL); 509 } 510 511 static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd, 512 int idx, int ret) 513 { 514 struct bpf_insn prog[] = { 515 BPF_MOV64_IMM(BPF_REG_3, idx), 516 BPF_LD_MAP_FD(BPF_REG_2, mfd), 517 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 518 BPF_FUNC_tail_call), 519 BPF_MOV64_IMM(BPF_REG_0, ret), 520 BPF_EXIT_INSN(), 521 }; 522 523 return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL); 524 } 525 526 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem, 527 int p1key, int p2key, int p3key) 528 { 529 int mfd, p1fd, p2fd, p3fd; 530 531 mfd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, NULL, sizeof(int), 532 sizeof(int), max_elem, NULL); 533 if (mfd < 0) { 534 if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY)) 535 return -1; 536 printf("Failed to create prog array '%s'!\n", strerror(errno)); 537 return -1; 538 } 539 540 p1fd = create_prog_dummy_simple(prog_type, 42); 541 p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41); 542 p3fd = create_prog_dummy_simple(prog_type, 24); 543 if (p1fd < 0 || p2fd < 0 || p3fd < 0) 544 goto err; 545 if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0) 546 goto err; 547 if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0) 548 goto err; 549 if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) { 550 err: 551 close(mfd); 552 mfd = -1; 553 } 554 close(p3fd); 555 close(p2fd); 556 close(p1fd); 557 return mfd; 558 } 559 560 static int create_map_in_map(void) 561 { 562 LIBBPF_OPTS(bpf_map_create_opts, opts); 563 int inner_map_fd, outer_map_fd; 564 565 inner_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 566 sizeof(int), 1, NULL); 567 if (inner_map_fd < 0) { 568 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY)) 569 return -1; 570 printf("Failed to create array '%s'!\n", strerror(errno)); 571 return inner_map_fd; 572 } 573 574 opts.inner_map_fd = inner_map_fd; 575 outer_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL, 576 sizeof(int), sizeof(int), 1, &opts); 577 if (outer_map_fd < 0) { 578 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS)) 579 return -1; 580 printf("Failed to create array of maps '%s'!\n", 581 strerror(errno)); 582 } 583 584 close(inner_map_fd); 585 586 return outer_map_fd; 587 } 588 589 static int create_cgroup_storage(bool percpu) 590 { 591 enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE : 592 BPF_MAP_TYPE_CGROUP_STORAGE; 593 int fd; 594 595 fd = bpf_map_create(type, NULL, sizeof(struct bpf_cgroup_storage_key), 596 TEST_DATA_LEN, 0, NULL); 597 if (fd < 0) { 598 if (skip_unsupported_map(type)) 599 return -1; 600 printf("Failed to create cgroup storage '%s'!\n", 601 strerror(errno)); 602 } 603 604 return fd; 605 } 606 607 /* struct bpf_spin_lock { 608 * int val; 609 * }; 610 * struct val { 611 * int cnt; 612 * struct bpf_spin_lock l; 613 * }; 614 * struct bpf_timer { 615 * __u64 :64; 616 * __u64 :64; 617 * } __attribute__((aligned(8))); 618 * struct timer { 619 * struct bpf_timer t; 620 * }; 621 */ 622 static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t"; 623 static __u32 btf_raw_types[] = { 624 /* int */ 625 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 626 /* struct bpf_spin_lock */ /* [2] */ 627 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), 628 BTF_MEMBER_ENC(15, 1, 0), /* int val; */ 629 /* struct val */ /* [3] */ 630 BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8), 631 BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */ 632 BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */ 633 /* struct bpf_timer */ /* [4] */ 634 BTF_TYPE_ENC(25, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0), 16), 635 /* struct timer */ /* [5] */ 636 BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), 637 BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */ 638 }; 639 640 static int load_btf(void) 641 { 642 struct btf_header hdr = { 643 .magic = BTF_MAGIC, 644 .version = BTF_VERSION, 645 .hdr_len = sizeof(struct btf_header), 646 .type_len = sizeof(btf_raw_types), 647 .str_off = sizeof(btf_raw_types), 648 .str_len = sizeof(btf_str_sec), 649 }; 650 void *ptr, *raw_btf; 651 int btf_fd; 652 653 ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) + 654 sizeof(btf_str_sec)); 655 656 memcpy(ptr, &hdr, sizeof(hdr)); 657 ptr += sizeof(hdr); 658 memcpy(ptr, btf_raw_types, hdr.type_len); 659 ptr += hdr.type_len; 660 memcpy(ptr, btf_str_sec, hdr.str_len); 661 ptr += hdr.str_len; 662 663 btf_fd = bpf_btf_load(raw_btf, ptr - raw_btf, NULL); 664 free(raw_btf); 665 if (btf_fd < 0) 666 return -1; 667 return btf_fd; 668 } 669 670 static int create_map_spin_lock(void) 671 { 672 LIBBPF_OPTS(bpf_map_create_opts, opts, 673 .btf_key_type_id = 1, 674 .btf_value_type_id = 3, 675 ); 676 int fd, btf_fd; 677 678 btf_fd = load_btf(); 679 if (btf_fd < 0) 680 return -1; 681 opts.btf_fd = btf_fd; 682 fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 8, 1, &opts); 683 if (fd < 0) 684 printf("Failed to create map with spin_lock\n"); 685 return fd; 686 } 687 688 static int create_sk_storage_map(void) 689 { 690 LIBBPF_OPTS(bpf_map_create_opts, opts, 691 .map_flags = BPF_F_NO_PREALLOC, 692 .btf_key_type_id = 1, 693 .btf_value_type_id = 3, 694 ); 695 int fd, btf_fd; 696 697 btf_fd = load_btf(); 698 if (btf_fd < 0) 699 return -1; 700 opts.btf_fd = btf_fd; 701 fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "test_map", 4, 8, 0, &opts); 702 close(opts.btf_fd); 703 if (fd < 0) 704 printf("Failed to create sk_storage_map\n"); 705 return fd; 706 } 707 708 static int create_map_timer(void) 709 { 710 LIBBPF_OPTS(bpf_map_create_opts, opts, 711 .btf_key_type_id = 1, 712 .btf_value_type_id = 5, 713 ); 714 int fd, btf_fd; 715 716 btf_fd = load_btf(); 717 if (btf_fd < 0) 718 return -1; 719 720 opts.btf_fd = btf_fd; 721 fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 16, 1, &opts); 722 if (fd < 0) 723 printf("Failed to create map with timer\n"); 724 return fd; 725 } 726 727 static char bpf_vlog[UINT_MAX >> 8]; 728 729 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, 730 struct bpf_insn *prog, int *map_fds) 731 { 732 int *fixup_map_hash_8b = test->fixup_map_hash_8b; 733 int *fixup_map_hash_48b = test->fixup_map_hash_48b; 734 int *fixup_map_hash_16b = test->fixup_map_hash_16b; 735 int *fixup_map_array_48b = test->fixup_map_array_48b; 736 int *fixup_map_sockmap = test->fixup_map_sockmap; 737 int *fixup_map_sockhash = test->fixup_map_sockhash; 738 int *fixup_map_xskmap = test->fixup_map_xskmap; 739 int *fixup_map_stacktrace = test->fixup_map_stacktrace; 740 int *fixup_prog1 = test->fixup_prog1; 741 int *fixup_prog2 = test->fixup_prog2; 742 int *fixup_map_in_map = test->fixup_map_in_map; 743 int *fixup_cgroup_storage = test->fixup_cgroup_storage; 744 int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage; 745 int *fixup_map_spin_lock = test->fixup_map_spin_lock; 746 int *fixup_map_array_ro = test->fixup_map_array_ro; 747 int *fixup_map_array_wo = test->fixup_map_array_wo; 748 int *fixup_map_array_small = test->fixup_map_array_small; 749 int *fixup_sk_storage_map = test->fixup_sk_storage_map; 750 int *fixup_map_event_output = test->fixup_map_event_output; 751 int *fixup_map_reuseport_array = test->fixup_map_reuseport_array; 752 int *fixup_map_ringbuf = test->fixup_map_ringbuf; 753 int *fixup_map_timer = test->fixup_map_timer; 754 struct kfunc_btf_id_pair *fixup_kfunc_btf_id = test->fixup_kfunc_btf_id; 755 756 if (test->fill_helper) { 757 test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn)); 758 test->fill_helper(test); 759 } 760 761 /* Allocating HTs with 1 elem is fine here, since we only test 762 * for verifier and not do a runtime lookup, so the only thing 763 * that really matters is value size in this case. 764 */ 765 if (*fixup_map_hash_8b) { 766 map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long), 767 sizeof(long long), 1); 768 do { 769 prog[*fixup_map_hash_8b].imm = map_fds[0]; 770 fixup_map_hash_8b++; 771 } while (*fixup_map_hash_8b); 772 } 773 774 if (*fixup_map_hash_48b) { 775 map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long), 776 sizeof(struct test_val), 1); 777 do { 778 prog[*fixup_map_hash_48b].imm = map_fds[1]; 779 fixup_map_hash_48b++; 780 } while (*fixup_map_hash_48b); 781 } 782 783 if (*fixup_map_hash_16b) { 784 map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long), 785 sizeof(struct other_val), 1); 786 do { 787 prog[*fixup_map_hash_16b].imm = map_fds[2]; 788 fixup_map_hash_16b++; 789 } while (*fixup_map_hash_16b); 790 } 791 792 if (*fixup_map_array_48b) { 793 map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int), 794 sizeof(struct test_val), 1); 795 update_map(map_fds[3], 0); 796 do { 797 prog[*fixup_map_array_48b].imm = map_fds[3]; 798 fixup_map_array_48b++; 799 } while (*fixup_map_array_48b); 800 } 801 802 if (*fixup_prog1) { 803 map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2); 804 do { 805 prog[*fixup_prog1].imm = map_fds[4]; 806 fixup_prog1++; 807 } while (*fixup_prog1); 808 } 809 810 if (*fixup_prog2) { 811 map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2); 812 do { 813 prog[*fixup_prog2].imm = map_fds[5]; 814 fixup_prog2++; 815 } while (*fixup_prog2); 816 } 817 818 if (*fixup_map_in_map) { 819 map_fds[6] = create_map_in_map(); 820 do { 821 prog[*fixup_map_in_map].imm = map_fds[6]; 822 fixup_map_in_map++; 823 } while (*fixup_map_in_map); 824 } 825 826 if (*fixup_cgroup_storage) { 827 map_fds[7] = create_cgroup_storage(false); 828 do { 829 prog[*fixup_cgroup_storage].imm = map_fds[7]; 830 fixup_cgroup_storage++; 831 } while (*fixup_cgroup_storage); 832 } 833 834 if (*fixup_percpu_cgroup_storage) { 835 map_fds[8] = create_cgroup_storage(true); 836 do { 837 prog[*fixup_percpu_cgroup_storage].imm = map_fds[8]; 838 fixup_percpu_cgroup_storage++; 839 } while (*fixup_percpu_cgroup_storage); 840 } 841 if (*fixup_map_sockmap) { 842 map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int), 843 sizeof(int), 1); 844 do { 845 prog[*fixup_map_sockmap].imm = map_fds[9]; 846 fixup_map_sockmap++; 847 } while (*fixup_map_sockmap); 848 } 849 if (*fixup_map_sockhash) { 850 map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int), 851 sizeof(int), 1); 852 do { 853 prog[*fixup_map_sockhash].imm = map_fds[10]; 854 fixup_map_sockhash++; 855 } while (*fixup_map_sockhash); 856 } 857 if (*fixup_map_xskmap) { 858 map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int), 859 sizeof(int), 1); 860 do { 861 prog[*fixup_map_xskmap].imm = map_fds[11]; 862 fixup_map_xskmap++; 863 } while (*fixup_map_xskmap); 864 } 865 if (*fixup_map_stacktrace) { 866 map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32), 867 sizeof(u64), 1); 868 do { 869 prog[*fixup_map_stacktrace].imm = map_fds[12]; 870 fixup_map_stacktrace++; 871 } while (*fixup_map_stacktrace); 872 } 873 if (*fixup_map_spin_lock) { 874 map_fds[13] = create_map_spin_lock(); 875 do { 876 prog[*fixup_map_spin_lock].imm = map_fds[13]; 877 fixup_map_spin_lock++; 878 } while (*fixup_map_spin_lock); 879 } 880 if (*fixup_map_array_ro) { 881 map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int), 882 sizeof(struct test_val), 1, 883 BPF_F_RDONLY_PROG); 884 update_map(map_fds[14], 0); 885 do { 886 prog[*fixup_map_array_ro].imm = map_fds[14]; 887 fixup_map_array_ro++; 888 } while (*fixup_map_array_ro); 889 } 890 if (*fixup_map_array_wo) { 891 map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int), 892 sizeof(struct test_val), 1, 893 BPF_F_WRONLY_PROG); 894 update_map(map_fds[15], 0); 895 do { 896 prog[*fixup_map_array_wo].imm = map_fds[15]; 897 fixup_map_array_wo++; 898 } while (*fixup_map_array_wo); 899 } 900 if (*fixup_map_array_small) { 901 map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int), 902 1, 1, 0); 903 update_map(map_fds[16], 0); 904 do { 905 prog[*fixup_map_array_small].imm = map_fds[16]; 906 fixup_map_array_small++; 907 } while (*fixup_map_array_small); 908 } 909 if (*fixup_sk_storage_map) { 910 map_fds[17] = create_sk_storage_map(); 911 do { 912 prog[*fixup_sk_storage_map].imm = map_fds[17]; 913 fixup_sk_storage_map++; 914 } while (*fixup_sk_storage_map); 915 } 916 if (*fixup_map_event_output) { 917 map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY, 918 sizeof(int), sizeof(int), 1, 0); 919 do { 920 prog[*fixup_map_event_output].imm = map_fds[18]; 921 fixup_map_event_output++; 922 } while (*fixup_map_event_output); 923 } 924 if (*fixup_map_reuseport_array) { 925 map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, 926 sizeof(u32), sizeof(u64), 1, 0); 927 do { 928 prog[*fixup_map_reuseport_array].imm = map_fds[19]; 929 fixup_map_reuseport_array++; 930 } while (*fixup_map_reuseport_array); 931 } 932 if (*fixup_map_ringbuf) { 933 map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0, 934 0, 4096); 935 do { 936 prog[*fixup_map_ringbuf].imm = map_fds[20]; 937 fixup_map_ringbuf++; 938 } while (*fixup_map_ringbuf); 939 } 940 if (*fixup_map_timer) { 941 map_fds[21] = create_map_timer(); 942 do { 943 prog[*fixup_map_timer].imm = map_fds[21]; 944 fixup_map_timer++; 945 } while (*fixup_map_timer); 946 } 947 948 /* Patch in kfunc BTF IDs */ 949 if (fixup_kfunc_btf_id->kfunc) { 950 struct btf *btf; 951 int btf_id; 952 953 do { 954 btf_id = 0; 955 btf = btf__load_vmlinux_btf(); 956 if (btf) { 957 btf_id = btf__find_by_name_kind(btf, 958 fixup_kfunc_btf_id->kfunc, 959 BTF_KIND_FUNC); 960 btf_id = btf_id < 0 ? 0 : btf_id; 961 } 962 btf__free(btf); 963 prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id; 964 fixup_kfunc_btf_id++; 965 } while (fixup_kfunc_btf_id->kfunc); 966 } 967 } 968 969 struct libcap { 970 struct __user_cap_header_struct hdr; 971 struct __user_cap_data_struct data[2]; 972 }; 973 974 static int set_admin(bool admin) 975 { 976 cap_t caps; 977 /* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */ 978 const cap_value_t cap_net_admin = CAP_NET_ADMIN; 979 const cap_value_t cap_sys_admin = CAP_SYS_ADMIN; 980 struct libcap *cap; 981 int ret = -1; 982 983 caps = cap_get_proc(); 984 if (!caps) { 985 perror("cap_get_proc"); 986 return -1; 987 } 988 cap = (struct libcap *)caps; 989 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_sys_admin, CAP_CLEAR)) { 990 perror("cap_set_flag clear admin"); 991 goto out; 992 } 993 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_admin, 994 admin ? CAP_SET : CAP_CLEAR)) { 995 perror("cap_set_flag set_or_clear net"); 996 goto out; 997 } 998 /* libcap is likely old and simply ignores CAP_BPF and CAP_PERFMON, 999 * so update effective bits manually 1000 */ 1001 if (admin) { 1002 cap->data[1].effective |= 1 << (38 /* CAP_PERFMON */ - 32); 1003 cap->data[1].effective |= 1 << (39 /* CAP_BPF */ - 32); 1004 } else { 1005 cap->data[1].effective &= ~(1 << (38 - 32)); 1006 cap->data[1].effective &= ~(1 << (39 - 32)); 1007 } 1008 if (cap_set_proc(caps)) { 1009 perror("cap_set_proc"); 1010 goto out; 1011 } 1012 ret = 0; 1013 out: 1014 if (cap_free(caps)) 1015 perror("cap_free"); 1016 return ret; 1017 } 1018 1019 static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val, 1020 void *data, size_t size_data) 1021 { 1022 __u8 tmp[TEST_DATA_LEN << 2]; 1023 __u32 size_tmp = sizeof(tmp); 1024 int err, saved_errno; 1025 LIBBPF_OPTS(bpf_test_run_opts, topts, 1026 .data_in = data, 1027 .data_size_in = size_data, 1028 .data_out = tmp, 1029 .data_size_out = size_tmp, 1030 .repeat = 1, 1031 ); 1032 1033 if (unpriv) 1034 set_admin(true); 1035 err = bpf_prog_test_run_opts(fd_prog, &topts); 1036 saved_errno = errno; 1037 1038 if (unpriv) 1039 set_admin(false); 1040 1041 if (err) { 1042 switch (saved_errno) { 1043 case ENOTSUPP: 1044 printf("Did not run the program (not supported) "); 1045 return 0; 1046 case EPERM: 1047 if (unpriv) { 1048 printf("Did not run the program (no permission) "); 1049 return 0; 1050 } 1051 /* fallthrough; */ 1052 default: 1053 printf("FAIL: Unexpected bpf_prog_test_run error (%s) ", 1054 strerror(saved_errno)); 1055 return err; 1056 } 1057 } 1058 1059 if (topts.retval != expected_val && expected_val != POINTER_VALUE) { 1060 printf("FAIL retval %d != %d ", topts.retval, expected_val); 1061 return 1; 1062 } 1063 1064 return 0; 1065 } 1066 1067 /* Returns true if every part of exp (tab-separated) appears in log, in order. 1068 * 1069 * If exp is an empty string, returns true. 1070 */ 1071 static bool cmp_str_seq(const char *log, const char *exp) 1072 { 1073 char needle[200]; 1074 const char *p, *q; 1075 int len; 1076 1077 do { 1078 if (!strlen(exp)) 1079 break; 1080 p = strchr(exp, '\t'); 1081 if (!p) 1082 p = exp + strlen(exp); 1083 1084 len = p - exp; 1085 if (len >= sizeof(needle) || !len) { 1086 printf("FAIL\nTestcase bug\n"); 1087 return false; 1088 } 1089 strncpy(needle, exp, len); 1090 needle[len] = 0; 1091 q = strstr(log, needle); 1092 if (!q) { 1093 printf("FAIL\nUnexpected verifier log!\n" 1094 "EXP: %s\nRES:\n", needle); 1095 return false; 1096 } 1097 log = q + len; 1098 exp = p + 1; 1099 } while (*p); 1100 return true; 1101 } 1102 1103 static void do_test_single(struct bpf_test *test, bool unpriv, 1104 int *passes, int *errors) 1105 { 1106 int fd_prog, expected_ret, alignment_prevented_execution; 1107 int prog_len, prog_type = test->prog_type; 1108 struct bpf_insn *prog = test->insns; 1109 LIBBPF_OPTS(bpf_prog_load_opts, opts); 1110 int run_errs, run_successes; 1111 int map_fds[MAX_NR_MAPS]; 1112 const char *expected_err; 1113 int saved_errno; 1114 int fixup_skips; 1115 __u32 pflags; 1116 int i, err; 1117 1118 for (i = 0; i < MAX_NR_MAPS; i++) 1119 map_fds[i] = -1; 1120 1121 if (!prog_type) 1122 prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 1123 fixup_skips = skips; 1124 do_test_fixup(test, prog_type, prog, map_fds); 1125 if (test->fill_insns) { 1126 prog = test->fill_insns; 1127 prog_len = test->prog_len; 1128 } else { 1129 prog_len = probe_filter_length(prog); 1130 } 1131 /* If there were some map skips during fixup due to missing bpf 1132 * features, skip this test. 1133 */ 1134 if (fixup_skips != skips) 1135 return; 1136 1137 pflags = BPF_F_TEST_RND_HI32; 1138 if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT) 1139 pflags |= BPF_F_STRICT_ALIGNMENT; 1140 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) 1141 pflags |= BPF_F_ANY_ALIGNMENT; 1142 if (test->flags & ~3) 1143 pflags |= test->flags; 1144 1145 expected_ret = unpriv && test->result_unpriv != UNDEF ? 1146 test->result_unpriv : test->result; 1147 expected_err = unpriv && test->errstr_unpriv ? 1148 test->errstr_unpriv : test->errstr; 1149 1150 opts.expected_attach_type = test->expected_attach_type; 1151 if (verbose) 1152 opts.log_level = 1; 1153 else if (expected_ret == VERBOSE_ACCEPT) 1154 opts.log_level = 2; 1155 else 1156 opts.log_level = 4; 1157 opts.prog_flags = pflags; 1158 1159 if (prog_type == BPF_PROG_TYPE_TRACING && test->kfunc) { 1160 int attach_btf_id; 1161 1162 attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc, 1163 opts.expected_attach_type); 1164 if (attach_btf_id < 0) { 1165 printf("FAIL\nFailed to find BTF ID for '%s'!\n", 1166 test->kfunc); 1167 (*errors)++; 1168 return; 1169 } 1170 1171 opts.attach_btf_id = attach_btf_id; 1172 } 1173 1174 opts.log_buf = bpf_vlog; 1175 opts.log_size = sizeof(bpf_vlog); 1176 fd_prog = bpf_prog_load(prog_type, NULL, "GPL", prog, prog_len, &opts); 1177 saved_errno = errno; 1178 1179 /* BPF_PROG_TYPE_TRACING requires more setup and 1180 * bpf_probe_prog_type won't give correct answer 1181 */ 1182 if (fd_prog < 0 && prog_type != BPF_PROG_TYPE_TRACING && 1183 !libbpf_probe_bpf_prog_type(prog_type, NULL)) { 1184 printf("SKIP (unsupported program type %d)\n", prog_type); 1185 skips++; 1186 goto close_fds; 1187 } 1188 1189 if (fd_prog < 0 && saved_errno == ENOTSUPP) { 1190 printf("SKIP (program uses an unsupported feature)\n"); 1191 skips++; 1192 goto close_fds; 1193 } 1194 1195 alignment_prevented_execution = 0; 1196 1197 if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) { 1198 if (fd_prog < 0) { 1199 printf("FAIL\nFailed to load prog '%s'!\n", 1200 strerror(saved_errno)); 1201 goto fail_log; 1202 } 1203 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1204 if (fd_prog >= 0 && 1205 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)) 1206 alignment_prevented_execution = 1; 1207 #endif 1208 if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err)) { 1209 goto fail_log; 1210 } 1211 } else { 1212 if (fd_prog >= 0) { 1213 printf("FAIL\nUnexpected success to load!\n"); 1214 goto fail_log; 1215 } 1216 if (!expected_err || !cmp_str_seq(bpf_vlog, expected_err)) { 1217 printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n", 1218 expected_err, bpf_vlog); 1219 goto fail_log; 1220 } 1221 } 1222 1223 if (!unpriv && test->insn_processed) { 1224 uint32_t insn_processed; 1225 char *proc; 1226 1227 proc = strstr(bpf_vlog, "processed "); 1228 insn_processed = atoi(proc + 10); 1229 if (test->insn_processed != insn_processed) { 1230 printf("FAIL\nUnexpected insn_processed %u vs %u\n", 1231 insn_processed, test->insn_processed); 1232 goto fail_log; 1233 } 1234 } 1235 1236 if (verbose) 1237 printf(", verifier log:\n%s", bpf_vlog); 1238 1239 run_errs = 0; 1240 run_successes = 0; 1241 if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) { 1242 uint32_t expected_val; 1243 int i; 1244 1245 if (!test->runs) 1246 test->runs = 1; 1247 1248 for (i = 0; i < test->runs; i++) { 1249 if (unpriv && test->retvals[i].retval_unpriv) 1250 expected_val = test->retvals[i].retval_unpriv; 1251 else 1252 expected_val = test->retvals[i].retval; 1253 1254 err = do_prog_test_run(fd_prog, unpriv, expected_val, 1255 test->retvals[i].data, 1256 sizeof(test->retvals[i].data)); 1257 if (err) { 1258 printf("(run %d/%d) ", i + 1, test->runs); 1259 run_errs++; 1260 } else { 1261 run_successes++; 1262 } 1263 } 1264 } 1265 1266 if (!run_errs) { 1267 (*passes)++; 1268 if (run_successes > 1) 1269 printf("%d cases ", run_successes); 1270 printf("OK"); 1271 if (alignment_prevented_execution) 1272 printf(" (NOTE: not executed due to unknown alignment)"); 1273 printf("\n"); 1274 } else { 1275 printf("\n"); 1276 goto fail_log; 1277 } 1278 close_fds: 1279 if (test->fill_insns) 1280 free(test->fill_insns); 1281 close(fd_prog); 1282 for (i = 0; i < MAX_NR_MAPS; i++) 1283 close(map_fds[i]); 1284 sched_yield(); 1285 return; 1286 fail_log: 1287 (*errors)++; 1288 printf("%s", bpf_vlog); 1289 goto close_fds; 1290 } 1291 1292 static bool is_admin(void) 1293 { 1294 cap_flag_value_t net_priv = CAP_CLEAR; 1295 bool perfmon_priv = false; 1296 bool bpf_priv = false; 1297 struct libcap *cap; 1298 cap_t caps; 1299 1300 #ifdef CAP_IS_SUPPORTED 1301 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) { 1302 perror("cap_get_flag"); 1303 return false; 1304 } 1305 #endif 1306 caps = cap_get_proc(); 1307 if (!caps) { 1308 perror("cap_get_proc"); 1309 return false; 1310 } 1311 cap = (struct libcap *)caps; 1312 bpf_priv = cap->data[1].effective & (1 << (39/* CAP_BPF */ - 32)); 1313 perfmon_priv = cap->data[1].effective & (1 << (38/* CAP_PERFMON */ - 32)); 1314 if (cap_get_flag(caps, CAP_NET_ADMIN, CAP_EFFECTIVE, &net_priv)) 1315 perror("cap_get_flag NET"); 1316 if (cap_free(caps)) 1317 perror("cap_free"); 1318 return bpf_priv && perfmon_priv && net_priv == CAP_SET; 1319 } 1320 1321 static void get_unpriv_disabled() 1322 { 1323 char buf[2]; 1324 FILE *fd; 1325 1326 fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r"); 1327 if (!fd) { 1328 perror("fopen /proc/sys/"UNPRIV_SYSCTL); 1329 unpriv_disabled = true; 1330 return; 1331 } 1332 if (fgets(buf, 2, fd) == buf && atoi(buf)) 1333 unpriv_disabled = true; 1334 fclose(fd); 1335 } 1336 1337 static bool test_as_unpriv(struct bpf_test *test) 1338 { 1339 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1340 /* Some architectures have strict alignment requirements. In 1341 * that case, the BPF verifier detects if a program has 1342 * unaligned accesses and rejects them. A user can pass 1343 * BPF_F_ANY_ALIGNMENT to a program to override this 1344 * check. That, however, will only work when a privileged user 1345 * loads a program. An unprivileged user loading a program 1346 * with this flag will be rejected prior entering the 1347 * verifier. 1348 */ 1349 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) 1350 return false; 1351 #endif 1352 return !test->prog_type || 1353 test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER || 1354 test->prog_type == BPF_PROG_TYPE_CGROUP_SKB; 1355 } 1356 1357 static int do_test(bool unpriv, unsigned int from, unsigned int to) 1358 { 1359 int i, passes = 0, errors = 0; 1360 1361 for (i = from; i < to; i++) { 1362 struct bpf_test *test = &tests[i]; 1363 1364 /* Program types that are not supported by non-root we 1365 * skip right away. 1366 */ 1367 if (test_as_unpriv(test) && unpriv_disabled) { 1368 printf("#%d/u %s SKIP\n", i, test->descr); 1369 skips++; 1370 } else if (test_as_unpriv(test)) { 1371 if (!unpriv) 1372 set_admin(false); 1373 printf("#%d/u %s ", i, test->descr); 1374 do_test_single(test, true, &passes, &errors); 1375 if (!unpriv) 1376 set_admin(true); 1377 } 1378 1379 if (unpriv) { 1380 printf("#%d/p %s SKIP\n", i, test->descr); 1381 skips++; 1382 } else { 1383 printf("#%d/p %s ", i, test->descr); 1384 do_test_single(test, false, &passes, &errors); 1385 } 1386 } 1387 1388 printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes, 1389 skips, errors); 1390 return errors ? EXIT_FAILURE : EXIT_SUCCESS; 1391 } 1392 1393 int main(int argc, char **argv) 1394 { 1395 unsigned int from = 0, to = ARRAY_SIZE(tests); 1396 bool unpriv = !is_admin(); 1397 int arg = 1; 1398 1399 if (argc > 1 && strcmp(argv[1], "-v") == 0) { 1400 arg++; 1401 verbose = true; 1402 argc--; 1403 } 1404 1405 if (argc == 3) { 1406 unsigned int l = atoi(argv[arg]); 1407 unsigned int u = atoi(argv[arg + 1]); 1408 1409 if (l < to && u < to) { 1410 from = l; 1411 to = u + 1; 1412 } 1413 } else if (argc == 2) { 1414 unsigned int t = atoi(argv[arg]); 1415 1416 if (t < to) { 1417 from = t; 1418 to = t + 1; 1419 } 1420 } 1421 1422 get_unpriv_disabled(); 1423 if (unpriv && unpriv_disabled) { 1424 printf("Cannot run as unprivileged user with sysctl %s.\n", 1425 UNPRIV_SYSCTL); 1426 return EXIT_FAILURE; 1427 } 1428 1429 /* Use libbpf 1.0 API mode */ 1430 libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 1431 1432 bpf_semi_rand_init(); 1433 return do_test(unpriv, from, to); 1434 } 1435