// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	/* avoid overflow on round_up(map->value_size) */
	if (attr->value_size > INT_MAX)
		return -E2BIG;

	return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1();
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;
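	/* Illustrative example: for max_entries = 5, fls_long(4) = 3, so
	 * mask64 = (1ULL << 3) - 1 = 7; with Spectre v1 mitigations on, the
	 * array is then sized to index_mask + 1 = 8 slots so that a masked
	 * index can never reach out of bounds, even speculatively.
	 */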

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

static void *array_map_elem_ptr(struct bpf_array* array, u32 index)
{
	return array->value + (u64)array->elem_size * index;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + (u64)array->elem_size * (index & array->index_mask);
}

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = array->elem_size;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
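
/* Illustrative (pseudo-assembly) sketch of the sequence emitted above when
 * Spectre v1 mitigations are active:
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)		// index
 *	if r0 >= max_entries goto miss
 *	r0 &= index_mask
 *	r0 <<= ilog2(elem_size)		// or r0 *= elem_size
 *	r0 += r1
 *	goto out
 * miss:
 *	r0 = 0
 * out:
 */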

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (cpu >= nr_cpu_ids)
		return NULL;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
		check_and_init_map_value(map, value + off);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
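
/* Illustrative layout of the user buffer filled above, assuming 4 possible
 * CPUs and value_size = 12 (so elem_size = round_up(12, 8) = 16):
 *
 *	[cpu0: 16 bytes][cpu1: 16 bytes][cpu2: 16 bytes][cpu3: 16 bytes]
 *
 * User space must therefore supply round_up(value_size, 8) *
 * num_possible_cpus() bytes when looking up a per-CPU array element.
 */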

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
		copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	} else {
		val = array->value +
			(u64)array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space will provide round_up(value_size, 8) bytes that
	 * will be copied into the per-cpu area. bpf programs can only
	 * access value_size of it. During lookup the same extra bytes
	 * will be returned, or zeros that were zero-filled by
	 * percpu_alloc, so no kernel data leak is possible.
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}
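
/* For a BPF_F_MMAPABLE array, array_map_alloc() offsets the struct inside the
 * first PAGE_ALIGN(sizeof(struct bpf_array)) bytes of the vmalloc'ed region so
 * that array->value lands exactly on the next page boundary; rounding the
 * struct pointer down to a page boundary therefore recovers the address
 * originally returned by bpf_map_area_mmapable_alloc().
 */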
static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

static void array_map_free_timers(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* We don't reset or free fields other than timer on uref dropping to zero. */
	if (!btf_record_has_field(map->record, BPF_TIMER))
		return;

	for (i = 0; i < array->map.max_entries; i++)
		bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (!IS_ERR_OR_NULL(map->record)) {
		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
			for (i = 0; i < array->map.max_entries; i++) {
				void __percpu *pptr = array->pptrs[i & array->index_mask];
				int cpu;

				for_each_possible_cpu(cpu) {
					bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
					cond_resched();
				}
			}
		} else {
			for (i = 0; i < array->map.max_entries; i++)
				bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
		}
		bpf_map_free_record(map);
	}

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}
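
/* Example of the output produced above for one element (illustrative,
 * two possible CPUs, u32 values):
 *
 *	0: {
 *		cpu0: 7
 *		cpu1: 3
 *	}
 */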

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}

static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = array->elem_size;
			for_each_possible_cpu(cpu) {
				copy_map_value_long(map, info->percpu_value_buf + off,
						    per_cpu_ptr(pptr, cpu));
				check_and_init_map_value(map, info->percpu_value_buf + off);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}
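
/* For per-CPU arrays the iterator snapshots one element's values for all
 * possible CPUs into a single flat buffer of round_up(value_size, 8) bytes
 * per CPU (allocated below), which __bpf_array_map_seq_show() then exposes
 * to the iterator program as ctx.value.
 */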
static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = array->elem_size * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
	 * released before or in the middle of iterating map elements, so
	 * acquire an extra map uref for iterator.
	 */
	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start = bpf_array_map_seq_start,
	.next = bpf_array_map_seq_next,
	.stop = bpf_array_map_seq_stop,
	.show = bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_array_map_seq_ops,
	.init_seq_private = bpf_iter_init_array_map,
	.fini_seq_private = bpf_iter_fini_array_map,
	.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
};

static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				   void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array_map_elem_ptr(array, i);
		num_elems++;
		key = i;
		ret = callback_fn((u64)(long)map, (u64)(long)&key,
				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	if (is_percpu)
		migrate_enable();
	return num_elems;
}

BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};
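
/* Per-CPU arrays cannot be used as inner maps (BPF_F_INNER_MAP is rejected in
 * array_map_alloc_check()), so the generic bpf_map_meta_equal() check, which
 * requires matching max_entries, is sufficient here.
 */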
const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}
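
/* Both update and delete publish the new slot contents with xchg(); for
 * tail-call (prog) arrays the JITed call sites are additionally re-patched
 * via map_poke_run() under poke_mutex before the old program reference is
 * dropped, see prog_array_map_poke_run() below.
 */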
static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	u8 *old_addr, *new_addr, *old_bypass_addr;
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i, ret;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* A few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) On program teardown, the program's kallsyms entry gets
			 *    removed out of an RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
			 *    might not see that this is in BPF text section and
			 *    bails out with -EINVAL. As these are unreachable since
			 *    the RCU grace period already passed, we simply skip them.
			 * 4) Programs reaching a refcount of zero while patching
			 *    is in progress are also okay since we're protected under
			 *    poke_mutex and untrack the programs before the JIT
			 *    buffer is freed. When we're still in the middle of
			 *    patching and suddenly the kallsyms entry of the program
			 *    gets evicted, we just skip the rest, which is fine due
			 *    to point 3).
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is an unexpected bug.
			 */
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			old_bypass_addr = old ? NULL : poke->bypass_addr;
			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;

			if (new) {
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, new_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				if (!old) {
					ret = bpf_arch_text_poke(poke->tailcall_bypass,
								 BPF_MOD_JUMP,
								 poke->bypass_addr,
								 NULL);
					BUG_ON(ret < 0 && ret != -EINVAL);
				}
			} else {
				ret = bpf_arch_text_poke(poke->tailcall_bypass,
							 BPF_MOD_JUMP,
							 old_bypass_addr,
							 poke->bypass_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				/* let other CPUs finish the execution of the program
				 * so that it will not be possible to expose them
				 * to invalid nop, stack unwind, nop state
				 */
				if (!ret)
					synchronize_rcu();
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, NULL);
				BUG_ON(ret < 0 && ret != -EINVAL);
			}
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

/* prog_array->aux->{type,jited} is a runtime binding.
 * A static check alone in the verifier is not enough.
 * Thus, prog_array_map cannot be used as an inner_map
 * and map_meta_equal is not implemented.
 */
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_btf_id = &array_map_btf_ids[0],
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		return;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static void perf_event_fd_array_map_free(struct bpf_map *map)
{
	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}
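
/* Without BPF_F_PRESERVE_ELEMS the perf event entries above are dropped as
 * soon as the map file that installed them is released; with the flag set
 * they stay in the map and are only cleared when the map itself is freed.
 */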
const struct bpf_map_ops perf_event_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = perf_event_fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &array_map_btf_ids[0],
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &array_map_btf_ids[0],
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static int array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = array->elem_size;
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}
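
/* Same inlined lookup as array_map_gen_lookup(), plus one extra load: the
 * array slot stores a pointer to the inner map, so the generated code above
 * dereferences it and returns NULL when the slot is empty.
 */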
const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &array_map_btf_ids[0],
};