1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 * Copyright (c) 2016,2017 Facebook 4 */ 5 #include <linux/bpf.h> 6 #include <linux/btf.h> 7 #include <linux/err.h> 8 #include <linux/slab.h> 9 #include <linux/mm.h> 10 #include <linux/filter.h> 11 #include <linux/perf_event.h> 12 #include <uapi/linux/btf.h> 13 #include <linux/rcupdate_trace.h> 14 #include <linux/btf_ids.h> 15 #include <crypto/sha2.h> 16 17 #include "map_in_map.h" 18 19 #define ARRAY_CREATE_FLAG_MASK \ 20 (BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \ 21 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP) 22 23 static void bpf_array_free_percpu(struct bpf_array *array) 24 { 25 int i; 26 27 for (i = 0; i < array->map.max_entries; i++) { 28 free_percpu(array->pptrs[i]); 29 cond_resched(); 30 } 31 } 32 33 static int bpf_array_alloc_percpu(struct bpf_array *array) 34 { 35 void __percpu *ptr; 36 int i; 37 38 for (i = 0; i < array->map.max_entries; i++) { 39 ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8, 40 GFP_USER | __GFP_NOWARN); 41 if (!ptr) { 42 bpf_array_free_percpu(array); 43 return -ENOMEM; 44 } 45 array->pptrs[i] = ptr; 46 cond_resched(); 47 } 48 49 return 0; 50 } 51 52 /* Called from syscall */ 53 int array_map_alloc_check(union bpf_attr *attr) 54 { 55 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; 56 int numa_node = bpf_map_attr_numa_node(attr); 57 58 /* check sanity of attributes */ 59 if (attr->max_entries == 0 || attr->key_size != 4 || 60 attr->value_size == 0 || 61 attr->map_flags & ~ARRAY_CREATE_FLAG_MASK || 62 !bpf_map_flags_access_ok(attr->map_flags) || 63 (percpu && numa_node != NUMA_NO_NODE)) 64 return -EINVAL; 65 66 if (attr->map_type != BPF_MAP_TYPE_ARRAY && 67 attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP)) 68 return -EINVAL; 69 70 if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY && 71 attr->map_flags & BPF_F_PRESERVE_ELEMS) 72 return -EINVAL; 73 74 /* avoid overflow on round_up(map->value_size) */ 75 if (attr->value_size > INT_MAX) 76 return -E2BIG; 77 /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */ 78 if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE) 79 return -E2BIG; 80 81 return 0; 82 } 83 84 static struct bpf_map *array_map_alloc(union bpf_attr *attr) 85 { 86 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; 87 int numa_node = bpf_map_attr_numa_node(attr); 88 u32 elem_size, index_mask, max_entries; 89 bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL); 90 u64 array_size, mask64; 91 struct bpf_array *array; 92 93 elem_size = round_up(attr->value_size, 8); 94 95 max_entries = attr->max_entries; 96 97 /* On 32 bit archs roundup_pow_of_two() with max_entries that has 98 * upper most bit set in u32 space is undefined behavior due to 99 * resulting 1U << 32, so do it manually here in u64 space. 100 */ 101 mask64 = fls_long(max_entries - 1); 102 mask64 = 1ULL << mask64; 103 mask64 -= 1; 104 105 index_mask = mask64; 106 if (!bypass_spec_v1) { 107 /* round up array size to nearest power of 2, 108 * since cpu will speculate within index_mask limits 109 */ 110 max_entries = index_mask + 1; 111 /* Check for overflows. */ 112 if (max_entries < attr->max_entries) 113 return ERR_PTR(-E2BIG); 114 } 115 116 array_size = sizeof(*array); 117 if (percpu) { 118 array_size += (u64) max_entries * sizeof(void *); 119 } else { 120 /* rely on vmalloc() to return page-aligned memory and 121 * ensure array->value is exactly page-aligned 122 */ 123 if (attr->map_flags & BPF_F_MMAPABLE) { 124 array_size = PAGE_ALIGN(array_size); 125 array_size += PAGE_ALIGN((u64) max_entries * elem_size); 126 } else { 127 array_size += (u64) max_entries * elem_size; 128 } 129 } 130 131 /* allocate all map elements and zero-initialize them */ 132 if (attr->map_flags & BPF_F_MMAPABLE) { 133 void *data; 134 135 /* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */ 136 data = bpf_map_area_mmapable_alloc(array_size, numa_node); 137 if (!data) 138 return ERR_PTR(-ENOMEM); 139 array = data + PAGE_ALIGN(sizeof(struct bpf_array)) 140 - offsetof(struct bpf_array, value); 141 } else { 142 array = bpf_map_area_alloc(array_size, numa_node); 143 } 144 if (!array) 145 return ERR_PTR(-ENOMEM); 146 array->index_mask = index_mask; 147 array->map.bypass_spec_v1 = bypass_spec_v1; 148 149 /* copy mandatory map attributes */ 150 bpf_map_init_from_attr(&array->map, attr); 151 array->elem_size = elem_size; 152 153 if (percpu && bpf_array_alloc_percpu(array)) { 154 bpf_map_area_free(array); 155 return ERR_PTR(-ENOMEM); 156 } 157 158 return &array->map; 159 } 160 161 static void *array_map_elem_ptr(struct bpf_array* array, u32 index) 162 { 163 return array->value + (u64)array->elem_size * index; 164 } 165 166 /* Called from syscall or from eBPF program */ 167 static void *array_map_lookup_elem(struct bpf_map *map, void *key) 168 { 169 struct bpf_array *array = container_of(map, struct bpf_array, map); 170 u32 index = *(u32 *)key; 171 172 if (unlikely(index >= array->map.max_entries)) 173 return NULL; 174 175 return array->value + (u64)array->elem_size * (index & array->index_mask); 176 } 177 178 static int array_map_get_hash(struct bpf_map *map, u32 hash_buf_size, 179 void *hash_buf) 180 { 181 struct bpf_array *array = container_of(map, struct bpf_array, map); 182 183 sha256(array->value, (u64)array->elem_size * array->map.max_entries, 184 hash_buf); 185 memcpy(array->map.sha, hash_buf, sizeof(array->map.sha)); 186 return 0; 187 } 188 189 static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm, 190 u32 off) 191 { 192 struct bpf_array *array = container_of(map, struct bpf_array, map); 193 194 if (map->max_entries != 1) 195 return -ENOTSUPP; 196 if (off >= map->value_size) 197 return -EINVAL; 198 199 *imm = (unsigned long)array->value; 200 return 0; 201 } 202 203 static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm, 204 u32 *off) 205 { 206 struct bpf_array *array = container_of(map, struct bpf_array, map); 207 u64 base = (unsigned long)array->value; 208 u64 range = array->elem_size; 209 210 if (map->max_entries != 1) 211 return -ENOTSUPP; 212 if (imm < base || imm >= base + range) 213 return -ENOENT; 214 215 *off = imm - base; 216 return 0; 217 } 218 219 /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */ 220 static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) 221 { 222 struct bpf_array *array = container_of(map, struct bpf_array, map); 223 struct bpf_insn *insn = insn_buf; 224 u32 elem_size = array->elem_size; 225 const int ret = BPF_REG_0; 226 const int map_ptr = BPF_REG_1; 227 const int index = BPF_REG_2; 228 229 if (map->map_flags & BPF_F_INNER_MAP) 230 return -EOPNOTSUPP; 231 232 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); 233 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); 234 if (!map->bypass_spec_v1) { 235 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4); 236 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); 237 } else { 238 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3); 239 } 240 241 if (is_power_of_2(elem_size)) { 242 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); 243 } else { 244 *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size); 245 } 246 *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr); 247 *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); 248 *insn++ = BPF_MOV64_IMM(ret, 0); 249 return insn - insn_buf; 250 } 251 252 /* Called from eBPF program */ 253 static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key) 254 { 255 struct bpf_array *array = container_of(map, struct bpf_array, map); 256 u32 index = *(u32 *)key; 257 258 if (unlikely(index >= array->map.max_entries)) 259 return NULL; 260 261 return this_cpu_ptr(array->pptrs[index & array->index_mask]); 262 } 263 264 /* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */ 265 static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) 266 { 267 struct bpf_array *array = container_of(map, struct bpf_array, map); 268 struct bpf_insn *insn = insn_buf; 269 270 if (!bpf_jit_supports_percpu_insn()) 271 return -EOPNOTSUPP; 272 273 if (map->map_flags & BPF_F_INNER_MAP) 274 return -EOPNOTSUPP; 275 276 BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0); 277 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs)); 278 279 *insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0); 280 if (!map->bypass_spec_v1) { 281 *insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6); 282 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask); 283 } else { 284 *insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5); 285 } 286 287 *insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3); 288 *insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1); 289 *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0); 290 *insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0); 291 *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); 292 *insn++ = BPF_MOV64_IMM(BPF_REG_0, 0); 293 return insn - insn_buf; 294 } 295 296 static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) 297 { 298 struct bpf_array *array = container_of(map, struct bpf_array, map); 299 u32 index = *(u32 *)key; 300 301 if (cpu >= nr_cpu_ids) 302 return NULL; 303 304 if (unlikely(index >= array->map.max_entries)) 305 return NULL; 306 307 return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu); 308 } 309 310 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) 311 { 312 struct bpf_array *array = container_of(map, struct bpf_array, map); 313 u32 index = *(u32 *)key; 314 void __percpu *pptr; 315 int cpu, off = 0; 316 u32 size; 317 318 if (unlikely(index >= array->map.max_entries)) 319 return -ENOENT; 320 321 /* per_cpu areas are zero-filled and bpf programs can only 322 * access 'value_size' of them, so copying rounded areas 323 * will not leak any kernel data 324 */ 325 size = array->elem_size; 326 rcu_read_lock(); 327 pptr = array->pptrs[index & array->index_mask]; 328 for_each_possible_cpu(cpu) { 329 copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu)); 330 check_and_init_map_value(map, value + off); 331 off += size; 332 } 333 rcu_read_unlock(); 334 return 0; 335 } 336 337 /* Called from syscall */ 338 static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key) 339 { 340 struct bpf_array *array = container_of(map, struct bpf_array, map); 341 u32 index = key ? *(u32 *)key : U32_MAX; 342 u32 *next = (u32 *)next_key; 343 344 if (index >= array->map.max_entries) { 345 *next = 0; 346 return 0; 347 } 348 349 if (index == array->map.max_entries - 1) 350 return -ENOENT; 351 352 *next = index + 1; 353 return 0; 354 } 355 356 /* Called from syscall or from eBPF program */ 357 static long array_map_update_elem(struct bpf_map *map, void *key, void *value, 358 u64 map_flags) 359 { 360 struct bpf_array *array = container_of(map, struct bpf_array, map); 361 u32 index = *(u32 *)key; 362 char *val; 363 364 if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST)) 365 /* unknown flags */ 366 return -EINVAL; 367 368 if (unlikely(index >= array->map.max_entries)) 369 /* all elements were pre-allocated, cannot insert a new one */ 370 return -E2BIG; 371 372 if (unlikely(map_flags & BPF_NOEXIST)) 373 /* all elements already exist */ 374 return -EEXIST; 375 376 if (unlikely((map_flags & BPF_F_LOCK) && 377 !btf_record_has_field(map->record, BPF_SPIN_LOCK))) 378 return -EINVAL; 379 380 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 381 val = this_cpu_ptr(array->pptrs[index & array->index_mask]); 382 copy_map_value(map, val, value); 383 bpf_obj_free_fields(array->map.record, val); 384 } else { 385 val = array->value + 386 (u64)array->elem_size * (index & array->index_mask); 387 if (map_flags & BPF_F_LOCK) 388 copy_map_value_locked(map, val, value, false); 389 else 390 copy_map_value(map, val, value); 391 bpf_obj_free_fields(array->map.record, val); 392 } 393 return 0; 394 } 395 396 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, 397 u64 map_flags) 398 { 399 struct bpf_array *array = container_of(map, struct bpf_array, map); 400 u32 index = *(u32 *)key; 401 void __percpu *pptr; 402 int cpu, off = 0; 403 u32 size; 404 405 if (unlikely(map_flags > BPF_EXIST)) 406 /* unknown flags */ 407 return -EINVAL; 408 409 if (unlikely(index >= array->map.max_entries)) 410 /* all elements were pre-allocated, cannot insert a new one */ 411 return -E2BIG; 412 413 if (unlikely(map_flags == BPF_NOEXIST)) 414 /* all elements already exist */ 415 return -EEXIST; 416 417 /* the user space will provide round_up(value_size, 8) bytes that 418 * will be copied into per-cpu area. bpf programs can only access 419 * value_size of it. During lookup the same extra bytes will be 420 * returned or zeros which were zero-filled by percpu_alloc, 421 * so no kernel data leaks possible 422 */ 423 size = array->elem_size; 424 rcu_read_lock(); 425 pptr = array->pptrs[index & array->index_mask]; 426 for_each_possible_cpu(cpu) { 427 copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off); 428 bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu)); 429 off += size; 430 } 431 rcu_read_unlock(); 432 return 0; 433 } 434 435 /* Called from syscall or from eBPF program */ 436 static long array_map_delete_elem(struct bpf_map *map, void *key) 437 { 438 return -EINVAL; 439 } 440 441 static void *array_map_vmalloc_addr(struct bpf_array *array) 442 { 443 return (void *)round_down((unsigned long)array, PAGE_SIZE); 444 } 445 446 static void array_map_free_timers_wq(struct bpf_map *map) 447 { 448 struct bpf_array *array = container_of(map, struct bpf_array, map); 449 int i; 450 451 /* We don't reset or free fields other than timer and workqueue 452 * on uref dropping to zero. 453 */ 454 if (btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE)) { 455 for (i = 0; i < array->map.max_entries; i++) { 456 if (btf_record_has_field(map->record, BPF_TIMER)) 457 bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i)); 458 if (btf_record_has_field(map->record, BPF_WORKQUEUE)) 459 bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i)); 460 } 461 } 462 } 463 464 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */ 465 static void array_map_free(struct bpf_map *map) 466 { 467 struct bpf_array *array = container_of(map, struct bpf_array, map); 468 int i; 469 470 if (!IS_ERR_OR_NULL(map->record)) { 471 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 472 for (i = 0; i < array->map.max_entries; i++) { 473 void __percpu *pptr = array->pptrs[i & array->index_mask]; 474 int cpu; 475 476 for_each_possible_cpu(cpu) { 477 bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu)); 478 cond_resched(); 479 } 480 } 481 } else { 482 for (i = 0; i < array->map.max_entries; i++) 483 bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i)); 484 } 485 } 486 487 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) 488 bpf_array_free_percpu(array); 489 490 if (array->map.map_flags & BPF_F_MMAPABLE) 491 bpf_map_area_free(array_map_vmalloc_addr(array)); 492 else 493 bpf_map_area_free(array); 494 } 495 496 static void array_map_seq_show_elem(struct bpf_map *map, void *key, 497 struct seq_file *m) 498 { 499 void *value; 500 501 rcu_read_lock(); 502 503 value = array_map_lookup_elem(map, key); 504 if (!value) { 505 rcu_read_unlock(); 506 return; 507 } 508 509 if (map->btf_key_type_id) 510 seq_printf(m, "%u: ", *(u32 *)key); 511 btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); 512 seq_putc(m, '\n'); 513 514 rcu_read_unlock(); 515 } 516 517 static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key, 518 struct seq_file *m) 519 { 520 struct bpf_array *array = container_of(map, struct bpf_array, map); 521 u32 index = *(u32 *)key; 522 void __percpu *pptr; 523 int cpu; 524 525 rcu_read_lock(); 526 527 seq_printf(m, "%u: {\n", *(u32 *)key); 528 pptr = array->pptrs[index & array->index_mask]; 529 for_each_possible_cpu(cpu) { 530 seq_printf(m, "\tcpu%d: ", cpu); 531 btf_type_seq_show(map->btf, map->btf_value_type_id, 532 per_cpu_ptr(pptr, cpu), m); 533 seq_putc(m, '\n'); 534 } 535 seq_puts(m, "}\n"); 536 537 rcu_read_unlock(); 538 } 539 540 static int array_map_check_btf(const struct bpf_map *map, 541 const struct btf *btf, 542 const struct btf_type *key_type, 543 const struct btf_type *value_type) 544 { 545 /* One exception for keyless BTF: .bss/.data/.rodata map */ 546 if (btf_type_is_void(key_type)) { 547 if (map->map_type != BPF_MAP_TYPE_ARRAY || 548 map->max_entries != 1) 549 return -EINVAL; 550 551 if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC) 552 return -EINVAL; 553 554 return 0; 555 } 556 557 /* 558 * Bpf array can only take a u32 key. This check makes sure 559 * that the btf matches the attr used during map_create. 560 */ 561 if (!btf_type_is_i32(key_type)) 562 return -EINVAL; 563 564 return 0; 565 } 566 567 static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) 568 { 569 struct bpf_array *array = container_of(map, struct bpf_array, map); 570 pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT; 571 572 if (!(map->map_flags & BPF_F_MMAPABLE)) 573 return -EINVAL; 574 575 if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > 576 PAGE_ALIGN((u64)array->map.max_entries * array->elem_size)) 577 return -EINVAL; 578 579 return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), 580 vma->vm_pgoff + pgoff); 581 } 582 583 static bool array_map_meta_equal(const struct bpf_map *meta0, 584 const struct bpf_map *meta1) 585 { 586 if (!bpf_map_meta_equal(meta0, meta1)) 587 return false; 588 return meta0->map_flags & BPF_F_INNER_MAP ? true : 589 meta0->max_entries == meta1->max_entries; 590 } 591 592 struct bpf_iter_seq_array_map_info { 593 struct bpf_map *map; 594 void *percpu_value_buf; 595 u32 index; 596 }; 597 598 static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos) 599 { 600 struct bpf_iter_seq_array_map_info *info = seq->private; 601 struct bpf_map *map = info->map; 602 struct bpf_array *array; 603 u32 index; 604 605 if (info->index >= map->max_entries) 606 return NULL; 607 608 if (*pos == 0) 609 ++*pos; 610 array = container_of(map, struct bpf_array, map); 611 index = info->index & array->index_mask; 612 if (info->percpu_value_buf) 613 return (void *)(uintptr_t)array->pptrs[index]; 614 return array_map_elem_ptr(array, index); 615 } 616 617 static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos) 618 { 619 struct bpf_iter_seq_array_map_info *info = seq->private; 620 struct bpf_map *map = info->map; 621 struct bpf_array *array; 622 u32 index; 623 624 ++*pos; 625 ++info->index; 626 if (info->index >= map->max_entries) 627 return NULL; 628 629 array = container_of(map, struct bpf_array, map); 630 index = info->index & array->index_mask; 631 if (info->percpu_value_buf) 632 return (void *)(uintptr_t)array->pptrs[index]; 633 return array_map_elem_ptr(array, index); 634 } 635 636 static int __bpf_array_map_seq_show(struct seq_file *seq, void *v) 637 { 638 struct bpf_iter_seq_array_map_info *info = seq->private; 639 struct bpf_iter__bpf_map_elem ctx = {}; 640 struct bpf_map *map = info->map; 641 struct bpf_array *array = container_of(map, struct bpf_array, map); 642 struct bpf_iter_meta meta; 643 struct bpf_prog *prog; 644 int off = 0, cpu = 0; 645 void __percpu *pptr; 646 u32 size; 647 648 meta.seq = seq; 649 prog = bpf_iter_get_info(&meta, v == NULL); 650 if (!prog) 651 return 0; 652 653 ctx.meta = &meta; 654 ctx.map = info->map; 655 if (v) { 656 ctx.key = &info->index; 657 658 if (!info->percpu_value_buf) { 659 ctx.value = v; 660 } else { 661 pptr = (void __percpu *)(uintptr_t)v; 662 size = array->elem_size; 663 for_each_possible_cpu(cpu) { 664 copy_map_value_long(map, info->percpu_value_buf + off, 665 per_cpu_ptr(pptr, cpu)); 666 check_and_init_map_value(map, info->percpu_value_buf + off); 667 off += size; 668 } 669 ctx.value = info->percpu_value_buf; 670 } 671 } 672 673 return bpf_iter_run_prog(prog, &ctx); 674 } 675 676 static int bpf_array_map_seq_show(struct seq_file *seq, void *v) 677 { 678 return __bpf_array_map_seq_show(seq, v); 679 } 680 681 static void bpf_array_map_seq_stop(struct seq_file *seq, void *v) 682 { 683 if (!v) 684 (void)__bpf_array_map_seq_show(seq, NULL); 685 } 686 687 static int bpf_iter_init_array_map(void *priv_data, 688 struct bpf_iter_aux_info *aux) 689 { 690 struct bpf_iter_seq_array_map_info *seq_info = priv_data; 691 struct bpf_map *map = aux->map; 692 struct bpf_array *array = container_of(map, struct bpf_array, map); 693 void *value_buf; 694 u32 buf_size; 695 696 if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 697 buf_size = array->elem_size * num_possible_cpus(); 698 value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN); 699 if (!value_buf) 700 return -ENOMEM; 701 702 seq_info->percpu_value_buf = value_buf; 703 } 704 705 /* bpf_iter_attach_map() acquires a map uref, and the uref may be 706 * released before or in the middle of iterating map elements, so 707 * acquire an extra map uref for iterator. 708 */ 709 bpf_map_inc_with_uref(map); 710 seq_info->map = map; 711 return 0; 712 } 713 714 static void bpf_iter_fini_array_map(void *priv_data) 715 { 716 struct bpf_iter_seq_array_map_info *seq_info = priv_data; 717 718 bpf_map_put_with_uref(seq_info->map); 719 kfree(seq_info->percpu_value_buf); 720 } 721 722 static const struct seq_operations bpf_array_map_seq_ops = { 723 .start = bpf_array_map_seq_start, 724 .next = bpf_array_map_seq_next, 725 .stop = bpf_array_map_seq_stop, 726 .show = bpf_array_map_seq_show, 727 }; 728 729 static const struct bpf_iter_seq_info iter_seq_info = { 730 .seq_ops = &bpf_array_map_seq_ops, 731 .init_seq_private = bpf_iter_init_array_map, 732 .fini_seq_private = bpf_iter_fini_array_map, 733 .seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info), 734 }; 735 736 static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn, 737 void *callback_ctx, u64 flags) 738 { 739 u32 i, key, num_elems = 0; 740 struct bpf_array *array; 741 bool is_percpu; 742 u64 ret = 0; 743 void *val; 744 745 cant_migrate(); 746 747 if (flags != 0) 748 return -EINVAL; 749 750 is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; 751 array = container_of(map, struct bpf_array, map); 752 for (i = 0; i < map->max_entries; i++) { 753 if (is_percpu) 754 val = this_cpu_ptr(array->pptrs[i]); 755 else 756 val = array_map_elem_ptr(array, i); 757 num_elems++; 758 key = i; 759 ret = callback_fn((u64)(long)map, (u64)(long)&key, 760 (u64)(long)val, (u64)(long)callback_ctx, 0); 761 /* return value: 0 - continue, 1 - stop and return */ 762 if (ret) 763 break; 764 } 765 766 return num_elems; 767 } 768 769 static u64 array_map_mem_usage(const struct bpf_map *map) 770 { 771 struct bpf_array *array = container_of(map, struct bpf_array, map); 772 bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; 773 u32 elem_size = array->elem_size; 774 u64 entries = map->max_entries; 775 u64 usage = sizeof(*array); 776 777 if (percpu) { 778 usage += entries * sizeof(void *); 779 usage += entries * elem_size * num_possible_cpus(); 780 } else { 781 if (map->map_flags & BPF_F_MMAPABLE) { 782 usage = PAGE_ALIGN(usage); 783 usage += PAGE_ALIGN(entries * elem_size); 784 } else { 785 usage += entries * elem_size; 786 } 787 } 788 return usage; 789 } 790 791 BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array) 792 const struct bpf_map_ops array_map_ops = { 793 .map_meta_equal = array_map_meta_equal, 794 .map_alloc_check = array_map_alloc_check, 795 .map_alloc = array_map_alloc, 796 .map_free = array_map_free, 797 .map_get_next_key = array_map_get_next_key, 798 .map_release_uref = array_map_free_timers_wq, 799 .map_lookup_elem = array_map_lookup_elem, 800 .map_update_elem = array_map_update_elem, 801 .map_delete_elem = array_map_delete_elem, 802 .map_gen_lookup = array_map_gen_lookup, 803 .map_direct_value_addr = array_map_direct_value_addr, 804 .map_direct_value_meta = array_map_direct_value_meta, 805 .map_mmap = array_map_mmap, 806 .map_seq_show_elem = array_map_seq_show_elem, 807 .map_check_btf = array_map_check_btf, 808 .map_lookup_batch = generic_map_lookup_batch, 809 .map_update_batch = generic_map_update_batch, 810 .map_set_for_each_callback_args = map_set_for_each_callback_args, 811 .map_for_each_callback = bpf_for_each_array_elem, 812 .map_mem_usage = array_map_mem_usage, 813 .map_btf_id = &array_map_btf_ids[0], 814 .iter_seq_info = &iter_seq_info, 815 .map_get_hash = &array_map_get_hash, 816 }; 817 818 const struct bpf_map_ops percpu_array_map_ops = { 819 .map_meta_equal = bpf_map_meta_equal, 820 .map_alloc_check = array_map_alloc_check, 821 .map_alloc = array_map_alloc, 822 .map_free = array_map_free, 823 .map_get_next_key = array_map_get_next_key, 824 .map_lookup_elem = percpu_array_map_lookup_elem, 825 .map_gen_lookup = percpu_array_map_gen_lookup, 826 .map_update_elem = array_map_update_elem, 827 .map_delete_elem = array_map_delete_elem, 828 .map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem, 829 .map_seq_show_elem = percpu_array_map_seq_show_elem, 830 .map_check_btf = array_map_check_btf, 831 .map_lookup_batch = generic_map_lookup_batch, 832 .map_update_batch = generic_map_update_batch, 833 .map_set_for_each_callback_args = map_set_for_each_callback_args, 834 .map_for_each_callback = bpf_for_each_array_elem, 835 .map_mem_usage = array_map_mem_usage, 836 .map_btf_id = &array_map_btf_ids[0], 837 .iter_seq_info = &iter_seq_info, 838 }; 839 840 static int fd_array_map_alloc_check(union bpf_attr *attr) 841 { 842 /* only file descriptors can be stored in this type of map */ 843 if (attr->value_size != sizeof(u32)) 844 return -EINVAL; 845 /* Program read-only/write-only not supported for special maps yet. */ 846 if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) 847 return -EINVAL; 848 return array_map_alloc_check(attr); 849 } 850 851 static void fd_array_map_free(struct bpf_map *map) 852 { 853 struct bpf_array *array = container_of(map, struct bpf_array, map); 854 int i; 855 856 /* make sure it's empty */ 857 for (i = 0; i < array->map.max_entries; i++) 858 BUG_ON(array->ptrs[i] != NULL); 859 860 bpf_map_area_free(array); 861 } 862 863 static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) 864 { 865 return ERR_PTR(-EOPNOTSUPP); 866 } 867 868 /* only called from syscall */ 869 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value) 870 { 871 void **elem, *ptr; 872 int ret = 0; 873 874 if (!map->ops->map_fd_sys_lookup_elem) 875 return -ENOTSUPP; 876 877 rcu_read_lock(); 878 elem = array_map_lookup_elem(map, key); 879 if (elem && (ptr = READ_ONCE(*elem))) 880 *value = map->ops->map_fd_sys_lookup_elem(ptr); 881 else 882 ret = -ENOENT; 883 rcu_read_unlock(); 884 885 return ret; 886 } 887 888 /* only called from syscall */ 889 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, 890 void *key, void *value, u64 map_flags) 891 { 892 struct bpf_array *array = container_of(map, struct bpf_array, map); 893 void *new_ptr, *old_ptr; 894 u32 index = *(u32 *)key, ufd; 895 896 if (map_flags != BPF_ANY) 897 return -EINVAL; 898 899 if (index >= array->map.max_entries) 900 return -E2BIG; 901 902 ufd = *(u32 *)value; 903 new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); 904 if (IS_ERR(new_ptr)) 905 return PTR_ERR(new_ptr); 906 907 if (map->ops->map_poke_run) { 908 mutex_lock(&array->aux->poke_mutex); 909 old_ptr = xchg(array->ptrs + index, new_ptr); 910 map->ops->map_poke_run(map, index, old_ptr, new_ptr); 911 mutex_unlock(&array->aux->poke_mutex); 912 } else { 913 old_ptr = xchg(array->ptrs + index, new_ptr); 914 } 915 916 if (old_ptr) 917 map->ops->map_fd_put_ptr(map, old_ptr, true); 918 return 0; 919 } 920 921 static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer) 922 { 923 struct bpf_array *array = container_of(map, struct bpf_array, map); 924 void *old_ptr; 925 u32 index = *(u32 *)key; 926 927 if (index >= array->map.max_entries) 928 return -E2BIG; 929 930 if (map->ops->map_poke_run) { 931 mutex_lock(&array->aux->poke_mutex); 932 old_ptr = xchg(array->ptrs + index, NULL); 933 map->ops->map_poke_run(map, index, old_ptr, NULL); 934 mutex_unlock(&array->aux->poke_mutex); 935 } else { 936 old_ptr = xchg(array->ptrs + index, NULL); 937 } 938 939 if (old_ptr) { 940 map->ops->map_fd_put_ptr(map, old_ptr, need_defer); 941 return 0; 942 } else { 943 return -ENOENT; 944 } 945 } 946 947 static long fd_array_map_delete_elem(struct bpf_map *map, void *key) 948 { 949 return __fd_array_map_delete_elem(map, key, true); 950 } 951 952 static void *prog_fd_array_get_ptr(struct bpf_map *map, 953 struct file *map_file, int fd) 954 { 955 struct bpf_prog *prog = bpf_prog_get(fd); 956 bool is_extended; 957 958 if (IS_ERR(prog)) 959 return prog; 960 961 if (prog->type == BPF_PROG_TYPE_EXT || 962 !bpf_prog_map_compatible(map, prog)) { 963 bpf_prog_put(prog); 964 return ERR_PTR(-EINVAL); 965 } 966 967 mutex_lock(&prog->aux->ext_mutex); 968 is_extended = prog->aux->is_extended; 969 if (!is_extended) 970 prog->aux->prog_array_member_cnt++; 971 mutex_unlock(&prog->aux->ext_mutex); 972 if (is_extended) { 973 /* Extended prog can not be tail callee. It's to prevent a 974 * potential infinite loop like: 975 * tail callee prog entry -> tail callee prog subprog -> 976 * freplace prog entry --tailcall-> tail callee prog entry. 977 */ 978 bpf_prog_put(prog); 979 return ERR_PTR(-EBUSY); 980 } 981 982 return prog; 983 } 984 985 static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) 986 { 987 struct bpf_prog *prog = ptr; 988 989 mutex_lock(&prog->aux->ext_mutex); 990 prog->aux->prog_array_member_cnt--; 991 mutex_unlock(&prog->aux->ext_mutex); 992 /* bpf_prog is freed after one RCU or tasks trace grace period */ 993 bpf_prog_put(prog); 994 } 995 996 static u32 prog_fd_array_sys_lookup_elem(void *ptr) 997 { 998 return ((struct bpf_prog *)ptr)->aux->id; 999 } 1000 1001 /* decrement refcnt of all bpf_progs that are stored in this map */ 1002 static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer) 1003 { 1004 struct bpf_array *array = container_of(map, struct bpf_array, map); 1005 int i; 1006 1007 for (i = 0; i < array->map.max_entries; i++) 1008 __fd_array_map_delete_elem(map, &i, need_defer); 1009 } 1010 1011 static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key, 1012 struct seq_file *m) 1013 { 1014 void **elem, *ptr; 1015 u32 prog_id; 1016 1017 rcu_read_lock(); 1018 1019 elem = array_map_lookup_elem(map, key); 1020 if (elem) { 1021 ptr = READ_ONCE(*elem); 1022 if (ptr) { 1023 seq_printf(m, "%u: ", *(u32 *)key); 1024 prog_id = prog_fd_array_sys_lookup_elem(ptr); 1025 btf_type_seq_show(map->btf, map->btf_value_type_id, 1026 &prog_id, m); 1027 seq_putc(m, '\n'); 1028 } 1029 } 1030 1031 rcu_read_unlock(); 1032 } 1033 1034 struct prog_poke_elem { 1035 struct list_head list; 1036 struct bpf_prog_aux *aux; 1037 }; 1038 1039 static int prog_array_map_poke_track(struct bpf_map *map, 1040 struct bpf_prog_aux *prog_aux) 1041 { 1042 struct prog_poke_elem *elem; 1043 struct bpf_array_aux *aux; 1044 int ret = 0; 1045 1046 aux = container_of(map, struct bpf_array, map)->aux; 1047 mutex_lock(&aux->poke_mutex); 1048 list_for_each_entry(elem, &aux->poke_progs, list) { 1049 if (elem->aux == prog_aux) 1050 goto out; 1051 } 1052 1053 elem = kmalloc(sizeof(*elem), GFP_KERNEL); 1054 if (!elem) { 1055 ret = -ENOMEM; 1056 goto out; 1057 } 1058 1059 INIT_LIST_HEAD(&elem->list); 1060 /* We must track the program's aux info at this point in time 1061 * since the program pointer itself may not be stable yet, see 1062 * also comment in prog_array_map_poke_run(). 1063 */ 1064 elem->aux = prog_aux; 1065 1066 list_add_tail(&elem->list, &aux->poke_progs); 1067 out: 1068 mutex_unlock(&aux->poke_mutex); 1069 return ret; 1070 } 1071 1072 static void prog_array_map_poke_untrack(struct bpf_map *map, 1073 struct bpf_prog_aux *prog_aux) 1074 { 1075 struct prog_poke_elem *elem, *tmp; 1076 struct bpf_array_aux *aux; 1077 1078 aux = container_of(map, struct bpf_array, map)->aux; 1079 mutex_lock(&aux->poke_mutex); 1080 list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) { 1081 if (elem->aux == prog_aux) { 1082 list_del_init(&elem->list); 1083 kfree(elem); 1084 break; 1085 } 1086 } 1087 mutex_unlock(&aux->poke_mutex); 1088 } 1089 1090 void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke, 1091 struct bpf_prog *new, struct bpf_prog *old) 1092 { 1093 WARN_ON_ONCE(1); 1094 } 1095 1096 static void prog_array_map_poke_run(struct bpf_map *map, u32 key, 1097 struct bpf_prog *old, 1098 struct bpf_prog *new) 1099 { 1100 struct prog_poke_elem *elem; 1101 struct bpf_array_aux *aux; 1102 1103 aux = container_of(map, struct bpf_array, map)->aux; 1104 WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex)); 1105 1106 list_for_each_entry(elem, &aux->poke_progs, list) { 1107 struct bpf_jit_poke_descriptor *poke; 1108 int i; 1109 1110 for (i = 0; i < elem->aux->size_poke_tab; i++) { 1111 poke = &elem->aux->poke_tab[i]; 1112 1113 /* Few things to be aware of: 1114 * 1115 * 1) We can only ever access aux in this context, but 1116 * not aux->prog since it might not be stable yet and 1117 * there could be danger of use after free otherwise. 1118 * 2) Initially when we start tracking aux, the program 1119 * is not JITed yet and also does not have a kallsyms 1120 * entry. We skip these as poke->tailcall_target_stable 1121 * is not active yet. The JIT will do the final fixup 1122 * before setting it stable. The various 1123 * poke->tailcall_target_stable are successively 1124 * activated, so tail call updates can arrive from here 1125 * while JIT is still finishing its final fixup for 1126 * non-activated poke entries. 1127 * 3) Also programs reaching refcount of zero while patching 1128 * is in progress is okay since we're protected under 1129 * poke_mutex and untrack the programs before the JIT 1130 * buffer is freed. 1131 */ 1132 if (!READ_ONCE(poke->tailcall_target_stable)) 1133 continue; 1134 if (poke->reason != BPF_POKE_REASON_TAIL_CALL) 1135 continue; 1136 if (poke->tail_call.map != map || 1137 poke->tail_call.key != key) 1138 continue; 1139 1140 bpf_arch_poke_desc_update(poke, new, old); 1141 } 1142 } 1143 } 1144 1145 static void prog_array_map_clear_deferred(struct work_struct *work) 1146 { 1147 struct bpf_map *map = container_of(work, struct bpf_array_aux, 1148 work)->map; 1149 bpf_fd_array_map_clear(map, true); 1150 bpf_map_put(map); 1151 } 1152 1153 static void prog_array_map_clear(struct bpf_map *map) 1154 { 1155 struct bpf_array_aux *aux = container_of(map, struct bpf_array, 1156 map)->aux; 1157 bpf_map_inc(map); 1158 schedule_work(&aux->work); 1159 } 1160 1161 static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr) 1162 { 1163 struct bpf_array_aux *aux; 1164 struct bpf_map *map; 1165 1166 aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT); 1167 if (!aux) 1168 return ERR_PTR(-ENOMEM); 1169 1170 INIT_WORK(&aux->work, prog_array_map_clear_deferred); 1171 INIT_LIST_HEAD(&aux->poke_progs); 1172 mutex_init(&aux->poke_mutex); 1173 1174 map = array_map_alloc(attr); 1175 if (IS_ERR(map)) { 1176 kfree(aux); 1177 return map; 1178 } 1179 1180 container_of(map, struct bpf_array, map)->aux = aux; 1181 aux->map = map; 1182 1183 return map; 1184 } 1185 1186 static void prog_array_map_free(struct bpf_map *map) 1187 { 1188 struct prog_poke_elem *elem, *tmp; 1189 struct bpf_array_aux *aux; 1190 1191 aux = container_of(map, struct bpf_array, map)->aux; 1192 list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) { 1193 list_del_init(&elem->list); 1194 kfree(elem); 1195 } 1196 kfree(aux); 1197 fd_array_map_free(map); 1198 } 1199 1200 /* prog_array->aux->{type,jited} is a runtime binding. 1201 * Doing static check alone in the verifier is not enough. 1202 * Thus, prog_array_map cannot be used as an inner_map 1203 * and map_meta_equal is not implemented. 1204 */ 1205 const struct bpf_map_ops prog_array_map_ops = { 1206 .map_alloc_check = fd_array_map_alloc_check, 1207 .map_alloc = prog_array_map_alloc, 1208 .map_free = prog_array_map_free, 1209 .map_poke_track = prog_array_map_poke_track, 1210 .map_poke_untrack = prog_array_map_poke_untrack, 1211 .map_poke_run = prog_array_map_poke_run, 1212 .map_get_next_key = array_map_get_next_key, 1213 .map_lookup_elem = fd_array_map_lookup_elem, 1214 .map_delete_elem = fd_array_map_delete_elem, 1215 .map_fd_get_ptr = prog_fd_array_get_ptr, 1216 .map_fd_put_ptr = prog_fd_array_put_ptr, 1217 .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem, 1218 .map_release_uref = prog_array_map_clear, 1219 .map_seq_show_elem = prog_array_map_seq_show_elem, 1220 .map_mem_usage = array_map_mem_usage, 1221 .map_btf_id = &array_map_btf_ids[0], 1222 }; 1223 1224 static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, 1225 struct file *map_file) 1226 { 1227 struct bpf_event_entry *ee; 1228 1229 ee = kzalloc(sizeof(*ee), GFP_KERNEL); 1230 if (ee) { 1231 ee->event = perf_file->private_data; 1232 ee->perf_file = perf_file; 1233 ee->map_file = map_file; 1234 } 1235 1236 return ee; 1237 } 1238 1239 static void __bpf_event_entry_free(struct rcu_head *rcu) 1240 { 1241 struct bpf_event_entry *ee; 1242 1243 ee = container_of(rcu, struct bpf_event_entry, rcu); 1244 fput(ee->perf_file); 1245 kfree(ee); 1246 } 1247 1248 static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee) 1249 { 1250 call_rcu(&ee->rcu, __bpf_event_entry_free); 1251 } 1252 1253 static void *perf_event_fd_array_get_ptr(struct bpf_map *map, 1254 struct file *map_file, int fd) 1255 { 1256 struct bpf_event_entry *ee; 1257 struct perf_event *event; 1258 struct file *perf_file; 1259 u64 value; 1260 1261 perf_file = perf_event_get(fd); 1262 if (IS_ERR(perf_file)) 1263 return perf_file; 1264 1265 ee = ERR_PTR(-EOPNOTSUPP); 1266 event = perf_file->private_data; 1267 if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP) 1268 goto err_out; 1269 1270 ee = bpf_event_entry_gen(perf_file, map_file); 1271 if (ee) 1272 return ee; 1273 ee = ERR_PTR(-ENOMEM); 1274 err_out: 1275 fput(perf_file); 1276 return ee; 1277 } 1278 1279 static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) 1280 { 1281 /* bpf_perf_event is freed after one RCU grace period */ 1282 bpf_event_entry_free_rcu(ptr); 1283 } 1284 1285 static void perf_event_fd_array_release(struct bpf_map *map, 1286 struct file *map_file) 1287 { 1288 struct bpf_array *array = container_of(map, struct bpf_array, map); 1289 struct bpf_event_entry *ee; 1290 int i; 1291 1292 if (map->map_flags & BPF_F_PRESERVE_ELEMS) 1293 return; 1294 1295 rcu_read_lock(); 1296 for (i = 0; i < array->map.max_entries; i++) { 1297 ee = READ_ONCE(array->ptrs[i]); 1298 if (ee && ee->map_file == map_file) 1299 __fd_array_map_delete_elem(map, &i, true); 1300 } 1301 rcu_read_unlock(); 1302 } 1303 1304 static void perf_event_fd_array_map_free(struct bpf_map *map) 1305 { 1306 if (map->map_flags & BPF_F_PRESERVE_ELEMS) 1307 bpf_fd_array_map_clear(map, false); 1308 fd_array_map_free(map); 1309 } 1310 1311 const struct bpf_map_ops perf_event_array_map_ops = { 1312 .map_meta_equal = bpf_map_meta_equal, 1313 .map_alloc_check = fd_array_map_alloc_check, 1314 .map_alloc = array_map_alloc, 1315 .map_free = perf_event_fd_array_map_free, 1316 .map_get_next_key = array_map_get_next_key, 1317 .map_lookup_elem = fd_array_map_lookup_elem, 1318 .map_delete_elem = fd_array_map_delete_elem, 1319 .map_fd_get_ptr = perf_event_fd_array_get_ptr, 1320 .map_fd_put_ptr = perf_event_fd_array_put_ptr, 1321 .map_release = perf_event_fd_array_release, 1322 .map_check_btf = map_check_no_btf, 1323 .map_mem_usage = array_map_mem_usage, 1324 .map_btf_id = &array_map_btf_ids[0], 1325 }; 1326 1327 #ifdef CONFIG_CGROUPS 1328 static void *cgroup_fd_array_get_ptr(struct bpf_map *map, 1329 struct file *map_file /* not used */, 1330 int fd) 1331 { 1332 return cgroup_get_from_fd(fd); 1333 } 1334 1335 static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) 1336 { 1337 /* cgroup_put free cgrp after a rcu grace period */ 1338 cgroup_put(ptr); 1339 } 1340 1341 static void cgroup_fd_array_free(struct bpf_map *map) 1342 { 1343 bpf_fd_array_map_clear(map, false); 1344 fd_array_map_free(map); 1345 } 1346 1347 const struct bpf_map_ops cgroup_array_map_ops = { 1348 .map_meta_equal = bpf_map_meta_equal, 1349 .map_alloc_check = fd_array_map_alloc_check, 1350 .map_alloc = array_map_alloc, 1351 .map_free = cgroup_fd_array_free, 1352 .map_get_next_key = array_map_get_next_key, 1353 .map_lookup_elem = fd_array_map_lookup_elem, 1354 .map_delete_elem = fd_array_map_delete_elem, 1355 .map_fd_get_ptr = cgroup_fd_array_get_ptr, 1356 .map_fd_put_ptr = cgroup_fd_array_put_ptr, 1357 .map_check_btf = map_check_no_btf, 1358 .map_mem_usage = array_map_mem_usage, 1359 .map_btf_id = &array_map_btf_ids[0], 1360 }; 1361 #endif 1362 1363 static struct bpf_map *array_of_map_alloc(union bpf_attr *attr) 1364 { 1365 struct bpf_map *map, *inner_map_meta; 1366 1367 inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd); 1368 if (IS_ERR(inner_map_meta)) 1369 return inner_map_meta; 1370 1371 map = array_map_alloc(attr); 1372 if (IS_ERR(map)) { 1373 bpf_map_meta_free(inner_map_meta); 1374 return map; 1375 } 1376 1377 map->inner_map_meta = inner_map_meta; 1378 1379 return map; 1380 } 1381 1382 static void array_of_map_free(struct bpf_map *map) 1383 { 1384 /* map->inner_map_meta is only accessed by syscall which 1385 * is protected by fdget/fdput. 1386 */ 1387 bpf_map_meta_free(map->inner_map_meta); 1388 bpf_fd_array_map_clear(map, false); 1389 fd_array_map_free(map); 1390 } 1391 1392 static void *array_of_map_lookup_elem(struct bpf_map *map, void *key) 1393 { 1394 struct bpf_map **inner_map = array_map_lookup_elem(map, key); 1395 1396 if (!inner_map) 1397 return NULL; 1398 1399 return READ_ONCE(*inner_map); 1400 } 1401 1402 static int array_of_map_gen_lookup(struct bpf_map *map, 1403 struct bpf_insn *insn_buf) 1404 { 1405 struct bpf_array *array = container_of(map, struct bpf_array, map); 1406 u32 elem_size = array->elem_size; 1407 struct bpf_insn *insn = insn_buf; 1408 const int ret = BPF_REG_0; 1409 const int map_ptr = BPF_REG_1; 1410 const int index = BPF_REG_2; 1411 1412 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); 1413 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); 1414 if (!map->bypass_spec_v1) { 1415 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6); 1416 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); 1417 } else { 1418 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); 1419 } 1420 if (is_power_of_2(elem_size)) 1421 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); 1422 else 1423 *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size); 1424 *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr); 1425 *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0); 1426 *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1); 1427 *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); 1428 *insn++ = BPF_MOV64_IMM(ret, 0); 1429 1430 return insn - insn_buf; 1431 } 1432 1433 const struct bpf_map_ops array_of_maps_map_ops = { 1434 .map_alloc_check = fd_array_map_alloc_check, 1435 .map_alloc = array_of_map_alloc, 1436 .map_free = array_of_map_free, 1437 .map_get_next_key = array_map_get_next_key, 1438 .map_lookup_elem = array_of_map_lookup_elem, 1439 .map_delete_elem = fd_array_map_delete_elem, 1440 .map_fd_get_ptr = bpf_map_fd_get_ptr, 1441 .map_fd_put_ptr = bpf_map_fd_put_ptr, 1442 .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, 1443 .map_gen_lookup = array_of_map_gen_lookup, 1444 .map_lookup_batch = generic_map_lookup_batch, 1445 .map_update_batch = generic_map_update_batch, 1446 .map_check_btf = map_check_no_btf, 1447 .map_mem_usage = array_map_mem_usage, 1448 .map_btf_id = &array_map_btf_ids[0], 1449 }; 1450