1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 * Copyright (c) 2016 Facebook 4 */ 5 #include <linux/bpf.h> 6 #include <linux/btf.h> 7 #include <linux/jhash.h> 8 #include <linux/filter.h> 9 #include <linux/rculist_nulls.h> 10 #include <linux/random.h> 11 #include <uapi/linux/btf.h> 12 #include <linux/rcupdate_trace.h> 13 #include "percpu_freelist.h" 14 #include "bpf_lru_list.h" 15 #include "map_in_map.h" 16 17 #define HTAB_CREATE_FLAG_MASK \ 18 (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \ 19 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED) 20 21 #define BATCH_OPS(_name) \ 22 .map_lookup_batch = \ 23 _name##_map_lookup_batch, \ 24 .map_lookup_and_delete_batch = \ 25 _name##_map_lookup_and_delete_batch, \ 26 .map_update_batch = \ 27 generic_map_update_batch, \ 28 .map_delete_batch = \ 29 generic_map_delete_batch 30 31 /* 32 * The bucket lock has two protection scopes: 33 * 34 * 1) Serializing concurrent operations from BPF programs on different 35 * CPUs 36 * 37 * 2) Serializing concurrent operations from BPF programs and sys_bpf() 38 * 39 * BPF programs can execute in any context including perf, kprobes and 40 * tracing. As there are almost no limits where perf, kprobes and tracing 41 * can be invoked from the lock operations need to be protected against 42 * deadlocks. Deadlocks can be caused by recursion and by an invocation in 43 * the lock held section when functions which acquire this lock are invoked 44 * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU 45 * variable bpf_prog_active, which prevents BPF programs attached to perf 46 * events, kprobes and tracing to be invoked before the prior invocation 47 * from one of these contexts completed. sys_bpf() uses the same mechanism 48 * by pinning the task to the current CPU and incrementing the recursion 49 * protection across the map operation. 50 * 51 * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain 52 * operations like memory allocations (even with GFP_ATOMIC) from atomic 53 * contexts. This is required because even with GFP_ATOMIC the memory 54 * allocator calls into code paths which acquire locks with long held lock 55 * sections. To ensure the deterministic behaviour these locks are regular 56 * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only 57 * true atomic contexts on an RT kernel are the low level hardware 58 * handling, scheduling, low level interrupt handling, NMIs etc. None of 59 * these contexts should ever do memory allocations. 60 * 61 * As regular device interrupt handlers and soft interrupts are forced into 62 * thread context, the existing code which does 63 * spin_lock*(); alloc(GPF_ATOMIC); spin_unlock*(); 64 * just works. 65 * 66 * In theory the BPF locks could be converted to regular spinlocks as well, 67 * but the bucket locks and percpu_freelist locks can be taken from 68 * arbitrary contexts (perf, kprobes, tracepoints) which are required to be 69 * atomic contexts even on RT. These mechanisms require preallocated maps, 70 * so there is no need to invoke memory allocations within the lock held 71 * sections. 72 * 73 * BPF maps which need dynamic allocation are only used from (forced) 74 * thread context on RT and can therefore use regular spinlocks which in 75 * turn allows to invoke memory allocations from the lock held section. 76 * 77 * On a non RT kernel this distinction is neither possible nor required. 
78 * spinlock maps to raw_spinlock and the extra code is optimized out by the 79 * compiler. 80 */ 81 struct bucket { 82 struct hlist_nulls_head head; 83 union { 84 raw_spinlock_t raw_lock; 85 spinlock_t lock; 86 }; 87 }; 88 89 #define HASHTAB_MAP_LOCK_COUNT 8 90 #define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1) 91 92 struct bpf_htab { 93 struct bpf_map map; 94 struct bucket *buckets; 95 void *elems; 96 union { 97 struct pcpu_freelist freelist; 98 struct bpf_lru lru; 99 }; 100 struct htab_elem *__percpu *extra_elems; 101 atomic_t count; /* number of elements in this hashtable */ 102 u32 n_buckets; /* number of hash buckets */ 103 u32 elem_size; /* size of each element in bytes */ 104 u32 hashrnd; 105 struct lock_class_key lockdep_key; 106 int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT]; 107 }; 108 109 /* each htab element is struct htab_elem + key + value */ 110 struct htab_elem { 111 union { 112 struct hlist_nulls_node hash_node; 113 struct { 114 void *padding; 115 union { 116 struct bpf_htab *htab; 117 struct pcpu_freelist_node fnode; 118 struct htab_elem *batch_flink; 119 }; 120 }; 121 }; 122 union { 123 struct rcu_head rcu; 124 struct bpf_lru_node lru_node; 125 }; 126 u32 hash; 127 char key[] __aligned(8); 128 }; 129 130 static inline bool htab_is_prealloc(const struct bpf_htab *htab) 131 { 132 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); 133 } 134 135 static inline bool htab_use_raw_lock(const struct bpf_htab *htab) 136 { 137 return (!IS_ENABLED(CONFIG_PREEMPT_RT) || htab_is_prealloc(htab)); 138 } 139 140 static void htab_init_buckets(struct bpf_htab *htab) 141 { 142 unsigned i; 143 144 for (i = 0; i < htab->n_buckets; i++) { 145 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); 146 if (htab_use_raw_lock(htab)) { 147 raw_spin_lock_init(&htab->buckets[i].raw_lock); 148 lockdep_set_class(&htab->buckets[i].raw_lock, 149 &htab->lockdep_key); 150 } else { 151 spin_lock_init(&htab->buckets[i].lock); 152 lockdep_set_class(&htab->buckets[i].lock, 153 &htab->lockdep_key); 154 } 155 cond_resched(); 156 } 157 } 158 159 static inline int htab_lock_bucket(const struct bpf_htab *htab, 160 struct bucket *b, u32 hash, 161 unsigned long *pflags) 162 { 163 unsigned long flags; 164 165 hash = hash & HASHTAB_MAP_LOCK_MASK; 166 167 migrate_disable(); 168 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) { 169 __this_cpu_dec(*(htab->map_locked[hash])); 170 migrate_enable(); 171 return -EBUSY; 172 } 173 174 if (htab_use_raw_lock(htab)) 175 raw_spin_lock_irqsave(&b->raw_lock, flags); 176 else 177 spin_lock_irqsave(&b->lock, flags); 178 *pflags = flags; 179 180 return 0; 181 } 182 183 static inline void htab_unlock_bucket(const struct bpf_htab *htab, 184 struct bucket *b, u32 hash, 185 unsigned long flags) 186 { 187 hash = hash & HASHTAB_MAP_LOCK_MASK; 188 if (htab_use_raw_lock(htab)) 189 raw_spin_unlock_irqrestore(&b->raw_lock, flags); 190 else 191 spin_unlock_irqrestore(&b->lock, flags); 192 __this_cpu_dec(*(htab->map_locked[hash])); 193 migrate_enable(); 194 } 195 196 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node); 197 198 static bool htab_is_lru(const struct bpf_htab *htab) 199 { 200 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || 201 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; 202 } 203 204 static bool htab_is_percpu(const struct bpf_htab *htab) 205 { 206 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || 207 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; 208 } 209 210 static inline void htab_elem_set_ptr(struct 
htab_elem *l, u32 key_size, 211 void __percpu *pptr) 212 { 213 *(void __percpu **)(l->key + key_size) = pptr; 214 } 215 216 static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size) 217 { 218 return *(void __percpu **)(l->key + key_size); 219 } 220 221 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l) 222 { 223 return *(void **)(l->key + roundup(map->key_size, 8)); 224 } 225 226 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i) 227 { 228 return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size); 229 } 230 231 static void htab_free_elems(struct bpf_htab *htab) 232 { 233 int i; 234 235 if (!htab_is_percpu(htab)) 236 goto free_elems; 237 238 for (i = 0; i < htab->map.max_entries; i++) { 239 void __percpu *pptr; 240 241 pptr = htab_elem_get_ptr(get_htab_elem(htab, i), 242 htab->map.key_size); 243 free_percpu(pptr); 244 cond_resched(); 245 } 246 free_elems: 247 bpf_map_area_free(htab->elems); 248 } 249 250 /* The LRU list has a lock (lru_lock). Each htab bucket has a lock 251 * (bucket_lock). If both locks need to be acquired together, the lock 252 * order is always lru_lock -> bucket_lock and this only happens in 253 * bpf_lru_list.c logic. For example, certain code path of 254 * bpf_lru_pop_free(), which is called by function prealloc_lru_pop(), 255 * will acquire lru_lock first followed by acquiring bucket_lock. 256 * 257 * In hashtab.c, to avoid deadlock, lock acquisition of 258 * bucket_lock followed by lru_lock is not allowed. In such cases, 259 * bucket_lock needs to be released first before acquiring lru_lock. 260 */ 261 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, 262 u32 hash) 263 { 264 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash); 265 struct htab_elem *l; 266 267 if (node) { 268 l = container_of(node, struct htab_elem, lru_node); 269 memcpy(l->key, key, htab->map.key_size); 270 return l; 271 } 272 273 return NULL; 274 } 275 276 static int prealloc_init(struct bpf_htab *htab) 277 { 278 u32 num_entries = htab->map.max_entries; 279 int err = -ENOMEM, i; 280 281 if (!htab_is_percpu(htab) && !htab_is_lru(htab)) 282 num_entries += num_possible_cpus(); 283 284 htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries, 285 htab->map.numa_node); 286 if (!htab->elems) 287 return -ENOMEM; 288 289 if (!htab_is_percpu(htab)) 290 goto skip_percpu_elems; 291 292 for (i = 0; i < num_entries; i++) { 293 u32 size = round_up(htab->map.value_size, 8); 294 void __percpu *pptr; 295 296 pptr = bpf_map_alloc_percpu(&htab->map, size, 8, 297 GFP_USER | __GFP_NOWARN); 298 if (!pptr) 299 goto free_elems; 300 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, 301 pptr); 302 cond_resched(); 303 } 304 305 skip_percpu_elems: 306 if (htab_is_lru(htab)) 307 err = bpf_lru_init(&htab->lru, 308 htab->map.map_flags & BPF_F_NO_COMMON_LRU, 309 offsetof(struct htab_elem, hash) - 310 offsetof(struct htab_elem, lru_node), 311 htab_lru_map_delete_node, 312 htab); 313 else 314 err = pcpu_freelist_init(&htab->freelist); 315 316 if (err) 317 goto free_elems; 318 319 if (htab_is_lru(htab)) 320 bpf_lru_populate(&htab->lru, htab->elems, 321 offsetof(struct htab_elem, lru_node), 322 htab->elem_size, num_entries); 323 else 324 pcpu_freelist_populate(&htab->freelist, 325 htab->elems + offsetof(struct htab_elem, fnode), 326 htab->elem_size, num_entries); 327 328 return 0; 329 330 free_elems: 331 htab_free_elems(htab); 332 return err; 333 } 334 335 static void prealloc_destroy(struct 
bpf_htab *htab) 336 { 337 htab_free_elems(htab); 338 339 if (htab_is_lru(htab)) 340 bpf_lru_destroy(&htab->lru); 341 else 342 pcpu_freelist_destroy(&htab->freelist); 343 } 344 345 static int alloc_extra_elems(struct bpf_htab *htab) 346 { 347 struct htab_elem *__percpu *pptr, *l_new; 348 struct pcpu_freelist_node *l; 349 int cpu; 350 351 pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8, 352 GFP_USER | __GFP_NOWARN); 353 if (!pptr) 354 return -ENOMEM; 355 356 for_each_possible_cpu(cpu) { 357 l = pcpu_freelist_pop(&htab->freelist); 358 /* pop will succeed, since prealloc_init() 359 * preallocated extra num_possible_cpus elements 360 */ 361 l_new = container_of(l, struct htab_elem, fnode); 362 *per_cpu_ptr(pptr, cpu) = l_new; 363 } 364 htab->extra_elems = pptr; 365 return 0; 366 } 367 368 /* Called from syscall */ 369 static int htab_map_alloc_check(union bpf_attr *attr) 370 { 371 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || 372 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); 373 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || 374 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); 375 /* percpu_lru means each cpu has its own LRU list. 376 * it is different from BPF_MAP_TYPE_PERCPU_HASH where 377 * the map's value itself is percpu. percpu_lru has 378 * nothing to do with the map's value. 379 */ 380 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); 381 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); 382 bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED); 383 int numa_node = bpf_map_attr_numa_node(attr); 384 385 BUILD_BUG_ON(offsetof(struct htab_elem, htab) != 386 offsetof(struct htab_elem, hash_node.pprev)); 387 BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) != 388 offsetof(struct htab_elem, hash_node.pprev)); 389 390 if (lru && !bpf_capable()) 391 /* LRU implementation is much complicated than other 392 * maps. Hence, limit to CAP_BPF. 393 */ 394 return -EPERM; 395 396 if (zero_seed && !capable(CAP_SYS_ADMIN)) 397 /* Guard against local DoS, and discourage production use. */ 398 return -EPERM; 399 400 if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK || 401 !bpf_map_flags_access_ok(attr->map_flags)) 402 return -EINVAL; 403 404 if (!lru && percpu_lru) 405 return -EINVAL; 406 407 if (lru && !prealloc) 408 return -ENOTSUPP; 409 410 if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru)) 411 return -EINVAL; 412 413 /* check sanity of attributes. 414 * value_size == 0 may be allowed in the future to use map as a set 415 */ 416 if (attr->max_entries == 0 || attr->key_size == 0 || 417 attr->value_size == 0) 418 return -EINVAL; 419 420 if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE - 421 sizeof(struct htab_elem)) 422 /* if key_size + value_size is bigger, the user space won't be 423 * able to access the elements via bpf syscall. This check 424 * also makes sure that the elem_size doesn't overflow and it's 425 * kmalloc-able later in htab_map_update_elem() 426 */ 427 return -E2BIG; 428 429 return 0; 430 } 431 432 static struct bpf_map *htab_map_alloc(union bpf_attr *attr) 433 { 434 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || 435 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); 436 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || 437 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); 438 /* percpu_lru means each cpu has its own LRU list. 439 * it is different from BPF_MAP_TYPE_PERCPU_HASH where 440 * the map's value itself is percpu. percpu_lru has 441 * nothing to do with the map's value. 
442 */ 443 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); 444 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); 445 struct bpf_htab *htab; 446 int err, i; 447 448 htab = kzalloc(sizeof(*htab), GFP_USER | __GFP_ACCOUNT); 449 if (!htab) 450 return ERR_PTR(-ENOMEM); 451 452 lockdep_register_key(&htab->lockdep_key); 453 454 bpf_map_init_from_attr(&htab->map, attr); 455 456 if (percpu_lru) { 457 /* ensure each CPU's lru list has >=1 elements. 458 * since we are at it, make each lru list has the same 459 * number of elements. 460 */ 461 htab->map.max_entries = roundup(attr->max_entries, 462 num_possible_cpus()); 463 if (htab->map.max_entries < attr->max_entries) 464 htab->map.max_entries = rounddown(attr->max_entries, 465 num_possible_cpus()); 466 } 467 468 /* hash table size must be power of 2 */ 469 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); 470 471 htab->elem_size = sizeof(struct htab_elem) + 472 round_up(htab->map.key_size, 8); 473 if (percpu) 474 htab->elem_size += sizeof(void *); 475 else 476 htab->elem_size += round_up(htab->map.value_size, 8); 477 478 err = -E2BIG; 479 /* prevent zero size kmalloc and check for u32 overflow */ 480 if (htab->n_buckets == 0 || 481 htab->n_buckets > U32_MAX / sizeof(struct bucket)) 482 goto free_htab; 483 484 err = -ENOMEM; 485 htab->buckets = bpf_map_area_alloc(htab->n_buckets * 486 sizeof(struct bucket), 487 htab->map.numa_node); 488 if (!htab->buckets) 489 goto free_htab; 490 491 for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) { 492 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map, 493 sizeof(int), 494 sizeof(int), 495 GFP_USER); 496 if (!htab->map_locked[i]) 497 goto free_map_locked; 498 } 499 500 if (htab->map.map_flags & BPF_F_ZERO_SEED) 501 htab->hashrnd = 0; 502 else 503 htab->hashrnd = get_random_int(); 504 505 htab_init_buckets(htab); 506 507 if (prealloc) { 508 err = prealloc_init(htab); 509 if (err) 510 goto free_map_locked; 511 512 if (!percpu && !lru) { 513 /* lru itself can remove the least used element, so 514 * there is no need for an extra elem during map_update. 515 */ 516 err = alloc_extra_elems(htab); 517 if (err) 518 goto free_prealloc; 519 } 520 } 521 522 return &htab->map; 523 524 free_prealloc: 525 prealloc_destroy(htab); 526 free_map_locked: 527 for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) 528 free_percpu(htab->map_locked[i]); 529 bpf_map_area_free(htab->buckets); 530 free_htab: 531 lockdep_unregister_key(&htab->lockdep_key); 532 kfree(htab); 533 return ERR_PTR(err); 534 } 535 536 static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd) 537 { 538 return jhash(key, key_len, hashrnd); 539 } 540 541 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) 542 { 543 return &htab->buckets[hash & (htab->n_buckets - 1)]; 544 } 545 546 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash) 547 { 548 return &__select_bucket(htab, hash)->head; 549 } 550 551 /* this lookup function can only be called with bucket lock taken */ 552 static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash, 553 void *key, u32 key_size) 554 { 555 struct hlist_nulls_node *n; 556 struct htab_elem *l; 557 558 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) 559 if (l->hash == hash && !memcmp(&l->key, key, key_size)) 560 return l; 561 562 return NULL; 563 } 564 565 /* can be called without bucket lock. 
it will repeat the loop in 566 * the unlikely event when elements moved from one bucket into another 567 * while link list is being walked 568 */ 569 static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head, 570 u32 hash, void *key, 571 u32 key_size, u32 n_buckets) 572 { 573 struct hlist_nulls_node *n; 574 struct htab_elem *l; 575 576 again: 577 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) 578 if (l->hash == hash && !memcmp(&l->key, key, key_size)) 579 return l; 580 581 if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1)))) 582 goto again; 583 584 return NULL; 585 } 586 587 /* Called from syscall or from eBPF program directly, so 588 * arguments have to match bpf_map_lookup_elem() exactly. 589 * The return value is adjusted by BPF instructions 590 * in htab_map_gen_lookup(). 591 */ 592 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) 593 { 594 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 595 struct hlist_nulls_head *head; 596 struct htab_elem *l; 597 u32 hash, key_size; 598 599 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held()); 600 601 key_size = map->key_size; 602 603 hash = htab_map_hash(key, key_size, htab->hashrnd); 604 605 head = select_bucket(htab, hash); 606 607 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); 608 609 return l; 610 } 611 612 static void *htab_map_lookup_elem(struct bpf_map *map, void *key) 613 { 614 struct htab_elem *l = __htab_map_lookup_elem(map, key); 615 616 if (l) 617 return l->key + round_up(map->key_size, 8); 618 619 return NULL; 620 } 621 622 /* inline bpf_map_lookup_elem() call. 623 * Instead of: 624 * bpf_prog 625 * bpf_map_lookup_elem 626 * map->ops->map_lookup_elem 627 * htab_map_lookup_elem 628 * __htab_map_lookup_elem 629 * do: 630 * bpf_prog 631 * __htab_map_lookup_elem 632 */ 633 static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) 634 { 635 struct bpf_insn *insn = insn_buf; 636 const int ret = BPF_REG_0; 637 638 BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem, 639 (void *(*)(struct bpf_map *map, void *key))NULL)); 640 *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem)); 641 *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1); 642 *insn++ = BPF_ALU64_IMM(BPF_ADD, ret, 643 offsetof(struct htab_elem, key) + 644 round_up(map->key_size, 8)); 645 return insn - insn_buf; 646 } 647 648 static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map, 649 void *key, const bool mark) 650 { 651 struct htab_elem *l = __htab_map_lookup_elem(map, key); 652 653 if (l) { 654 if (mark) 655 bpf_lru_node_set_ref(&l->lru_node); 656 return l->key + round_up(map->key_size, 8); 657 } 658 659 return NULL; 660 } 661 662 static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key) 663 { 664 return __htab_lru_map_lookup_elem(map, key, true); 665 } 666 667 static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key) 668 { 669 return __htab_lru_map_lookup_elem(map, key, false); 670 } 671 672 static int htab_lru_map_gen_lookup(struct bpf_map *map, 673 struct bpf_insn *insn_buf) 674 { 675 struct bpf_insn *insn = insn_buf; 676 const int ret = BPF_REG_0; 677 const int ref_reg = BPF_REG_1; 678 679 BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem, 680 (void *(*)(struct bpf_map *map, void *key))NULL)); 681 *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem)); 682 *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4); 683 *insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret, 684 offsetof(struct htab_elem, lru_node) + 685 
offsetof(struct bpf_lru_node, ref)); 686 *insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1); 687 *insn++ = BPF_ST_MEM(BPF_B, ret, 688 offsetof(struct htab_elem, lru_node) + 689 offsetof(struct bpf_lru_node, ref), 690 1); 691 *insn++ = BPF_ALU64_IMM(BPF_ADD, ret, 692 offsetof(struct htab_elem, key) + 693 round_up(map->key_size, 8)); 694 return insn - insn_buf; 695 } 696 697 /* It is called from the bpf_lru_list when the LRU needs to delete 698 * older elements from the htab. 699 */ 700 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node) 701 { 702 struct bpf_htab *htab = (struct bpf_htab *)arg; 703 struct htab_elem *l = NULL, *tgt_l; 704 struct hlist_nulls_head *head; 705 struct hlist_nulls_node *n; 706 unsigned long flags; 707 struct bucket *b; 708 int ret; 709 710 tgt_l = container_of(node, struct htab_elem, lru_node); 711 b = __select_bucket(htab, tgt_l->hash); 712 head = &b->head; 713 714 ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags); 715 if (ret) 716 return false; 717 718 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) 719 if (l == tgt_l) { 720 hlist_nulls_del_rcu(&l->hash_node); 721 break; 722 } 723 724 htab_unlock_bucket(htab, b, tgt_l->hash, flags); 725 726 return l == tgt_l; 727 } 728 729 /* Called from syscall */ 730 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) 731 { 732 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 733 struct hlist_nulls_head *head; 734 struct htab_elem *l, *next_l; 735 u32 hash, key_size; 736 int i = 0; 737 738 WARN_ON_ONCE(!rcu_read_lock_held()); 739 740 key_size = map->key_size; 741 742 if (!key) 743 goto find_first_elem; 744 745 hash = htab_map_hash(key, key_size, htab->hashrnd); 746 747 head = select_bucket(htab, hash); 748 749 /* lookup the key */ 750 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); 751 752 if (!l) 753 goto find_first_elem; 754 755 /* key was found, get next key in the same bucket */ 756 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)), 757 struct htab_elem, hash_node); 758 759 if (next_l) { 760 /* if next elem in this hash list is non-zero, just return it */ 761 memcpy(next_key, next_l->key, key_size); 762 return 0; 763 } 764 765 /* no more elements in this hash list, go to the next bucket */ 766 i = hash & (htab->n_buckets - 1); 767 i++; 768 769 find_first_elem: 770 /* iterate over buckets */ 771 for (; i < htab->n_buckets; i++) { 772 head = select_bucket(htab, i); 773 774 /* pick first element in the bucket */ 775 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)), 776 struct htab_elem, hash_node); 777 if (next_l) { 778 /* if it's not empty, just return it */ 779 memcpy(next_key, next_l->key, key_size); 780 return 0; 781 } 782 } 783 784 /* iterated over all buckets and all elements */ 785 return -ENOENT; 786 } 787 788 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l) 789 { 790 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) 791 free_percpu(htab_elem_get_ptr(l, htab->map.key_size)); 792 kfree(l); 793 } 794 795 static void htab_elem_free_rcu(struct rcu_head *head) 796 { 797 struct htab_elem *l = container_of(head, struct htab_elem, rcu); 798 struct bpf_htab *htab = l->htab; 799 800 htab_elem_free(htab, l); 801 } 802 803 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) 804 { 805 struct bpf_map *map = &htab->map; 806 void *ptr; 807 808 if (map->ops->map_fd_put_ptr) { 809 ptr = fd_htab_map_get_ptr(map, l); 810 
map->ops->map_fd_put_ptr(ptr); 811 } 812 } 813 814 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) 815 { 816 htab_put_fd_value(htab, l); 817 818 if (htab_is_prealloc(htab)) { 819 __pcpu_freelist_push(&htab->freelist, &l->fnode); 820 } else { 821 atomic_dec(&htab->count); 822 l->htab = htab; 823 call_rcu(&l->rcu, htab_elem_free_rcu); 824 } 825 } 826 827 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr, 828 void *value, bool onallcpus) 829 { 830 if (!onallcpus) { 831 /* copy true value_size bytes */ 832 memcpy(this_cpu_ptr(pptr), value, htab->map.value_size); 833 } else { 834 u32 size = round_up(htab->map.value_size, 8); 835 int off = 0, cpu; 836 837 for_each_possible_cpu(cpu) { 838 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), 839 value + off, size); 840 off += size; 841 } 842 } 843 } 844 845 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr, 846 void *value, bool onallcpus) 847 { 848 /* When using prealloc and not setting the initial value on all cpus, 849 * zero-fill element values for other cpus (just as what happens when 850 * not using prealloc). Otherwise, bpf program has no way to ensure 851 * known initial values for cpus other than current one 852 * (onallcpus=false always when coming from bpf prog). 853 */ 854 if (htab_is_prealloc(htab) && !onallcpus) { 855 u32 size = round_up(htab->map.value_size, 8); 856 int current_cpu = raw_smp_processor_id(); 857 int cpu; 858 859 for_each_possible_cpu(cpu) { 860 if (cpu == current_cpu) 861 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value, 862 size); 863 else 864 memset(per_cpu_ptr(pptr, cpu), 0, size); 865 } 866 } else { 867 pcpu_copy_value(htab, pptr, value, onallcpus); 868 } 869 } 870 871 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab) 872 { 873 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && 874 BITS_PER_LONG == 64; 875 } 876 877 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, 878 void *value, u32 key_size, u32 hash, 879 bool percpu, bool onallcpus, 880 struct htab_elem *old_elem) 881 { 882 u32 size = htab->map.value_size; 883 bool prealloc = htab_is_prealloc(htab); 884 struct htab_elem *l_new, **pl_new; 885 void __percpu *pptr; 886 887 if (prealloc) { 888 if (old_elem) { 889 /* if we're updating the existing element, 890 * use per-cpu extra elems to avoid freelist_pop/push 891 */ 892 pl_new = this_cpu_ptr(htab->extra_elems); 893 l_new = *pl_new; 894 htab_put_fd_value(htab, old_elem); 895 *pl_new = old_elem; 896 } else { 897 struct pcpu_freelist_node *l; 898 899 l = __pcpu_freelist_pop(&htab->freelist); 900 if (!l) 901 return ERR_PTR(-E2BIG); 902 l_new = container_of(l, struct htab_elem, fnode); 903 } 904 } else { 905 if (atomic_inc_return(&htab->count) > htab->map.max_entries) 906 if (!old_elem) { 907 /* when map is full and update() is replacing 908 * old element, it's ok to allocate, since 909 * old element will be freed immediately. 
910 * Otherwise return an error 911 */ 912 l_new = ERR_PTR(-E2BIG); 913 goto dec_count; 914 } 915 l_new = bpf_map_kmalloc_node(&htab->map, htab->elem_size, 916 GFP_ATOMIC | __GFP_NOWARN, 917 htab->map.numa_node); 918 if (!l_new) { 919 l_new = ERR_PTR(-ENOMEM); 920 goto dec_count; 921 } 922 check_and_init_map_lock(&htab->map, 923 l_new->key + round_up(key_size, 8)); 924 } 925 926 memcpy(l_new->key, key, key_size); 927 if (percpu) { 928 size = round_up(size, 8); 929 if (prealloc) { 930 pptr = htab_elem_get_ptr(l_new, key_size); 931 } else { 932 /* alloc_percpu zero-fills */ 933 pptr = bpf_map_alloc_percpu(&htab->map, size, 8, 934 GFP_ATOMIC | __GFP_NOWARN); 935 if (!pptr) { 936 kfree(l_new); 937 l_new = ERR_PTR(-ENOMEM); 938 goto dec_count; 939 } 940 } 941 942 pcpu_init_value(htab, pptr, value, onallcpus); 943 944 if (!prealloc) 945 htab_elem_set_ptr(l_new, key_size, pptr); 946 } else if (fd_htab_map_needs_adjust(htab)) { 947 size = round_up(size, 8); 948 memcpy(l_new->key + round_up(key_size, 8), value, size); 949 } else { 950 copy_map_value(&htab->map, 951 l_new->key + round_up(key_size, 8), 952 value); 953 } 954 955 l_new->hash = hash; 956 return l_new; 957 dec_count: 958 atomic_dec(&htab->count); 959 return l_new; 960 } 961 962 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old, 963 u64 map_flags) 964 { 965 if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST) 966 /* elem already exists */ 967 return -EEXIST; 968 969 if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST) 970 /* elem doesn't exist, cannot update it */ 971 return -ENOENT; 972 973 return 0; 974 } 975 976 /* Called from syscall or from eBPF program */ 977 static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, 978 u64 map_flags) 979 { 980 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 981 struct htab_elem *l_new = NULL, *l_old; 982 struct hlist_nulls_head *head; 983 unsigned long flags; 984 struct bucket *b; 985 u32 key_size, hash; 986 int ret; 987 988 if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST)) 989 /* unknown flags */ 990 return -EINVAL; 991 992 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held()); 993 994 key_size = map->key_size; 995 996 hash = htab_map_hash(key, key_size, htab->hashrnd); 997 998 b = __select_bucket(htab, hash); 999 head = &b->head; 1000 1001 if (unlikely(map_flags & BPF_F_LOCK)) { 1002 if (unlikely(!map_value_has_spin_lock(map))) 1003 return -EINVAL; 1004 /* find an element without taking the bucket lock */ 1005 l_old = lookup_nulls_elem_raw(head, hash, key, key_size, 1006 htab->n_buckets); 1007 ret = check_flags(htab, l_old, map_flags); 1008 if (ret) 1009 return ret; 1010 if (l_old) { 1011 /* grab the element lock and update value in place */ 1012 copy_map_value_locked(map, 1013 l_old->key + round_up(key_size, 8), 1014 value, false); 1015 return 0; 1016 } 1017 /* fall through, grab the bucket lock and lookup again. 1018 * 99.9% chance that the element won't be found, 1019 * but second lookup under lock has to be done. 1020 */ 1021 } 1022 1023 ret = htab_lock_bucket(htab, b, hash, &flags); 1024 if (ret) 1025 return ret; 1026 1027 l_old = lookup_elem_raw(head, hash, key, key_size); 1028 1029 ret = check_flags(htab, l_old, map_flags); 1030 if (ret) 1031 goto err; 1032 1033 if (unlikely(l_old && (map_flags & BPF_F_LOCK))) { 1034 /* first lookup without the bucket lock didn't find the element, 1035 * but second lookup with the bucket lock found it. 
1036 * This case is highly unlikely, but has to be dealt with: 1037 * grab the element lock in addition to the bucket lock 1038 * and update element in place 1039 */ 1040 copy_map_value_locked(map, 1041 l_old->key + round_up(key_size, 8), 1042 value, false); 1043 ret = 0; 1044 goto err; 1045 } 1046 1047 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, 1048 l_old); 1049 if (IS_ERR(l_new)) { 1050 /* all pre-allocated elements are in use or memory exhausted */ 1051 ret = PTR_ERR(l_new); 1052 goto err; 1053 } 1054 1055 /* add new element to the head of the list, so that 1056 * concurrent search will find it before old elem 1057 */ 1058 hlist_nulls_add_head_rcu(&l_new->hash_node, head); 1059 if (l_old) { 1060 hlist_nulls_del_rcu(&l_old->hash_node); 1061 if (!htab_is_prealloc(htab)) 1062 free_htab_elem(htab, l_old); 1063 } 1064 ret = 0; 1065 err: 1066 htab_unlock_bucket(htab, b, hash, flags); 1067 return ret; 1068 } 1069 1070 static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, 1071 u64 map_flags) 1072 { 1073 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1074 struct htab_elem *l_new, *l_old = NULL; 1075 struct hlist_nulls_head *head; 1076 unsigned long flags; 1077 struct bucket *b; 1078 u32 key_size, hash; 1079 int ret; 1080 1081 if (unlikely(map_flags > BPF_EXIST)) 1082 /* unknown flags */ 1083 return -EINVAL; 1084 1085 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held()); 1086 1087 key_size = map->key_size; 1088 1089 hash = htab_map_hash(key, key_size, htab->hashrnd); 1090 1091 b = __select_bucket(htab, hash); 1092 head = &b->head; 1093 1094 /* For LRU, we need to alloc before taking bucket's 1095 * spinlock because getting free nodes from LRU may need 1096 * to remove older elements from htab and this removal 1097 * operation will need a bucket lock. 
1098 */ 1099 l_new = prealloc_lru_pop(htab, key, hash); 1100 if (!l_new) 1101 return -ENOMEM; 1102 memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size); 1103 1104 ret = htab_lock_bucket(htab, b, hash, &flags); 1105 if (ret) 1106 return ret; 1107 1108 l_old = lookup_elem_raw(head, hash, key, key_size); 1109 1110 ret = check_flags(htab, l_old, map_flags); 1111 if (ret) 1112 goto err; 1113 1114 /* add new element to the head of the list, so that 1115 * concurrent search will find it before old elem 1116 */ 1117 hlist_nulls_add_head_rcu(&l_new->hash_node, head); 1118 if (l_old) { 1119 bpf_lru_node_set_ref(&l_new->lru_node); 1120 hlist_nulls_del_rcu(&l_old->hash_node); 1121 } 1122 ret = 0; 1123 1124 err: 1125 htab_unlock_bucket(htab, b, hash, flags); 1126 1127 if (ret) 1128 bpf_lru_push_free(&htab->lru, &l_new->lru_node); 1129 else if (l_old) 1130 bpf_lru_push_free(&htab->lru, &l_old->lru_node); 1131 1132 return ret; 1133 } 1134 1135 static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key, 1136 void *value, u64 map_flags, 1137 bool onallcpus) 1138 { 1139 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1140 struct htab_elem *l_new = NULL, *l_old; 1141 struct hlist_nulls_head *head; 1142 unsigned long flags; 1143 struct bucket *b; 1144 u32 key_size, hash; 1145 int ret; 1146 1147 if (unlikely(map_flags > BPF_EXIST)) 1148 /* unknown flags */ 1149 return -EINVAL; 1150 1151 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held()); 1152 1153 key_size = map->key_size; 1154 1155 hash = htab_map_hash(key, key_size, htab->hashrnd); 1156 1157 b = __select_bucket(htab, hash); 1158 head = &b->head; 1159 1160 ret = htab_lock_bucket(htab, b, hash, &flags); 1161 if (ret) 1162 return ret; 1163 1164 l_old = lookup_elem_raw(head, hash, key, key_size); 1165 1166 ret = check_flags(htab, l_old, map_flags); 1167 if (ret) 1168 goto err; 1169 1170 if (l_old) { 1171 /* per-cpu hash map can update value in-place */ 1172 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), 1173 value, onallcpus); 1174 } else { 1175 l_new = alloc_htab_elem(htab, key, value, key_size, 1176 hash, true, onallcpus, NULL); 1177 if (IS_ERR(l_new)) { 1178 ret = PTR_ERR(l_new); 1179 goto err; 1180 } 1181 hlist_nulls_add_head_rcu(&l_new->hash_node, head); 1182 } 1183 ret = 0; 1184 err: 1185 htab_unlock_bucket(htab, b, hash, flags); 1186 return ret; 1187 } 1188 1189 static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, 1190 void *value, u64 map_flags, 1191 bool onallcpus) 1192 { 1193 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1194 struct htab_elem *l_new = NULL, *l_old; 1195 struct hlist_nulls_head *head; 1196 unsigned long flags; 1197 struct bucket *b; 1198 u32 key_size, hash; 1199 int ret; 1200 1201 if (unlikely(map_flags > BPF_EXIST)) 1202 /* unknown flags */ 1203 return -EINVAL; 1204 1205 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held()); 1206 1207 key_size = map->key_size; 1208 1209 hash = htab_map_hash(key, key_size, htab->hashrnd); 1210 1211 b = __select_bucket(htab, hash); 1212 head = &b->head; 1213 1214 /* For LRU, we need to alloc before taking bucket's 1215 * spinlock because LRU's elem alloc may need 1216 * to remove older elem from htab and this removal 1217 * operation will need a bucket lock. 
1218 */ 1219 if (map_flags != BPF_EXIST) { 1220 l_new = prealloc_lru_pop(htab, key, hash); 1221 if (!l_new) 1222 return -ENOMEM; 1223 } 1224 1225 ret = htab_lock_bucket(htab, b, hash, &flags); 1226 if (ret) 1227 return ret; 1228 1229 l_old = lookup_elem_raw(head, hash, key, key_size); 1230 1231 ret = check_flags(htab, l_old, map_flags); 1232 if (ret) 1233 goto err; 1234 1235 if (l_old) { 1236 bpf_lru_node_set_ref(&l_old->lru_node); 1237 1238 /* per-cpu hash map can update value in-place */ 1239 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), 1240 value, onallcpus); 1241 } else { 1242 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size), 1243 value, onallcpus); 1244 hlist_nulls_add_head_rcu(&l_new->hash_node, head); 1245 l_new = NULL; 1246 } 1247 ret = 0; 1248 err: 1249 htab_unlock_bucket(htab, b, hash, flags); 1250 if (l_new) 1251 bpf_lru_push_free(&htab->lru, &l_new->lru_node); 1252 return ret; 1253 } 1254 1255 static int htab_percpu_map_update_elem(struct bpf_map *map, void *key, 1256 void *value, u64 map_flags) 1257 { 1258 return __htab_percpu_map_update_elem(map, key, value, map_flags, false); 1259 } 1260 1261 static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, 1262 void *value, u64 map_flags) 1263 { 1264 return __htab_lru_percpu_map_update_elem(map, key, value, map_flags, 1265 false); 1266 } 1267 1268 /* Called from syscall or from eBPF program */ 1269 static int htab_map_delete_elem(struct bpf_map *map, void *key) 1270 { 1271 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1272 struct hlist_nulls_head *head; 1273 struct bucket *b; 1274 struct htab_elem *l; 1275 unsigned long flags; 1276 u32 hash, key_size; 1277 int ret; 1278 1279 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held()); 1280 1281 key_size = map->key_size; 1282 1283 hash = htab_map_hash(key, key_size, htab->hashrnd); 1284 b = __select_bucket(htab, hash); 1285 head = &b->head; 1286 1287 ret = htab_lock_bucket(htab, b, hash, &flags); 1288 if (ret) 1289 return ret; 1290 1291 l = lookup_elem_raw(head, hash, key, key_size); 1292 1293 if (l) { 1294 hlist_nulls_del_rcu(&l->hash_node); 1295 free_htab_elem(htab, l); 1296 } else { 1297 ret = -ENOENT; 1298 } 1299 1300 htab_unlock_bucket(htab, b, hash, flags); 1301 return ret; 1302 } 1303 1304 static int htab_lru_map_delete_elem(struct bpf_map *map, void *key) 1305 { 1306 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1307 struct hlist_nulls_head *head; 1308 struct bucket *b; 1309 struct htab_elem *l; 1310 unsigned long flags; 1311 u32 hash, key_size; 1312 int ret; 1313 1314 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held()); 1315 1316 key_size = map->key_size; 1317 1318 hash = htab_map_hash(key, key_size, htab->hashrnd); 1319 b = __select_bucket(htab, hash); 1320 head = &b->head; 1321 1322 ret = htab_lock_bucket(htab, b, hash, &flags); 1323 if (ret) 1324 return ret; 1325 1326 l = lookup_elem_raw(head, hash, key, key_size); 1327 1328 if (l) 1329 hlist_nulls_del_rcu(&l->hash_node); 1330 else 1331 ret = -ENOENT; 1332 1333 htab_unlock_bucket(htab, b, hash, flags); 1334 if (l) 1335 bpf_lru_push_free(&htab->lru, &l->lru_node); 1336 return ret; 1337 } 1338 1339 static void delete_all_elements(struct bpf_htab *htab) 1340 { 1341 int i; 1342 1343 for (i = 0; i < htab->n_buckets; i++) { 1344 struct hlist_nulls_head *head = select_bucket(htab, i); 1345 struct hlist_nulls_node *n; 1346 struct htab_elem *l; 1347 1348 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { 1349 
hlist_nulls_del_rcu(&l->hash_node); 1350 htab_elem_free(htab, l); 1351 } 1352 } 1353 } 1354 1355 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */ 1356 static void htab_map_free(struct bpf_map *map) 1357 { 1358 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1359 int i; 1360 1361 /* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback. 1362 * bpf_free_used_maps() is called after bpf prog is no longer executing. 1363 * There is no need to synchronize_rcu() here to protect map elements. 1364 */ 1365 1366 /* some of free_htab_elem() callbacks for elements of this map may 1367 * not have executed. Wait for them. 1368 */ 1369 rcu_barrier(); 1370 if (!htab_is_prealloc(htab)) 1371 delete_all_elements(htab); 1372 else 1373 prealloc_destroy(htab); 1374 1375 free_percpu(htab->extra_elems); 1376 bpf_map_area_free(htab->buckets); 1377 for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) 1378 free_percpu(htab->map_locked[i]); 1379 lockdep_unregister_key(&htab->lockdep_key); 1380 kfree(htab); 1381 } 1382 1383 static void htab_map_seq_show_elem(struct bpf_map *map, void *key, 1384 struct seq_file *m) 1385 { 1386 void *value; 1387 1388 rcu_read_lock(); 1389 1390 value = htab_map_lookup_elem(map, key); 1391 if (!value) { 1392 rcu_read_unlock(); 1393 return; 1394 } 1395 1396 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); 1397 seq_puts(m, ": "); 1398 btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); 1399 seq_puts(m, "\n"); 1400 1401 rcu_read_unlock(); 1402 } 1403 1404 static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, 1405 void *value, bool is_lru_map, 1406 bool is_percpu, u64 flags) 1407 { 1408 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1409 struct hlist_nulls_head *head; 1410 unsigned long bflags; 1411 struct htab_elem *l; 1412 u32 hash, key_size; 1413 struct bucket *b; 1414 int ret; 1415 1416 key_size = map->key_size; 1417 1418 hash = htab_map_hash(key, key_size, htab->hashrnd); 1419 b = __select_bucket(htab, hash); 1420 head = &b->head; 1421 1422 ret = htab_lock_bucket(htab, b, hash, &bflags); 1423 if (ret) 1424 return ret; 1425 1426 l = lookup_elem_raw(head, hash, key, key_size); 1427 if (!l) { 1428 ret = -ENOENT; 1429 } else { 1430 if (is_percpu) { 1431 u32 roundup_value_size = round_up(map->value_size, 8); 1432 void __percpu *pptr; 1433 int off = 0, cpu; 1434 1435 pptr = htab_elem_get_ptr(l, key_size); 1436 for_each_possible_cpu(cpu) { 1437 bpf_long_memcpy(value + off, 1438 per_cpu_ptr(pptr, cpu), 1439 roundup_value_size); 1440 off += roundup_value_size; 1441 } 1442 } else { 1443 u32 roundup_key_size = round_up(map->key_size, 8); 1444 1445 if (flags & BPF_F_LOCK) 1446 copy_map_value_locked(map, value, l->key + 1447 roundup_key_size, 1448 true); 1449 else 1450 copy_map_value(map, value, l->key + 1451 roundup_key_size); 1452 check_and_init_map_lock(map, value); 1453 } 1454 1455 hlist_nulls_del_rcu(&l->hash_node); 1456 if (!is_lru_map) 1457 free_htab_elem(htab, l); 1458 } 1459 1460 htab_unlock_bucket(htab, b, hash, bflags); 1461 1462 if (is_lru_map && l) 1463 bpf_lru_push_free(&htab->lru, &l->lru_node); 1464 1465 return ret; 1466 } 1467 1468 static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, 1469 void *value, u64 flags) 1470 { 1471 return __htab_map_lookup_and_delete_elem(map, key, value, false, false, 1472 flags); 1473 } 1474 1475 static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map, 1476 void *key, void *value, 1477 u64 flags) 
1478 { 1479 return __htab_map_lookup_and_delete_elem(map, key, value, false, true, 1480 flags); 1481 } 1482 1483 static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key, 1484 void *value, u64 flags) 1485 { 1486 return __htab_map_lookup_and_delete_elem(map, key, value, true, false, 1487 flags); 1488 } 1489 1490 static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map, 1491 void *key, void *value, 1492 u64 flags) 1493 { 1494 return __htab_map_lookup_and_delete_elem(map, key, value, true, true, 1495 flags); 1496 } 1497 1498 static int 1499 __htab_map_lookup_and_delete_batch(struct bpf_map *map, 1500 const union bpf_attr *attr, 1501 union bpf_attr __user *uattr, 1502 bool do_delete, bool is_lru_map, 1503 bool is_percpu) 1504 { 1505 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1506 u32 bucket_cnt, total, key_size, value_size, roundup_key_size; 1507 void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val; 1508 void __user *uvalues = u64_to_user_ptr(attr->batch.values); 1509 void __user *ukeys = u64_to_user_ptr(attr->batch.keys); 1510 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); 1511 u32 batch, max_count, size, bucket_size; 1512 struct htab_elem *node_to_free = NULL; 1513 u64 elem_map_flags, map_flags; 1514 struct hlist_nulls_head *head; 1515 struct hlist_nulls_node *n; 1516 unsigned long flags = 0; 1517 bool locked = false; 1518 struct htab_elem *l; 1519 struct bucket *b; 1520 int ret = 0; 1521 1522 elem_map_flags = attr->batch.elem_flags; 1523 if ((elem_map_flags & ~BPF_F_LOCK) || 1524 ((elem_map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map))) 1525 return -EINVAL; 1526 1527 map_flags = attr->batch.flags; 1528 if (map_flags) 1529 return -EINVAL; 1530 1531 max_count = attr->batch.count; 1532 if (!max_count) 1533 return 0; 1534 1535 if (put_user(0, &uattr->batch.count)) 1536 return -EFAULT; 1537 1538 batch = 0; 1539 if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch))) 1540 return -EFAULT; 1541 1542 if (batch >= htab->n_buckets) 1543 return -ENOENT; 1544 1545 key_size = htab->map.key_size; 1546 roundup_key_size = round_up(htab->map.key_size, 8); 1547 value_size = htab->map.value_size; 1548 size = round_up(value_size, 8); 1549 if (is_percpu) 1550 value_size = size * num_possible_cpus(); 1551 total = 0; 1552 /* while experimenting with hash tables with sizes ranging from 10 to 1553 * 1000, it was observed that a bucket can have upto 5 entries. 1554 */ 1555 bucket_size = 5; 1556 1557 alloc: 1558 /* We cannot do copy_from_user or copy_to_user inside 1559 * the rcu_read_lock. Allocate enough space here. 1560 */ 1561 keys = kvmalloc(key_size * bucket_size, GFP_USER | __GFP_NOWARN); 1562 values = kvmalloc(value_size * bucket_size, GFP_USER | __GFP_NOWARN); 1563 if (!keys || !values) { 1564 ret = -ENOMEM; 1565 goto after_loop; 1566 } 1567 1568 again: 1569 bpf_disable_instrumentation(); 1570 rcu_read_lock(); 1571 again_nocopy: 1572 dst_key = keys; 1573 dst_val = values; 1574 b = &htab->buckets[batch]; 1575 head = &b->head; 1576 /* do not grab the lock unless need it (bucket_cnt > 0). 
*/ 1577 if (locked) { 1578 ret = htab_lock_bucket(htab, b, batch, &flags); 1579 if (ret) 1580 goto next_batch; 1581 } 1582 1583 bucket_cnt = 0; 1584 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) 1585 bucket_cnt++; 1586 1587 if (bucket_cnt && !locked) { 1588 locked = true; 1589 goto again_nocopy; 1590 } 1591 1592 if (bucket_cnt > (max_count - total)) { 1593 if (total == 0) 1594 ret = -ENOSPC; 1595 /* Note that since bucket_cnt > 0 here, it is implicit 1596 * that the locked was grabbed, so release it. 1597 */ 1598 htab_unlock_bucket(htab, b, batch, flags); 1599 rcu_read_unlock(); 1600 bpf_enable_instrumentation(); 1601 goto after_loop; 1602 } 1603 1604 if (bucket_cnt > bucket_size) { 1605 bucket_size = bucket_cnt; 1606 /* Note that since bucket_cnt > 0 here, it is implicit 1607 * that the locked was grabbed, so release it. 1608 */ 1609 htab_unlock_bucket(htab, b, batch, flags); 1610 rcu_read_unlock(); 1611 bpf_enable_instrumentation(); 1612 kvfree(keys); 1613 kvfree(values); 1614 goto alloc; 1615 } 1616 1617 /* Next block is only safe to run if you have grabbed the lock */ 1618 if (!locked) 1619 goto next_batch; 1620 1621 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { 1622 memcpy(dst_key, l->key, key_size); 1623 1624 if (is_percpu) { 1625 int off = 0, cpu; 1626 void __percpu *pptr; 1627 1628 pptr = htab_elem_get_ptr(l, map->key_size); 1629 for_each_possible_cpu(cpu) { 1630 bpf_long_memcpy(dst_val + off, 1631 per_cpu_ptr(pptr, cpu), size); 1632 off += size; 1633 } 1634 } else { 1635 value = l->key + roundup_key_size; 1636 if (elem_map_flags & BPF_F_LOCK) 1637 copy_map_value_locked(map, dst_val, value, 1638 true); 1639 else 1640 copy_map_value(map, dst_val, value); 1641 check_and_init_map_lock(map, dst_val); 1642 } 1643 if (do_delete) { 1644 hlist_nulls_del_rcu(&l->hash_node); 1645 1646 /* bpf_lru_push_free() will acquire lru_lock, which 1647 * may cause deadlock. See comments in function 1648 * prealloc_lru_pop(). Let us do bpf_lru_push_free() 1649 * after releasing the bucket lock. 1650 */ 1651 if (is_lru_map) { 1652 l->batch_flink = node_to_free; 1653 node_to_free = l; 1654 } else { 1655 free_htab_elem(htab, l); 1656 } 1657 } 1658 dst_key += key_size; 1659 dst_val += value_size; 1660 } 1661 1662 htab_unlock_bucket(htab, b, batch, flags); 1663 locked = false; 1664 1665 while (node_to_free) { 1666 l = node_to_free; 1667 node_to_free = node_to_free->batch_flink; 1668 bpf_lru_push_free(&htab->lru, &l->lru_node); 1669 } 1670 1671 next_batch: 1672 /* If we are not copying data, we can go to next bucket and avoid 1673 * unlocking the rcu. 
1674 */ 1675 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) { 1676 batch++; 1677 goto again_nocopy; 1678 } 1679 1680 rcu_read_unlock(); 1681 bpf_enable_instrumentation(); 1682 if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys, 1683 key_size * bucket_cnt) || 1684 copy_to_user(uvalues + total * value_size, values, 1685 value_size * bucket_cnt))) { 1686 ret = -EFAULT; 1687 goto after_loop; 1688 } 1689 1690 total += bucket_cnt; 1691 batch++; 1692 if (batch >= htab->n_buckets) { 1693 ret = -ENOENT; 1694 goto after_loop; 1695 } 1696 goto again; 1697 1698 after_loop: 1699 if (ret == -EFAULT) 1700 goto out; 1701 1702 /* copy # of entries and next batch */ 1703 ubatch = u64_to_user_ptr(attr->batch.out_batch); 1704 if (copy_to_user(ubatch, &batch, sizeof(batch)) || 1705 put_user(total, &uattr->batch.count)) 1706 ret = -EFAULT; 1707 1708 out: 1709 kvfree(keys); 1710 kvfree(values); 1711 return ret; 1712 } 1713 1714 static int 1715 htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, 1716 union bpf_attr __user *uattr) 1717 { 1718 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, 1719 false, true); 1720 } 1721 1722 static int 1723 htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map, 1724 const union bpf_attr *attr, 1725 union bpf_attr __user *uattr) 1726 { 1727 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, 1728 false, true); 1729 } 1730 1731 static int 1732 htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, 1733 union bpf_attr __user *uattr) 1734 { 1735 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, 1736 false, false); 1737 } 1738 1739 static int 1740 htab_map_lookup_and_delete_batch(struct bpf_map *map, 1741 const union bpf_attr *attr, 1742 union bpf_attr __user *uattr) 1743 { 1744 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, 1745 false, false); 1746 } 1747 1748 static int 1749 htab_lru_percpu_map_lookup_batch(struct bpf_map *map, 1750 const union bpf_attr *attr, 1751 union bpf_attr __user *uattr) 1752 { 1753 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, 1754 true, true); 1755 } 1756 1757 static int 1758 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map, 1759 const union bpf_attr *attr, 1760 union bpf_attr __user *uattr) 1761 { 1762 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, 1763 true, true); 1764 } 1765 1766 static int 1767 htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, 1768 union bpf_attr __user *uattr) 1769 { 1770 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, 1771 true, false); 1772 } 1773 1774 static int 1775 htab_lru_map_lookup_and_delete_batch(struct bpf_map *map, 1776 const union bpf_attr *attr, 1777 union bpf_attr __user *uattr) 1778 { 1779 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, 1780 true, false); 1781 } 1782 1783 struct bpf_iter_seq_hash_map_info { 1784 struct bpf_map *map; 1785 struct bpf_htab *htab; 1786 void *percpu_value_buf; // non-zero means percpu hash 1787 u32 bucket_id; 1788 u32 skip_elems; 1789 }; 1790 1791 static struct htab_elem * 1792 bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info, 1793 struct htab_elem *prev_elem) 1794 { 1795 const struct bpf_htab *htab = info->htab; 1796 u32 skip_elems = info->skip_elems; 1797 u32 bucket_id = info->bucket_id; 1798 struct hlist_nulls_head *head; 1799 struct hlist_nulls_node *n; 1800 struct htab_elem *elem; 1801 struct bucket *b; 1802 u32 i, count; 
1803 1804 if (bucket_id >= htab->n_buckets) 1805 return NULL; 1806 1807 /* try to find next elem in the same bucket */ 1808 if (prev_elem) { 1809 /* no update/deletion on this bucket, prev_elem should be still valid 1810 * and we won't skip elements. 1811 */ 1812 n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node)); 1813 elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node); 1814 if (elem) 1815 return elem; 1816 1817 /* not found, unlock and go to the next bucket */ 1818 b = &htab->buckets[bucket_id++]; 1819 rcu_read_unlock(); 1820 skip_elems = 0; 1821 } 1822 1823 for (i = bucket_id; i < htab->n_buckets; i++) { 1824 b = &htab->buckets[i]; 1825 rcu_read_lock(); 1826 1827 count = 0; 1828 head = &b->head; 1829 hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) { 1830 if (count >= skip_elems) { 1831 info->bucket_id = i; 1832 info->skip_elems = count; 1833 return elem; 1834 } 1835 count++; 1836 } 1837 1838 rcu_read_unlock(); 1839 skip_elems = 0; 1840 } 1841 1842 info->bucket_id = i; 1843 info->skip_elems = 0; 1844 return NULL; 1845 } 1846 1847 static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos) 1848 { 1849 struct bpf_iter_seq_hash_map_info *info = seq->private; 1850 struct htab_elem *elem; 1851 1852 elem = bpf_hash_map_seq_find_next(info, NULL); 1853 if (!elem) 1854 return NULL; 1855 1856 if (*pos == 0) 1857 ++*pos; 1858 return elem; 1859 } 1860 1861 static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1862 { 1863 struct bpf_iter_seq_hash_map_info *info = seq->private; 1864 1865 ++*pos; 1866 ++info->skip_elems; 1867 return bpf_hash_map_seq_find_next(info, v); 1868 } 1869 1870 static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem) 1871 { 1872 struct bpf_iter_seq_hash_map_info *info = seq->private; 1873 u32 roundup_key_size, roundup_value_size; 1874 struct bpf_iter__bpf_map_elem ctx = {}; 1875 struct bpf_map *map = info->map; 1876 struct bpf_iter_meta meta; 1877 int ret = 0, off = 0, cpu; 1878 struct bpf_prog *prog; 1879 void __percpu *pptr; 1880 1881 meta.seq = seq; 1882 prog = bpf_iter_get_info(&meta, elem == NULL); 1883 if (prog) { 1884 ctx.meta = &meta; 1885 ctx.map = info->map; 1886 if (elem) { 1887 roundup_key_size = round_up(map->key_size, 8); 1888 ctx.key = elem->key; 1889 if (!info->percpu_value_buf) { 1890 ctx.value = elem->key + roundup_key_size; 1891 } else { 1892 roundup_value_size = round_up(map->value_size, 8); 1893 pptr = htab_elem_get_ptr(elem, map->key_size); 1894 for_each_possible_cpu(cpu) { 1895 bpf_long_memcpy(info->percpu_value_buf + off, 1896 per_cpu_ptr(pptr, cpu), 1897 roundup_value_size); 1898 off += roundup_value_size; 1899 } 1900 ctx.value = info->percpu_value_buf; 1901 } 1902 } 1903 ret = bpf_iter_run_prog(prog, &ctx); 1904 } 1905 1906 return ret; 1907 } 1908 1909 static int bpf_hash_map_seq_show(struct seq_file *seq, void *v) 1910 { 1911 return __bpf_hash_map_seq_show(seq, v); 1912 } 1913 1914 static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v) 1915 { 1916 if (!v) 1917 (void)__bpf_hash_map_seq_show(seq, NULL); 1918 else 1919 rcu_read_unlock(); 1920 } 1921 1922 static int bpf_iter_init_hash_map(void *priv_data, 1923 struct bpf_iter_aux_info *aux) 1924 { 1925 struct bpf_iter_seq_hash_map_info *seq_info = priv_data; 1926 struct bpf_map *map = aux->map; 1927 void *value_buf; 1928 u32 buf_size; 1929 1930 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 1931 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 1932 buf_size = round_up(map->value_size, 8) * 
num_possible_cpus(); 1933 value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN); 1934 if (!value_buf) 1935 return -ENOMEM; 1936 1937 seq_info->percpu_value_buf = value_buf; 1938 } 1939 1940 seq_info->map = map; 1941 seq_info->htab = container_of(map, struct bpf_htab, map); 1942 return 0; 1943 } 1944 1945 static void bpf_iter_fini_hash_map(void *priv_data) 1946 { 1947 struct bpf_iter_seq_hash_map_info *seq_info = priv_data; 1948 1949 kfree(seq_info->percpu_value_buf); 1950 } 1951 1952 static const struct seq_operations bpf_hash_map_seq_ops = { 1953 .start = bpf_hash_map_seq_start, 1954 .next = bpf_hash_map_seq_next, 1955 .stop = bpf_hash_map_seq_stop, 1956 .show = bpf_hash_map_seq_show, 1957 }; 1958 1959 static const struct bpf_iter_seq_info iter_seq_info = { 1960 .seq_ops = &bpf_hash_map_seq_ops, 1961 .init_seq_private = bpf_iter_init_hash_map, 1962 .fini_seq_private = bpf_iter_fini_hash_map, 1963 .seq_priv_size = sizeof(struct bpf_iter_seq_hash_map_info), 1964 }; 1965 1966 static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn, 1967 void *callback_ctx, u64 flags) 1968 { 1969 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1970 struct hlist_nulls_head *head; 1971 struct hlist_nulls_node *n; 1972 struct htab_elem *elem; 1973 u32 roundup_key_size; 1974 int i, num_elems = 0; 1975 void __percpu *pptr; 1976 struct bucket *b; 1977 void *key, *val; 1978 bool is_percpu; 1979 u64 ret = 0; 1980 1981 if (flags != 0) 1982 return -EINVAL; 1983 1984 is_percpu = htab_is_percpu(htab); 1985 1986 roundup_key_size = round_up(map->key_size, 8); 1987 /* disable migration so percpu value prepared here will be the 1988 * same as the one seen by the bpf program with bpf_map_lookup_elem(). 1989 */ 1990 if (is_percpu) 1991 migrate_disable(); 1992 for (i = 0; i < htab->n_buckets; i++) { 1993 b = &htab->buckets[i]; 1994 rcu_read_lock(); 1995 head = &b->head; 1996 hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) { 1997 key = elem->key; 1998 if (is_percpu) { 1999 /* current cpu value for percpu map */ 2000 pptr = htab_elem_get_ptr(elem, map->key_size); 2001 val = this_cpu_ptr(pptr); 2002 } else { 2003 val = elem->key + roundup_key_size; 2004 } 2005 num_elems++; 2006 ret = BPF_CAST_CALL(callback_fn)((u64)(long)map, 2007 (u64)(long)key, (u64)(long)val, 2008 (u64)(long)callback_ctx, 0); 2009 /* return value: 0 - continue, 1 - stop and return */ 2010 if (ret) { 2011 rcu_read_unlock(); 2012 goto out; 2013 } 2014 } 2015 rcu_read_unlock(); 2016 } 2017 out: 2018 if (is_percpu) 2019 migrate_enable(); 2020 return num_elems; 2021 } 2022 2023 static int htab_map_btf_id; 2024 const struct bpf_map_ops htab_map_ops = { 2025 .map_meta_equal = bpf_map_meta_equal, 2026 .map_alloc_check = htab_map_alloc_check, 2027 .map_alloc = htab_map_alloc, 2028 .map_free = htab_map_free, 2029 .map_get_next_key = htab_map_get_next_key, 2030 .map_lookup_elem = htab_map_lookup_elem, 2031 .map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem, 2032 .map_update_elem = htab_map_update_elem, 2033 .map_delete_elem = htab_map_delete_elem, 2034 .map_gen_lookup = htab_map_gen_lookup, 2035 .map_seq_show_elem = htab_map_seq_show_elem, 2036 .map_set_for_each_callback_args = map_set_for_each_callback_args, 2037 .map_for_each_callback = bpf_for_each_hash_elem, 2038 BATCH_OPS(htab), 2039 .map_btf_name = "bpf_htab", 2040 .map_btf_id = &htab_map_btf_id, 2041 .iter_seq_info = &iter_seq_info, 2042 }; 2043 2044 static int htab_lru_map_btf_id; 2045 const struct bpf_map_ops htab_lru_map_ops = { 2046 .map_meta_equal = 
static int htab_map_btf_id;
const struct bpf_map_ops htab_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	BATCH_OPS(htab),
	.map_btf_name = "bpf_htab",
	.map_btf_id = &htab_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int htab_lru_map_btf_id;
const struct bpf_map_ops htab_lru_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_gen_lookup = htab_lru_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	BATCH_OPS(htab_lru),
	.map_btf_name = "bpf_htab",
	.map_btf_id = &htab_lru_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	/* We do not mark LRU map element here in order to not mess up
	 * eviction heuristics when user space does a map walk.
	 */
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
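/* Usage note (not part of the original file): a user-space lookup on a
 * per-cpu hash map must supply a buffer large enough for every possible CPU,
 * with each per-CPU slot rounded up to 8 bytes exactly as done above.  A
 * rough user-space sketch (libbpf; identifiers are illustrative):
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	size_t slot = (value_size + 7) & ~7;	// round_up(value_size, 8)
 *	void *values = calloc(ncpus, slot);
 *
 *	if (values && !bpf_map_lookup_elem(map_fd, &key, values)) {
 *		// values + cpu * slot holds the value seen by that CPU
 *	}
 *	free(values);
 */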
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
					  struct seq_file *m)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	l = __htab_map_lookup_elem(map, key);
	if (!l) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	seq_puts(m, ": {\n");
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int htab_percpu_map_btf_id;
const struct bpf_map_ops htab_percpu_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	BATCH_OPS(htab_percpu),
	.map_btf_name = "bpf_htab",
	.map_btf_id = &htab_percpu_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int htab_lru_percpu_map_btf_id;
const struct bpf_map_ops htab_lru_percpu_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	BATCH_OPS(htab_lru_percpu),
	.map_btf_name = "bpf_htab",
	.map_btf_id = &htab_lru_percpu_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int fd_htab_map_alloc_check(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return htab_map_alloc_check(attr);
}

static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(ptr);
		}
	}

	htab_map_free(map);
}

/* only called from syscall */
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	ptr = htab_map_lookup_elem(map, key);
	if (ptr)
		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(ptr);

	return ret;
}

static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
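/* Usage note (not part of the original file): for a hash-of-maps the value
 * handed back to a BPF program is the inner map itself, so element access is
 * a two-step lookup.  A hedged BPF-side sketch (map/key names illustrative):
 *
 *	void *inner_map;
 *	__u32 *val;
 *
 *	inner_map = bpf_map_lookup_elem(&outer_hash, &outer_key);
 *	if (!inner_map)
 *		return 0;
 *	val = bpf_map_lookup_elem(inner_map, &inner_key);
 *
 * htab_of_map_gen_lookup() below lets the verifier inline the first step as a
 * direct __htab_map_lookup_elem() call followed by a load of the stored inner
 * map pointer, roughly what htab_of_map_lookup_elem() does here.
 */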
static int htab_of_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);

	return insn - insn_buf;
}

static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}

static int htab_of_maps_map_btf_id;
const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_alloc_check = fd_htab_map_alloc_check,
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = htab_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_htab",
	.map_btf_id = &htab_of_maps_map_btf_id,
};
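/* Usage note (not part of the original file): creating a HASH_OF_MAPS map
 * from user space must pass value_size == sizeof(u32) (enforced by
 * fd_htab_map_alloc_check()) and an inner_map_fd template consumed by
 * htab_of_map_alloc().  A hedged raw-syscall sketch (sizes illustrative):
 *
 *	union bpf_attr attr = {
 *		.map_type	= BPF_MAP_TYPE_HASH_OF_MAPS,
 *		.key_size	= sizeof(__u32),
 *		.value_size	= sizeof(__u32),	// must be 4: it carries an fd
 *		.max_entries	= 64,
 *		.inner_map_fd	= inner_fd,		// fd of a template inner map
 *	};
 *	int outer_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * Element updates then pass inner map fds as values; see
 * bpf_fd_htab_map_update_elem() above.
 */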