// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/random.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"
#include <linux/bpf_mem_alloc.h>

#define HTAB_CREATE_FLAG_MASK						\
	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
	 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)

#define BATCH_OPS(_name)			\
	.map_lookup_batch =			\
	_name##_map_lookup_batch,		\
	.map_lookup_and_delete_batch =		\
	_name##_map_lookup_and_delete_batch,	\
	.map_update_batch =			\
	generic_map_update_batch,		\
	.map_delete_batch =			\
	generic_map_delete_batch

/*
 * The bucket lock has two protection scopes:
 *
 * 1) Serializing concurrent operations from BPF programs on different
 *    CPUs
 *
 * 2) Serializing concurrent operations from BPF programs and sys_bpf()
 *
 * BPF programs can execute in any context including perf, kprobes and
 * tracing. As there are almost no limits on where perf, kprobes and
 * tracing can be invoked from, the lock operations need to be protected
 * against deadlocks. Deadlocks can be caused by recursion and by an
 * invocation in the lock held section when functions which acquire this
 * lock are invoked from sys_bpf(). BPF recursion is prevented by
 * incrementing the per CPU variable bpf_prog_active, which prevents BPF
 * programs attached to perf events, kprobes and tracing from being
 * invoked before the prior invocation from one of these contexts has
 * completed. sys_bpf() uses the same mechanism by pinning the task to
 * the current CPU and incrementing the recursion protection across the
 * map operation.
 *
 * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
 * operations like memory allocations (even with GFP_ATOMIC) from atomic
 * contexts. This is required because even with GFP_ATOMIC the memory
 * allocator calls into code paths which acquire locks with long held lock
 * sections. To ensure deterministic behaviour these locks are regular
 * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
 * true atomic contexts on an RT kernel are the low level hardware
 * handling, scheduling, low level interrupt handling, NMIs etc. None of
 * these contexts should ever do memory allocations.
 *
 * As regular device interrupt handlers and soft interrupts are forced into
 * thread context, the existing code which does
 *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
 * just works.
 *
 * In theory the BPF locks could be converted to regular spinlocks as well,
 * but the bucket locks and percpu_freelist locks can be taken from
 * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
 * atomic contexts even on RT. Before the introduction of bpf_mem_alloc,
 * it was therefore only safe to use raw spinlocks for preallocated hash
 * maps on an RT kernel, because no memory was allocated within the lock
 * held sections. Now that the hash map has been fully converted to
 * bpf_mem_alloc, non-preallocated hash maps no longer allocate memory
 * synchronously under the lock either, so it is safe to always use a raw
 * spinlock for the bucket lock.
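 *
 * A minimal, illustrative-only sketch of the recursion case that the
 * per-CPU map_locked counters (see htab_lock_bucket() below) guard
 * against; the program names are hypothetical:
 *
 *   prog_A (tracing):
 *     htab_map_update_elem()
 *       htab_lock_bucket()            // map_locked[i]: 0 -> 1, lock taken
 *         ... bucket lock held ...
 *         prog_B fires on the same CPU and touches the same map:
 *           htab_map_update_elem()
 *             htab_lock_bucket()      // map_locked[i]: 1 -> 2,
 *                                     // returns -EBUSY instead of
 *                                     // spinning on the held lock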
77 */ 78 struct bucket { 79 struct hlist_nulls_head head; 80 raw_spinlock_t raw_lock; 81 }; 82 83 #define HASHTAB_MAP_LOCK_COUNT 8 84 #define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1) 85 86 struct bpf_htab { 87 struct bpf_map map; 88 struct bpf_mem_alloc ma; 89 struct bpf_mem_alloc pcpu_ma; 90 struct bucket *buckets; 91 void *elems; 92 union { 93 struct pcpu_freelist freelist; 94 struct bpf_lru lru; 95 }; 96 struct htab_elem *__percpu *extra_elems; 97 /* number of elements in non-preallocated hashtable are kept 98 * in either pcount or count 99 */ 100 struct percpu_counter pcount; 101 atomic_t count; 102 bool use_percpu_counter; 103 u32 n_buckets; /* number of hash buckets */ 104 u32 elem_size; /* size of each element in bytes */ 105 u32 hashrnd; 106 struct lock_class_key lockdep_key; 107 int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT]; 108 }; 109 110 /* each htab element is struct htab_elem + key + value */ 111 struct htab_elem { 112 union { 113 struct hlist_nulls_node hash_node; 114 struct { 115 void *padding; 116 union { 117 struct pcpu_freelist_node fnode; 118 struct htab_elem *batch_flink; 119 }; 120 }; 121 }; 122 union { 123 /* pointer to per-cpu pointer */ 124 void *ptr_to_pptr; 125 struct bpf_lru_node lru_node; 126 }; 127 u32 hash; 128 char key[] __aligned(8); 129 }; 130 131 static inline bool htab_is_prealloc(const struct bpf_htab *htab) 132 { 133 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); 134 } 135 136 static void htab_init_buckets(struct bpf_htab *htab) 137 { 138 unsigned int i; 139 140 for (i = 0; i < htab->n_buckets; i++) { 141 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); 142 raw_spin_lock_init(&htab->buckets[i].raw_lock); 143 lockdep_set_class(&htab->buckets[i].raw_lock, 144 &htab->lockdep_key); 145 cond_resched(); 146 } 147 } 148 149 static inline int htab_lock_bucket(const struct bpf_htab *htab, 150 struct bucket *b, u32 hash, 151 unsigned long *pflags) 152 { 153 unsigned long flags; 154 155 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); 156 157 preempt_disable(); 158 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) { 159 __this_cpu_dec(*(htab->map_locked[hash])); 160 preempt_enable(); 161 return -EBUSY; 162 } 163 164 raw_spin_lock_irqsave(&b->raw_lock, flags); 165 *pflags = flags; 166 167 return 0; 168 } 169 170 static inline void htab_unlock_bucket(const struct bpf_htab *htab, 171 struct bucket *b, u32 hash, 172 unsigned long flags) 173 { 174 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); 175 raw_spin_unlock_irqrestore(&b->raw_lock, flags); 176 __this_cpu_dec(*(htab->map_locked[hash])); 177 preempt_enable(); 178 } 179 180 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node); 181 182 static bool htab_is_lru(const struct bpf_htab *htab) 183 { 184 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || 185 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; 186 } 187 188 static bool htab_is_percpu(const struct bpf_htab *htab) 189 { 190 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || 191 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; 192 } 193 194 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size, 195 void __percpu *pptr) 196 { 197 *(void __percpu **)(l->key + key_size) = pptr; 198 } 199 200 static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size) 201 { 202 return *(void __percpu **)(l->key + key_size); 203 } 204 205 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l) 206 { 207 
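	/* For fd-based hash maps (e.g. BPF_MAP_TYPE_HASH_OF_MAPS) the value
	 * slot of the element holds a kernel pointer to the object the
	 * user-supplied fd referred to (see the map_fd_put_ptr() and
	 * map_fd_sys_lookup_elem() uses elsewhere in this file), not the fd
	 * itself.
	 */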
return *(void **)(l->key + roundup(map->key_size, 8)); 208 } 209 210 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i) 211 { 212 return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size); 213 } 214 215 static bool htab_has_extra_elems(struct bpf_htab *htab) 216 { 217 return !htab_is_percpu(htab) && !htab_is_lru(htab); 218 } 219 220 static void htab_free_prealloced_timers(struct bpf_htab *htab) 221 { 222 u32 num_entries = htab->map.max_entries; 223 int i; 224 225 if (!btf_record_has_field(htab->map.record, BPF_TIMER)) 226 return; 227 if (htab_has_extra_elems(htab)) 228 num_entries += num_possible_cpus(); 229 230 for (i = 0; i < num_entries; i++) { 231 struct htab_elem *elem; 232 233 elem = get_htab_elem(htab, i); 234 bpf_obj_free_timer(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); 235 cond_resched(); 236 } 237 } 238 239 static void htab_free_prealloced_fields(struct bpf_htab *htab) 240 { 241 u32 num_entries = htab->map.max_entries; 242 int i; 243 244 if (IS_ERR_OR_NULL(htab->map.record)) 245 return; 246 if (htab_has_extra_elems(htab)) 247 num_entries += num_possible_cpus(); 248 for (i = 0; i < num_entries; i++) { 249 struct htab_elem *elem; 250 251 elem = get_htab_elem(htab, i); 252 if (htab_is_percpu(htab)) { 253 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); 254 int cpu; 255 256 for_each_possible_cpu(cpu) { 257 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); 258 cond_resched(); 259 } 260 } else { 261 bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); 262 cond_resched(); 263 } 264 cond_resched(); 265 } 266 } 267 268 static void htab_free_elems(struct bpf_htab *htab) 269 { 270 int i; 271 272 if (!htab_is_percpu(htab)) 273 goto free_elems; 274 275 for (i = 0; i < htab->map.max_entries; i++) { 276 void __percpu *pptr; 277 278 pptr = htab_elem_get_ptr(get_htab_elem(htab, i), 279 htab->map.key_size); 280 free_percpu(pptr); 281 cond_resched(); 282 } 283 free_elems: 284 bpf_map_area_free(htab->elems); 285 } 286 287 /* The LRU list has a lock (lru_lock). Each htab bucket has a lock 288 * (bucket_lock). If both locks need to be acquired together, the lock 289 * order is always lru_lock -> bucket_lock and this only happens in 290 * bpf_lru_list.c logic. For example, certain code path of 291 * bpf_lru_pop_free(), which is called by function prealloc_lru_pop(), 292 * will acquire lru_lock first followed by acquiring bucket_lock. 293 * 294 * In hashtab.c, to avoid deadlock, lock acquisition of 295 * bucket_lock followed by lru_lock is not allowed. In such cases, 296 * bucket_lock needs to be released first before acquiring lru_lock. 
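 *
 * As a concrete example of the second rule: htab_lru_map_update_elem()
 * calls prealloc_lru_pop() (which may take lru_lock while shrinking the
 * LRU) before it takes the bucket lock, instead of allocating while the
 * bucket lock is held.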
297 */ 298 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, 299 u32 hash) 300 { 301 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash); 302 struct htab_elem *l; 303 304 if (node) { 305 l = container_of(node, struct htab_elem, lru_node); 306 memcpy(l->key, key, htab->map.key_size); 307 return l; 308 } 309 310 return NULL; 311 } 312 313 static int prealloc_init(struct bpf_htab *htab) 314 { 315 u32 num_entries = htab->map.max_entries; 316 int err = -ENOMEM, i; 317 318 if (htab_has_extra_elems(htab)) 319 num_entries += num_possible_cpus(); 320 321 htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries, 322 htab->map.numa_node); 323 if (!htab->elems) 324 return -ENOMEM; 325 326 if (!htab_is_percpu(htab)) 327 goto skip_percpu_elems; 328 329 for (i = 0; i < num_entries; i++) { 330 u32 size = round_up(htab->map.value_size, 8); 331 void __percpu *pptr; 332 333 pptr = bpf_map_alloc_percpu(&htab->map, size, 8, 334 GFP_USER | __GFP_NOWARN); 335 if (!pptr) 336 goto free_elems; 337 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, 338 pptr); 339 cond_resched(); 340 } 341 342 skip_percpu_elems: 343 if (htab_is_lru(htab)) 344 err = bpf_lru_init(&htab->lru, 345 htab->map.map_flags & BPF_F_NO_COMMON_LRU, 346 offsetof(struct htab_elem, hash) - 347 offsetof(struct htab_elem, lru_node), 348 htab_lru_map_delete_node, 349 htab); 350 else 351 err = pcpu_freelist_init(&htab->freelist); 352 353 if (err) 354 goto free_elems; 355 356 if (htab_is_lru(htab)) 357 bpf_lru_populate(&htab->lru, htab->elems, 358 offsetof(struct htab_elem, lru_node), 359 htab->elem_size, num_entries); 360 else 361 pcpu_freelist_populate(&htab->freelist, 362 htab->elems + offsetof(struct htab_elem, fnode), 363 htab->elem_size, num_entries); 364 365 return 0; 366 367 free_elems: 368 htab_free_elems(htab); 369 return err; 370 } 371 372 static void prealloc_destroy(struct bpf_htab *htab) 373 { 374 htab_free_elems(htab); 375 376 if (htab_is_lru(htab)) 377 bpf_lru_destroy(&htab->lru); 378 else 379 pcpu_freelist_destroy(&htab->freelist); 380 } 381 382 static int alloc_extra_elems(struct bpf_htab *htab) 383 { 384 struct htab_elem *__percpu *pptr, *l_new; 385 struct pcpu_freelist_node *l; 386 int cpu; 387 388 pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8, 389 GFP_USER | __GFP_NOWARN); 390 if (!pptr) 391 return -ENOMEM; 392 393 for_each_possible_cpu(cpu) { 394 l = pcpu_freelist_pop(&htab->freelist); 395 /* pop will succeed, since prealloc_init() 396 * preallocated extra num_possible_cpus elements 397 */ 398 l_new = container_of(l, struct htab_elem, fnode); 399 *per_cpu_ptr(pptr, cpu) = l_new; 400 } 401 htab->extra_elems = pptr; 402 return 0; 403 } 404 405 /* Called from syscall */ 406 static int htab_map_alloc_check(union bpf_attr *attr) 407 { 408 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || 409 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); 410 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || 411 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); 412 /* percpu_lru means each cpu has its own LRU list. 413 * it is different from BPF_MAP_TYPE_PERCPU_HASH where 414 * the map's value itself is percpu. percpu_lru has 415 * nothing to do with the map's value. 
416 */ 417 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); 418 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); 419 bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED); 420 int numa_node = bpf_map_attr_numa_node(attr); 421 422 BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) != 423 offsetof(struct htab_elem, hash_node.pprev)); 424 425 if (zero_seed && !capable(CAP_SYS_ADMIN)) 426 /* Guard against local DoS, and discourage production use. */ 427 return -EPERM; 428 429 if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK || 430 !bpf_map_flags_access_ok(attr->map_flags)) 431 return -EINVAL; 432 433 if (!lru && percpu_lru) 434 return -EINVAL; 435 436 if (lru && !prealloc) 437 return -ENOTSUPP; 438 439 if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru)) 440 return -EINVAL; 441 442 /* check sanity of attributes. 443 * value_size == 0 may be allowed in the future to use map as a set 444 */ 445 if (attr->max_entries == 0 || attr->key_size == 0 || 446 attr->value_size == 0) 447 return -EINVAL; 448 449 if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE - 450 sizeof(struct htab_elem)) 451 /* if key_size + value_size is bigger, the user space won't be 452 * able to access the elements via bpf syscall. This check 453 * also makes sure that the elem_size doesn't overflow and it's 454 * kmalloc-able later in htab_map_update_elem() 455 */ 456 return -E2BIG; 457 458 return 0; 459 } 460 461 static struct bpf_map *htab_map_alloc(union bpf_attr *attr) 462 { 463 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || 464 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); 465 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || 466 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); 467 /* percpu_lru means each cpu has its own LRU list. 468 * it is different from BPF_MAP_TYPE_PERCPU_HASH where 469 * the map's value itself is percpu. percpu_lru has 470 * nothing to do with the map's value. 471 */ 472 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); 473 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); 474 struct bpf_htab *htab; 475 int err, i; 476 477 htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE); 478 if (!htab) 479 return ERR_PTR(-ENOMEM); 480 481 lockdep_register_key(&htab->lockdep_key); 482 483 bpf_map_init_from_attr(&htab->map, attr); 484 485 if (percpu_lru) { 486 /* ensure each CPU's lru list has >=1 elements. 487 * since we are at it, make each lru list has the same 488 * number of elements. 
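 *
 * Worked example (numbers are illustrative only): with 16 possible CPUs
 * and attr->max_entries = 1000, max_entries is rounded up to 1008 so
 * each per-CPU LRU list gets 63 elements. The rounddown() fallback below
 * only matters if rounding up would overflow the u32 max_entries.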
489 */ 490 htab->map.max_entries = roundup(attr->max_entries, 491 num_possible_cpus()); 492 if (htab->map.max_entries < attr->max_entries) 493 htab->map.max_entries = rounddown(attr->max_entries, 494 num_possible_cpus()); 495 } 496 497 /* hash table size must be power of 2 */ 498 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); 499 500 htab->elem_size = sizeof(struct htab_elem) + 501 round_up(htab->map.key_size, 8); 502 if (percpu) 503 htab->elem_size += sizeof(void *); 504 else 505 htab->elem_size += round_up(htab->map.value_size, 8); 506 507 err = -E2BIG; 508 /* prevent zero size kmalloc and check for u32 overflow */ 509 if (htab->n_buckets == 0 || 510 htab->n_buckets > U32_MAX / sizeof(struct bucket)) 511 goto free_htab; 512 513 err = -ENOMEM; 514 htab->buckets = bpf_map_area_alloc(htab->n_buckets * 515 sizeof(struct bucket), 516 htab->map.numa_node); 517 if (!htab->buckets) 518 goto free_htab; 519 520 for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) { 521 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map, 522 sizeof(int), 523 sizeof(int), 524 GFP_USER); 525 if (!htab->map_locked[i]) 526 goto free_map_locked; 527 } 528 529 if (htab->map.map_flags & BPF_F_ZERO_SEED) 530 htab->hashrnd = 0; 531 else 532 htab->hashrnd = get_random_u32(); 533 534 htab_init_buckets(htab); 535 536 /* compute_batch_value() computes batch value as num_online_cpus() * 2 537 * and __percpu_counter_compare() needs 538 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus() 539 * for percpu_counter to be faster than atomic_t. In practice the average bpf 540 * hash map size is 10k, which means that a system with 64 cpus will fill 541 * hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore 542 * define our own batch count as 32 then 10k hash map can be filled up to 80%: 543 * 10k - 8k > 32 _batch_ * 64 _cpus_ 544 * and __percpu_counter_compare() will still be fast. At that point hash map 545 * collisions will dominate its performance anyway. Assume that hash map filled 546 * to 50+% isn't going to be O(1) and use the following formula to choose 547 * between percpu_counter and atomic_t. 548 */ 549 #define PERCPU_COUNTER_BATCH 32 550 if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH) 551 htab->use_percpu_counter = true; 552 553 if (htab->use_percpu_counter) { 554 err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL); 555 if (err) 556 goto free_map_locked; 557 } 558 559 if (prealloc) { 560 err = prealloc_init(htab); 561 if (err) 562 goto free_map_locked; 563 564 if (!percpu && !lru) { 565 /* lru itself can remove the least used element, so 566 * there is no need for an extra elem during map_update. 
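 *
 * For the plain (non-LRU, non-percpu) preallocated case the extra
 * elements do matter: alloc_htab_elem() swaps the per-CPU extra element
 * with the element being replaced, so an update of an existing key never
 * needs to pop from or push to the freelist.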
567 */ 568 err = alloc_extra_elems(htab); 569 if (err) 570 goto free_prealloc; 571 } 572 } else { 573 err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false); 574 if (err) 575 goto free_map_locked; 576 if (percpu) { 577 err = bpf_mem_alloc_init(&htab->pcpu_ma, 578 round_up(htab->map.value_size, 8), true); 579 if (err) 580 goto free_map_locked; 581 } 582 } 583 584 return &htab->map; 585 586 free_prealloc: 587 prealloc_destroy(htab); 588 free_map_locked: 589 if (htab->use_percpu_counter) 590 percpu_counter_destroy(&htab->pcount); 591 for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) 592 free_percpu(htab->map_locked[i]); 593 bpf_map_area_free(htab->buckets); 594 bpf_mem_alloc_destroy(&htab->pcpu_ma); 595 bpf_mem_alloc_destroy(&htab->ma); 596 free_htab: 597 lockdep_unregister_key(&htab->lockdep_key); 598 bpf_map_area_free(htab); 599 return ERR_PTR(err); 600 } 601 602 static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd) 603 { 604 if (likely(key_len % 4 == 0)) 605 return jhash2(key, key_len / 4, hashrnd); 606 return jhash(key, key_len, hashrnd); 607 } 608 609 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) 610 { 611 return &htab->buckets[hash & (htab->n_buckets - 1)]; 612 } 613 614 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash) 615 { 616 return &__select_bucket(htab, hash)->head; 617 } 618 619 /* this lookup function can only be called with bucket lock taken */ 620 static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash, 621 void *key, u32 key_size) 622 { 623 struct hlist_nulls_node *n; 624 struct htab_elem *l; 625 626 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) 627 if (l->hash == hash && !memcmp(&l->key, key, key_size)) 628 return l; 629 630 return NULL; 631 } 632 633 /* can be called without bucket lock. it will repeat the loop in 634 * the unlikely event when elements moved from one bucket into another 635 * while link list is being walked 636 */ 637 static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head, 638 u32 hash, void *key, 639 u32 key_size, u32 n_buckets) 640 { 641 struct hlist_nulls_node *n; 642 struct htab_elem *l; 643 644 again: 645 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) 646 if (l->hash == hash && !memcmp(&l->key, key, key_size)) 647 return l; 648 649 if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1)))) 650 goto again; 651 652 return NULL; 653 } 654 655 /* Called from syscall or from eBPF program directly, so 656 * arguments have to match bpf_map_lookup_elem() exactly. 657 * The return value is adjusted by BPF instructions 658 * in htab_map_gen_lookup(). 659 */ 660 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) 661 { 662 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 663 struct hlist_nulls_head *head; 664 struct htab_elem *l; 665 u32 hash, key_size; 666 667 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 668 !rcu_read_lock_bh_held()); 669 670 key_size = map->key_size; 671 672 hash = htab_map_hash(key, key_size, htab->hashrnd); 673 674 head = select_bucket(htab, hash); 675 676 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); 677 678 return l; 679 } 680 681 static void *htab_map_lookup_elem(struct bpf_map *map, void *key) 682 { 683 struct htab_elem *l = __htab_map_lookup_elem(map, key); 684 685 if (l) 686 return l->key + round_up(map->key_size, 8); 687 688 return NULL; 689 } 690 691 /* inline bpf_map_lookup_elem() call. 
692 * Instead of: 693 * bpf_prog 694 * bpf_map_lookup_elem 695 * map->ops->map_lookup_elem 696 * htab_map_lookup_elem 697 * __htab_map_lookup_elem 698 * do: 699 * bpf_prog 700 * __htab_map_lookup_elem 701 */ 702 static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) 703 { 704 struct bpf_insn *insn = insn_buf; 705 const int ret = BPF_REG_0; 706 707 BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem, 708 (void *(*)(struct bpf_map *map, void *key))NULL)); 709 *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem); 710 *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1); 711 *insn++ = BPF_ALU64_IMM(BPF_ADD, ret, 712 offsetof(struct htab_elem, key) + 713 round_up(map->key_size, 8)); 714 return insn - insn_buf; 715 } 716 717 static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map, 718 void *key, const bool mark) 719 { 720 struct htab_elem *l = __htab_map_lookup_elem(map, key); 721 722 if (l) { 723 if (mark) 724 bpf_lru_node_set_ref(&l->lru_node); 725 return l->key + round_up(map->key_size, 8); 726 } 727 728 return NULL; 729 } 730 731 static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key) 732 { 733 return __htab_lru_map_lookup_elem(map, key, true); 734 } 735 736 static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key) 737 { 738 return __htab_lru_map_lookup_elem(map, key, false); 739 } 740 741 static int htab_lru_map_gen_lookup(struct bpf_map *map, 742 struct bpf_insn *insn_buf) 743 { 744 struct bpf_insn *insn = insn_buf; 745 const int ret = BPF_REG_0; 746 const int ref_reg = BPF_REG_1; 747 748 BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem, 749 (void *(*)(struct bpf_map *map, void *key))NULL)); 750 *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem); 751 *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4); 752 *insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret, 753 offsetof(struct htab_elem, lru_node) + 754 offsetof(struct bpf_lru_node, ref)); 755 *insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1); 756 *insn++ = BPF_ST_MEM(BPF_B, ret, 757 offsetof(struct htab_elem, lru_node) + 758 offsetof(struct bpf_lru_node, ref), 759 1); 760 *insn++ = BPF_ALU64_IMM(BPF_ADD, ret, 761 offsetof(struct htab_elem, key) + 762 round_up(map->key_size, 8)); 763 return insn - insn_buf; 764 } 765 766 static void check_and_free_fields(struct bpf_htab *htab, 767 struct htab_elem *elem) 768 { 769 if (htab_is_percpu(htab)) { 770 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); 771 int cpu; 772 773 for_each_possible_cpu(cpu) 774 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); 775 } else { 776 void *map_value = elem->key + round_up(htab->map.key_size, 8); 777 778 bpf_obj_free_fields(htab->map.record, map_value); 779 } 780 } 781 782 /* It is called from the bpf_lru_list when the LRU needs to delete 783 * older elements from the htab. 
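 *
 * The return value tells the LRU layer whether the target node was
 * actually unlinked from its bucket: true means the node can be recycled
 * for a new element, false (e.g. htab_lock_bucket() returned -EBUSY
 * because of recursion on this CPU) means it could not be removed.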
784 */ 785 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node) 786 { 787 struct bpf_htab *htab = arg; 788 struct htab_elem *l = NULL, *tgt_l; 789 struct hlist_nulls_head *head; 790 struct hlist_nulls_node *n; 791 unsigned long flags; 792 struct bucket *b; 793 int ret; 794 795 tgt_l = container_of(node, struct htab_elem, lru_node); 796 b = __select_bucket(htab, tgt_l->hash); 797 head = &b->head; 798 799 ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags); 800 if (ret) 801 return false; 802 803 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) 804 if (l == tgt_l) { 805 hlist_nulls_del_rcu(&l->hash_node); 806 check_and_free_fields(htab, l); 807 break; 808 } 809 810 htab_unlock_bucket(htab, b, tgt_l->hash, flags); 811 812 return l == tgt_l; 813 } 814 815 /* Called from syscall */ 816 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) 817 { 818 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 819 struct hlist_nulls_head *head; 820 struct htab_elem *l, *next_l; 821 u32 hash, key_size; 822 int i = 0; 823 824 WARN_ON_ONCE(!rcu_read_lock_held()); 825 826 key_size = map->key_size; 827 828 if (!key) 829 goto find_first_elem; 830 831 hash = htab_map_hash(key, key_size, htab->hashrnd); 832 833 head = select_bucket(htab, hash); 834 835 /* lookup the key */ 836 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); 837 838 if (!l) 839 goto find_first_elem; 840 841 /* key was found, get next key in the same bucket */ 842 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)), 843 struct htab_elem, hash_node); 844 845 if (next_l) { 846 /* if next elem in this hash list is non-zero, just return it */ 847 memcpy(next_key, next_l->key, key_size); 848 return 0; 849 } 850 851 /* no more elements in this hash list, go to the next bucket */ 852 i = hash & (htab->n_buckets - 1); 853 i++; 854 855 find_first_elem: 856 /* iterate over buckets */ 857 for (; i < htab->n_buckets; i++) { 858 head = select_bucket(htab, i); 859 860 /* pick first element in the bucket */ 861 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)), 862 struct htab_elem, hash_node); 863 if (next_l) { 864 /* if it's not empty, just return it */ 865 memcpy(next_key, next_l->key, key_size); 866 return 0; 867 } 868 } 869 870 /* iterated over all buckets and all elements */ 871 return -ENOENT; 872 } 873 874 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l) 875 { 876 check_and_free_fields(htab, l); 877 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) 878 bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr); 879 bpf_mem_cache_free(&htab->ma, l); 880 } 881 882 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) 883 { 884 struct bpf_map *map = &htab->map; 885 void *ptr; 886 887 if (map->ops->map_fd_put_ptr) { 888 ptr = fd_htab_map_get_ptr(map, l); 889 map->ops->map_fd_put_ptr(ptr); 890 } 891 } 892 893 static bool is_map_full(struct bpf_htab *htab) 894 { 895 if (htab->use_percpu_counter) 896 return __percpu_counter_compare(&htab->pcount, htab->map.max_entries, 897 PERCPU_COUNTER_BATCH) >= 0; 898 return atomic_read(&htab->count) >= htab->map.max_entries; 899 } 900 901 static void inc_elem_count(struct bpf_htab *htab) 902 { 903 if (htab->use_percpu_counter) 904 percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH); 905 else 906 atomic_inc(&htab->count); 907 } 908 909 static void dec_elem_count(struct bpf_htab *htab) 910 { 911 if 
(htab->use_percpu_counter) 912 percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH); 913 else 914 atomic_dec(&htab->count); 915 } 916 917 918 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) 919 { 920 htab_put_fd_value(htab, l); 921 922 if (htab_is_prealloc(htab)) { 923 check_and_free_fields(htab, l); 924 __pcpu_freelist_push(&htab->freelist, &l->fnode); 925 } else { 926 dec_elem_count(htab); 927 htab_elem_free(htab, l); 928 } 929 } 930 931 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr, 932 void *value, bool onallcpus) 933 { 934 if (!onallcpus) { 935 /* copy true value_size bytes */ 936 copy_map_value(&htab->map, this_cpu_ptr(pptr), value); 937 } else { 938 u32 size = round_up(htab->map.value_size, 8); 939 int off = 0, cpu; 940 941 for_each_possible_cpu(cpu) { 942 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off); 943 off += size; 944 } 945 } 946 } 947 948 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr, 949 void *value, bool onallcpus) 950 { 951 /* When not setting the initial value on all cpus, zero-fill element 952 * values for other cpus. Otherwise, bpf program has no way to ensure 953 * known initial values for cpus other than current one 954 * (onallcpus=false always when coming from bpf prog). 955 */ 956 if (!onallcpus) { 957 int current_cpu = raw_smp_processor_id(); 958 int cpu; 959 960 for_each_possible_cpu(cpu) { 961 if (cpu == current_cpu) 962 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value); 963 else /* Since elem is preallocated, we cannot touch special fields */ 964 zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu)); 965 } 966 } else { 967 pcpu_copy_value(htab, pptr, value, onallcpus); 968 } 969 } 970 971 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab) 972 { 973 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && 974 BITS_PER_LONG == 64; 975 } 976 977 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, 978 void *value, u32 key_size, u32 hash, 979 bool percpu, bool onallcpus, 980 struct htab_elem *old_elem) 981 { 982 u32 size = htab->map.value_size; 983 bool prealloc = htab_is_prealloc(htab); 984 struct htab_elem *l_new, **pl_new; 985 void __percpu *pptr; 986 987 if (prealloc) { 988 if (old_elem) { 989 /* if we're updating the existing element, 990 * use per-cpu extra elems to avoid freelist_pop/push 991 */ 992 pl_new = this_cpu_ptr(htab->extra_elems); 993 l_new = *pl_new; 994 htab_put_fd_value(htab, old_elem); 995 *pl_new = old_elem; 996 } else { 997 struct pcpu_freelist_node *l; 998 999 l = __pcpu_freelist_pop(&htab->freelist); 1000 if (!l) 1001 return ERR_PTR(-E2BIG); 1002 l_new = container_of(l, struct htab_elem, fnode); 1003 } 1004 } else { 1005 if (is_map_full(htab)) 1006 if (!old_elem) 1007 /* when map is full and update() is replacing 1008 * old element, it's ok to allocate, since 1009 * old element will be freed immediately. 
1010 * Otherwise return an error 1011 */ 1012 return ERR_PTR(-E2BIG); 1013 inc_elem_count(htab); 1014 l_new = bpf_mem_cache_alloc(&htab->ma); 1015 if (!l_new) { 1016 l_new = ERR_PTR(-ENOMEM); 1017 goto dec_count; 1018 } 1019 } 1020 1021 memcpy(l_new->key, key, key_size); 1022 if (percpu) { 1023 if (prealloc) { 1024 pptr = htab_elem_get_ptr(l_new, key_size); 1025 } else { 1026 /* alloc_percpu zero-fills */ 1027 pptr = bpf_mem_cache_alloc(&htab->pcpu_ma); 1028 if (!pptr) { 1029 bpf_mem_cache_free(&htab->ma, l_new); 1030 l_new = ERR_PTR(-ENOMEM); 1031 goto dec_count; 1032 } 1033 l_new->ptr_to_pptr = pptr; 1034 pptr = *(void **)pptr; 1035 } 1036 1037 pcpu_init_value(htab, pptr, value, onallcpus); 1038 1039 if (!prealloc) 1040 htab_elem_set_ptr(l_new, key_size, pptr); 1041 } else if (fd_htab_map_needs_adjust(htab)) { 1042 size = round_up(size, 8); 1043 memcpy(l_new->key + round_up(key_size, 8), value, size); 1044 } else { 1045 copy_map_value(&htab->map, 1046 l_new->key + round_up(key_size, 8), 1047 value); 1048 } 1049 1050 l_new->hash = hash; 1051 return l_new; 1052 dec_count: 1053 dec_elem_count(htab); 1054 return l_new; 1055 } 1056 1057 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old, 1058 u64 map_flags) 1059 { 1060 if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST) 1061 /* elem already exists */ 1062 return -EEXIST; 1063 1064 if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST) 1065 /* elem doesn't exist, cannot update it */ 1066 return -ENOENT; 1067 1068 return 0; 1069 } 1070 1071 /* Called from syscall or from eBPF program */ 1072 static long htab_map_update_elem(struct bpf_map *map, void *key, void *value, 1073 u64 map_flags) 1074 { 1075 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1076 struct htab_elem *l_new = NULL, *l_old; 1077 struct hlist_nulls_head *head; 1078 unsigned long flags; 1079 struct bucket *b; 1080 u32 key_size, hash; 1081 int ret; 1082 1083 if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST)) 1084 /* unknown flags */ 1085 return -EINVAL; 1086 1087 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1088 !rcu_read_lock_bh_held()); 1089 1090 key_size = map->key_size; 1091 1092 hash = htab_map_hash(key, key_size, htab->hashrnd); 1093 1094 b = __select_bucket(htab, hash); 1095 head = &b->head; 1096 1097 if (unlikely(map_flags & BPF_F_LOCK)) { 1098 if (unlikely(!btf_record_has_field(map->record, BPF_SPIN_LOCK))) 1099 return -EINVAL; 1100 /* find an element without taking the bucket lock */ 1101 l_old = lookup_nulls_elem_raw(head, hash, key, key_size, 1102 htab->n_buckets); 1103 ret = check_flags(htab, l_old, map_flags); 1104 if (ret) 1105 return ret; 1106 if (l_old) { 1107 /* grab the element lock and update value in place */ 1108 copy_map_value_locked(map, 1109 l_old->key + round_up(key_size, 8), 1110 value, false); 1111 return 0; 1112 } 1113 /* fall through, grab the bucket lock and lookup again. 1114 * 99.9% chance that the element won't be found, 1115 * but second lookup under lock has to be done. 1116 */ 1117 } 1118 1119 ret = htab_lock_bucket(htab, b, hash, &flags); 1120 if (ret) 1121 return ret; 1122 1123 l_old = lookup_elem_raw(head, hash, key, key_size); 1124 1125 ret = check_flags(htab, l_old, map_flags); 1126 if (ret) 1127 goto err; 1128 1129 if (unlikely(l_old && (map_flags & BPF_F_LOCK))) { 1130 /* first lookup without the bucket lock didn't find the element, 1131 * but second lookup with the bucket lock found it. 
1132 * This case is highly unlikely, but has to be dealt with: 1133 * grab the element lock in addition to the bucket lock 1134 * and update element in place 1135 */ 1136 copy_map_value_locked(map, 1137 l_old->key + round_up(key_size, 8), 1138 value, false); 1139 ret = 0; 1140 goto err; 1141 } 1142 1143 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, 1144 l_old); 1145 if (IS_ERR(l_new)) { 1146 /* all pre-allocated elements are in use or memory exhausted */ 1147 ret = PTR_ERR(l_new); 1148 goto err; 1149 } 1150 1151 /* add new element to the head of the list, so that 1152 * concurrent search will find it before old elem 1153 */ 1154 hlist_nulls_add_head_rcu(&l_new->hash_node, head); 1155 if (l_old) { 1156 hlist_nulls_del_rcu(&l_old->hash_node); 1157 if (!htab_is_prealloc(htab)) 1158 free_htab_elem(htab, l_old); 1159 else 1160 check_and_free_fields(htab, l_old); 1161 } 1162 ret = 0; 1163 err: 1164 htab_unlock_bucket(htab, b, hash, flags); 1165 return ret; 1166 } 1167 1168 static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem) 1169 { 1170 check_and_free_fields(htab, elem); 1171 bpf_lru_push_free(&htab->lru, &elem->lru_node); 1172 } 1173 1174 static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, 1175 u64 map_flags) 1176 { 1177 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1178 struct htab_elem *l_new, *l_old = NULL; 1179 struct hlist_nulls_head *head; 1180 unsigned long flags; 1181 struct bucket *b; 1182 u32 key_size, hash; 1183 int ret; 1184 1185 if (unlikely(map_flags > BPF_EXIST)) 1186 /* unknown flags */ 1187 return -EINVAL; 1188 1189 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1190 !rcu_read_lock_bh_held()); 1191 1192 key_size = map->key_size; 1193 1194 hash = htab_map_hash(key, key_size, htab->hashrnd); 1195 1196 b = __select_bucket(htab, hash); 1197 head = &b->head; 1198 1199 /* For LRU, we need to alloc before taking bucket's 1200 * spinlock because getting free nodes from LRU may need 1201 * to remove older elements from htab and this removal 1202 * operation will need a bucket lock. 
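 * (This preserves the lock order documented above prealloc_lru_pop():
 * lru_lock may be followed by bucket_lock, never the reverse.)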
1203 */ 1204 l_new = prealloc_lru_pop(htab, key, hash); 1205 if (!l_new) 1206 return -ENOMEM; 1207 copy_map_value(&htab->map, 1208 l_new->key + round_up(map->key_size, 8), value); 1209 1210 ret = htab_lock_bucket(htab, b, hash, &flags); 1211 if (ret) 1212 goto err_lock_bucket; 1213 1214 l_old = lookup_elem_raw(head, hash, key, key_size); 1215 1216 ret = check_flags(htab, l_old, map_flags); 1217 if (ret) 1218 goto err; 1219 1220 /* add new element to the head of the list, so that 1221 * concurrent search will find it before old elem 1222 */ 1223 hlist_nulls_add_head_rcu(&l_new->hash_node, head); 1224 if (l_old) { 1225 bpf_lru_node_set_ref(&l_new->lru_node); 1226 hlist_nulls_del_rcu(&l_old->hash_node); 1227 } 1228 ret = 0; 1229 1230 err: 1231 htab_unlock_bucket(htab, b, hash, flags); 1232 1233 err_lock_bucket: 1234 if (ret) 1235 htab_lru_push_free(htab, l_new); 1236 else if (l_old) 1237 htab_lru_push_free(htab, l_old); 1238 1239 return ret; 1240 } 1241 1242 static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key, 1243 void *value, u64 map_flags, 1244 bool onallcpus) 1245 { 1246 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1247 struct htab_elem *l_new = NULL, *l_old; 1248 struct hlist_nulls_head *head; 1249 unsigned long flags; 1250 struct bucket *b; 1251 u32 key_size, hash; 1252 int ret; 1253 1254 if (unlikely(map_flags > BPF_EXIST)) 1255 /* unknown flags */ 1256 return -EINVAL; 1257 1258 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1259 !rcu_read_lock_bh_held()); 1260 1261 key_size = map->key_size; 1262 1263 hash = htab_map_hash(key, key_size, htab->hashrnd); 1264 1265 b = __select_bucket(htab, hash); 1266 head = &b->head; 1267 1268 ret = htab_lock_bucket(htab, b, hash, &flags); 1269 if (ret) 1270 return ret; 1271 1272 l_old = lookup_elem_raw(head, hash, key, key_size); 1273 1274 ret = check_flags(htab, l_old, map_flags); 1275 if (ret) 1276 goto err; 1277 1278 if (l_old) { 1279 /* per-cpu hash map can update value in-place */ 1280 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), 1281 value, onallcpus); 1282 } else { 1283 l_new = alloc_htab_elem(htab, key, value, key_size, 1284 hash, true, onallcpus, NULL); 1285 if (IS_ERR(l_new)) { 1286 ret = PTR_ERR(l_new); 1287 goto err; 1288 } 1289 hlist_nulls_add_head_rcu(&l_new->hash_node, head); 1290 } 1291 ret = 0; 1292 err: 1293 htab_unlock_bucket(htab, b, hash, flags); 1294 return ret; 1295 } 1296 1297 static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, 1298 void *value, u64 map_flags, 1299 bool onallcpus) 1300 { 1301 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1302 struct htab_elem *l_new = NULL, *l_old; 1303 struct hlist_nulls_head *head; 1304 unsigned long flags; 1305 struct bucket *b; 1306 u32 key_size, hash; 1307 int ret; 1308 1309 if (unlikely(map_flags > BPF_EXIST)) 1310 /* unknown flags */ 1311 return -EINVAL; 1312 1313 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1314 !rcu_read_lock_bh_held()); 1315 1316 key_size = map->key_size; 1317 1318 hash = htab_map_hash(key, key_size, htab->hashrnd); 1319 1320 b = __select_bucket(htab, hash); 1321 head = &b->head; 1322 1323 /* For LRU, we need to alloc before taking bucket's 1324 * spinlock because LRU's elem alloc may need 1325 * to remove older elem from htab and this removal 1326 * operation will need a bucket lock. 
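 * The BPF_EXIST case below is the exception: it may only update an
 * element that already exists, so no new LRU node has to be popped
 * before the bucket lock is taken.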
1327 */ 1328 if (map_flags != BPF_EXIST) { 1329 l_new = prealloc_lru_pop(htab, key, hash); 1330 if (!l_new) 1331 return -ENOMEM; 1332 } 1333 1334 ret = htab_lock_bucket(htab, b, hash, &flags); 1335 if (ret) 1336 goto err_lock_bucket; 1337 1338 l_old = lookup_elem_raw(head, hash, key, key_size); 1339 1340 ret = check_flags(htab, l_old, map_flags); 1341 if (ret) 1342 goto err; 1343 1344 if (l_old) { 1345 bpf_lru_node_set_ref(&l_old->lru_node); 1346 1347 /* per-cpu hash map can update value in-place */ 1348 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), 1349 value, onallcpus); 1350 } else { 1351 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size), 1352 value, onallcpus); 1353 hlist_nulls_add_head_rcu(&l_new->hash_node, head); 1354 l_new = NULL; 1355 } 1356 ret = 0; 1357 err: 1358 htab_unlock_bucket(htab, b, hash, flags); 1359 err_lock_bucket: 1360 if (l_new) 1361 bpf_lru_push_free(&htab->lru, &l_new->lru_node); 1362 return ret; 1363 } 1364 1365 static long htab_percpu_map_update_elem(struct bpf_map *map, void *key, 1366 void *value, u64 map_flags) 1367 { 1368 return __htab_percpu_map_update_elem(map, key, value, map_flags, false); 1369 } 1370 1371 static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, 1372 void *value, u64 map_flags) 1373 { 1374 return __htab_lru_percpu_map_update_elem(map, key, value, map_flags, 1375 false); 1376 } 1377 1378 /* Called from syscall or from eBPF program */ 1379 static long htab_map_delete_elem(struct bpf_map *map, void *key) 1380 { 1381 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1382 struct hlist_nulls_head *head; 1383 struct bucket *b; 1384 struct htab_elem *l; 1385 unsigned long flags; 1386 u32 hash, key_size; 1387 int ret; 1388 1389 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1390 !rcu_read_lock_bh_held()); 1391 1392 key_size = map->key_size; 1393 1394 hash = htab_map_hash(key, key_size, htab->hashrnd); 1395 b = __select_bucket(htab, hash); 1396 head = &b->head; 1397 1398 ret = htab_lock_bucket(htab, b, hash, &flags); 1399 if (ret) 1400 return ret; 1401 1402 l = lookup_elem_raw(head, hash, key, key_size); 1403 1404 if (l) { 1405 hlist_nulls_del_rcu(&l->hash_node); 1406 free_htab_elem(htab, l); 1407 } else { 1408 ret = -ENOENT; 1409 } 1410 1411 htab_unlock_bucket(htab, b, hash, flags); 1412 return ret; 1413 } 1414 1415 static long htab_lru_map_delete_elem(struct bpf_map *map, void *key) 1416 { 1417 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1418 struct hlist_nulls_head *head; 1419 struct bucket *b; 1420 struct htab_elem *l; 1421 unsigned long flags; 1422 u32 hash, key_size; 1423 int ret; 1424 1425 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1426 !rcu_read_lock_bh_held()); 1427 1428 key_size = map->key_size; 1429 1430 hash = htab_map_hash(key, key_size, htab->hashrnd); 1431 b = __select_bucket(htab, hash); 1432 head = &b->head; 1433 1434 ret = htab_lock_bucket(htab, b, hash, &flags); 1435 if (ret) 1436 return ret; 1437 1438 l = lookup_elem_raw(head, hash, key, key_size); 1439 1440 if (l) 1441 hlist_nulls_del_rcu(&l->hash_node); 1442 else 1443 ret = -ENOENT; 1444 1445 htab_unlock_bucket(htab, b, hash, flags); 1446 if (l) 1447 htab_lru_push_free(htab, l); 1448 return ret; 1449 } 1450 1451 static void delete_all_elements(struct bpf_htab *htab) 1452 { 1453 int i; 1454 1455 /* It's called from a worker thread, so disable migration here, 1456 * since bpf_mem_cache_free() relies on that. 
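 * (bpf_mem_cache_free() returns the object to a per-CPU cache, so the
 * task must not migrate between CPUs while the buckets are drained.)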
 */
	migrate_disable();
	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			htab_elem_free(htab, l);
		}
	}
	migrate_enable();
}

static void htab_free_malloced_timers(struct bpf_htab *htab)
{
	int i;

	rcu_read_lock();
	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry(l, n, head, hash_node) {
			/* We only free the timer when the uref count drops to zero */
			bpf_obj_free_timer(htab->map.record, l->key + round_up(htab->map.key_size, 8));
		}
		cond_resched_rcu();
	}
	rcu_read_unlock();
}

static void htab_map_free_timers(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* We only free the timer when the uref count drops to zero */
	if (!btf_record_has_field(htab->map.record, BPF_TIMER))
		return;
	if (!htab_is_prealloc(htab))
		htab_free_malloced_timers(htab);
	else
		htab_free_prealloced_timers(htab);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int i;

	/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
	 * bpf_free_used_maps() is called after bpf prog is no longer executing.
	 * There is no need to synchronize_rcu() here to protect map elements.
	 */

	/* htab no longer uses call_rcu() directly. bpf_mem_alloc does it
	 * underneath and is responsible for waiting for callbacks to finish
	 * during bpf_mem_alloc_destroy().
1518 */ 1519 if (!htab_is_prealloc(htab)) { 1520 delete_all_elements(htab); 1521 } else { 1522 htab_free_prealloced_fields(htab); 1523 prealloc_destroy(htab); 1524 } 1525 1526 free_percpu(htab->extra_elems); 1527 bpf_map_area_free(htab->buckets); 1528 bpf_mem_alloc_destroy(&htab->pcpu_ma); 1529 bpf_mem_alloc_destroy(&htab->ma); 1530 if (htab->use_percpu_counter) 1531 percpu_counter_destroy(&htab->pcount); 1532 for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) 1533 free_percpu(htab->map_locked[i]); 1534 lockdep_unregister_key(&htab->lockdep_key); 1535 bpf_map_area_free(htab); 1536 } 1537 1538 static void htab_map_seq_show_elem(struct bpf_map *map, void *key, 1539 struct seq_file *m) 1540 { 1541 void *value; 1542 1543 rcu_read_lock(); 1544 1545 value = htab_map_lookup_elem(map, key); 1546 if (!value) { 1547 rcu_read_unlock(); 1548 return; 1549 } 1550 1551 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); 1552 seq_puts(m, ": "); 1553 btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); 1554 seq_puts(m, "\n"); 1555 1556 rcu_read_unlock(); 1557 } 1558 1559 static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, 1560 void *value, bool is_lru_map, 1561 bool is_percpu, u64 flags) 1562 { 1563 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1564 struct hlist_nulls_head *head; 1565 unsigned long bflags; 1566 struct htab_elem *l; 1567 u32 hash, key_size; 1568 struct bucket *b; 1569 int ret; 1570 1571 key_size = map->key_size; 1572 1573 hash = htab_map_hash(key, key_size, htab->hashrnd); 1574 b = __select_bucket(htab, hash); 1575 head = &b->head; 1576 1577 ret = htab_lock_bucket(htab, b, hash, &bflags); 1578 if (ret) 1579 return ret; 1580 1581 l = lookup_elem_raw(head, hash, key, key_size); 1582 if (!l) { 1583 ret = -ENOENT; 1584 } else { 1585 if (is_percpu) { 1586 u32 roundup_value_size = round_up(map->value_size, 8); 1587 void __percpu *pptr; 1588 int off = 0, cpu; 1589 1590 pptr = htab_elem_get_ptr(l, key_size); 1591 for_each_possible_cpu(cpu) { 1592 copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu)); 1593 check_and_init_map_value(&htab->map, value + off); 1594 off += roundup_value_size; 1595 } 1596 } else { 1597 u32 roundup_key_size = round_up(map->key_size, 8); 1598 1599 if (flags & BPF_F_LOCK) 1600 copy_map_value_locked(map, value, l->key + 1601 roundup_key_size, 1602 true); 1603 else 1604 copy_map_value(map, value, l->key + 1605 roundup_key_size); 1606 /* Zeroing special fields in the temp buffer */ 1607 check_and_init_map_value(map, value); 1608 } 1609 1610 hlist_nulls_del_rcu(&l->hash_node); 1611 if (!is_lru_map) 1612 free_htab_elem(htab, l); 1613 } 1614 1615 htab_unlock_bucket(htab, b, hash, bflags); 1616 1617 if (is_lru_map && l) 1618 htab_lru_push_free(htab, l); 1619 1620 return ret; 1621 } 1622 1623 static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, 1624 void *value, u64 flags) 1625 { 1626 return __htab_map_lookup_and_delete_elem(map, key, value, false, false, 1627 flags); 1628 } 1629 1630 static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map, 1631 void *key, void *value, 1632 u64 flags) 1633 { 1634 return __htab_map_lookup_and_delete_elem(map, key, value, false, true, 1635 flags); 1636 } 1637 1638 static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key, 1639 void *value, u64 flags) 1640 { 1641 return __htab_map_lookup_and_delete_elem(map, key, value, true, false, 1642 flags); 1643 } 1644 1645 static int htab_lru_percpu_map_lookup_and_delete_elem(struct 
bpf_map *map, 1646 void *key, void *value, 1647 u64 flags) 1648 { 1649 return __htab_map_lookup_and_delete_elem(map, key, value, true, true, 1650 flags); 1651 } 1652 1653 static int 1654 __htab_map_lookup_and_delete_batch(struct bpf_map *map, 1655 const union bpf_attr *attr, 1656 union bpf_attr __user *uattr, 1657 bool do_delete, bool is_lru_map, 1658 bool is_percpu) 1659 { 1660 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1661 u32 bucket_cnt, total, key_size, value_size, roundup_key_size; 1662 void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val; 1663 void __user *uvalues = u64_to_user_ptr(attr->batch.values); 1664 void __user *ukeys = u64_to_user_ptr(attr->batch.keys); 1665 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); 1666 u32 batch, max_count, size, bucket_size, map_id; 1667 struct htab_elem *node_to_free = NULL; 1668 u64 elem_map_flags, map_flags; 1669 struct hlist_nulls_head *head; 1670 struct hlist_nulls_node *n; 1671 unsigned long flags = 0; 1672 bool locked = false; 1673 struct htab_elem *l; 1674 struct bucket *b; 1675 int ret = 0; 1676 1677 elem_map_flags = attr->batch.elem_flags; 1678 if ((elem_map_flags & ~BPF_F_LOCK) || 1679 ((elem_map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))) 1680 return -EINVAL; 1681 1682 map_flags = attr->batch.flags; 1683 if (map_flags) 1684 return -EINVAL; 1685 1686 max_count = attr->batch.count; 1687 if (!max_count) 1688 return 0; 1689 1690 if (put_user(0, &uattr->batch.count)) 1691 return -EFAULT; 1692 1693 batch = 0; 1694 if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch))) 1695 return -EFAULT; 1696 1697 if (batch >= htab->n_buckets) 1698 return -ENOENT; 1699 1700 key_size = htab->map.key_size; 1701 roundup_key_size = round_up(htab->map.key_size, 8); 1702 value_size = htab->map.value_size; 1703 size = round_up(value_size, 8); 1704 if (is_percpu) 1705 value_size = size * num_possible_cpus(); 1706 total = 0; 1707 /* while experimenting with hash tables with sizes ranging from 10 to 1708 * 1000, it was observed that a bucket can have up to 5 entries. 1709 */ 1710 bucket_size = 5; 1711 1712 alloc: 1713 /* We cannot do copy_from_user or copy_to_user inside 1714 * the rcu_read_lock. Allocate enough space here. 1715 */ 1716 keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN); 1717 values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN); 1718 if (!keys || !values) { 1719 ret = -ENOMEM; 1720 goto after_loop; 1721 } 1722 1723 again: 1724 bpf_disable_instrumentation(); 1725 rcu_read_lock(); 1726 again_nocopy: 1727 dst_key = keys; 1728 dst_val = values; 1729 b = &htab->buckets[batch]; 1730 head = &b->head; 1731 /* do not grab the lock unless need it (bucket_cnt > 0). */ 1732 if (locked) { 1733 ret = htab_lock_bucket(htab, b, batch, &flags); 1734 if (ret) { 1735 rcu_read_unlock(); 1736 bpf_enable_instrumentation(); 1737 goto after_loop; 1738 } 1739 } 1740 1741 bucket_cnt = 0; 1742 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) 1743 bucket_cnt++; 1744 1745 if (bucket_cnt && !locked) { 1746 locked = true; 1747 goto again_nocopy; 1748 } 1749 1750 if (bucket_cnt > (max_count - total)) { 1751 if (total == 0) 1752 ret = -ENOSPC; 1753 /* Note that since bucket_cnt > 0 here, it is implicit 1754 * that the locked was grabbed, so release it. 
1755 */ 1756 htab_unlock_bucket(htab, b, batch, flags); 1757 rcu_read_unlock(); 1758 bpf_enable_instrumentation(); 1759 goto after_loop; 1760 } 1761 1762 if (bucket_cnt > bucket_size) { 1763 bucket_size = bucket_cnt; 1764 /* Note that since bucket_cnt > 0 here, it is implicit 1765 * that the locked was grabbed, so release it. 1766 */ 1767 htab_unlock_bucket(htab, b, batch, flags); 1768 rcu_read_unlock(); 1769 bpf_enable_instrumentation(); 1770 kvfree(keys); 1771 kvfree(values); 1772 goto alloc; 1773 } 1774 1775 /* Next block is only safe to run if you have grabbed the lock */ 1776 if (!locked) 1777 goto next_batch; 1778 1779 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { 1780 memcpy(dst_key, l->key, key_size); 1781 1782 if (is_percpu) { 1783 int off = 0, cpu; 1784 void __percpu *pptr; 1785 1786 pptr = htab_elem_get_ptr(l, map->key_size); 1787 for_each_possible_cpu(cpu) { 1788 copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu)); 1789 check_and_init_map_value(&htab->map, dst_val + off); 1790 off += size; 1791 } 1792 } else { 1793 value = l->key + roundup_key_size; 1794 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { 1795 struct bpf_map **inner_map = value; 1796 1797 /* Actual value is the id of the inner map */ 1798 map_id = map->ops->map_fd_sys_lookup_elem(*inner_map); 1799 value = &map_id; 1800 } 1801 1802 if (elem_map_flags & BPF_F_LOCK) 1803 copy_map_value_locked(map, dst_val, value, 1804 true); 1805 else 1806 copy_map_value(map, dst_val, value); 1807 /* Zeroing special fields in the temp buffer */ 1808 check_and_init_map_value(map, dst_val); 1809 } 1810 if (do_delete) { 1811 hlist_nulls_del_rcu(&l->hash_node); 1812 1813 /* bpf_lru_push_free() will acquire lru_lock, which 1814 * may cause deadlock. See comments in function 1815 * prealloc_lru_pop(). Let us do bpf_lru_push_free() 1816 * after releasing the bucket lock. 1817 */ 1818 if (is_lru_map) { 1819 l->batch_flink = node_to_free; 1820 node_to_free = l; 1821 } else { 1822 free_htab_elem(htab, l); 1823 } 1824 } 1825 dst_key += key_size; 1826 dst_val += value_size; 1827 } 1828 1829 htab_unlock_bucket(htab, b, batch, flags); 1830 locked = false; 1831 1832 while (node_to_free) { 1833 l = node_to_free; 1834 node_to_free = node_to_free->batch_flink; 1835 htab_lru_push_free(htab, l); 1836 } 1837 1838 next_batch: 1839 /* If we are not copying data, we can go to next bucket and avoid 1840 * unlocking the rcu. 
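 * bucket_cnt == 0 also means the bucket lock was never taken for this
 * bucket, so only the RCU read section spans the jump back to
 * again_nocopy.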
1841 */ 1842 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) { 1843 batch++; 1844 goto again_nocopy; 1845 } 1846 1847 rcu_read_unlock(); 1848 bpf_enable_instrumentation(); 1849 if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys, 1850 key_size * bucket_cnt) || 1851 copy_to_user(uvalues + total * value_size, values, 1852 value_size * bucket_cnt))) { 1853 ret = -EFAULT; 1854 goto after_loop; 1855 } 1856 1857 total += bucket_cnt; 1858 batch++; 1859 if (batch >= htab->n_buckets) { 1860 ret = -ENOENT; 1861 goto after_loop; 1862 } 1863 goto again; 1864 1865 after_loop: 1866 if (ret == -EFAULT) 1867 goto out; 1868 1869 /* copy # of entries and next batch */ 1870 ubatch = u64_to_user_ptr(attr->batch.out_batch); 1871 if (copy_to_user(ubatch, &batch, sizeof(batch)) || 1872 put_user(total, &uattr->batch.count)) 1873 ret = -EFAULT; 1874 1875 out: 1876 kvfree(keys); 1877 kvfree(values); 1878 return ret; 1879 } 1880 1881 static int 1882 htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, 1883 union bpf_attr __user *uattr) 1884 { 1885 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, 1886 false, true); 1887 } 1888 1889 static int 1890 htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map, 1891 const union bpf_attr *attr, 1892 union bpf_attr __user *uattr) 1893 { 1894 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, 1895 false, true); 1896 } 1897 1898 static int 1899 htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, 1900 union bpf_attr __user *uattr) 1901 { 1902 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, 1903 false, false); 1904 } 1905 1906 static int 1907 htab_map_lookup_and_delete_batch(struct bpf_map *map, 1908 const union bpf_attr *attr, 1909 union bpf_attr __user *uattr) 1910 { 1911 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, 1912 false, false); 1913 } 1914 1915 static int 1916 htab_lru_percpu_map_lookup_batch(struct bpf_map *map, 1917 const union bpf_attr *attr, 1918 union bpf_attr __user *uattr) 1919 { 1920 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, 1921 true, true); 1922 } 1923 1924 static int 1925 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map, 1926 const union bpf_attr *attr, 1927 union bpf_attr __user *uattr) 1928 { 1929 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, 1930 true, true); 1931 } 1932 1933 static int 1934 htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, 1935 union bpf_attr __user *uattr) 1936 { 1937 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, 1938 true, false); 1939 } 1940 1941 static int 1942 htab_lru_map_lookup_and_delete_batch(struct bpf_map *map, 1943 const union bpf_attr *attr, 1944 union bpf_attr __user *uattr) 1945 { 1946 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, 1947 true, false); 1948 } 1949 1950 struct bpf_iter_seq_hash_map_info { 1951 struct bpf_map *map; 1952 struct bpf_htab *htab; 1953 void *percpu_value_buf; // non-zero means percpu hash 1954 u32 bucket_id; 1955 u32 skip_elems; 1956 }; 1957 1958 static struct htab_elem * 1959 bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info, 1960 struct htab_elem *prev_elem) 1961 { 1962 const struct bpf_htab *htab = info->htab; 1963 u32 skip_elems = info->skip_elems; 1964 u32 bucket_id = info->bucket_id; 1965 struct hlist_nulls_head *head; 1966 struct hlist_nulls_node *n; 1967 struct htab_elem *elem; 1968 struct bucket *b; 1969 u32 i, count; 
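	/* Resume the walk from (bucket_id, skip_elems): first try to continue
	 * inside prev_elem's bucket, otherwise scan forward bucket by bucket
	 * under rcu_read_lock(), skipping the elements that were already
	 * shown.
	 */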
struct bpf_iter_seq_hash_map_info {
	struct bpf_map *map;
	struct bpf_htab *htab;
	void *percpu_value_buf; // non-NULL means percpu hash
	u32 bucket_id;
	u32 skip_elems;
};

static struct htab_elem *
bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
			   struct htab_elem *prev_elem)
{
	const struct bpf_htab *htab = info->htab;
	u32 skip_elems = info->skip_elems;
	u32 bucket_id = info->bucket_id;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	struct htab_elem *elem;
	struct bucket *b;
	u32 i, count;

	if (bucket_id >= htab->n_buckets)
		return NULL;

	/* try to find next elem in the same bucket */
	if (prev_elem) {
		/* no update/deletion on this bucket, prev_elem should still be
		 * valid and we won't skip elements.
		 */
		n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
		elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
		if (elem)
			return elem;

		/* not found, unlock and go to the next bucket */
		b = &htab->buckets[bucket_id++];
		rcu_read_unlock();
		skip_elems = 0;
	}

	for (i = bucket_id; i < htab->n_buckets; i++) {
		b = &htab->buckets[i];
		rcu_read_lock();

		count = 0;
		head = &b->head;
		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
			if (count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return elem;
			}
			count++;
		}

		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_hash_map_info *info = seq->private;
	struct htab_elem *elem;

	elem = bpf_hash_map_seq_find_next(info, NULL);
	if (!elem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return elem;
}

static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_hash_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_hash_map_seq_find_next(info, v);
}

static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
{
	struct bpf_iter_seq_hash_map_info *info = seq->private;
	u32 roundup_key_size, roundup_value_size;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	int ret = 0, off = 0, cpu;
	struct bpf_prog *prog;
	void __percpu *pptr;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, elem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (elem) {
			roundup_key_size = round_up(map->key_size, 8);
			ctx.key = elem->key;
			if (!info->percpu_value_buf) {
				ctx.value = elem->key + roundup_key_size;
			} else {
				roundup_value_size = round_up(map->value_size, 8);
				pptr = htab_elem_get_ptr(elem, map->key_size);
				for_each_possible_cpu(cpu) {
					copy_map_value_long(map, info->percpu_value_buf + off,
							    per_cpu_ptr(pptr, cpu));
					check_and_init_map_value(map, info->percpu_value_buf + off);
					off += roundup_value_size;
				}
				ctx.value = info->percpu_value_buf;
			}
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_hash_map_seq_show(seq, v);
}

static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_hash_map_seq_show(seq, NULL);
	else
		rcu_read_unlock();
}

static int bpf_iter_init_hash_map(void *priv_data,
				  struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	seq_info->htab = container_of(map, struct bpf_htab, map);
	return 0;
}

static void bpf_iter_fini_hash_map(void *priv_data)
{
	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_hash_map_seq_ops = {
	.start	= bpf_hash_map_seq_start,
	.next	= bpf_hash_map_seq_next,
	.stop	= bpf_hash_map_seq_stop,
	.show	= bpf_hash_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_hash_map_seq_ops,
	.init_seq_private	= bpf_iter_init_hash_map,
	.fini_seq_private	= bpf_iter_fini_hash_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_hash_map_info),
};
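
/*
 * Illustrative sketch (not part of this file): iter_seq_info above is what
 * lets a "bpf_map_elem" iterator be attached to a hash map, so its elements
 * can be dumped through a bpf_iter link. A hypothetical iterator program for
 * a map with u32 keys and u64 values (names and types are assumptions; for
 * per-cpu maps ctx->value points at the per-CPU snapshot assembled in
 * __bpf_hash_map_seq_show()):
 *
 *	SEC("iter/bpf_map_elem")
 *	int dump_htab(struct bpf_iter__bpf_map_elem *ctx)
 *	{
 *		__u32 *key = ctx->key;
 *		__u64 *val = ctx->value;
 *
 *		if (!key || !val)	// final call, after the last element
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "%u: %llu\n", *key, *val);
 *		return 0;
 *	}
 */
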
static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				   void *callback_ctx, u64 flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	struct htab_elem *elem;
	u32 roundup_key_size;
	int i, num_elems = 0;
	void __percpu *pptr;
	struct bucket *b;
	void *key, *val;
	bool is_percpu;
	u64 ret = 0;

	if (flags != 0)
		return -EINVAL;

	is_percpu = htab_is_percpu(htab);

	roundup_key_size = round_up(map->key_size, 8);
	/* disable migration so percpu value prepared here will be the
	 * same as the one seen by the bpf program with bpf_map_lookup_elem().
	 */
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < htab->n_buckets; i++) {
		b = &htab->buckets[i];
		rcu_read_lock();
		head = &b->head;
		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
			key = elem->key;
			if (is_percpu) {
				/* current cpu value for percpu map */
				pptr = htab_elem_get_ptr(elem, map->key_size);
				val = this_cpu_ptr(pptr);
			} else {
				val = elem->key + roundup_key_size;
			}
			num_elems++;
			ret = callback_fn((u64)(long)map, (u64)(long)key,
					  (u64)(long)val, (u64)(long)callback_ctx, 0);
			/* return value: 0 - continue, 1 - stop and return */
			if (ret) {
				rcu_read_unlock();
				goto out;
			}
		}
		rcu_read_unlock();
	}
out:
	if (is_percpu)
		migrate_enable();
	return num_elems;
}
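
/*
 * Illustrative sketch (not part of this file): bpf_for_each_hash_elem() is
 * the map_for_each_callback implementation behind the bpf_for_each_map_elem()
 * helper for hash maps. A hypothetical BPF-side caller and callback (u32 keys
 * and u64 values assumed); returning 1 from the callback stops the walk, as
 * handled above:
 *
 *	static __u64 count_large(struct bpf_map *map, __u32 *key, __u64 *val,
 *				 void *data)
 *	{
 *		long *n = data;
 *
 *		if (*val > 100)
 *			(*n)++;
 *		return 0;	// 0 - continue, 1 - stop
 *	}
 *
 *	// inside a BPF program:
 *	long n = 0;
 *
 *	bpf_for_each_map_elem(&my_hash_map, count_large, &n, 0);
 */
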
static u64 htab_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	u32 value_size = round_up(htab->map.value_size, 8);
	bool prealloc = htab_is_prealloc(htab);
	bool percpu = htab_is_percpu(htab);
	bool lru = htab_is_lru(htab);
	u64 num_entries;
	u64 usage = sizeof(struct bpf_htab);

	usage += sizeof(struct bucket) * htab->n_buckets;
	usage += sizeof(int) * num_possible_cpus() * HASHTAB_MAP_LOCK_COUNT;
	if (prealloc) {
		num_entries = map->max_entries;
		if (htab_has_extra_elems(htab))
			num_entries += num_possible_cpus();

		usage += htab->elem_size * num_entries;

		if (percpu)
			usage += value_size * num_possible_cpus() * num_entries;
		else if (!lru)
			usage += sizeof(struct htab_elem *) * num_possible_cpus();
	} else {
#define LLIST_NODE_SZ sizeof(struct llist_node)

		num_entries = htab->use_percpu_counter ?
			      percpu_counter_sum(&htab->pcount) :
			      atomic_read(&htab->count);
		usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
		if (percpu) {
			usage += (LLIST_NODE_SZ + sizeof(void *)) * num_entries;
			usage += value_size * num_possible_cpus() * num_entries;
		}
	}
	return usage;
}

BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
const struct bpf_map_ops htab_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_release_uref = htab_map_free_timers,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops htab_lru_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_release_uref = htab_map_free_timers,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_gen_lookup = htab_lru_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab_lru),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct htab_elem *l;

	if (cpu >= nr_cpu_ids)
		return NULL;

	l = __htab_map_lookup_elem(map, key);
	if (l)
		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct htab_elem *l;

	if (cpu >= nr_cpu_ids)
		return NULL;

	l = __htab_map_lookup_elem(map, key);
	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	/* We do not mark LRU map element here in order to not mess up
	 * eviction heuristics when user space does a map walk.
	 */
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
		check_and_init_map_value(map, value + off);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
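
/*
 * Illustrative sketch (not part of this file): bpf_percpu_hash_copy() serves
 * user-space lookups on per-cpu hash maps, so the caller's value buffer must
 * hold round_up(value_size, 8) bytes for every possible CPU. A hypothetical
 * libbpf-based lookup for a map with 8-byte values (names are assumptions):
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(ncpus, sizeof(__u64));
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, vals)) {
 *		for (int cpu = 0; cpu < ncpus; cpu++)
 *			printf("cpu%d: %llu\n", cpu,
 *			       (unsigned long long)vals[cpu]);
 *	}
 */
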
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
					  struct seq_file *m)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	l = __htab_map_lookup_elem(map, key);
	if (!l) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	seq_puts(m, ": {\n");
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

const struct bpf_map_ops htab_percpu_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab_percpu),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops htab_lru_percpu_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab_lru_percpu),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_htab_map_alloc_check(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return htab_map_alloc_check(attr);
}

static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(ptr);
		}
	}

	htab_map_free(map);
}

/* only called from syscall */
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	ptr = htab_map_lookup_elem(map, key);
	if (ptr)
		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(ptr);

	return ret;
}
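
/*
 * Illustrative sketch (not part of this file): for fd-based hash maps
 * (BPF_MAP_TYPE_HASH_OF_MAPS) the two syscall helpers above translate between
 * the user-space and kernel views of the value: an update passes in the fd of
 * an inner map, while a syscall-side lookup returns that inner map's id. A
 * hypothetical user-space sequence (error handling omitted, names are
 * assumptions):
 *
 *	__u32 key = 1, inner_id;
 *
 *	// value written is an inner-map fd ...
 *	bpf_map_update_elem(outer_map_fd, &key, &inner_map_fd, BPF_ANY);
 *	// ... but the value read back is the inner map's id
 *	bpf_map_lookup_elem(outer_map_fd, &key, &inner_id);
 *
 * BPF programs instead receive a pointer to the inner map itself, via
 * htab_of_map_lookup_elem()/htab_of_map_gen_lookup() below.
 */
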
static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static int htab_of_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);

	return insn - insn_buf;
}

static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}

const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_alloc_check = fd_htab_map_alloc_check,
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = htab_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab),
	.map_btf_id = &htab_map_btf_ids[0],
};