1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 * Copyright (c) 2016 Facebook 4 */ 5 #include <linux/bpf.h> 6 #include <linux/btf.h> 7 #include <linux/jhash.h> 8 #include <linux/filter.h> 9 #include <linux/rculist_nulls.h> 10 #include <linux/random.h> 11 #include <uapi/linux/btf.h> 12 #include <linux/rcupdate_trace.h> 13 #include <linux/btf_ids.h> 14 #include "percpu_freelist.h" 15 #include "bpf_lru_list.h" 16 #include "map_in_map.h" 17 #include <linux/bpf_mem_alloc.h> 18 19 #define HTAB_CREATE_FLAG_MASK \ 20 (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \ 21 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED) 22 23 #define BATCH_OPS(_name) \ 24 .map_lookup_batch = \ 25 _name##_map_lookup_batch, \ 26 .map_lookup_and_delete_batch = \ 27 _name##_map_lookup_and_delete_batch, \ 28 .map_update_batch = \ 29 generic_map_update_batch, \ 30 .map_delete_batch = \ 31 generic_map_delete_batch 32 33 /* 34 * The bucket lock has two protection scopes: 35 * 36 * 1) Serializing concurrent operations from BPF programs on different 37 * CPUs 38 * 39 * 2) Serializing concurrent operations from BPF programs and sys_bpf() 40 * 41 * BPF programs can execute in any context including perf, kprobes and 42 * tracing. As there are almost no limits where perf, kprobes and tracing 43 * can be invoked from the lock operations need to be protected against 44 * deadlocks. Deadlocks can be caused by recursion and by an invocation in 45 * the lock held section when functions which acquire this lock are invoked 46 * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU 47 * variable bpf_prog_active, which prevents BPF programs attached to perf 48 * events, kprobes and tracing to be invoked before the prior invocation 49 * from one of these contexts completed. sys_bpf() uses the same mechanism 50 * by pinning the task to the current CPU and incrementing the recursion 51 * protection across the map operation. 52 * 53 * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain 54 * operations like memory allocations (even with GFP_ATOMIC) from atomic 55 * contexts. This is required because even with GFP_ATOMIC the memory 56 * allocator calls into code paths which acquire locks with long held lock 57 * sections. To ensure the deterministic behaviour these locks are regular 58 * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only 59 * true atomic contexts on an RT kernel are the low level hardware 60 * handling, scheduling, low level interrupt handling, NMIs etc. None of 61 * these contexts should ever do memory allocations. 62 * 63 * As regular device interrupt handlers and soft interrupts are forced into 64 * thread context, the existing code which does 65 * spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*(); 66 * just works. 67 * 68 * In theory the BPF locks could be converted to regular spinlocks as well, 69 * but the bucket locks and percpu_freelist locks can be taken from 70 * arbitrary contexts (perf, kprobes, tracepoints) which are required to be 71 * atomic contexts even on RT. Before the introduction of bpf_mem_alloc, 72 * it is only safe to use raw spinlock for preallocated hash map on a RT kernel, 73 * because there is no memory allocation within the lock held sections. However 74 * after hash map was fully converted to use bpf_mem_alloc, there will be 75 * non-synchronous memory allocation for non-preallocated hash map, so it is 76 * safe to always use raw spinlock for bucket lock. 
77 */ 78 struct bucket { 79 struct hlist_nulls_head head; 80 raw_spinlock_t raw_lock; 81 }; 82 83 #define HASHTAB_MAP_LOCK_COUNT 8 84 #define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1) 85 86 struct bpf_htab { 87 struct bpf_map map; 88 struct bpf_mem_alloc ma; 89 struct bpf_mem_alloc pcpu_ma; 90 struct bucket *buckets; 91 void *elems; 92 union { 93 struct pcpu_freelist freelist; 94 struct bpf_lru lru; 95 }; 96 struct htab_elem *__percpu *extra_elems; 97 /* number of elements in non-preallocated hashtable are kept 98 * in either pcount or count 99 */ 100 struct percpu_counter pcount; 101 atomic_t count; 102 bool use_percpu_counter; 103 u32 n_buckets; /* number of hash buckets */ 104 u32 elem_size; /* size of each element in bytes */ 105 u32 hashrnd; 106 struct lock_class_key lockdep_key; 107 int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT]; 108 }; 109 110 /* each htab element is struct htab_elem + key + value */ 111 struct htab_elem { 112 union { 113 struct hlist_nulls_node hash_node; 114 struct { 115 void *padding; 116 union { 117 struct pcpu_freelist_node fnode; 118 struct htab_elem *batch_flink; 119 }; 120 }; 121 }; 122 union { 123 /* pointer to per-cpu pointer */ 124 void *ptr_to_pptr; 125 struct bpf_lru_node lru_node; 126 }; 127 u32 hash; 128 char key[] __aligned(8); 129 }; 130 131 static inline bool htab_is_prealloc(const struct bpf_htab *htab) 132 { 133 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); 134 } 135 136 static void htab_init_buckets(struct bpf_htab *htab) 137 { 138 unsigned int i; 139 140 for (i = 0; i < htab->n_buckets; i++) { 141 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); 142 raw_spin_lock_init(&htab->buckets[i].raw_lock); 143 lockdep_set_class(&htab->buckets[i].raw_lock, 144 &htab->lockdep_key); 145 cond_resched(); 146 } 147 } 148 149 static inline int htab_lock_bucket(const struct bpf_htab *htab, 150 struct bucket *b, u32 hash, 151 unsigned long *pflags) 152 { 153 unsigned long flags; 154 155 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); 156 157 preempt_disable(); 158 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) { 159 __this_cpu_dec(*(htab->map_locked[hash])); 160 preempt_enable(); 161 return -EBUSY; 162 } 163 164 raw_spin_lock_irqsave(&b->raw_lock, flags); 165 *pflags = flags; 166 167 return 0; 168 } 169 170 static inline void htab_unlock_bucket(const struct bpf_htab *htab, 171 struct bucket *b, u32 hash, 172 unsigned long flags) 173 { 174 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); 175 raw_spin_unlock_irqrestore(&b->raw_lock, flags); 176 __this_cpu_dec(*(htab->map_locked[hash])); 177 preempt_enable(); 178 } 179 180 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node); 181 182 static bool htab_is_lru(const struct bpf_htab *htab) 183 { 184 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || 185 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; 186 } 187 188 static bool htab_is_percpu(const struct bpf_htab *htab) 189 { 190 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || 191 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; 192 } 193 194 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size, 195 void __percpu *pptr) 196 { 197 *(void __percpu **)(l->key + key_size) = pptr; 198 } 199 200 static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size) 201 { 202 return *(void __percpu **)(l->key + key_size); 203 } 204 205 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l) 206 { 207 
return *(void **)(l->key + roundup(map->key_size, 8)); 208 } 209 210 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i) 211 { 212 return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size); 213 } 214 215 static bool htab_has_extra_elems(struct bpf_htab *htab) 216 { 217 return !htab_is_percpu(htab) && !htab_is_lru(htab); 218 } 219 220 static void htab_free_prealloced_timers(struct bpf_htab *htab) 221 { 222 u32 num_entries = htab->map.max_entries; 223 int i; 224 225 if (!btf_record_has_field(htab->map.record, BPF_TIMER)) 226 return; 227 if (htab_has_extra_elems(htab)) 228 num_entries += num_possible_cpus(); 229 230 for (i = 0; i < num_entries; i++) { 231 struct htab_elem *elem; 232 233 elem = get_htab_elem(htab, i); 234 bpf_obj_free_timer(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); 235 cond_resched(); 236 } 237 } 238 239 static void htab_free_prealloced_fields(struct bpf_htab *htab) 240 { 241 u32 num_entries = htab->map.max_entries; 242 int i; 243 244 if (IS_ERR_OR_NULL(htab->map.record)) 245 return; 246 if (htab_has_extra_elems(htab)) 247 num_entries += num_possible_cpus(); 248 for (i = 0; i < num_entries; i++) { 249 struct htab_elem *elem; 250 251 elem = get_htab_elem(htab, i); 252 if (htab_is_percpu(htab)) { 253 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); 254 int cpu; 255 256 for_each_possible_cpu(cpu) { 257 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); 258 cond_resched(); 259 } 260 } else { 261 bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); 262 cond_resched(); 263 } 264 cond_resched(); 265 } 266 } 267 268 static void htab_free_elems(struct bpf_htab *htab) 269 { 270 int i; 271 272 if (!htab_is_percpu(htab)) 273 goto free_elems; 274 275 for (i = 0; i < htab->map.max_entries; i++) { 276 void __percpu *pptr; 277 278 pptr = htab_elem_get_ptr(get_htab_elem(htab, i), 279 htab->map.key_size); 280 free_percpu(pptr); 281 cond_resched(); 282 } 283 free_elems: 284 bpf_map_area_free(htab->elems); 285 } 286 287 /* The LRU list has a lock (lru_lock). Each htab bucket has a lock 288 * (bucket_lock). If both locks need to be acquired together, the lock 289 * order is always lru_lock -> bucket_lock and this only happens in 290 * bpf_lru_list.c logic. For example, certain code path of 291 * bpf_lru_pop_free(), which is called by function prealloc_lru_pop(), 292 * will acquire lru_lock first followed by acquiring bucket_lock. 293 * 294 * In hashtab.c, to avoid deadlock, lock acquisition of 295 * bucket_lock followed by lru_lock is not allowed. In such cases, 296 * bucket_lock needs to be released first before acquiring lru_lock. 
297 */ 298 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, 299 u32 hash) 300 { 301 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash); 302 struct htab_elem *l; 303 304 if (node) { 305 l = container_of(node, struct htab_elem, lru_node); 306 memcpy(l->key, key, htab->map.key_size); 307 return l; 308 } 309 310 return NULL; 311 } 312 313 static int prealloc_init(struct bpf_htab *htab) 314 { 315 u32 num_entries = htab->map.max_entries; 316 int err = -ENOMEM, i; 317 318 if (htab_has_extra_elems(htab)) 319 num_entries += num_possible_cpus(); 320 321 htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries, 322 htab->map.numa_node); 323 if (!htab->elems) 324 return -ENOMEM; 325 326 if (!htab_is_percpu(htab)) 327 goto skip_percpu_elems; 328 329 for (i = 0; i < num_entries; i++) { 330 u32 size = round_up(htab->map.value_size, 8); 331 void __percpu *pptr; 332 333 pptr = bpf_map_alloc_percpu(&htab->map, size, 8, 334 GFP_USER | __GFP_NOWARN); 335 if (!pptr) 336 goto free_elems; 337 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, 338 pptr); 339 cond_resched(); 340 } 341 342 skip_percpu_elems: 343 if (htab_is_lru(htab)) 344 err = bpf_lru_init(&htab->lru, 345 htab->map.map_flags & BPF_F_NO_COMMON_LRU, 346 offsetof(struct htab_elem, hash) - 347 offsetof(struct htab_elem, lru_node), 348 htab_lru_map_delete_node, 349 htab); 350 else 351 err = pcpu_freelist_init(&htab->freelist); 352 353 if (err) 354 goto free_elems; 355 356 if (htab_is_lru(htab)) 357 bpf_lru_populate(&htab->lru, htab->elems, 358 offsetof(struct htab_elem, lru_node), 359 htab->elem_size, num_entries); 360 else 361 pcpu_freelist_populate(&htab->freelist, 362 htab->elems + offsetof(struct htab_elem, fnode), 363 htab->elem_size, num_entries); 364 365 return 0; 366 367 free_elems: 368 htab_free_elems(htab); 369 return err; 370 } 371 372 static void prealloc_destroy(struct bpf_htab *htab) 373 { 374 htab_free_elems(htab); 375 376 if (htab_is_lru(htab)) 377 bpf_lru_destroy(&htab->lru); 378 else 379 pcpu_freelist_destroy(&htab->freelist); 380 } 381 382 static int alloc_extra_elems(struct bpf_htab *htab) 383 { 384 struct htab_elem *__percpu *pptr, *l_new; 385 struct pcpu_freelist_node *l; 386 int cpu; 387 388 pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8, 389 GFP_USER | __GFP_NOWARN); 390 if (!pptr) 391 return -ENOMEM; 392 393 for_each_possible_cpu(cpu) { 394 l = pcpu_freelist_pop(&htab->freelist); 395 /* pop will succeed, since prealloc_init() 396 * preallocated extra num_possible_cpus elements 397 */ 398 l_new = container_of(l, struct htab_elem, fnode); 399 *per_cpu_ptr(pptr, cpu) = l_new; 400 } 401 htab->extra_elems = pptr; 402 return 0; 403 } 404 405 /* Called from syscall */ 406 static int htab_map_alloc_check(union bpf_attr *attr) 407 { 408 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || 409 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); 410 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || 411 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); 412 /* percpu_lru means each cpu has its own LRU list. 413 * it is different from BPF_MAP_TYPE_PERCPU_HASH where 414 * the map's value itself is percpu. percpu_lru has 415 * nothing to do with the map's value. 
416 */ 417 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); 418 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); 419 bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED); 420 int numa_node = bpf_map_attr_numa_node(attr); 421 422 BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) != 423 offsetof(struct htab_elem, hash_node.pprev)); 424 425 if (lru && !bpf_capable()) 426 /* LRU implementation is much complicated than other 427 * maps. Hence, limit to CAP_BPF. 428 */ 429 return -EPERM; 430 431 if (zero_seed && !capable(CAP_SYS_ADMIN)) 432 /* Guard against local DoS, and discourage production use. */ 433 return -EPERM; 434 435 if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK || 436 !bpf_map_flags_access_ok(attr->map_flags)) 437 return -EINVAL; 438 439 if (!lru && percpu_lru) 440 return -EINVAL; 441 442 if (lru && !prealloc) 443 return -ENOTSUPP; 444 445 if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru)) 446 return -EINVAL; 447 448 /* check sanity of attributes. 449 * value_size == 0 may be allowed in the future to use map as a set 450 */ 451 if (attr->max_entries == 0 || attr->key_size == 0 || 452 attr->value_size == 0) 453 return -EINVAL; 454 455 if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE - 456 sizeof(struct htab_elem)) 457 /* if key_size + value_size is bigger, the user space won't be 458 * able to access the elements via bpf syscall. This check 459 * also makes sure that the elem_size doesn't overflow and it's 460 * kmalloc-able later in htab_map_update_elem() 461 */ 462 return -E2BIG; 463 464 return 0; 465 } 466 467 static struct bpf_map *htab_map_alloc(union bpf_attr *attr) 468 { 469 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || 470 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); 471 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || 472 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); 473 /* percpu_lru means each cpu has its own LRU list. 474 * it is different from BPF_MAP_TYPE_PERCPU_HASH where 475 * the map's value itself is percpu. percpu_lru has 476 * nothing to do with the map's value. 477 */ 478 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); 479 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); 480 struct bpf_htab *htab; 481 int err, i; 482 483 htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE); 484 if (!htab) 485 return ERR_PTR(-ENOMEM); 486 487 lockdep_register_key(&htab->lockdep_key); 488 489 bpf_map_init_from_attr(&htab->map, attr); 490 491 if (percpu_lru) { 492 /* ensure each CPU's lru list has >=1 elements. 493 * since we are at it, make each lru list has the same 494 * number of elements. 
495 */ 496 htab->map.max_entries = roundup(attr->max_entries, 497 num_possible_cpus()); 498 if (htab->map.max_entries < attr->max_entries) 499 htab->map.max_entries = rounddown(attr->max_entries, 500 num_possible_cpus()); 501 } 502 503 /* hash table size must be power of 2 */ 504 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); 505 506 htab->elem_size = sizeof(struct htab_elem) + 507 round_up(htab->map.key_size, 8); 508 if (percpu) 509 htab->elem_size += sizeof(void *); 510 else 511 htab->elem_size += round_up(htab->map.value_size, 8); 512 513 err = -E2BIG; 514 /* prevent zero size kmalloc and check for u32 overflow */ 515 if (htab->n_buckets == 0 || 516 htab->n_buckets > U32_MAX / sizeof(struct bucket)) 517 goto free_htab; 518 519 err = -ENOMEM; 520 htab->buckets = bpf_map_area_alloc(htab->n_buckets * 521 sizeof(struct bucket), 522 htab->map.numa_node); 523 if (!htab->buckets) 524 goto free_htab; 525 526 for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) { 527 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map, 528 sizeof(int), 529 sizeof(int), 530 GFP_USER); 531 if (!htab->map_locked[i]) 532 goto free_map_locked; 533 } 534 535 if (htab->map.map_flags & BPF_F_ZERO_SEED) 536 htab->hashrnd = 0; 537 else 538 htab->hashrnd = get_random_u32(); 539 540 htab_init_buckets(htab); 541 542 /* compute_batch_value() computes batch value as num_online_cpus() * 2 543 * and __percpu_counter_compare() needs 544 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus() 545 * for percpu_counter to be faster than atomic_t. In practice the average bpf 546 * hash map size is 10k, which means that a system with 64 cpus will fill 547 * hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore 548 * define our own batch count as 32 then 10k hash map can be filled up to 80%: 549 * 10k - 8k > 32 _batch_ * 64 _cpus_ 550 * and __percpu_counter_compare() will still be fast. At that point hash map 551 * collisions will dominate its performance anyway. Assume that hash map filled 552 * to 50+% isn't going to be O(1) and use the following formula to choose 553 * between percpu_counter and atomic_t. 554 */ 555 #define PERCPU_COUNTER_BATCH 32 556 if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH) 557 htab->use_percpu_counter = true; 558 559 if (htab->use_percpu_counter) { 560 err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL); 561 if (err) 562 goto free_map_locked; 563 } 564 565 if (prealloc) { 566 err = prealloc_init(htab); 567 if (err) 568 goto free_map_locked; 569 570 if (!percpu && !lru) { 571 /* lru itself can remove the least used element, so 572 * there is no need for an extra elem during map_update. 
573 */ 574 err = alloc_extra_elems(htab); 575 if (err) 576 goto free_prealloc; 577 } 578 } else { 579 err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false); 580 if (err) 581 goto free_map_locked; 582 if (percpu) { 583 err = bpf_mem_alloc_init(&htab->pcpu_ma, 584 round_up(htab->map.value_size, 8), true); 585 if (err) 586 goto free_map_locked; 587 } 588 } 589 590 return &htab->map; 591 592 free_prealloc: 593 prealloc_destroy(htab); 594 free_map_locked: 595 if (htab->use_percpu_counter) 596 percpu_counter_destroy(&htab->pcount); 597 for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) 598 free_percpu(htab->map_locked[i]); 599 bpf_map_area_free(htab->buckets); 600 bpf_mem_alloc_destroy(&htab->pcpu_ma); 601 bpf_mem_alloc_destroy(&htab->ma); 602 free_htab: 603 lockdep_unregister_key(&htab->lockdep_key); 604 bpf_map_area_free(htab); 605 return ERR_PTR(err); 606 } 607 608 static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd) 609 { 610 return jhash(key, key_len, hashrnd); 611 } 612 613 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) 614 { 615 return &htab->buckets[hash & (htab->n_buckets - 1)]; 616 } 617 618 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash) 619 { 620 return &__select_bucket(htab, hash)->head; 621 } 622 623 /* this lookup function can only be called with bucket lock taken */ 624 static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash, 625 void *key, u32 key_size) 626 { 627 struct hlist_nulls_node *n; 628 struct htab_elem *l; 629 630 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) 631 if (l->hash == hash && !memcmp(&l->key, key, key_size)) 632 return l; 633 634 return NULL; 635 } 636 637 /* can be called without bucket lock. it will repeat the loop in 638 * the unlikely event when elements moved from one bucket into another 639 * while link list is being walked 640 */ 641 static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head, 642 u32 hash, void *key, 643 u32 key_size, u32 n_buckets) 644 { 645 struct hlist_nulls_node *n; 646 struct htab_elem *l; 647 648 again: 649 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) 650 if (l->hash == hash && !memcmp(&l->key, key, key_size)) 651 return l; 652 653 if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1)))) 654 goto again; 655 656 return NULL; 657 } 658 659 /* Called from syscall or from eBPF program directly, so 660 * arguments have to match bpf_map_lookup_elem() exactly. 661 * The return value is adjusted by BPF instructions 662 * in htab_map_gen_lookup(). 663 */ 664 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) 665 { 666 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 667 struct hlist_nulls_head *head; 668 struct htab_elem *l; 669 u32 hash, key_size; 670 671 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 672 !rcu_read_lock_bh_held()); 673 674 key_size = map->key_size; 675 676 hash = htab_map_hash(key, key_size, htab->hashrnd); 677 678 head = select_bucket(htab, hash); 679 680 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); 681 682 return l; 683 } 684 685 static void *htab_map_lookup_elem(struct bpf_map *map, void *key) 686 { 687 struct htab_elem *l = __htab_map_lookup_elem(map, key); 688 689 if (l) 690 return l->key + round_up(map->key_size, 8); 691 692 return NULL; 693 } 694 695 /* inline bpf_map_lookup_elem() call. 
696 * Instead of: 697 * bpf_prog 698 * bpf_map_lookup_elem 699 * map->ops->map_lookup_elem 700 * htab_map_lookup_elem 701 * __htab_map_lookup_elem 702 * do: 703 * bpf_prog 704 * __htab_map_lookup_elem 705 */ 706 static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) 707 { 708 struct bpf_insn *insn = insn_buf; 709 const int ret = BPF_REG_0; 710 711 BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem, 712 (void *(*)(struct bpf_map *map, void *key))NULL)); 713 *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem); 714 *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1); 715 *insn++ = BPF_ALU64_IMM(BPF_ADD, ret, 716 offsetof(struct htab_elem, key) + 717 round_up(map->key_size, 8)); 718 return insn - insn_buf; 719 } 720 721 static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map, 722 void *key, const bool mark) 723 { 724 struct htab_elem *l = __htab_map_lookup_elem(map, key); 725 726 if (l) { 727 if (mark) 728 bpf_lru_node_set_ref(&l->lru_node); 729 return l->key + round_up(map->key_size, 8); 730 } 731 732 return NULL; 733 } 734 735 static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key) 736 { 737 return __htab_lru_map_lookup_elem(map, key, true); 738 } 739 740 static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key) 741 { 742 return __htab_lru_map_lookup_elem(map, key, false); 743 } 744 745 static int htab_lru_map_gen_lookup(struct bpf_map *map, 746 struct bpf_insn *insn_buf) 747 { 748 struct bpf_insn *insn = insn_buf; 749 const int ret = BPF_REG_0; 750 const int ref_reg = BPF_REG_1; 751 752 BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem, 753 (void *(*)(struct bpf_map *map, void *key))NULL)); 754 *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem); 755 *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4); 756 *insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret, 757 offsetof(struct htab_elem, lru_node) + 758 offsetof(struct bpf_lru_node, ref)); 759 *insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1); 760 *insn++ = BPF_ST_MEM(BPF_B, ret, 761 offsetof(struct htab_elem, lru_node) + 762 offsetof(struct bpf_lru_node, ref), 763 1); 764 *insn++ = BPF_ALU64_IMM(BPF_ADD, ret, 765 offsetof(struct htab_elem, key) + 766 round_up(map->key_size, 8)); 767 return insn - insn_buf; 768 } 769 770 static void check_and_free_fields(struct bpf_htab *htab, 771 struct htab_elem *elem) 772 { 773 if (htab_is_percpu(htab)) { 774 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); 775 int cpu; 776 777 for_each_possible_cpu(cpu) 778 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); 779 } else { 780 void *map_value = elem->key + round_up(htab->map.key_size, 8); 781 782 bpf_obj_free_fields(htab->map.record, map_value); 783 } 784 } 785 786 /* It is called from the bpf_lru_list when the LRU needs to delete 787 * older elements from the htab. 
788 */ 789 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node) 790 { 791 struct bpf_htab *htab = arg; 792 struct htab_elem *l = NULL, *tgt_l; 793 struct hlist_nulls_head *head; 794 struct hlist_nulls_node *n; 795 unsigned long flags; 796 struct bucket *b; 797 int ret; 798 799 tgt_l = container_of(node, struct htab_elem, lru_node); 800 b = __select_bucket(htab, tgt_l->hash); 801 head = &b->head; 802 803 ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags); 804 if (ret) 805 return false; 806 807 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) 808 if (l == tgt_l) { 809 hlist_nulls_del_rcu(&l->hash_node); 810 check_and_free_fields(htab, l); 811 break; 812 } 813 814 htab_unlock_bucket(htab, b, tgt_l->hash, flags); 815 816 return l == tgt_l; 817 } 818 819 /* Called from syscall */ 820 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) 821 { 822 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 823 struct hlist_nulls_head *head; 824 struct htab_elem *l, *next_l; 825 u32 hash, key_size; 826 int i = 0; 827 828 WARN_ON_ONCE(!rcu_read_lock_held()); 829 830 key_size = map->key_size; 831 832 if (!key) 833 goto find_first_elem; 834 835 hash = htab_map_hash(key, key_size, htab->hashrnd); 836 837 head = select_bucket(htab, hash); 838 839 /* lookup the key */ 840 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); 841 842 if (!l) 843 goto find_first_elem; 844 845 /* key was found, get next key in the same bucket */ 846 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)), 847 struct htab_elem, hash_node); 848 849 if (next_l) { 850 /* if next elem in this hash list is non-zero, just return it */ 851 memcpy(next_key, next_l->key, key_size); 852 return 0; 853 } 854 855 /* no more elements in this hash list, go to the next bucket */ 856 i = hash & (htab->n_buckets - 1); 857 i++; 858 859 find_first_elem: 860 /* iterate over buckets */ 861 for (; i < htab->n_buckets; i++) { 862 head = select_bucket(htab, i); 863 864 /* pick first element in the bucket */ 865 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)), 866 struct htab_elem, hash_node); 867 if (next_l) { 868 /* if it's not empty, just return it */ 869 memcpy(next_key, next_l->key, key_size); 870 return 0; 871 } 872 } 873 874 /* iterated over all buckets and all elements */ 875 return -ENOENT; 876 } 877 878 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l) 879 { 880 check_and_free_fields(htab, l); 881 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) 882 bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr); 883 bpf_mem_cache_free(&htab->ma, l); 884 } 885 886 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) 887 { 888 struct bpf_map *map = &htab->map; 889 void *ptr; 890 891 if (map->ops->map_fd_put_ptr) { 892 ptr = fd_htab_map_get_ptr(map, l); 893 map->ops->map_fd_put_ptr(ptr); 894 } 895 } 896 897 static bool is_map_full(struct bpf_htab *htab) 898 { 899 if (htab->use_percpu_counter) 900 return __percpu_counter_compare(&htab->pcount, htab->map.max_entries, 901 PERCPU_COUNTER_BATCH) >= 0; 902 return atomic_read(&htab->count) >= htab->map.max_entries; 903 } 904 905 static void inc_elem_count(struct bpf_htab *htab) 906 { 907 if (htab->use_percpu_counter) 908 percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH); 909 else 910 atomic_inc(&htab->count); 911 } 912 913 static void dec_elem_count(struct bpf_htab *htab) 914 { 915 if 
(htab->use_percpu_counter) 916 percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH); 917 else 918 atomic_dec(&htab->count); 919 } 920 921 922 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) 923 { 924 htab_put_fd_value(htab, l); 925 926 if (htab_is_prealloc(htab)) { 927 check_and_free_fields(htab, l); 928 __pcpu_freelist_push(&htab->freelist, &l->fnode); 929 } else { 930 dec_elem_count(htab); 931 htab_elem_free(htab, l); 932 } 933 } 934 935 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr, 936 void *value, bool onallcpus) 937 { 938 if (!onallcpus) { 939 /* copy true value_size bytes */ 940 copy_map_value(&htab->map, this_cpu_ptr(pptr), value); 941 } else { 942 u32 size = round_up(htab->map.value_size, 8); 943 int off = 0, cpu; 944 945 for_each_possible_cpu(cpu) { 946 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off); 947 off += size; 948 } 949 } 950 } 951 952 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr, 953 void *value, bool onallcpus) 954 { 955 /* When not setting the initial value on all cpus, zero-fill element 956 * values for other cpus. Otherwise, bpf program has no way to ensure 957 * known initial values for cpus other than current one 958 * (onallcpus=false always when coming from bpf prog). 959 */ 960 if (!onallcpus) { 961 int current_cpu = raw_smp_processor_id(); 962 int cpu; 963 964 for_each_possible_cpu(cpu) { 965 if (cpu == current_cpu) 966 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value); 967 else /* Since elem is preallocated, we cannot touch special fields */ 968 zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu)); 969 } 970 } else { 971 pcpu_copy_value(htab, pptr, value, onallcpus); 972 } 973 } 974 975 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab) 976 { 977 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && 978 BITS_PER_LONG == 64; 979 } 980 981 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, 982 void *value, u32 key_size, u32 hash, 983 bool percpu, bool onallcpus, 984 struct htab_elem *old_elem) 985 { 986 u32 size = htab->map.value_size; 987 bool prealloc = htab_is_prealloc(htab); 988 struct htab_elem *l_new, **pl_new; 989 void __percpu *pptr; 990 991 if (prealloc) { 992 if (old_elem) { 993 /* if we're updating the existing element, 994 * use per-cpu extra elems to avoid freelist_pop/push 995 */ 996 pl_new = this_cpu_ptr(htab->extra_elems); 997 l_new = *pl_new; 998 htab_put_fd_value(htab, old_elem); 999 *pl_new = old_elem; 1000 } else { 1001 struct pcpu_freelist_node *l; 1002 1003 l = __pcpu_freelist_pop(&htab->freelist); 1004 if (!l) 1005 return ERR_PTR(-E2BIG); 1006 l_new = container_of(l, struct htab_elem, fnode); 1007 } 1008 } else { 1009 if (is_map_full(htab)) 1010 if (!old_elem) 1011 /* when map is full and update() is replacing 1012 * old element, it's ok to allocate, since 1013 * old element will be freed immediately. 
1014 * Otherwise return an error 1015 */ 1016 return ERR_PTR(-E2BIG); 1017 inc_elem_count(htab); 1018 l_new = bpf_mem_cache_alloc(&htab->ma); 1019 if (!l_new) { 1020 l_new = ERR_PTR(-ENOMEM); 1021 goto dec_count; 1022 } 1023 } 1024 1025 memcpy(l_new->key, key, key_size); 1026 if (percpu) { 1027 if (prealloc) { 1028 pptr = htab_elem_get_ptr(l_new, key_size); 1029 } else { 1030 /* alloc_percpu zero-fills */ 1031 pptr = bpf_mem_cache_alloc(&htab->pcpu_ma); 1032 if (!pptr) { 1033 bpf_mem_cache_free(&htab->ma, l_new); 1034 l_new = ERR_PTR(-ENOMEM); 1035 goto dec_count; 1036 } 1037 l_new->ptr_to_pptr = pptr; 1038 pptr = *(void **)pptr; 1039 } 1040 1041 pcpu_init_value(htab, pptr, value, onallcpus); 1042 1043 if (!prealloc) 1044 htab_elem_set_ptr(l_new, key_size, pptr); 1045 } else if (fd_htab_map_needs_adjust(htab)) { 1046 size = round_up(size, 8); 1047 memcpy(l_new->key + round_up(key_size, 8), value, size); 1048 } else { 1049 copy_map_value(&htab->map, 1050 l_new->key + round_up(key_size, 8), 1051 value); 1052 } 1053 1054 l_new->hash = hash; 1055 return l_new; 1056 dec_count: 1057 dec_elem_count(htab); 1058 return l_new; 1059 } 1060 1061 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old, 1062 u64 map_flags) 1063 { 1064 if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST) 1065 /* elem already exists */ 1066 return -EEXIST; 1067 1068 if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST) 1069 /* elem doesn't exist, cannot update it */ 1070 return -ENOENT; 1071 1072 return 0; 1073 } 1074 1075 /* Called from syscall or from eBPF program */ 1076 static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, 1077 u64 map_flags) 1078 { 1079 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1080 struct htab_elem *l_new = NULL, *l_old; 1081 struct hlist_nulls_head *head; 1082 unsigned long flags; 1083 struct bucket *b; 1084 u32 key_size, hash; 1085 int ret; 1086 1087 if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST)) 1088 /* unknown flags */ 1089 return -EINVAL; 1090 1091 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1092 !rcu_read_lock_bh_held()); 1093 1094 key_size = map->key_size; 1095 1096 hash = htab_map_hash(key, key_size, htab->hashrnd); 1097 1098 b = __select_bucket(htab, hash); 1099 head = &b->head; 1100 1101 if (unlikely(map_flags & BPF_F_LOCK)) { 1102 if (unlikely(!btf_record_has_field(map->record, BPF_SPIN_LOCK))) 1103 return -EINVAL; 1104 /* find an element without taking the bucket lock */ 1105 l_old = lookup_nulls_elem_raw(head, hash, key, key_size, 1106 htab->n_buckets); 1107 ret = check_flags(htab, l_old, map_flags); 1108 if (ret) 1109 return ret; 1110 if (l_old) { 1111 /* grab the element lock and update value in place */ 1112 copy_map_value_locked(map, 1113 l_old->key + round_up(key_size, 8), 1114 value, false); 1115 return 0; 1116 } 1117 /* fall through, grab the bucket lock and lookup again. 1118 * 99.9% chance that the element won't be found, 1119 * but second lookup under lock has to be done. 1120 */ 1121 } 1122 1123 ret = htab_lock_bucket(htab, b, hash, &flags); 1124 if (ret) 1125 return ret; 1126 1127 l_old = lookup_elem_raw(head, hash, key, key_size); 1128 1129 ret = check_flags(htab, l_old, map_flags); 1130 if (ret) 1131 goto err; 1132 1133 if (unlikely(l_old && (map_flags & BPF_F_LOCK))) { 1134 /* first lookup without the bucket lock didn't find the element, 1135 * but second lookup with the bucket lock found it. 
1136 * This case is highly unlikely, but has to be dealt with: 1137 * grab the element lock in addition to the bucket lock 1138 * and update element in place 1139 */ 1140 copy_map_value_locked(map, 1141 l_old->key + round_up(key_size, 8), 1142 value, false); 1143 ret = 0; 1144 goto err; 1145 } 1146 1147 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, 1148 l_old); 1149 if (IS_ERR(l_new)) { 1150 /* all pre-allocated elements are in use or memory exhausted */ 1151 ret = PTR_ERR(l_new); 1152 goto err; 1153 } 1154 1155 /* add new element to the head of the list, so that 1156 * concurrent search will find it before old elem 1157 */ 1158 hlist_nulls_add_head_rcu(&l_new->hash_node, head); 1159 if (l_old) { 1160 hlist_nulls_del_rcu(&l_old->hash_node); 1161 if (!htab_is_prealloc(htab)) 1162 free_htab_elem(htab, l_old); 1163 else 1164 check_and_free_fields(htab, l_old); 1165 } 1166 ret = 0; 1167 err: 1168 htab_unlock_bucket(htab, b, hash, flags); 1169 return ret; 1170 } 1171 1172 static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem) 1173 { 1174 check_and_free_fields(htab, elem); 1175 bpf_lru_push_free(&htab->lru, &elem->lru_node); 1176 } 1177 1178 static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, 1179 u64 map_flags) 1180 { 1181 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1182 struct htab_elem *l_new, *l_old = NULL; 1183 struct hlist_nulls_head *head; 1184 unsigned long flags; 1185 struct bucket *b; 1186 u32 key_size, hash; 1187 int ret; 1188 1189 if (unlikely(map_flags > BPF_EXIST)) 1190 /* unknown flags */ 1191 return -EINVAL; 1192 1193 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1194 !rcu_read_lock_bh_held()); 1195 1196 key_size = map->key_size; 1197 1198 hash = htab_map_hash(key, key_size, htab->hashrnd); 1199 1200 b = __select_bucket(htab, hash); 1201 head = &b->head; 1202 1203 /* For LRU, we need to alloc before taking bucket's 1204 * spinlock because getting free nodes from LRU may need 1205 * to remove older elements from htab and this removal 1206 * operation will need a bucket lock. 
1207 */ 1208 l_new = prealloc_lru_pop(htab, key, hash); 1209 if (!l_new) 1210 return -ENOMEM; 1211 copy_map_value(&htab->map, 1212 l_new->key + round_up(map->key_size, 8), value); 1213 1214 ret = htab_lock_bucket(htab, b, hash, &flags); 1215 if (ret) 1216 return ret; 1217 1218 l_old = lookup_elem_raw(head, hash, key, key_size); 1219 1220 ret = check_flags(htab, l_old, map_flags); 1221 if (ret) 1222 goto err; 1223 1224 /* add new element to the head of the list, so that 1225 * concurrent search will find it before old elem 1226 */ 1227 hlist_nulls_add_head_rcu(&l_new->hash_node, head); 1228 if (l_old) { 1229 bpf_lru_node_set_ref(&l_new->lru_node); 1230 hlist_nulls_del_rcu(&l_old->hash_node); 1231 } 1232 ret = 0; 1233 1234 err: 1235 htab_unlock_bucket(htab, b, hash, flags); 1236 1237 if (ret) 1238 htab_lru_push_free(htab, l_new); 1239 else if (l_old) 1240 htab_lru_push_free(htab, l_old); 1241 1242 return ret; 1243 } 1244 1245 static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key, 1246 void *value, u64 map_flags, 1247 bool onallcpus) 1248 { 1249 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1250 struct htab_elem *l_new = NULL, *l_old; 1251 struct hlist_nulls_head *head; 1252 unsigned long flags; 1253 struct bucket *b; 1254 u32 key_size, hash; 1255 int ret; 1256 1257 if (unlikely(map_flags > BPF_EXIST)) 1258 /* unknown flags */ 1259 return -EINVAL; 1260 1261 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1262 !rcu_read_lock_bh_held()); 1263 1264 key_size = map->key_size; 1265 1266 hash = htab_map_hash(key, key_size, htab->hashrnd); 1267 1268 b = __select_bucket(htab, hash); 1269 head = &b->head; 1270 1271 ret = htab_lock_bucket(htab, b, hash, &flags); 1272 if (ret) 1273 return ret; 1274 1275 l_old = lookup_elem_raw(head, hash, key, key_size); 1276 1277 ret = check_flags(htab, l_old, map_flags); 1278 if (ret) 1279 goto err; 1280 1281 if (l_old) { 1282 /* per-cpu hash map can update value in-place */ 1283 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), 1284 value, onallcpus); 1285 } else { 1286 l_new = alloc_htab_elem(htab, key, value, key_size, 1287 hash, true, onallcpus, NULL); 1288 if (IS_ERR(l_new)) { 1289 ret = PTR_ERR(l_new); 1290 goto err; 1291 } 1292 hlist_nulls_add_head_rcu(&l_new->hash_node, head); 1293 } 1294 ret = 0; 1295 err: 1296 htab_unlock_bucket(htab, b, hash, flags); 1297 return ret; 1298 } 1299 1300 static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, 1301 void *value, u64 map_flags, 1302 bool onallcpus) 1303 { 1304 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1305 struct htab_elem *l_new = NULL, *l_old; 1306 struct hlist_nulls_head *head; 1307 unsigned long flags; 1308 struct bucket *b; 1309 u32 key_size, hash; 1310 int ret; 1311 1312 if (unlikely(map_flags > BPF_EXIST)) 1313 /* unknown flags */ 1314 return -EINVAL; 1315 1316 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1317 !rcu_read_lock_bh_held()); 1318 1319 key_size = map->key_size; 1320 1321 hash = htab_map_hash(key, key_size, htab->hashrnd); 1322 1323 b = __select_bucket(htab, hash); 1324 head = &b->head; 1325 1326 /* For LRU, we need to alloc before taking bucket's 1327 * spinlock because LRU's elem alloc may need 1328 * to remove older elem from htab and this removal 1329 * operation will need a bucket lock. 
1330 */ 1331 if (map_flags != BPF_EXIST) { 1332 l_new = prealloc_lru_pop(htab, key, hash); 1333 if (!l_new) 1334 return -ENOMEM; 1335 } 1336 1337 ret = htab_lock_bucket(htab, b, hash, &flags); 1338 if (ret) 1339 return ret; 1340 1341 l_old = lookup_elem_raw(head, hash, key, key_size); 1342 1343 ret = check_flags(htab, l_old, map_flags); 1344 if (ret) 1345 goto err; 1346 1347 if (l_old) { 1348 bpf_lru_node_set_ref(&l_old->lru_node); 1349 1350 /* per-cpu hash map can update value in-place */ 1351 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), 1352 value, onallcpus); 1353 } else { 1354 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size), 1355 value, onallcpus); 1356 hlist_nulls_add_head_rcu(&l_new->hash_node, head); 1357 l_new = NULL; 1358 } 1359 ret = 0; 1360 err: 1361 htab_unlock_bucket(htab, b, hash, flags); 1362 if (l_new) 1363 bpf_lru_push_free(&htab->lru, &l_new->lru_node); 1364 return ret; 1365 } 1366 1367 static int htab_percpu_map_update_elem(struct bpf_map *map, void *key, 1368 void *value, u64 map_flags) 1369 { 1370 return __htab_percpu_map_update_elem(map, key, value, map_flags, false); 1371 } 1372 1373 static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, 1374 void *value, u64 map_flags) 1375 { 1376 return __htab_lru_percpu_map_update_elem(map, key, value, map_flags, 1377 false); 1378 } 1379 1380 /* Called from syscall or from eBPF program */ 1381 static int htab_map_delete_elem(struct bpf_map *map, void *key) 1382 { 1383 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1384 struct hlist_nulls_head *head; 1385 struct bucket *b; 1386 struct htab_elem *l; 1387 unsigned long flags; 1388 u32 hash, key_size; 1389 int ret; 1390 1391 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1392 !rcu_read_lock_bh_held()); 1393 1394 key_size = map->key_size; 1395 1396 hash = htab_map_hash(key, key_size, htab->hashrnd); 1397 b = __select_bucket(htab, hash); 1398 head = &b->head; 1399 1400 ret = htab_lock_bucket(htab, b, hash, &flags); 1401 if (ret) 1402 return ret; 1403 1404 l = lookup_elem_raw(head, hash, key, key_size); 1405 1406 if (l) { 1407 hlist_nulls_del_rcu(&l->hash_node); 1408 free_htab_elem(htab, l); 1409 } else { 1410 ret = -ENOENT; 1411 } 1412 1413 htab_unlock_bucket(htab, b, hash, flags); 1414 return ret; 1415 } 1416 1417 static int htab_lru_map_delete_elem(struct bpf_map *map, void *key) 1418 { 1419 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1420 struct hlist_nulls_head *head; 1421 struct bucket *b; 1422 struct htab_elem *l; 1423 unsigned long flags; 1424 u32 hash, key_size; 1425 int ret; 1426 1427 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1428 !rcu_read_lock_bh_held()); 1429 1430 key_size = map->key_size; 1431 1432 hash = htab_map_hash(key, key_size, htab->hashrnd); 1433 b = __select_bucket(htab, hash); 1434 head = &b->head; 1435 1436 ret = htab_lock_bucket(htab, b, hash, &flags); 1437 if (ret) 1438 return ret; 1439 1440 l = lookup_elem_raw(head, hash, key, key_size); 1441 1442 if (l) 1443 hlist_nulls_del_rcu(&l->hash_node); 1444 else 1445 ret = -ENOENT; 1446 1447 htab_unlock_bucket(htab, b, hash, flags); 1448 if (l) 1449 htab_lru_push_free(htab, l); 1450 return ret; 1451 } 1452 1453 static void delete_all_elements(struct bpf_htab *htab) 1454 { 1455 int i; 1456 1457 /* It's called from a worker thread, so disable migration here, 1458 * since bpf_mem_cache_free() relies on that. 
1459 */ 1460 migrate_disable(); 1461 for (i = 0; i < htab->n_buckets; i++) { 1462 struct hlist_nulls_head *head = select_bucket(htab, i); 1463 struct hlist_nulls_node *n; 1464 struct htab_elem *l; 1465 1466 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { 1467 hlist_nulls_del_rcu(&l->hash_node); 1468 htab_elem_free(htab, l); 1469 } 1470 } 1471 migrate_enable(); 1472 } 1473 1474 static void htab_free_malloced_timers(struct bpf_htab *htab) 1475 { 1476 int i; 1477 1478 rcu_read_lock(); 1479 for (i = 0; i < htab->n_buckets; i++) { 1480 struct hlist_nulls_head *head = select_bucket(htab, i); 1481 struct hlist_nulls_node *n; 1482 struct htab_elem *l; 1483 1484 hlist_nulls_for_each_entry(l, n, head, hash_node) { 1485 /* We only free timer on uref dropping to zero */ 1486 bpf_obj_free_timer(htab->map.record, l->key + round_up(htab->map.key_size, 8)); 1487 } 1488 cond_resched_rcu(); 1489 } 1490 rcu_read_unlock(); 1491 } 1492 1493 static void htab_map_free_timers(struct bpf_map *map) 1494 { 1495 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1496 1497 /* We only free timer on uref dropping to zero */ 1498 if (!btf_record_has_field(htab->map.record, BPF_TIMER)) 1499 return; 1500 if (!htab_is_prealloc(htab)) 1501 htab_free_malloced_timers(htab); 1502 else 1503 htab_free_prealloced_timers(htab); 1504 } 1505 1506 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */ 1507 static void htab_map_free(struct bpf_map *map) 1508 { 1509 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1510 int i; 1511 1512 /* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback. 1513 * bpf_free_used_maps() is called after bpf prog is no longer executing. 1514 * There is no need to synchronize_rcu() here to protect map elements. 1515 */ 1516 1517 /* htab no longer uses call_rcu() directly. bpf_mem_alloc does it 1518 * underneath and is reponsible for waiting for callbacks to finish 1519 * during bpf_mem_alloc_destroy(). 
1520 */ 1521 if (!htab_is_prealloc(htab)) { 1522 delete_all_elements(htab); 1523 } else { 1524 htab_free_prealloced_fields(htab); 1525 prealloc_destroy(htab); 1526 } 1527 1528 free_percpu(htab->extra_elems); 1529 bpf_map_area_free(htab->buckets); 1530 bpf_mem_alloc_destroy(&htab->pcpu_ma); 1531 bpf_mem_alloc_destroy(&htab->ma); 1532 if (htab->use_percpu_counter) 1533 percpu_counter_destroy(&htab->pcount); 1534 for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) 1535 free_percpu(htab->map_locked[i]); 1536 lockdep_unregister_key(&htab->lockdep_key); 1537 bpf_map_area_free(htab); 1538 } 1539 1540 static void htab_map_seq_show_elem(struct bpf_map *map, void *key, 1541 struct seq_file *m) 1542 { 1543 void *value; 1544 1545 rcu_read_lock(); 1546 1547 value = htab_map_lookup_elem(map, key); 1548 if (!value) { 1549 rcu_read_unlock(); 1550 return; 1551 } 1552 1553 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); 1554 seq_puts(m, ": "); 1555 btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); 1556 seq_puts(m, "\n"); 1557 1558 rcu_read_unlock(); 1559 } 1560 1561 static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, 1562 void *value, bool is_lru_map, 1563 bool is_percpu, u64 flags) 1564 { 1565 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1566 struct hlist_nulls_head *head; 1567 unsigned long bflags; 1568 struct htab_elem *l; 1569 u32 hash, key_size; 1570 struct bucket *b; 1571 int ret; 1572 1573 key_size = map->key_size; 1574 1575 hash = htab_map_hash(key, key_size, htab->hashrnd); 1576 b = __select_bucket(htab, hash); 1577 head = &b->head; 1578 1579 ret = htab_lock_bucket(htab, b, hash, &bflags); 1580 if (ret) 1581 return ret; 1582 1583 l = lookup_elem_raw(head, hash, key, key_size); 1584 if (!l) { 1585 ret = -ENOENT; 1586 } else { 1587 if (is_percpu) { 1588 u32 roundup_value_size = round_up(map->value_size, 8); 1589 void __percpu *pptr; 1590 int off = 0, cpu; 1591 1592 pptr = htab_elem_get_ptr(l, key_size); 1593 for_each_possible_cpu(cpu) { 1594 copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu)); 1595 check_and_init_map_value(&htab->map, value + off); 1596 off += roundup_value_size; 1597 } 1598 } else { 1599 u32 roundup_key_size = round_up(map->key_size, 8); 1600 1601 if (flags & BPF_F_LOCK) 1602 copy_map_value_locked(map, value, l->key + 1603 roundup_key_size, 1604 true); 1605 else 1606 copy_map_value(map, value, l->key + 1607 roundup_key_size); 1608 /* Zeroing special fields in the temp buffer */ 1609 check_and_init_map_value(map, value); 1610 } 1611 1612 hlist_nulls_del_rcu(&l->hash_node); 1613 if (!is_lru_map) 1614 free_htab_elem(htab, l); 1615 } 1616 1617 htab_unlock_bucket(htab, b, hash, bflags); 1618 1619 if (is_lru_map && l) 1620 htab_lru_push_free(htab, l); 1621 1622 return ret; 1623 } 1624 1625 static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, 1626 void *value, u64 flags) 1627 { 1628 return __htab_map_lookup_and_delete_elem(map, key, value, false, false, 1629 flags); 1630 } 1631 1632 static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map, 1633 void *key, void *value, 1634 u64 flags) 1635 { 1636 return __htab_map_lookup_and_delete_elem(map, key, value, false, true, 1637 flags); 1638 } 1639 1640 static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key, 1641 void *value, u64 flags) 1642 { 1643 return __htab_map_lookup_and_delete_elem(map, key, value, true, false, 1644 flags); 1645 } 1646 1647 static int htab_lru_percpu_map_lookup_and_delete_elem(struct 
bpf_map *map, 1648 void *key, void *value, 1649 u64 flags) 1650 { 1651 return __htab_map_lookup_and_delete_elem(map, key, value, true, true, 1652 flags); 1653 } 1654 1655 static int 1656 __htab_map_lookup_and_delete_batch(struct bpf_map *map, 1657 const union bpf_attr *attr, 1658 union bpf_attr __user *uattr, 1659 bool do_delete, bool is_lru_map, 1660 bool is_percpu) 1661 { 1662 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1663 u32 bucket_cnt, total, key_size, value_size, roundup_key_size; 1664 void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val; 1665 void __user *uvalues = u64_to_user_ptr(attr->batch.values); 1666 void __user *ukeys = u64_to_user_ptr(attr->batch.keys); 1667 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); 1668 u32 batch, max_count, size, bucket_size, map_id; 1669 struct htab_elem *node_to_free = NULL; 1670 u64 elem_map_flags, map_flags; 1671 struct hlist_nulls_head *head; 1672 struct hlist_nulls_node *n; 1673 unsigned long flags = 0; 1674 bool locked = false; 1675 struct htab_elem *l; 1676 struct bucket *b; 1677 int ret = 0; 1678 1679 elem_map_flags = attr->batch.elem_flags; 1680 if ((elem_map_flags & ~BPF_F_LOCK) || 1681 ((elem_map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))) 1682 return -EINVAL; 1683 1684 map_flags = attr->batch.flags; 1685 if (map_flags) 1686 return -EINVAL; 1687 1688 max_count = attr->batch.count; 1689 if (!max_count) 1690 return 0; 1691 1692 if (put_user(0, &uattr->batch.count)) 1693 return -EFAULT; 1694 1695 batch = 0; 1696 if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch))) 1697 return -EFAULT; 1698 1699 if (batch >= htab->n_buckets) 1700 return -ENOENT; 1701 1702 key_size = htab->map.key_size; 1703 roundup_key_size = round_up(htab->map.key_size, 8); 1704 value_size = htab->map.value_size; 1705 size = round_up(value_size, 8); 1706 if (is_percpu) 1707 value_size = size * num_possible_cpus(); 1708 total = 0; 1709 /* while experimenting with hash tables with sizes ranging from 10 to 1710 * 1000, it was observed that a bucket can have up to 5 entries. 1711 */ 1712 bucket_size = 5; 1713 1714 alloc: 1715 /* We cannot do copy_from_user or copy_to_user inside 1716 * the rcu_read_lock. Allocate enough space here. 1717 */ 1718 keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN); 1719 values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN); 1720 if (!keys || !values) { 1721 ret = -ENOMEM; 1722 goto after_loop; 1723 } 1724 1725 again: 1726 bpf_disable_instrumentation(); 1727 rcu_read_lock(); 1728 again_nocopy: 1729 dst_key = keys; 1730 dst_val = values; 1731 b = &htab->buckets[batch]; 1732 head = &b->head; 1733 /* do not grab the lock unless need it (bucket_cnt > 0). */ 1734 if (locked) { 1735 ret = htab_lock_bucket(htab, b, batch, &flags); 1736 if (ret) { 1737 rcu_read_unlock(); 1738 bpf_enable_instrumentation(); 1739 goto after_loop; 1740 } 1741 } 1742 1743 bucket_cnt = 0; 1744 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) 1745 bucket_cnt++; 1746 1747 if (bucket_cnt && !locked) { 1748 locked = true; 1749 goto again_nocopy; 1750 } 1751 1752 if (bucket_cnt > (max_count - total)) { 1753 if (total == 0) 1754 ret = -ENOSPC; 1755 /* Note that since bucket_cnt > 0 here, it is implicit 1756 * that the locked was grabbed, so release it. 
1757 */ 1758 htab_unlock_bucket(htab, b, batch, flags); 1759 rcu_read_unlock(); 1760 bpf_enable_instrumentation(); 1761 goto after_loop; 1762 } 1763 1764 if (bucket_cnt > bucket_size) { 1765 bucket_size = bucket_cnt; 1766 /* Note that since bucket_cnt > 0 here, it is implicit 1767 * that the locked was grabbed, so release it. 1768 */ 1769 htab_unlock_bucket(htab, b, batch, flags); 1770 rcu_read_unlock(); 1771 bpf_enable_instrumentation(); 1772 kvfree(keys); 1773 kvfree(values); 1774 goto alloc; 1775 } 1776 1777 /* Next block is only safe to run if you have grabbed the lock */ 1778 if (!locked) 1779 goto next_batch; 1780 1781 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { 1782 memcpy(dst_key, l->key, key_size); 1783 1784 if (is_percpu) { 1785 int off = 0, cpu; 1786 void __percpu *pptr; 1787 1788 pptr = htab_elem_get_ptr(l, map->key_size); 1789 for_each_possible_cpu(cpu) { 1790 copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu)); 1791 check_and_init_map_value(&htab->map, dst_val + off); 1792 off += size; 1793 } 1794 } else { 1795 value = l->key + roundup_key_size; 1796 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { 1797 struct bpf_map **inner_map = value; 1798 1799 /* Actual value is the id of the inner map */ 1800 map_id = map->ops->map_fd_sys_lookup_elem(*inner_map); 1801 value = &map_id; 1802 } 1803 1804 if (elem_map_flags & BPF_F_LOCK) 1805 copy_map_value_locked(map, dst_val, value, 1806 true); 1807 else 1808 copy_map_value(map, dst_val, value); 1809 /* Zeroing special fields in the temp buffer */ 1810 check_and_init_map_value(map, dst_val); 1811 } 1812 if (do_delete) { 1813 hlist_nulls_del_rcu(&l->hash_node); 1814 1815 /* bpf_lru_push_free() will acquire lru_lock, which 1816 * may cause deadlock. See comments in function 1817 * prealloc_lru_pop(). Let us do bpf_lru_push_free() 1818 * after releasing the bucket lock. 1819 */ 1820 if (is_lru_map) { 1821 l->batch_flink = node_to_free; 1822 node_to_free = l; 1823 } else { 1824 free_htab_elem(htab, l); 1825 } 1826 } 1827 dst_key += key_size; 1828 dst_val += value_size; 1829 } 1830 1831 htab_unlock_bucket(htab, b, batch, flags); 1832 locked = false; 1833 1834 while (node_to_free) { 1835 l = node_to_free; 1836 node_to_free = node_to_free->batch_flink; 1837 htab_lru_push_free(htab, l); 1838 } 1839 1840 next_batch: 1841 /* If we are not copying data, we can go to next bucket and avoid 1842 * unlocking the rcu. 
1843 */ 1844 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) { 1845 batch++; 1846 goto again_nocopy; 1847 } 1848 1849 rcu_read_unlock(); 1850 bpf_enable_instrumentation(); 1851 if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys, 1852 key_size * bucket_cnt) || 1853 copy_to_user(uvalues + total * value_size, values, 1854 value_size * bucket_cnt))) { 1855 ret = -EFAULT; 1856 goto after_loop; 1857 } 1858 1859 total += bucket_cnt; 1860 batch++; 1861 if (batch >= htab->n_buckets) { 1862 ret = -ENOENT; 1863 goto after_loop; 1864 } 1865 goto again; 1866 1867 after_loop: 1868 if (ret == -EFAULT) 1869 goto out; 1870 1871 /* copy # of entries and next batch */ 1872 ubatch = u64_to_user_ptr(attr->batch.out_batch); 1873 if (copy_to_user(ubatch, &batch, sizeof(batch)) || 1874 put_user(total, &uattr->batch.count)) 1875 ret = -EFAULT; 1876 1877 out: 1878 kvfree(keys); 1879 kvfree(values); 1880 return ret; 1881 } 1882 1883 static int 1884 htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, 1885 union bpf_attr __user *uattr) 1886 { 1887 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, 1888 false, true); 1889 } 1890 1891 static int 1892 htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map, 1893 const union bpf_attr *attr, 1894 union bpf_attr __user *uattr) 1895 { 1896 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, 1897 false, true); 1898 } 1899 1900 static int 1901 htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, 1902 union bpf_attr __user *uattr) 1903 { 1904 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, 1905 false, false); 1906 } 1907 1908 static int 1909 htab_map_lookup_and_delete_batch(struct bpf_map *map, 1910 const union bpf_attr *attr, 1911 union bpf_attr __user *uattr) 1912 { 1913 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, 1914 false, false); 1915 } 1916 1917 static int 1918 htab_lru_percpu_map_lookup_batch(struct bpf_map *map, 1919 const union bpf_attr *attr, 1920 union bpf_attr __user *uattr) 1921 { 1922 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, 1923 true, true); 1924 } 1925 1926 static int 1927 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map, 1928 const union bpf_attr *attr, 1929 union bpf_attr __user *uattr) 1930 { 1931 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, 1932 true, true); 1933 } 1934 1935 static int 1936 htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, 1937 union bpf_attr __user *uattr) 1938 { 1939 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, 1940 true, false); 1941 } 1942 1943 static int 1944 htab_lru_map_lookup_and_delete_batch(struct bpf_map *map, 1945 const union bpf_attr *attr, 1946 union bpf_attr __user *uattr) 1947 { 1948 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, 1949 true, false); 1950 } 1951 1952 struct bpf_iter_seq_hash_map_info { 1953 struct bpf_map *map; 1954 struct bpf_htab *htab; 1955 void *percpu_value_buf; // non-zero means percpu hash 1956 u32 bucket_id; 1957 u32 skip_elems; 1958 }; 1959 1960 static struct htab_elem * 1961 bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info, 1962 struct htab_elem *prev_elem) 1963 { 1964 const struct bpf_htab *htab = info->htab; 1965 u32 skip_elems = info->skip_elems; 1966 u32 bucket_id = info->bucket_id; 1967 struct hlist_nulls_head *head; 1968 struct hlist_nulls_node *n; 1969 struct htab_elem *elem; 1970 struct bucket *b; 1971 u32 i, count; 
static struct htab_elem *
bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
			   struct htab_elem *prev_elem)
{
	const struct bpf_htab *htab = info->htab;
	u32 skip_elems = info->skip_elems;
	u32 bucket_id = info->bucket_id;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	struct htab_elem *elem;
	struct bucket *b;
	u32 i, count;

	if (bucket_id >= htab->n_buckets)
		return NULL;

	/* try to find next elem in the same bucket */
	if (prev_elem) {
		/* no update/deletion on this bucket, prev_elem should still be
		 * valid and we won't skip elements.
		 */
		n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
		elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
		if (elem)
			return elem;

		/* not found, unlock and go to the next bucket */
		b = &htab->buckets[bucket_id++];
		rcu_read_unlock();
		skip_elems = 0;
	}

	for (i = bucket_id; i < htab->n_buckets; i++) {
		b = &htab->buckets[i];
		rcu_read_lock();

		count = 0;
		head = &b->head;
		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
			if (count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return elem;
			}
			count++;
		}

		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_hash_map_info *info = seq->private;
	struct htab_elem *elem;

	elem = bpf_hash_map_seq_find_next(info, NULL);
	if (!elem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return elem;
}

static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_hash_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_hash_map_seq_find_next(info, v);
}

static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
{
	struct bpf_iter_seq_hash_map_info *info = seq->private;
	u32 roundup_key_size, roundup_value_size;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	int ret = 0, off = 0, cpu;
	struct bpf_prog *prog;
	void __percpu *pptr;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, elem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (elem) {
			roundup_key_size = round_up(map->key_size, 8);
			ctx.key = elem->key;
			if (!info->percpu_value_buf) {
				ctx.value = elem->key + roundup_key_size;
			} else {
				roundup_value_size = round_up(map->value_size, 8);
				pptr = htab_elem_get_ptr(elem, map->key_size);
				for_each_possible_cpu(cpu) {
					copy_map_value_long(map, info->percpu_value_buf + off,
							    per_cpu_ptr(pptr, cpu));
					check_and_init_map_value(map, info->percpu_value_buf + off);
					off += roundup_value_size;
				}
				ctx.value = info->percpu_value_buf;
			}
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_hash_map_seq_show(seq, v);
}

static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_hash_map_seq_show(seq, NULL);
	else
		rcu_read_unlock();
}

static int bpf_iter_init_hash_map(void *priv_data,
				  struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
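		/* One value slot per possible CPU, each rounded up to 8 bytes,
		 * matching the layout filled in by __bpf_hash_map_seq_show().
		 */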
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	seq_info->htab = container_of(map, struct bpf_htab, map);
	return 0;
}

static void bpf_iter_fini_hash_map(void *priv_data)
{
	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_hash_map_seq_ops = {
	.start	= bpf_hash_map_seq_start,
	.next	= bpf_hash_map_seq_next,
	.stop	= bpf_hash_map_seq_stop,
	.show	= bpf_hash_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_hash_map_seq_ops,
	.init_seq_private	= bpf_iter_init_hash_map,
	.fini_seq_private	= bpf_iter_fini_hash_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_hash_map_info),
};
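
/*
 * Illustrative sketch, not part of the kernel sources: a minimal BPF
 * program using the bpf_map_elem iterator backed by iter_seq_info above.
 * The key/value types are assumptions and must match the iterated map.
 *
 *	SEC("iter/bpf_map_elem")
 *	int dump_htab(struct bpf_iter__bpf_map_elem *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		__u32 *key = ctx->key;
 *		__u64 *val = ctx->value;
 *
 *		if (!key || !val)
 *			return 0;
 *
 *		BPF_SEQ_PRINTF(seq, "%u: %llu\n", *key, *val);
 *		return 0;
 *	}
 *
 * For a per-cpu hash, ctx->value points at the buffer prepared in
 * bpf_iter_init_hash_map() above, holding one slot per possible CPU.
 */
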
static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				  void *callback_ctx, u64 flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	struct htab_elem *elem;
	u32 roundup_key_size;
	int i, num_elems = 0;
	void __percpu *pptr;
	struct bucket *b;
	void *key, *val;
	bool is_percpu;
	u64 ret = 0;

	if (flags != 0)
		return -EINVAL;

	is_percpu = htab_is_percpu(htab);

	roundup_key_size = round_up(map->key_size, 8);
	/* disable migration so percpu value prepared here will be the
	 * same as the one seen by the bpf program with bpf_map_lookup_elem().
	 */
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < htab->n_buckets; i++) {
		b = &htab->buckets[i];
		rcu_read_lock();
		head = &b->head;
		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
			key = elem->key;
			if (is_percpu) {
				/* current cpu value for percpu map */
				pptr = htab_elem_get_ptr(elem, map->key_size);
				val = this_cpu_ptr(pptr);
			} else {
				val = elem->key + roundup_key_size;
			}
			num_elems++;
			ret = callback_fn((u64)(long)map, (u64)(long)key,
					  (u64)(long)val, (u64)(long)callback_ctx, 0);
			/* return value: 0 - continue, 1 - stop and return */
			if (ret) {
				rcu_read_unlock();
				goto out;
			}
		}
		rcu_read_unlock();
	}
out:
	if (is_percpu)
		migrate_enable();
	return num_elems;
}
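
/*
 * Illustrative sketch, not part of the kernel sources: the callback
 * contract that bpf_for_each_hash_elem() implements, as seen from a BPF
 * program.  'struct cb_ctx', the map 'my_hash' and the value type are
 * assumptions for the example.
 *
 *	struct cb_ctx {
 *		__u64 sum;
 *	};
 *
 *	static long sum_values(struct bpf_map *map, __u32 *key, __u64 *val,
 *			       struct cb_ctx *ctx)
 *	{
 *		ctx->sum += *val;
 *		return 0;	// 0 - continue, 1 - stop iterating
 *	}
 *
 *	// in the program body:
 *	//	struct cb_ctx ctx = {};
 *	//	bpf_for_each_map_elem(&my_hash, sum_values, &ctx, 0);
 */
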
static u64 htab_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	u32 value_size = round_up(htab->map.value_size, 8);
	bool prealloc = htab_is_prealloc(htab);
	bool percpu = htab_is_percpu(htab);
	bool lru = htab_is_lru(htab);
	u64 num_entries;
	u64 usage = sizeof(struct bpf_htab);

	usage += sizeof(struct bucket) * htab->n_buckets;
	usage += sizeof(int) * num_possible_cpus() * HASHTAB_MAP_LOCK_COUNT;
	if (prealloc) {
		num_entries = map->max_entries;
		if (htab_has_extra_elems(htab))
			num_entries += num_possible_cpus();

		usage += htab->elem_size * num_entries;

		if (percpu)
			usage += value_size * num_possible_cpus() * num_entries;
		else if (!lru)
			usage += sizeof(struct htab_elem *) * num_possible_cpus();
	} else {
#define LLIST_NODE_SZ sizeof(struct llist_node)

		num_entries = htab->use_percpu_counter ?
			      percpu_counter_sum(&htab->pcount) :
			      atomic_read(&htab->count);
		usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
		if (percpu) {
			usage += (LLIST_NODE_SZ + sizeof(void *)) * num_entries;
			usage += value_size * num_possible_cpus() * num_entries;
		}
	}
	return usage;
}

BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
const struct bpf_map_ops htab_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_release_uref = htab_map_free_timers,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops htab_lru_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_release_uref = htab_map_free_timers,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_gen_lookup = htab_lru_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab_lru),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct htab_elem *l;

	if (cpu >= nr_cpu_ids)
		return NULL;

	l = __htab_map_lookup_elem(map, key);
	if (l)
		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct htab_elem *l;

	if (cpu >= nr_cpu_ids)
		return NULL;

	l = __htab_map_lookup_elem(map, key);
	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	/* We do not mark the LRU map element here so as not to mess up
	 * the eviction heuristics when user space does a map walk.
	 */
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
		check_and_init_map_value(map, value + off);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
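
/*
 * Illustrative sketch, not part of the kernel sources: a per-cpu hash
 * lookup from user space.  The value buffer must provide one 8-byte
 * rounded slot per possible CPU, which is what bpf_percpu_hash_copy()
 * above fills in.  'map_fd' and the __u64 value type are assumptions.
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(ncpus, sizeof(*vals));
 *	__u32 key = 0;
 *
 *	if (vals && !bpf_map_lookup_elem(map_fd, &key, vals)) {
 *		for (int i = 0; i < ncpus; i++)
 *			printf("cpu%d: %llu\n", i, vals[i]);
 *	}
 *	free(vals);
 */
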
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
					  struct seq_file *m)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	l = __htab_map_lookup_elem(map, key);
	if (!l) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	seq_puts(m, ": {\n");
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

const struct bpf_map_ops htab_percpu_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab_percpu),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops htab_lru_percpu_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab_lru_percpu),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_htab_map_alloc_check(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return htab_map_alloc_check(attr);
}

static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(ptr);
		}
	}

	htab_map_free(map);
}

/* only called from syscall */
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	ptr = htab_map_lookup_elem(map, key);
	if (ptr)
		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(ptr);

	return ret;
}

static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static int htab_of_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);

	return insn - insn_buf;
}
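
/* Drop the inner map meta data before tearing down the fd map itself. */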
static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}

const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_alloc_check = fd_htab_map_alloc_check,
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = htab_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab),
	.map_btf_id = &htab_map_btf_ids[0],
};