1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016 Facebook
4 */
5 #include <linux/bpf.h>
6 #include <linux/btf.h>
7 #include <linux/jhash.h>
8 #include <linux/filter.h>
9 #include <linux/rculist_nulls.h>
10 #include <linux/rcupdate_wait.h>
11 #include <linux/random.h>
12 #include <uapi/linux/btf.h>
13 #include <linux/rcupdate_trace.h>
14 #include <linux/btf_ids.h>
15 #include "percpu_freelist.h"
16 #include "bpf_lru_list.h"
17 #include "map_in_map.h"
18 #include <linux/bpf_mem_alloc.h>
19
20 #define HTAB_CREATE_FLAG_MASK \
21 (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \
22 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)
23
24 #define BATCH_OPS(_name) \
25 .map_lookup_batch = \
26 _name##_map_lookup_batch, \
27 .map_lookup_and_delete_batch = \
28 _name##_map_lookup_and_delete_batch, \
29 .map_update_batch = \
30 generic_map_update_batch, \
31 .map_delete_batch = \
32 generic_map_delete_batch
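/* For example, BATCH_OPS(htab) expands to:
 *   .map_lookup_batch = htab_map_lookup_batch,
 *   .map_lookup_and_delete_batch = htab_map_lookup_and_delete_batch,
 *   .map_update_batch = generic_map_update_batch,
 *   .map_delete_batch = generic_map_delete_batch
 */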
33
34 /*
35 * The bucket lock has two protection scopes:
36 *
37 * 1) Serializing concurrent operations from BPF programs on different
38 * CPUs
39 *
40 * 2) Serializing concurrent operations from BPF programs and sys_bpf()
41 *
42 * BPF programs can execute in any context including perf, kprobes and
43 * tracing. As there are almost no limits where perf, kprobes and tracing
44 * can be invoked from, the lock operations need to be protected against
45 * deadlocks. Deadlocks can be caused by recursion and by an invocation in
46 * the lock held section when functions which acquire this lock are invoked
47 * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
48 * variable bpf_prog_active, which prevents BPF programs attached to perf
49 * events, kprobes and tracing from being invoked before the prior invocation
50 * from one of these contexts has completed. sys_bpf() uses the same mechanism
51 * by pinning the task to the current CPU and incrementing the recursion
52 * protection across the map operation.
53 *
54 * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
55 * operations like memory allocations (even with GFP_ATOMIC) from atomic
56 * contexts. This is required because even with GFP_ATOMIC the memory
57 * allocator calls into code paths which acquire locks with long held lock
58 * sections. To ensure the deterministic behaviour, these locks are regular
59 * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
60 * true atomic contexts on an RT kernel are the low level hardware
61 * handling, scheduling, low level interrupt handling, NMIs etc. None of
62 * these contexts should ever do memory allocations.
63 *
64 * As regular device interrupt handlers and soft interrupts are forced into
65 * thread context, the existing code which does
66 * spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
67 * just works.
68 *
69 * In theory the BPF locks could be converted to regular spinlocks as well,
70 * but the bucket locks and percpu_freelist locks can be taken from
71 * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
72 * atomic contexts even on RT. Before the introduction of bpf_mem_alloc,
73 * it was only safe to use a raw spinlock for a preallocated hash map on an RT
74 * kernel, because there is no memory allocation within the lock held sections.
75 * However, after the hash map was fully converted to use bpf_mem_alloc, memory
76 * allocation for a non-preallocated hash map is non-synchronous, so it is
77 * safe to always use a raw spinlock for the bucket lock.
78 */
79 struct bucket {
80 struct hlist_nulls_head head;
81 raw_spinlock_t raw_lock;
82 };
83
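/* Each map carries HASHTAB_MAP_LOCK_COUNT per-CPU re-entrancy counters
 * (map_locked[] in struct bpf_htab). A bucket hash is folded into one of
 * these slots and htab_lock_bucket() uses the matching counter to detect
 * same-CPU recursion on a bucket lock.
 */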
84 #define HASHTAB_MAP_LOCK_COUNT 8
85 #define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
86
87 struct bpf_htab {
88 struct bpf_map map;
89 struct bpf_mem_alloc ma;
90 struct bpf_mem_alloc pcpu_ma;
91 struct bucket *buckets;
92 void *elems;
93 union {
94 struct pcpu_freelist freelist;
95 struct bpf_lru lru;
96 };
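/* per-CPU spare elements; an update on a prealloc map swaps the spare with
 * the element being replaced instead of doing a freelist pop/push
 */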
97 struct htab_elem *__percpu *extra_elems;
98 /* number of elements in non-preallocated hashtable is kept
99 * in either pcount or count
100 */
101 struct percpu_counter pcount;
102 atomic_t count;
103 bool use_percpu_counter;
104 u32 n_buckets; /* number of hash buckets */
105 u32 elem_size; /* size of each element in bytes */
106 u32 hashrnd;
107 struct lock_class_key lockdep_key;
108 int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
109 };
110
111 /* each htab element is struct htab_elem + key + value */
112 struct htab_elem {
113 union {
114 struct hlist_nulls_node hash_node;
115 struct {
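/* padding keeps fnode at the offset of hash_node.pprev; see the
 * BUILD_BUG_ON() in htab_map_alloc_check()
 */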
116 void *padding;
117 union {
118 struct pcpu_freelist_node fnode;
119 struct htab_elem *batch_flink;
120 };
121 };
122 };
123 union {
124 /* pointer to per-cpu pointer */
125 void *ptr_to_pptr;
126 struct bpf_lru_node lru_node;
127 };
128 u32 hash;
129 char key[] __aligned(8);
130 };
131
132 static inline bool htab_is_prealloc(const struct bpf_htab *htab)
133 {
134 return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
135 }
136
137 static void htab_init_buckets(struct bpf_htab *htab)
138 {
139 unsigned int i;
140
141 for (i = 0; i < htab->n_buckets; i++) {
142 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
143 raw_spin_lock_init(&htab->buckets[i].raw_lock);
144 lockdep_set_class(&htab->buckets[i].raw_lock,
145 &htab->lockdep_key);
146 cond_resched();
147 }
148 }
149
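/* Acquire the bucket lock with IRQs off. The per-CPU map_locked[] counter
 * for this lock-class slot is bumped first; if it is already non-zero, a
 * program on this CPU is inside the same slot's locked section (e.g. a
 * tracing prog re-entering the map), so bail out with -EBUSY rather than
 * deadlock on the raw spinlock.
 */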
150 static inline int htab_lock_bucket(const struct bpf_htab *htab,
151 struct bucket *b, u32 hash,
152 unsigned long *pflags)
153 {
154 unsigned long flags;
155
156 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
157
158 preempt_disable();
159 local_irq_save(flags);
160 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
161 __this_cpu_dec(*(htab->map_locked[hash]));
162 local_irq_restore(flags);
163 preempt_enable();
164 return -EBUSY;
165 }
166
167 raw_spin_lock(&b->raw_lock);
168 *pflags = flags;
169
170 return 0;
171 }
172
173 static inline void htab_unlock_bucket(const struct bpf_htab *htab,
174 struct bucket *b, u32 hash,
175 unsigned long flags)
176 {
177 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
178 raw_spin_unlock(&b->raw_lock);
179 __this_cpu_dec(*(htab->map_locked[hash]));
180 local_irq_restore(flags);
181 preempt_enable();
182 }
183
184 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
185
186 static bool htab_is_lru(const struct bpf_htab *htab)
187 {
188 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
189 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
190 }
191
192 static bool htab_is_percpu(const struct bpf_htab *htab)
193 {
194 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
195 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
196 }
197
198 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
199 void __percpu *pptr)
200 {
201 *(void __percpu **)(l->key + key_size) = pptr;
202 }
203
204 static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
205 {
206 return *(void __percpu **)(l->key + key_size);
207 }
208
209 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
210 {
211 return *(void **)(l->key + roundup(map->key_size, 8));
212 }
213
214 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
215 {
216 return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
217 }
218
219 static bool htab_has_extra_elems(struct bpf_htab *htab)
220 {
221 return !htab_is_percpu(htab) && !htab_is_lru(htab);
222 }
223
224 static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab)
225 {
226 u32 num_entries = htab->map.max_entries;
227 int i;
228
229 if (htab_has_extra_elems(htab))
230 num_entries += num_possible_cpus();
231
232 for (i = 0; i < num_entries; i++) {
233 struct htab_elem *elem;
234
235 elem = get_htab_elem(htab, i);
236 if (btf_record_has_field(htab->map.record, BPF_TIMER))
237 bpf_obj_free_timer(htab->map.record,
238 elem->key + round_up(htab->map.key_size, 8));
239 if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
240 bpf_obj_free_workqueue(htab->map.record,
241 elem->key + round_up(htab->map.key_size, 8));
242 cond_resched();
243 }
244 }
245
246 static void htab_free_prealloced_fields(struct bpf_htab *htab)
247 {
248 u32 num_entries = htab->map.max_entries;
249 int i;
250
251 if (IS_ERR_OR_NULL(htab->map.record))
252 return;
253 if (htab_has_extra_elems(htab))
254 num_entries += num_possible_cpus();
255 for (i = 0; i < num_entries; i++) {
256 struct htab_elem *elem;
257
258 elem = get_htab_elem(htab, i);
259 if (htab_is_percpu(htab)) {
260 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
261 int cpu;
262
263 for_each_possible_cpu(cpu) {
264 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
265 cond_resched();
266 }
267 } else {
268 bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
269 cond_resched();
270 }
271 cond_resched();
272 }
273 }
274
275 static void htab_free_elems(struct bpf_htab *htab)
276 {
277 int i;
278
279 if (!htab_is_percpu(htab))
280 goto free_elems;
281
282 for (i = 0; i < htab->map.max_entries; i++) {
283 void __percpu *pptr;
284
285 pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
286 htab->map.key_size);
287 free_percpu(pptr);
288 cond_resched();
289 }
290 free_elems:
291 bpf_map_area_free(htab->elems);
292 }
293
294 /* The LRU list has a lock (lru_lock). Each htab bucket has a lock
295 * (bucket_lock). If both locks need to be acquired together, the lock
296 * order is always lru_lock -> bucket_lock and this only happens in
297 * bpf_lru_list.c logic. For example, certain code path of
298 * bpf_lru_pop_free(), which is called by function prealloc_lru_pop(),
299 * will acquire lru_lock first followed by acquiring bucket_lock.
300 *
301 * In hashtab.c, to avoid deadlock, lock acquisition of
302 * bucket_lock followed by lru_lock is not allowed. In such cases,
303 * bucket_lock needs to be released first before acquiring lru_lock.
304 */
305 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
306 u32 hash)
307 {
308 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
309 struct htab_elem *l;
310
311 if (node) {
312 bpf_map_inc_elem_count(&htab->map);
313 l = container_of(node, struct htab_elem, lru_node);
314 memcpy(l->key, key, htab->map.key_size);
315 return l;
316 }
317
318 return NULL;
319 }
320
321 static int prealloc_init(struct bpf_htab *htab)
322 {
323 u32 num_entries = htab->map.max_entries;
324 int err = -ENOMEM, i;
325
326 if (htab_has_extra_elems(htab))
327 num_entries += num_possible_cpus();
328
329 htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
330 htab->map.numa_node);
331 if (!htab->elems)
332 return -ENOMEM;
333
334 if (!htab_is_percpu(htab))
335 goto skip_percpu_elems;
336
337 for (i = 0; i < num_entries; i++) {
338 u32 size = round_up(htab->map.value_size, 8);
339 void __percpu *pptr;
340
341 pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
342 GFP_USER | __GFP_NOWARN);
343 if (!pptr)
344 goto free_elems;
345 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
346 pptr);
347 cond_resched();
348 }
349
350 skip_percpu_elems:
351 if (htab_is_lru(htab))
352 err = bpf_lru_init(&htab->lru,
353 htab->map.map_flags & BPF_F_NO_COMMON_LRU,
354 offsetof(struct htab_elem, hash) -
355 offsetof(struct htab_elem, lru_node),
356 htab_lru_map_delete_node,
357 htab);
358 else
359 err = pcpu_freelist_init(&htab->freelist);
360
361 if (err)
362 goto free_elems;
363
364 if (htab_is_lru(htab))
365 bpf_lru_populate(&htab->lru, htab->elems,
366 offsetof(struct htab_elem, lru_node),
367 htab->elem_size, num_entries);
368 else
369 pcpu_freelist_populate(&htab->freelist,
370 htab->elems + offsetof(struct htab_elem, fnode),
371 htab->elem_size, num_entries);
372
373 return 0;
374
375 free_elems:
376 htab_free_elems(htab);
377 return err;
378 }
379
380 static void prealloc_destroy(struct bpf_htab *htab)
381 {
382 htab_free_elems(htab);
383
384 if (htab_is_lru(htab))
385 bpf_lru_destroy(&htab->lru);
386 else
387 pcpu_freelist_destroy(&htab->freelist);
388 }
389
390 static int alloc_extra_elems(struct bpf_htab *htab)
391 {
392 struct htab_elem *__percpu *pptr, *l_new;
393 struct pcpu_freelist_node *l;
394 int cpu;
395
396 pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
397 GFP_USER | __GFP_NOWARN);
398 if (!pptr)
399 return -ENOMEM;
400
401 for_each_possible_cpu(cpu) {
402 l = pcpu_freelist_pop(&htab->freelist);
403 /* pop will succeed, since prealloc_init()
404 * preallocated extra num_possible_cpus elements
405 */
406 l_new = container_of(l, struct htab_elem, fnode);
407 *per_cpu_ptr(pptr, cpu) = l_new;
408 }
409 htab->extra_elems = pptr;
410 return 0;
411 }
412
413 /* Called from syscall */
414 static int htab_map_alloc_check(union bpf_attr *attr)
415 {
416 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
417 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
418 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
419 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
420 /* percpu_lru means each cpu has its own LRU list.
421 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
422 * the map's value itself is percpu. percpu_lru has
423 * nothing to do with the map's value.
424 */
425 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
426 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
427 bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
428 int numa_node = bpf_map_attr_numa_node(attr);
429
430 BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
431 offsetof(struct htab_elem, hash_node.pprev));
432
433 if (zero_seed && !capable(CAP_SYS_ADMIN))
434 /* Guard against local DoS, and discourage production use. */
435 return -EPERM;
436
437 if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
438 !bpf_map_flags_access_ok(attr->map_flags))
439 return -EINVAL;
440
441 if (!lru && percpu_lru)
442 return -EINVAL;
443
444 if (lru && !prealloc)
445 return -ENOTSUPP;
446
447 if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
448 return -EINVAL;
449
450 /* check sanity of attributes.
451 * value_size == 0 may be allowed in the future to use map as a set
452 */
453 if (attr->max_entries == 0 || attr->key_size == 0 ||
454 attr->value_size == 0)
455 return -EINVAL;
456
457 if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE -
458 sizeof(struct htab_elem))
459 /* if key_size + value_size is bigger, the user space won't be
460 * able to access the elements via bpf syscall. This check
461 * also makes sure that the elem_size doesn't overflow and it's
462 * kmalloc-able later in htab_map_update_elem()
463 */
464 return -E2BIG;
465 /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
466 if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
467 return -E2BIG;
468
469 return 0;
470 }
471
472 static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
473 {
474 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
475 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
476 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
477 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
478 /* percpu_lru means each cpu has its own LRU list.
479 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
480 * the map's value itself is percpu. percpu_lru has
481 * nothing to do with the map's value.
482 */
483 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
484 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
485 struct bpf_htab *htab;
486 int err, i;
487
488 htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
489 if (!htab)
490 return ERR_PTR(-ENOMEM);
491
492 lockdep_register_key(&htab->lockdep_key);
493
494 bpf_map_init_from_attr(&htab->map, attr);
495
496 if (percpu_lru) {
497 /* ensure each CPU's lru list has >=1 element.
498 * since we are at it, make each lru list have the same
499 * number of elements.
500 */
501 htab->map.max_entries = roundup(attr->max_entries,
502 num_possible_cpus());
503 if (htab->map.max_entries < attr->max_entries)
504 htab->map.max_entries = rounddown(attr->max_entries,
505 num_possible_cpus());
506 }
507
508 /* hash table size must be a power of 2; roundup_pow_of_two() can overflow
509 * into UB on 32-bit arches, so check that first
510 */
511 err = -E2BIG;
512 if (htab->map.max_entries > 1UL << 31)
513 goto free_htab;
514
515 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
516
517 htab->elem_size = sizeof(struct htab_elem) +
518 round_up(htab->map.key_size, 8);
519 if (percpu)
520 htab->elem_size += sizeof(void *);
521 else
522 htab->elem_size += round_up(htab->map.value_size, 8);
523
524 /* check for u32 overflow */
525 if (htab->n_buckets > U32_MAX / sizeof(struct bucket))
526 goto free_htab;
527
528 err = bpf_map_init_elem_count(&htab->map);
529 if (err)
530 goto free_htab;
531
532 err = -ENOMEM;
533 htab->buckets = bpf_map_area_alloc(htab->n_buckets *
534 sizeof(struct bucket),
535 htab->map.numa_node);
536 if (!htab->buckets)
537 goto free_elem_count;
538
539 for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
540 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
541 sizeof(int),
542 sizeof(int),
543 GFP_USER);
544 if (!htab->map_locked[i])
545 goto free_map_locked;
546 }
547
548 if (htab->map.map_flags & BPF_F_ZERO_SEED)
549 htab->hashrnd = 0;
550 else
551 htab->hashrnd = get_random_u32();
552
553 htab_init_buckets(htab);
554
555 /* compute_batch_value() computes batch value as num_online_cpus() * 2
556 * and __percpu_counter_compare() needs
557 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
558 * for percpu_counter to be faster than atomic_t. In practice the average bpf
559 * hash map size is 10k, which means that a system with 64 cpus will fill
560 * hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore
561 * we define our own batch count as 32; then a 10k hash map can be filled up to 80%:
562 * 10k - 8k > 32 _batch_ * 64 _cpus_
563 * and __percpu_counter_compare() will still be fast. At that point hash map
564 * collisions will dominate its performance anyway. Assume that hash map filled
565 * to 50+% isn't going to be O(1) and use the following formula to choose
566 * between percpu_counter and atomic_t.
567 */
568 #define PERCPU_COUNTER_BATCH 32
569 if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH)
570 htab->use_percpu_counter = true;
571
572 if (htab->use_percpu_counter) {
573 err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
574 if (err)
575 goto free_map_locked;
576 }
577
578 if (prealloc) {
579 err = prealloc_init(htab);
580 if (err)
581 goto free_map_locked;
582
583 if (!percpu && !lru) {
584 /* lru itself can remove the least used element, so
585 * there is no need for an extra elem during map_update.
586 */
587 err = alloc_extra_elems(htab);
588 if (err)
589 goto free_prealloc;
590 }
591 } else {
592 err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
593 if (err)
594 goto free_map_locked;
595 if (percpu) {
596 err = bpf_mem_alloc_init(&htab->pcpu_ma,
597 round_up(htab->map.value_size, 8), true);
598 if (err)
599 goto free_map_locked;
600 }
601 }
602
603 return &htab->map;
604
605 free_prealloc:
606 prealloc_destroy(htab);
607 free_map_locked:
608 if (htab->use_percpu_counter)
609 percpu_counter_destroy(&htab->pcount);
610 for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
611 free_percpu(htab->map_locked[i]);
612 bpf_map_area_free(htab->buckets);
613 bpf_mem_alloc_destroy(&htab->pcpu_ma);
614 bpf_mem_alloc_destroy(&htab->ma);
615 free_elem_count:
616 bpf_map_free_elem_count(&htab->map);
617 free_htab:
618 lockdep_unregister_key(&htab->lockdep_key);
619 bpf_map_area_free(htab);
620 return ERR_PTR(err);
621 }
622
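/* jhash2() consumes the key a u32 at a time, so use it when the key size
 * is a multiple of 4 bytes and fall back to the byte-wise jhash() otherwise.
 */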
623 static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
624 {
625 if (likely(key_len % 4 == 0))
626 return jhash2(key, key_len / 4, hashrnd);
627 return jhash(key, key_len, hashrnd);
628 }
629
630 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
631 {
632 return &htab->buckets[hash & (htab->n_buckets - 1)];
633 }
634
635 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
636 {
637 return &__select_bucket(htab, hash)->head;
638 }
639
640 /* this lookup function can only be called with bucket lock taken */
641 static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
642 void *key, u32 key_size)
643 {
644 struct hlist_nulls_node *n;
645 struct htab_elem *l;
646
647 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
648 if (l->hash == hash && !memcmp(&l->key, key, key_size))
649 return l;
650
651 return NULL;
652 }
653
654 /* can be called without bucket lock. it will repeat the loop in
655 * the unlikely event that elements were moved from one bucket into another
656 * while the linked list is being walked
657 */
658 static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
659 u32 hash, void *key,
660 u32 key_size, u32 n_buckets)
661 {
662 struct hlist_nulls_node *n;
663 struct htab_elem *l;
664
665 again:
666 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
667 if (l->hash == hash && !memcmp(&l->key, key, key_size))
668 return l;
669
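/* The nulls value at the end of the list encodes the bucket index (see
 * htab_init_buckets()). A mismatch means the element we were walking was
 * moved to another bucket concurrently, so restart the walk.
 */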
670 if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
671 goto again;
672
673 return NULL;
674 }
675
676 /* Called from syscall or from eBPF program directly, so
677 * arguments have to match bpf_map_lookup_elem() exactly.
678 * The return value is adjusted by BPF instructions
679 * in htab_map_gen_lookup().
680 */
681 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
682 {
683 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
684 struct hlist_nulls_head *head;
685 struct htab_elem *l;
686 u32 hash, key_size;
687
688 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
689 !rcu_read_lock_bh_held());
690
691 key_size = map->key_size;
692
693 hash = htab_map_hash(key, key_size, htab->hashrnd);
694
695 head = select_bucket(htab, hash);
696
697 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
698
699 return l;
700 }
701
702 static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
703 {
704 struct htab_elem *l = __htab_map_lookup_elem(map, key);
705
706 if (l)
707 return l->key + round_up(map->key_size, 8);
708
709 return NULL;
710 }
711
712 /* inline bpf_map_lookup_elem() call.
713 * Instead of:
714 * bpf_prog
715 * bpf_map_lookup_elem
716 * map->ops->map_lookup_elem
717 * htab_map_lookup_elem
718 * __htab_map_lookup_elem
719 * do:
720 * bpf_prog
721 * __htab_map_lookup_elem
722 */
723 static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
724 {
725 struct bpf_insn *insn = insn_buf;
726 const int ret = BPF_REG_0;
727
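/* Emitted sequence: r0 = __htab_map_lookup_elem(map, key); if r0 is NULL,
 * skip the adjustment; otherwise advance r0 past the htab_elem header and
 * the rounded-up key so it points at the value.
 */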
728 BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
729 (void *(*)(struct bpf_map *map, void *key))NULL));
730 *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
731 *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
732 *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
733 offsetof(struct htab_elem, key) +
734 round_up(map->key_size, 8));
735 return insn - insn_buf;
736 }
737
738 static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
739 void *key, const bool mark)
740 {
741 struct htab_elem *l = __htab_map_lookup_elem(map, key);
742
743 if (l) {
744 if (mark)
745 bpf_lru_node_set_ref(&l->lru_node);
746 return l->key + round_up(map->key_size, 8);
747 }
748
749 return NULL;
750 }
751
752 static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
753 {
754 return __htab_lru_map_lookup_elem(map, key, true);
755 }
756
757 static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
758 {
759 return __htab_lru_map_lookup_elem(map, key, false);
760 }
761
762 static int htab_lru_map_gen_lookup(struct bpf_map *map,
763 struct bpf_insn *insn_buf)
764 {
765 struct bpf_insn *insn = insn_buf;
766 const int ret = BPF_REG_0;
767 const int ref_reg = BPF_REG_1;
768
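/* Same as htab_map_gen_lookup() plus an inlined test-and-set of
 * lru_node.ref: the ref byte is only written when it is still zero, which
 * avoids a store (and a dirtied cache line) on every hit of a hot element.
 */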
769 BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
770 (void *(*)(struct bpf_map *map, void *key))NULL));
771 *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
772 *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
773 *insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
774 offsetof(struct htab_elem, lru_node) +
775 offsetof(struct bpf_lru_node, ref));
776 *insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
777 *insn++ = BPF_ST_MEM(BPF_B, ret,
778 offsetof(struct htab_elem, lru_node) +
779 offsetof(struct bpf_lru_node, ref),
780 1);
781 *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
782 offsetof(struct htab_elem, key) +
783 round_up(map->key_size, 8));
784 return insn - insn_buf;
785 }
786
787 static void check_and_free_fields(struct bpf_htab *htab,
788 struct htab_elem *elem)
789 {
790 if (htab_is_percpu(htab)) {
791 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
792 int cpu;
793
794 for_each_possible_cpu(cpu)
795 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
796 } else {
797 void *map_value = elem->key + round_up(htab->map.key_size, 8);
798
799 bpf_obj_free_fields(htab->map.record, map_value);
800 }
801 }
802
803 /* It is called from the bpf_lru_list when the LRU needs to delete
804 * older elements from the htab.
805 */
806 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
807 {
808 struct bpf_htab *htab = arg;
809 struct htab_elem *l = NULL, *tgt_l;
810 struct hlist_nulls_head *head;
811 struct hlist_nulls_node *n;
812 unsigned long flags;
813 struct bucket *b;
814 int ret;
815
816 tgt_l = container_of(node, struct htab_elem, lru_node);
817 b = __select_bucket(htab, tgt_l->hash);
818 head = &b->head;
819
820 ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
821 if (ret)
822 return false;
823
824 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
825 if (l == tgt_l) {
826 hlist_nulls_del_rcu(&l->hash_node);
827 bpf_map_dec_elem_count(&htab->map);
828 break;
829 }
830
831 htab_unlock_bucket(htab, b, tgt_l->hash, flags);
832
833 if (l == tgt_l)
834 check_and_free_fields(htab, l);
835 return l == tgt_l;
836 }
837
838 /* Called from syscall */
839 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
840 {
841 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
842 struct hlist_nulls_head *head;
843 struct htab_elem *l, *next_l;
844 u32 hash, key_size;
845 int i = 0;
846
847 WARN_ON_ONCE(!rcu_read_lock_held());
848
849 key_size = map->key_size;
850
851 if (!key)
852 goto find_first_elem;
853
854 hash = htab_map_hash(key, key_size, htab->hashrnd);
855
856 head = select_bucket(htab, hash);
857
858 /* lookup the key */
859 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
860
861 if (!l)
862 goto find_first_elem;
863
864 /* key was found, get next key in the same bucket */
865 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
866 struct htab_elem, hash_node);
867
868 if (next_l) {
869 /* if next elem in this hash list is non-zero, just return it */
870 memcpy(next_key, next_l->key, key_size);
871 return 0;
872 }
873
874 /* no more elements in this hash list, go to the next bucket */
875 i = hash & (htab->n_buckets - 1);
876 i++;
877
878 find_first_elem:
879 /* iterate over buckets */
880 for (; i < htab->n_buckets; i++) {
881 head = select_bucket(htab, i);
882
883 /* pick first element in the bucket */
884 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
885 struct htab_elem, hash_node);
886 if (next_l) {
887 /* if it's not empty, just return it */
888 memcpy(next_key, next_l->key, key_size);
889 return 0;
890 }
891 }
892
893 /* iterated over all buckets and all elements */
894 return -ENOENT;
895 }
896
897 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
898 {
899 check_and_free_fields(htab, l);
900
901 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
902 bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
903 bpf_mem_cache_free(&htab->ma, l);
904 }
905
906 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
907 {
908 struct bpf_map *map = &htab->map;
909 void *ptr;
910
911 if (map->ops->map_fd_put_ptr) {
912 ptr = fd_htab_map_get_ptr(map, l);
913 map->ops->map_fd_put_ptr(map, ptr, true);
914 }
915 }
916
917 static bool is_map_full(struct bpf_htab *htab)
918 {
919 if (htab->use_percpu_counter)
920 return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
921 PERCPU_COUNTER_BATCH) >= 0;
922 return atomic_read(&htab->count) >= htab->map.max_entries;
923 }
924
925 static void inc_elem_count(struct bpf_htab *htab)
926 {
927 bpf_map_inc_elem_count(&htab->map);
928
929 if (htab->use_percpu_counter)
930 percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
931 else
932 atomic_inc(&htab->count);
933 }
934
935 static void dec_elem_count(struct bpf_htab *htab)
936 {
937 bpf_map_dec_elem_count(&htab->map);
938
939 if (htab->use_percpu_counter)
940 percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
941 else
942 atomic_dec(&htab->count);
943 }
944
945
946 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
947 {
948 htab_put_fd_value(htab, l);
949
950 if (htab_is_prealloc(htab)) {
951 bpf_map_dec_elem_count(&htab->map);
952 check_and_free_fields(htab, l);
953 pcpu_freelist_push(&htab->freelist, &l->fnode);
954 } else {
955 dec_elem_count(htab);
956 htab_elem_free(htab, l);
957 }
958 }
959
960 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
961 void *value, bool onallcpus)
962 {
963 if (!onallcpus) {
964 /* copy true value_size bytes */
965 copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
966 } else {
967 u32 size = round_up(htab->map.value_size, 8);
968 int off = 0, cpu;
969
970 for_each_possible_cpu(cpu) {
971 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
972 off += size;
973 }
974 }
975 }
976
977 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
978 void *value, bool onallcpus)
979 {
980 /* When not setting the initial value on all cpus, zero-fill element
981 * values for other cpus. Otherwise, bpf program has no way to ensure
982 * known initial values for cpus other than current one
983 * (onallcpus=false always when coming from bpf prog).
984 */
985 if (!onallcpus) {
986 int current_cpu = raw_smp_processor_id();
987 int cpu;
988
989 for_each_possible_cpu(cpu) {
990 if (cpu == current_cpu)
991 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value);
992 else /* Since elem is preallocated, we cannot touch special fields */
993 zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu));
994 }
995 } else {
996 pcpu_copy_value(htab, pptr, value, onallcpus);
997 }
998 }
999
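/* A hash-of-maps stores a struct bpf_map * in the value area. On 64-bit
 * kernels that pointer is wider than the 4-byte user-visible value, so
 * copies must use the value size rounded up to 8 bytes.
 */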
1000 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
1001 {
1002 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
1003 BITS_PER_LONG == 64;
1004 }
1005
1006 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
1007 void *value, u32 key_size, u32 hash,
1008 bool percpu, bool onallcpus,
1009 struct htab_elem *old_elem)
1010 {
1011 u32 size = htab->map.value_size;
1012 bool prealloc = htab_is_prealloc(htab);
1013 struct htab_elem *l_new, **pl_new;
1014 void __percpu *pptr;
1015
1016 if (prealloc) {
1017 if (old_elem) {
1018 /* if we're updating the existing element,
1019 * use per-cpu extra elems to avoid freelist_pop/push
1020 */
1021 pl_new = this_cpu_ptr(htab->extra_elems);
1022 l_new = *pl_new;
1023 *pl_new = old_elem;
1024 } else {
1025 struct pcpu_freelist_node *l;
1026
1027 l = __pcpu_freelist_pop(&htab->freelist);
1028 if (!l)
1029 return ERR_PTR(-E2BIG);
1030 l_new = container_of(l, struct htab_elem, fnode);
1031 bpf_map_inc_elem_count(&htab->map);
1032 }
1033 } else {
1034 if (is_map_full(htab))
1035 if (!old_elem)
1036 /* when map is full and update() is replacing
1037 * old element, it's ok to allocate, since
1038 * old element will be freed immediately.
1039 * Otherwise return an error
1040 */
1041 return ERR_PTR(-E2BIG);
1042 inc_elem_count(htab);
1043 l_new = bpf_mem_cache_alloc(&htab->ma);
1044 if (!l_new) {
1045 l_new = ERR_PTR(-ENOMEM);
1046 goto dec_count;
1047 }
1048 }
1049
1050 memcpy(l_new->key, key, key_size);
1051 if (percpu) {
1052 if (prealloc) {
1053 pptr = htab_elem_get_ptr(l_new, key_size);
1054 } else {
1055 /* alloc_percpu zero-fills */
1056 void *ptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
1057
1058 if (!ptr) {
1059 bpf_mem_cache_free(&htab->ma, l_new);
1060 l_new = ERR_PTR(-ENOMEM);
1061 goto dec_count;
1062 }
1063 l_new->ptr_to_pptr = ptr;
1064 pptr = *(void __percpu **)ptr;
1065 }
1066
1067 pcpu_init_value(htab, pptr, value, onallcpus);
1068
1069 if (!prealloc)
1070 htab_elem_set_ptr(l_new, key_size, pptr);
1071 } else if (fd_htab_map_needs_adjust(htab)) {
1072 size = round_up(size, 8);
1073 memcpy(l_new->key + round_up(key_size, 8), value, size);
1074 } else {
1075 copy_map_value(&htab->map,
1076 l_new->key + round_up(key_size, 8),
1077 value);
1078 }
1079
1080 l_new->hash = hash;
1081 return l_new;
1082 dec_count:
1083 dec_elem_count(htab);
1084 return l_new;
1085 }
1086
1087 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
1088 u64 map_flags)
1089 {
1090 if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
1091 /* elem already exists */
1092 return -EEXIST;
1093
1094 if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
1095 /* elem doesn't exist, cannot update it */
1096 return -ENOENT;
1097
1098 return 0;
1099 }
1100
1101 /* Called from syscall or from eBPF program */
1102 static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
1103 u64 map_flags)
1104 {
1105 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1106 struct htab_elem *l_new = NULL, *l_old;
1107 struct hlist_nulls_head *head;
1108 unsigned long flags;
1109 void *old_map_ptr;
1110 struct bucket *b;
1111 u32 key_size, hash;
1112 int ret;
1113
1114 if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
1115 /* unknown flags */
1116 return -EINVAL;
1117
1118 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1119 !rcu_read_lock_bh_held());
1120
1121 key_size = map->key_size;
1122
1123 hash = htab_map_hash(key, key_size, htab->hashrnd);
1124
1125 b = __select_bucket(htab, hash);
1126 head = &b->head;
1127
1128 if (unlikely(map_flags & BPF_F_LOCK)) {
1129 if (unlikely(!btf_record_has_field(map->record, BPF_SPIN_LOCK)))
1130 return -EINVAL;
1131 /* find an element without taking the bucket lock */
1132 l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
1133 htab->n_buckets);
1134 ret = check_flags(htab, l_old, map_flags);
1135 if (ret)
1136 return ret;
1137 if (l_old) {
1138 /* grab the element lock and update value in place */
1139 copy_map_value_locked(map,
1140 l_old->key + round_up(key_size, 8),
1141 value, false);
1142 return 0;
1143 }
1144 /* fall through, grab the bucket lock and lookup again.
1145 * 99.9% chance that the element won't be found,
1146 * but second lookup under lock has to be done.
1147 */
1148 }
1149
1150 ret = htab_lock_bucket(htab, b, hash, &flags);
1151 if (ret)
1152 return ret;
1153
1154 l_old = lookup_elem_raw(head, hash, key, key_size);
1155
1156 ret = check_flags(htab, l_old, map_flags);
1157 if (ret)
1158 goto err;
1159
1160 if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
1161 /* first lookup without the bucket lock didn't find the element,
1162 * but second lookup with the bucket lock found it.
1163 * This case is highly unlikely, but has to be dealt with:
1164 * grab the element lock in addition to the bucket lock
1165 * and update element in place
1166 */
1167 copy_map_value_locked(map,
1168 l_old->key + round_up(key_size, 8),
1169 value, false);
1170 ret = 0;
1171 goto err;
1172 }
1173
1174 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
1175 l_old);
1176 if (IS_ERR(l_new)) {
1177 /* all pre-allocated elements are in use or memory exhausted */
1178 ret = PTR_ERR(l_new);
1179 goto err;
1180 }
1181
1182 /* add new element to the head of the list, so that
1183 * concurrent search will find it before old elem
1184 */
1185 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1186 if (l_old) {
1187 hlist_nulls_del_rcu(&l_old->hash_node);
1188
1189 /* l_old has already been stashed in htab->extra_elems, free
1190 * its special fields before it is available for reuse. Also
1191 * save the old map pointer in htab of maps before unlock
1192 * and release it after unlock.
1193 */
1194 old_map_ptr = NULL;
1195 if (htab_is_prealloc(htab)) {
1196 if (map->ops->map_fd_put_ptr)
1197 old_map_ptr = fd_htab_map_get_ptr(map, l_old);
1198 check_and_free_fields(htab, l_old);
1199 }
1200 }
1201 htab_unlock_bucket(htab, b, hash, flags);
1202 if (l_old) {
1203 if (old_map_ptr)
1204 map->ops->map_fd_put_ptr(map, old_map_ptr, true);
1205 if (!htab_is_prealloc(htab))
1206 free_htab_elem(htab, l_old);
1207 }
1208 return 0;
1209 err:
1210 htab_unlock_bucket(htab, b, hash, flags);
1211 return ret;
1212 }
1213
1214 static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
1215 {
1216 check_and_free_fields(htab, elem);
1217 bpf_map_dec_elem_count(&htab->map);
1218 bpf_lru_push_free(&htab->lru, &elem->lru_node);
1219 }
1220
1221 static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
1222 u64 map_flags)
1223 {
1224 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1225 struct htab_elem *l_new, *l_old = NULL;
1226 struct hlist_nulls_head *head;
1227 unsigned long flags;
1228 struct bucket *b;
1229 u32 key_size, hash;
1230 int ret;
1231
1232 if (unlikely(map_flags > BPF_EXIST))
1233 /* unknown flags */
1234 return -EINVAL;
1235
1236 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1237 !rcu_read_lock_bh_held());
1238
1239 key_size = map->key_size;
1240
1241 hash = htab_map_hash(key, key_size, htab->hashrnd);
1242
1243 b = __select_bucket(htab, hash);
1244 head = &b->head;
1245
1246 /* For LRU, we need to alloc before taking bucket's
1247 * spinlock because getting free nodes from LRU may need
1248 * to remove older elements from htab and this removal
1249 * operation will need a bucket lock.
1250 */
1251 l_new = prealloc_lru_pop(htab, key, hash);
1252 if (!l_new)
1253 return -ENOMEM;
1254 copy_map_value(&htab->map,
1255 l_new->key + round_up(map->key_size, 8), value);
1256
1257 ret = htab_lock_bucket(htab, b, hash, &flags);
1258 if (ret)
1259 goto err_lock_bucket;
1260
1261 l_old = lookup_elem_raw(head, hash, key, key_size);
1262
1263 ret = check_flags(htab, l_old, map_flags);
1264 if (ret)
1265 goto err;
1266
1267 /* add new element to the head of the list, so that
1268 * concurrent search will find it before old elem
1269 */
1270 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1271 if (l_old) {
1272 bpf_lru_node_set_ref(&l_new->lru_node);
1273 hlist_nulls_del_rcu(&l_old->hash_node);
1274 }
1275 ret = 0;
1276
1277 err:
1278 htab_unlock_bucket(htab, b, hash, flags);
1279
1280 err_lock_bucket:
1281 if (ret)
1282 htab_lru_push_free(htab, l_new);
1283 else if (l_old)
1284 htab_lru_push_free(htab, l_old);
1285
1286 return ret;
1287 }
1288
1289 static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1290 void *value, u64 map_flags,
1291 bool onallcpus)
1292 {
1293 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1294 struct htab_elem *l_new = NULL, *l_old;
1295 struct hlist_nulls_head *head;
1296 unsigned long flags;
1297 struct bucket *b;
1298 u32 key_size, hash;
1299 int ret;
1300
1301 if (unlikely(map_flags > BPF_EXIST))
1302 /* unknown flags */
1303 return -EINVAL;
1304
1305 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1306 !rcu_read_lock_bh_held());
1307
1308 key_size = map->key_size;
1309
1310 hash = htab_map_hash(key, key_size, htab->hashrnd);
1311
1312 b = __select_bucket(htab, hash);
1313 head = &b->head;
1314
1315 ret = htab_lock_bucket(htab, b, hash, &flags);
1316 if (ret)
1317 return ret;
1318
1319 l_old = lookup_elem_raw(head, hash, key, key_size);
1320
1321 ret = check_flags(htab, l_old, map_flags);
1322 if (ret)
1323 goto err;
1324
1325 if (l_old) {
1326 /* per-cpu hash map can update value in-place */
1327 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1328 value, onallcpus);
1329 } else {
1330 l_new = alloc_htab_elem(htab, key, value, key_size,
1331 hash, true, onallcpus, NULL);
1332 if (IS_ERR(l_new)) {
1333 ret = PTR_ERR(l_new);
1334 goto err;
1335 }
1336 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1337 }
1338 ret = 0;
1339 err:
1340 htab_unlock_bucket(htab, b, hash, flags);
1341 return ret;
1342 }
1343
1344 static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1345 void *value, u64 map_flags,
1346 bool onallcpus)
1347 {
1348 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1349 struct htab_elem *l_new = NULL, *l_old;
1350 struct hlist_nulls_head *head;
1351 unsigned long flags;
1352 struct bucket *b;
1353 u32 key_size, hash;
1354 int ret;
1355
1356 if (unlikely(map_flags > BPF_EXIST))
1357 /* unknown flags */
1358 return -EINVAL;
1359
1360 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1361 !rcu_read_lock_bh_held());
1362
1363 key_size = map->key_size;
1364
1365 hash = htab_map_hash(key, key_size, htab->hashrnd);
1366
1367 b = __select_bucket(htab, hash);
1368 head = &b->head;
1369
1370 /* For LRU, we need to alloc before taking bucket's
1371 * spinlock because LRU's elem alloc may need
1372 * to remove older elem from htab and this removal
1373 * operation will need a bucket lock.
1374 */
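/* With BPF_EXIST only an in-place update is allowed, so no new element
 * needs to be pulled from the LRU free list in that case.
 */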
1375 if (map_flags != BPF_EXIST) {
1376 l_new = prealloc_lru_pop(htab, key, hash);
1377 if (!l_new)
1378 return -ENOMEM;
1379 }
1380
1381 ret = htab_lock_bucket(htab, b, hash, &flags);
1382 if (ret)
1383 goto err_lock_bucket;
1384
1385 l_old = lookup_elem_raw(head, hash, key, key_size);
1386
1387 ret = check_flags(htab, l_old, map_flags);
1388 if (ret)
1389 goto err;
1390
1391 if (l_old) {
1392 bpf_lru_node_set_ref(&l_old->lru_node);
1393
1394 /* per-cpu hash map can update value in-place */
1395 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1396 value, onallcpus);
1397 } else {
1398 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
1399 value, onallcpus);
1400 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1401 l_new = NULL;
1402 }
1403 ret = 0;
1404 err:
1405 htab_unlock_bucket(htab, b, hash, flags);
1406 err_lock_bucket:
1407 if (l_new) {
1408 bpf_map_dec_elem_count(&htab->map);
1409 bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1410 }
1411 return ret;
1412 }
1413
1414 static long htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1415 void *value, u64 map_flags)
1416 {
1417 return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
1418 }
1419
1420 static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1421 void *value, u64 map_flags)
1422 {
1423 return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
1424 false);
1425 }
1426
1427 /* Called from syscall or from eBPF program */
1428 static long htab_map_delete_elem(struct bpf_map *map, void *key)
1429 {
1430 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1431 struct hlist_nulls_head *head;
1432 struct bucket *b;
1433 struct htab_elem *l;
1434 unsigned long flags;
1435 u32 hash, key_size;
1436 int ret;
1437
1438 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1439 !rcu_read_lock_bh_held());
1440
1441 key_size = map->key_size;
1442
1443 hash = htab_map_hash(key, key_size, htab->hashrnd);
1444 b = __select_bucket(htab, hash);
1445 head = &b->head;
1446
1447 ret = htab_lock_bucket(htab, b, hash, &flags);
1448 if (ret)
1449 return ret;
1450
1451 l = lookup_elem_raw(head, hash, key, key_size);
1452 if (l)
1453 hlist_nulls_del_rcu(&l->hash_node);
1454 else
1455 ret = -ENOENT;
1456
1457 htab_unlock_bucket(htab, b, hash, flags);
1458
1459 if (l)
1460 free_htab_elem(htab, l);
1461 return ret;
1462 }
1463
1464 static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
1465 {
1466 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1467 struct hlist_nulls_head *head;
1468 struct bucket *b;
1469 struct htab_elem *l;
1470 unsigned long flags;
1471 u32 hash, key_size;
1472 int ret;
1473
1474 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1475 !rcu_read_lock_bh_held());
1476
1477 key_size = map->key_size;
1478
1479 hash = htab_map_hash(key, key_size, htab->hashrnd);
1480 b = __select_bucket(htab, hash);
1481 head = &b->head;
1482
1483 ret = htab_lock_bucket(htab, b, hash, &flags);
1484 if (ret)
1485 return ret;
1486
1487 l = lookup_elem_raw(head, hash, key, key_size);
1488
1489 if (l)
1490 hlist_nulls_del_rcu(&l->hash_node);
1491 else
1492 ret = -ENOENT;
1493
1494 htab_unlock_bucket(htab, b, hash, flags);
1495 if (l)
1496 htab_lru_push_free(htab, l);
1497 return ret;
1498 }
1499
1500 static void delete_all_elements(struct bpf_htab *htab)
1501 {
1502 int i;
1503
1504 /* It's called from a worker thread and migration has been disabled,
1505 * therefore, it is OK to invoke bpf_mem_cache_free() directly.
1506 */
1507 for (i = 0; i < htab->n_buckets; i++) {
1508 struct hlist_nulls_head *head = select_bucket(htab, i);
1509 struct hlist_nulls_node *n;
1510 struct htab_elem *l;
1511
1512 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1513 hlist_nulls_del_rcu(&l->hash_node);
1514 htab_elem_free(htab, l);
1515 }
1516 cond_resched();
1517 }
1518 }
1519
1520 static void htab_free_malloced_timers_and_wq(struct bpf_htab *htab)
1521 {
1522 int i;
1523
1524 rcu_read_lock();
1525 for (i = 0; i < htab->n_buckets; i++) {
1526 struct hlist_nulls_head *head = select_bucket(htab, i);
1527 struct hlist_nulls_node *n;
1528 struct htab_elem *l;
1529
1530 hlist_nulls_for_each_entry(l, n, head, hash_node) {
1531 /* We only free timer on uref dropping to zero */
1532 if (btf_record_has_field(htab->map.record, BPF_TIMER))
1533 bpf_obj_free_timer(htab->map.record,
1534 l->key + round_up(htab->map.key_size, 8));
1535 if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
1536 bpf_obj_free_workqueue(htab->map.record,
1537 l->key + round_up(htab->map.key_size, 8));
1538 }
1539 cond_resched_rcu();
1540 }
1541 rcu_read_unlock();
1542 }
1543
1544 static void htab_map_free_timers_and_wq(struct bpf_map *map)
1545 {
1546 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1547
1548 /* We only free timer and workqueue on uref dropping to zero */
1549 if (btf_record_has_field(htab->map.record, BPF_TIMER | BPF_WORKQUEUE)) {
1550 if (!htab_is_prealloc(htab))
1551 htab_free_malloced_timers_and_wq(htab);
1552 else
1553 htab_free_prealloced_timers_and_wq(htab);
1554 }
1555 }
1556
1557 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1558 static void htab_map_free(struct bpf_map *map)
1559 {
1560 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1561 int i;
1562
1563 /* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
1564 * bpf_free_used_maps() is called after bpf prog is no longer executing.
1565 * There is no need to synchronize_rcu() here to protect map elements.
1566 */
1567
1568 /* htab no longer uses call_rcu() directly. bpf_mem_alloc does it
1569 * underneath and is responsible for waiting for callbacks to finish
1570 * during bpf_mem_alloc_destroy().
1571 */
1572 if (!htab_is_prealloc(htab)) {
1573 delete_all_elements(htab);
1574 } else {
1575 htab_free_prealloced_fields(htab);
1576 prealloc_destroy(htab);
1577 }
1578
1579 bpf_map_free_elem_count(map);
1580 free_percpu(htab->extra_elems);
1581 bpf_map_area_free(htab->buckets);
1582 bpf_mem_alloc_destroy(&htab->pcpu_ma);
1583 bpf_mem_alloc_destroy(&htab->ma);
1584 if (htab->use_percpu_counter)
1585 percpu_counter_destroy(&htab->pcount);
1586 for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
1587 free_percpu(htab->map_locked[i]);
1588 lockdep_unregister_key(&htab->lockdep_key);
1589 bpf_map_area_free(htab);
1590 }
1591
1592 static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
1593 struct seq_file *m)
1594 {
1595 void *value;
1596
1597 rcu_read_lock();
1598
1599 value = htab_map_lookup_elem(map, key);
1600 if (!value) {
1601 rcu_read_unlock();
1602 return;
1603 }
1604
1605 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
1606 seq_puts(m, ": ");
1607 btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
1608 seq_putc(m, '\n');
1609
1610 rcu_read_unlock();
1611 }
1612
1613 static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1614 void *value, bool is_lru_map,
1615 bool is_percpu, u64 flags)
1616 {
1617 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1618 struct hlist_nulls_head *head;
1619 unsigned long bflags;
1620 struct htab_elem *l;
1621 u32 hash, key_size;
1622 struct bucket *b;
1623 int ret;
1624
1625 key_size = map->key_size;
1626
1627 hash = htab_map_hash(key, key_size, htab->hashrnd);
1628 b = __select_bucket(htab, hash);
1629 head = &b->head;
1630
1631 ret = htab_lock_bucket(htab, b, hash, &bflags);
1632 if (ret)
1633 return ret;
1634
1635 l = lookup_elem_raw(head, hash, key, key_size);
1636 if (!l) {
1637 ret = -ENOENT;
1638 goto out_unlock;
1639 }
1640
1641 if (is_percpu) {
1642 u32 roundup_value_size = round_up(map->value_size, 8);
1643 void __percpu *pptr;
1644 int off = 0, cpu;
1645
1646 pptr = htab_elem_get_ptr(l, key_size);
1647 for_each_possible_cpu(cpu) {
1648 copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
1649 check_and_init_map_value(&htab->map, value + off);
1650 off += roundup_value_size;
1651 }
1652 } else {
1653 u32 roundup_key_size = round_up(map->key_size, 8);
1654
1655 if (flags & BPF_F_LOCK)
1656 copy_map_value_locked(map, value, l->key +
1657 roundup_key_size,
1658 true);
1659 else
1660 copy_map_value(map, value, l->key +
1661 roundup_key_size);
1662 /* Zeroing special fields in the temp buffer */
1663 check_and_init_map_value(map, value);
1664 }
1665 hlist_nulls_del_rcu(&l->hash_node);
1666
1667 out_unlock:
1668 htab_unlock_bucket(htab, b, hash, bflags);
1669
1670 if (l) {
1671 if (is_lru_map)
1672 htab_lru_push_free(htab, l);
1673 else
1674 free_htab_elem(htab, l);
1675 }
1676
1677 return ret;
1678 }
1679
1680 static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1681 void *value, u64 flags)
1682 {
1683 return __htab_map_lookup_and_delete_elem(map, key, value, false, false,
1684 flags);
1685 }
1686
1687 static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1688 void *key, void *value,
1689 u64 flags)
1690 {
1691 return __htab_map_lookup_and_delete_elem(map, key, value, false, true,
1692 flags);
1693 }
1694
1695 static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1696 void *value, u64 flags)
1697 {
1698 return __htab_map_lookup_and_delete_elem(map, key, value, true, false,
1699 flags);
1700 }
1701
1702 static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1703 void *key, void *value,
1704 u64 flags)
1705 {
1706 return __htab_map_lookup_and_delete_elem(map, key, value, true, true,
1707 flags);
1708 }
1709
1710 static int
1711 __htab_map_lookup_and_delete_batch(struct bpf_map *map,
1712 const union bpf_attr *attr,
1713 union bpf_attr __user *uattr,
1714 bool do_delete, bool is_lru_map,
1715 bool is_percpu)
1716 {
1717 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1718 u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
1719 void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
1720 void __user *uvalues = u64_to_user_ptr(attr->batch.values);
1721 void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
1722 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1723 u32 batch, max_count, size, bucket_size, map_id;
1724 struct htab_elem *node_to_free = NULL;
1725 u64 elem_map_flags, map_flags;
1726 struct hlist_nulls_head *head;
1727 struct hlist_nulls_node *n;
1728 unsigned long flags = 0;
1729 bool locked = false;
1730 struct htab_elem *l;
1731 struct bucket *b;
1732 int ret = 0;
1733
1734 elem_map_flags = attr->batch.elem_flags;
1735 if ((elem_map_flags & ~BPF_F_LOCK) ||
1736 ((elem_map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
1737 return -EINVAL;
1738
1739 map_flags = attr->batch.flags;
1740 if (map_flags)
1741 return -EINVAL;
1742
1743 max_count = attr->batch.count;
1744 if (!max_count)
1745 return 0;
1746
1747 if (put_user(0, &uattr->batch.count))
1748 return -EFAULT;
1749
1750 batch = 0;
1751 if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
1752 return -EFAULT;
1753
1754 if (batch >= htab->n_buckets)
1755 return -ENOENT;
1756
1757 key_size = htab->map.key_size;
1758 roundup_key_size = round_up(htab->map.key_size, 8);
1759 value_size = htab->map.value_size;
1760 size = round_up(value_size, 8);
1761 if (is_percpu)
1762 value_size = size * num_possible_cpus();
1763 total = 0;
1764 /* while experimenting with hash tables with sizes ranging from 10 to
1765 * 1000, it was observed that a bucket can have up to 5 entries.
1766 */
1767 bucket_size = 5;
1768
1769 alloc:
1770 /* We cannot do copy_from_user or copy_to_user inside
1771 * the rcu_read_lock. Allocate enough space here.
1772 */
1773 keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
1774 values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
1775 if (!keys || !values) {
1776 ret = -ENOMEM;
1777 goto after_loop;
1778 }
1779
1780 again:
1781 bpf_disable_instrumentation();
1782 rcu_read_lock();
1783 again_nocopy:
1784 dst_key = keys;
1785 dst_val = values;
1786 b = &htab->buckets[batch];
1787 head = &b->head;
1788 /* do not grab the lock unless we need it (bucket_cnt > 0). */
1789 if (locked) {
1790 ret = htab_lock_bucket(htab, b, batch, &flags);
1791 if (ret) {
1792 rcu_read_unlock();
1793 bpf_enable_instrumentation();
1794 goto after_loop;
1795 }
1796 }
1797
1798 bucket_cnt = 0;
1799 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
1800 bucket_cnt++;
1801
1802 if (bucket_cnt && !locked) {
1803 locked = true;
1804 goto again_nocopy;
1805 }
1806
1807 if (bucket_cnt > (max_count - total)) {
1808 if (total == 0)
1809 ret = -ENOSPC;
1810 /* Note that since bucket_cnt > 0 here, it is implicit
1811 * that the lock was grabbed, so release it.
1812 */
1813 htab_unlock_bucket(htab, b, batch, flags);
1814 rcu_read_unlock();
1815 bpf_enable_instrumentation();
1816 goto after_loop;
1817 }
1818
1819 if (bucket_cnt > bucket_size) {
1820 bucket_size = bucket_cnt;
1821 /* Note that since bucket_cnt > 0 here, it is implicit
1822 * that the lock was grabbed, so release it.
1823 */
1824 htab_unlock_bucket(htab, b, batch, flags);
1825 rcu_read_unlock();
1826 bpf_enable_instrumentation();
1827 kvfree(keys);
1828 kvfree(values);
1829 goto alloc;
1830 }
1831
1832 /* Next block is only safe to run if the lock was grabbed */
1833 if (!locked)
1834 goto next_batch;
1835
1836 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1837 memcpy(dst_key, l->key, key_size);
1838
1839 if (is_percpu) {
1840 int off = 0, cpu;
1841 void __percpu *pptr;
1842
1843 pptr = htab_elem_get_ptr(l, map->key_size);
1844 for_each_possible_cpu(cpu) {
1845 copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu));
1846 check_and_init_map_value(&htab->map, dst_val + off);
1847 off += size;
1848 }
1849 } else {
1850 value = l->key + roundup_key_size;
1851 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
1852 struct bpf_map **inner_map = value;
1853
1854 /* Actual value is the id of the inner map */
1855 map_id = map->ops->map_fd_sys_lookup_elem(*inner_map);
1856 value = &map_id;
1857 }
1858
1859 if (elem_map_flags & BPF_F_LOCK)
1860 copy_map_value_locked(map, dst_val, value,
1861 true);
1862 else
1863 copy_map_value(map, dst_val, value);
1864 /* Zeroing special fields in the temp buffer */
1865 check_and_init_map_value(map, dst_val);
1866 }
1867 if (do_delete) {
1868 hlist_nulls_del_rcu(&l->hash_node);
1869
1870 /* bpf_lru_push_free() will acquire lru_lock, which
1871 * may cause deadlock. See comments in function
1872 * prealloc_lru_pop(). Let us do bpf_lru_push_free()
1873 * after releasing the bucket lock.
1874 *
1875 * For htab of maps, htab_put_fd_value() in
1876 * free_htab_elem() may acquire a spinlock with bucket
1877 * lock being held and it violates the lock rule, so
1878 * invoke free_htab_elem() after unlock as well.
1879 */
1880 l->batch_flink = node_to_free;
1881 node_to_free = l;
1882 }
1883 dst_key += key_size;
1884 dst_val += value_size;
1885 }
1886
1887 htab_unlock_bucket(htab, b, batch, flags);
1888 locked = false;
1889
1890 while (node_to_free) {
1891 l = node_to_free;
1892 node_to_free = node_to_free->batch_flink;
1893 if (is_lru_map)
1894 htab_lru_push_free(htab, l);
1895 else
1896 free_htab_elem(htab, l);
1897 }
1898
1899 next_batch:
1900 /* If we are not copying data, we can go to the next bucket and avoid
1901 * releasing the RCU read lock.
1902 */
1903 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
1904 batch++;
1905 goto again_nocopy;
1906 }
1907
1908 rcu_read_unlock();
1909 bpf_enable_instrumentation();
1910 if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
1911 key_size * bucket_cnt) ||
1912 copy_to_user(uvalues + total * value_size, values,
1913 value_size * bucket_cnt))) {
1914 ret = -EFAULT;
1915 goto after_loop;
1916 }
1917
1918 total += bucket_cnt;
1919 batch++;
1920 if (batch >= htab->n_buckets) {
1921 ret = -ENOENT;
1922 goto after_loop;
1923 }
1924 goto again;
1925
1926 after_loop:
1927 if (ret == -EFAULT)
1928 goto out;
1929
1930 /* copy # of entries and next batch */
1931 ubatch = u64_to_user_ptr(attr->batch.out_batch);
1932 if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
1933 put_user(total, &uattr->batch.count))
1934 ret = -EFAULT;
1935
1936 out:
1937 kvfree(keys);
1938 kvfree(values);
1939 return ret;
1940 }
1941
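/* Per-flavour wrappers plugged into BATCH_OPS(): they differ only in the
 * do_delete / is_lru_map / is_percpu arguments passed to
 * __htab_map_lookup_and_delete_batch().
 */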
1942 static int
1943 htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1944 union bpf_attr __user *uattr)
1945 {
1946 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1947 false, true);
1948 }
1949
1950 static int
1951 htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1952 const union bpf_attr *attr,
1953 union bpf_attr __user *uattr)
1954 {
1955 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1956 false, true);
1957 }
1958
1959 static int
1960 htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1961 union bpf_attr __user *uattr)
1962 {
1963 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1964 false, false);
1965 }
1966
1967 static int
1968 htab_map_lookup_and_delete_batch(struct bpf_map *map,
1969 const union bpf_attr *attr,
1970 union bpf_attr __user *uattr)
1971 {
1972 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1973 false, false);
1974 }
1975
1976 static int
1977 htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
1978 const union bpf_attr *attr,
1979 union bpf_attr __user *uattr)
1980 {
1981 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1982 true, true);
1983 }
1984
1985 static int
1986 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1987 const union bpf_attr *attr,
1988 union bpf_attr __user *uattr)
1989 {
1990 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1991 true, true);
1992 }
1993
1994 static int
1995 htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1996 union bpf_attr __user *uattr)
1997 {
1998 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1999 true, false);
2000 }
2001
2002 static int
2003 htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
2004 const union bpf_attr *attr,
2005 union bpf_attr __user *uattr)
2006 {
2007 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
2008 true, false);
2009 }
2010
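/* Private state of a bpf_iter walk over a hash map: the current bucket,
 * the number of elements already emitted from it, and a scratch buffer
 * for assembling per-CPU values.
 */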
2011 struct bpf_iter_seq_hash_map_info {
2012 struct bpf_map *map;
2013 struct bpf_htab *htab;
2014 void *percpu_value_buf; /* non-NULL means percpu hash */
2015 u32 bucket_id;
2016 u32 skip_elems;
2017 };
2018
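/* Find the element following prev_elem, or the first not-yet-visited
 * element when prev_elem is NULL. The RCU read lock is held while a
 * bucket is being walked and is kept held when an element is returned;
 * it is dropped before moving on to the next bucket.
 */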
2019 static struct htab_elem *
2020 bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
2021 struct htab_elem *prev_elem)
2022 {
2023 const struct bpf_htab *htab = info->htab;
2024 u32 skip_elems = info->skip_elems;
2025 u32 bucket_id = info->bucket_id;
2026 struct hlist_nulls_head *head;
2027 struct hlist_nulls_node *n;
2028 struct htab_elem *elem;
2029 struct bucket *b;
2030 u32 i, count;
2031
2032 if (bucket_id >= htab->n_buckets)
2033 return NULL;
2034
2035 /* try to find next elem in the same bucket */
2036 if (prev_elem) {
2037 /* no update/deletion on this bucket, prev_elem should still be valid
2038 * and we won't skip elements.
2039 */
2040 n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
2041 elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
2042 if (elem)
2043 return elem;
2044
2045 /* not found, unlock and go to the next bucket */
2046 b = &htab->buckets[bucket_id++];
2047 rcu_read_unlock();
2048 skip_elems = 0;
2049 }
2050
2051 for (i = bucket_id; i < htab->n_buckets; i++) {
2052 b = &htab->buckets[i];
2053 rcu_read_lock();
2054
2055 count = 0;
2056 head = &b->head;
2057 hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2058 if (count >= skip_elems) {
2059 info->bucket_id = i;
2060 info->skip_elems = count;
2061 return elem;
2062 }
2063 count++;
2064 }
2065
2066 rcu_read_unlock();
2067 skip_elems = 0;
2068 }
2069
2070 info->bucket_id = i;
2071 info->skip_elems = 0;
2072 return NULL;
2073 }
2074
2075 static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
2076 {
2077 struct bpf_iter_seq_hash_map_info *info = seq->private;
2078 struct htab_elem *elem;
2079
2080 elem = bpf_hash_map_seq_find_next(info, NULL);
2081 if (!elem)
2082 return NULL;
2083
2084 if (*pos == 0)
2085 ++*pos;
2086 return elem;
2087 }
2088
2089 static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2090 {
2091 struct bpf_iter_seq_hash_map_info *info = seq->private;
2092
2093 ++*pos;
2094 ++info->skip_elems;
2095 return bpf_hash_map_seq_find_next(info, v);
2096 }
2097
2098 static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
2099 {
2100 struct bpf_iter_seq_hash_map_info *info = seq->private;
2101 u32 roundup_key_size, roundup_value_size;
2102 struct bpf_iter__bpf_map_elem ctx = {};
2103 struct bpf_map *map = info->map;
2104 struct bpf_iter_meta meta;
2105 int ret = 0, off = 0, cpu;
2106 struct bpf_prog *prog;
2107 void __percpu *pptr;
2108
2109 meta.seq = seq;
2110 prog = bpf_iter_get_info(&meta, elem == NULL);
2111 if (prog) {
2112 ctx.meta = &meta;
2113 ctx.map = info->map;
2114 if (elem) {
2115 roundup_key_size = round_up(map->key_size, 8);
2116 ctx.key = elem->key;
2117 if (!info->percpu_value_buf) {
2118 ctx.value = elem->key + roundup_key_size;
2119 } else {
2120 roundup_value_size = round_up(map->value_size, 8);
2121 pptr = htab_elem_get_ptr(elem, map->key_size);
2122 for_each_possible_cpu(cpu) {
2123 copy_map_value_long(map, info->percpu_value_buf + off,
2124 per_cpu_ptr(pptr, cpu));
2125 check_and_init_map_value(map, info->percpu_value_buf + off);
2126 off += roundup_value_size;
2127 }
2128 ctx.value = info->percpu_value_buf;
2129 }
2130 }
2131 ret = bpf_iter_run_prog(prog, &ctx);
2132 }
2133
2134 return ret;
2135 }
2136
2137 static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
2138 {
2139 return __bpf_hash_map_seq_show(seq, v);
2140 }
2141
2142 static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
2143 {
2144 if (!v)
2145 (void)__bpf_hash_map_seq_show(seq, NULL);
2146 else
2147 rcu_read_unlock();
2148 }
2149
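/* Set up iterator private data: take a reference (with uref) on the map
 * and, for per-CPU hash maps, allocate a buffer large enough to hold one
 * rounded-up value per possible CPU.
 */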
2150 static int bpf_iter_init_hash_map(void *priv_data,
2151 struct bpf_iter_aux_info *aux)
2152 {
2153 struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2154 struct bpf_map *map = aux->map;
2155 void *value_buf;
2156 u32 buf_size;
2157
2158 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
2159 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2160 buf_size = round_up(map->value_size, 8) * num_possible_cpus();
2161 value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
2162 if (!value_buf)
2163 return -ENOMEM;
2164
2165 seq_info->percpu_value_buf = value_buf;
2166 }
2167
2168 bpf_map_inc_with_uref(map);
2169 seq_info->map = map;
2170 seq_info->htab = container_of(map, struct bpf_htab, map);
2171 return 0;
2172 }
2173
2174 static void bpf_iter_fini_hash_map(void *priv_data)
2175 {
2176 struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2177
2178 bpf_map_put_with_uref(seq_info->map);
2179 kfree(seq_info->percpu_value_buf);
2180 }
2181
2182 static const struct seq_operations bpf_hash_map_seq_ops = {
2183 .start = bpf_hash_map_seq_start,
2184 .next = bpf_hash_map_seq_next,
2185 .stop = bpf_hash_map_seq_stop,
2186 .show = bpf_hash_map_seq_show,
2187 };
2188
2189 static const struct bpf_iter_seq_info iter_seq_info = {
2190 .seq_ops = &bpf_hash_map_seq_ops,
2191 .init_seq_private = bpf_iter_init_hash_map,
2192 .fini_seq_private = bpf_iter_fini_hash_map,
2193 .seq_priv_size = sizeof(struct bpf_iter_seq_hash_map_info),
2194 };
2195
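/* Implementation of the bpf_for_each_map_elem() helper for hash maps:
 * walk every bucket under RCU and invoke callback_fn(map, key, value,
 * callback_ctx) for each element. A non-zero return from the callback
 * stops the walk. Returns the number of elements visited.
 */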
2196 static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
2197 void *callback_ctx, u64 flags)
2198 {
2199 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2200 struct hlist_nulls_head *head;
2201 struct hlist_nulls_node *n;
2202 struct htab_elem *elem;
2203 u32 roundup_key_size;
2204 int i, num_elems = 0;
2205 void __percpu *pptr;
2206 struct bucket *b;
2207 void *key, *val;
2208 bool is_percpu;
2209 u64 ret = 0;
2210
2211 cant_migrate();
2212
2213 if (flags != 0)
2214 return -EINVAL;
2215
2216 is_percpu = htab_is_percpu(htab);
2217
2218 roundup_key_size = round_up(map->key_size, 8);
2219 /* migration has been disabled, so the percpu value prepared here will be
2220 * the same as the one seen by the bpf program with
2221 * bpf_map_lookup_elem().
2222 */
2223 for (i = 0; i < htab->n_buckets; i++) {
2224 b = &htab->buckets[i];
2225 rcu_read_lock();
2226 head = &b->head;
2227 hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2228 key = elem->key;
2229 if (is_percpu) {
2230 /* current cpu value for percpu map */
2231 pptr = htab_elem_get_ptr(elem, map->key_size);
2232 val = this_cpu_ptr(pptr);
2233 } else {
2234 val = elem->key + roundup_key_size;
2235 }
2236 num_elems++;
2237 ret = callback_fn((u64)(long)map, (u64)(long)key,
2238 (u64)(long)val, (u64)(long)callback_ctx, 0);
2239 /* return value: 0 - continue, 1 - stop and return */
2240 if (ret) {
2241 rcu_read_unlock();
2242 goto out;
2243 }
2244 }
2245 rcu_read_unlock();
2246 }
2247 out:
2248 return num_elems;
2249 }
2250
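/* Rough memory usage estimate reported to user space: the htab struct,
 * the bucket array, the per-CPU map_locked counters and the element
 * storage, which is accounted differently for preallocated maps and for
 * maps backed by bpf_mem_alloc.
 */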
2251 static u64 htab_map_mem_usage(const struct bpf_map *map)
2252 {
2253 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2254 u32 value_size = round_up(htab->map.value_size, 8);
2255 bool prealloc = htab_is_prealloc(htab);
2256 bool percpu = htab_is_percpu(htab);
2257 bool lru = htab_is_lru(htab);
2258 u64 num_entries;
2259 u64 usage = sizeof(struct bpf_htab);
2260
2261 usage += sizeof(struct bucket) * htab->n_buckets;
2262 usage += sizeof(int) * num_possible_cpus() * HASHTAB_MAP_LOCK_COUNT;
2263 if (prealloc) {
2264 num_entries = map->max_entries;
2265 if (htab_has_extra_elems(htab))
2266 num_entries += num_possible_cpus();
2267
2268 usage += htab->elem_size * num_entries;
2269
2270 if (percpu)
2271 usage += value_size * num_possible_cpus() * num_entries;
2272 else if (!lru)
2273 usage += sizeof(struct htab_elem *) * num_possible_cpus();
2274 } else {
2275 #define LLIST_NODE_SZ sizeof(struct llist_node)
2276
2277 num_entries = htab->use_percpu_counter ?
2278 percpu_counter_sum(&htab->pcount) :
2279 atomic_read(&htab->count);
2280 usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
2281 if (percpu) {
2282 usage += (LLIST_NODE_SZ + sizeof(void *)) * num_entries;
2283 usage += value_size * num_possible_cpus() * num_entries;
2284 }
2285 }
2286 return usage;
2287 }
2288
2289 BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
2290 const struct bpf_map_ops htab_map_ops = {
2291 .map_meta_equal = bpf_map_meta_equal,
2292 .map_alloc_check = htab_map_alloc_check,
2293 .map_alloc = htab_map_alloc,
2294 .map_free = htab_map_free,
2295 .map_get_next_key = htab_map_get_next_key,
2296 .map_release_uref = htab_map_free_timers_and_wq,
2297 .map_lookup_elem = htab_map_lookup_elem,
2298 .map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
2299 .map_update_elem = htab_map_update_elem,
2300 .map_delete_elem = htab_map_delete_elem,
2301 .map_gen_lookup = htab_map_gen_lookup,
2302 .map_seq_show_elem = htab_map_seq_show_elem,
2303 .map_set_for_each_callback_args = map_set_for_each_callback_args,
2304 .map_for_each_callback = bpf_for_each_hash_elem,
2305 .map_mem_usage = htab_map_mem_usage,
2306 BATCH_OPS(htab),
2307 .map_btf_id = &htab_map_btf_ids[0],
2308 .iter_seq_info = &iter_seq_info,
2309 };
2310
2311 const struct bpf_map_ops htab_lru_map_ops = {
2312 .map_meta_equal = bpf_map_meta_equal,
2313 .map_alloc_check = htab_map_alloc_check,
2314 .map_alloc = htab_map_alloc,
2315 .map_free = htab_map_free,
2316 .map_get_next_key = htab_map_get_next_key,
2317 .map_release_uref = htab_map_free_timers_and_wq,
2318 .map_lookup_elem = htab_lru_map_lookup_elem,
2319 .map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
2320 .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
2321 .map_update_elem = htab_lru_map_update_elem,
2322 .map_delete_elem = htab_lru_map_delete_elem,
2323 .map_gen_lookup = htab_lru_map_gen_lookup,
2324 .map_seq_show_elem = htab_map_seq_show_elem,
2325 .map_set_for_each_callback_args = map_set_for_each_callback_args,
2326 .map_for_each_callback = bpf_for_each_hash_elem,
2327 .map_mem_usage = htab_map_mem_usage,
2328 BATCH_OPS(htab_lru),
2329 .map_btf_id = &htab_map_btf_ids[0],
2330 .iter_seq_info = &iter_seq_info,
2331 };
2332
2333 /* Called from eBPF program */
2334 static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2335 {
2336 struct htab_elem *l = __htab_map_lookup_elem(map, key);
2337
2338 if (l)
2339 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2340 else
2341 return NULL;
2342 }
2343
2344 /* inline bpf_map_lookup_elem() call for per-CPU hashmap */
2345 static int htab_percpu_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
2346 {
2347 struct bpf_insn *insn = insn_buf;
2348
2349 if (!bpf_jit_supports_percpu_insn())
2350 return -EOPNOTSUPP;
2351
2352 BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
2353 (void *(*)(struct bpf_map *map, void *key))NULL));
2354 *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
2355 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3);
2356 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
2357 offsetof(struct htab_elem, key) + map->key_size);
2358 *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
2359 *insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
2360
2361 return insn - insn_buf;
2362 }
2363
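/* Lookup of the value slot for a specific CPU, as used by the
 * bpf_map_lookup_percpu_elem() helper. Out-of-range CPUs yield NULL.
 */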
2364 static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2365 {
2366 struct htab_elem *l;
2367
2368 if (cpu >= nr_cpu_ids)
2369 return NULL;
2370
2371 l = __htab_map_lookup_elem(map, key);
2372 if (l)
2373 return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2374 else
2375 return NULL;
2376 }
2377
2378 static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2379 {
2380 struct htab_elem *l = __htab_map_lookup_elem(map, key);
2381
2382 if (l) {
2383 bpf_lru_node_set_ref(&l->lru_node);
2384 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2385 }
2386
2387 return NULL;
2388 }
2389
2390 static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2391 {
2392 struct htab_elem *l;
2393
2394 if (cpu >= nr_cpu_ids)
2395 return NULL;
2396
2397 l = __htab_map_lookup_elem(map, key);
2398 if (l) {
2399 bpf_lru_node_set_ref(&l->lru_node);
2400 return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2401 }
2402
2403 return NULL;
2404 }
2405
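/* Called from the syscall path: copy the per-CPU values of an element
 * into a contiguous kernel buffer, one rounded-up value_size slot per
 * possible CPU.
 */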
2406 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
2407 {
2408 struct htab_elem *l;
2409 void __percpu *pptr;
2410 int ret = -ENOENT;
2411 int cpu, off = 0;
2412 u32 size;
2413
2414 /* per_cpu areas are zero-filled and bpf programs can only
2415 * access 'value_size' of them, so copying rounded areas
2416 * will not leak any kernel data
2417 */
2418 size = round_up(map->value_size, 8);
2419 rcu_read_lock();
2420 l = __htab_map_lookup_elem(map, key);
2421 if (!l)
2422 goto out;
2423 /* We do not mark LRU map element here in order to not mess up
2424 * eviction heuristics when user space does a map walk.
2425 */
2426 pptr = htab_elem_get_ptr(l, map->key_size);
2427 for_each_possible_cpu(cpu) {
2428 copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
2429 check_and_init_map_value(map, value + off);
2430 off += size;
2431 }
2432 ret = 0;
2433 out:
2434 rcu_read_unlock();
2435 return ret;
2436 }
2437
2438 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
2439 u64 map_flags)
2440 {
2441 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2442 int ret;
2443
2444 rcu_read_lock();
2445 if (htab_is_lru(htab))
2446 ret = __htab_lru_percpu_map_update_elem(map, key, value,
2447 map_flags, true);
2448 else
2449 ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
2450 true);
2451 rcu_read_unlock();
2452
2453 return ret;
2454 }
2455
2456 static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
2457 struct seq_file *m)
2458 {
2459 struct htab_elem *l;
2460 void __percpu *pptr;
2461 int cpu;
2462
2463 rcu_read_lock();
2464
2465 l = __htab_map_lookup_elem(map, key);
2466 if (!l) {
2467 rcu_read_unlock();
2468 return;
2469 }
2470
2471 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
2472 seq_puts(m, ": {\n");
2473 pptr = htab_elem_get_ptr(l, map->key_size);
2474 for_each_possible_cpu(cpu) {
2475 seq_printf(m, "\tcpu%d: ", cpu);
2476 btf_type_seq_show(map->btf, map->btf_value_type_id,
2477 per_cpu_ptr(pptr, cpu), m);
2478 seq_putc(m, '\n');
2479 }
2480 seq_puts(m, "}\n");
2481
2482 rcu_read_unlock();
2483 }
2484
2485 const struct bpf_map_ops htab_percpu_map_ops = {
2486 .map_meta_equal = bpf_map_meta_equal,
2487 .map_alloc_check = htab_map_alloc_check,
2488 .map_alloc = htab_map_alloc,
2489 .map_free = htab_map_free,
2490 .map_get_next_key = htab_map_get_next_key,
2491 .map_lookup_elem = htab_percpu_map_lookup_elem,
2492 .map_gen_lookup = htab_percpu_map_gen_lookup,
2493 .map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
2494 .map_update_elem = htab_percpu_map_update_elem,
2495 .map_delete_elem = htab_map_delete_elem,
2496 .map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
2497 .map_seq_show_elem = htab_percpu_map_seq_show_elem,
2498 .map_set_for_each_callback_args = map_set_for_each_callback_args,
2499 .map_for_each_callback = bpf_for_each_hash_elem,
2500 .map_mem_usage = htab_map_mem_usage,
2501 BATCH_OPS(htab_percpu),
2502 .map_btf_id = &htab_map_btf_ids[0],
2503 .iter_seq_info = &iter_seq_info,
2504 };
2505
2506 const struct bpf_map_ops htab_lru_percpu_map_ops = {
2507 .map_meta_equal = bpf_map_meta_equal,
2508 .map_alloc_check = htab_map_alloc_check,
2509 .map_alloc = htab_map_alloc,
2510 .map_free = htab_map_free,
2511 .map_get_next_key = htab_map_get_next_key,
2512 .map_lookup_elem = htab_lru_percpu_map_lookup_elem,
2513 .map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
2514 .map_update_elem = htab_lru_percpu_map_update_elem,
2515 .map_delete_elem = htab_lru_map_delete_elem,
2516 .map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
2517 .map_seq_show_elem = htab_percpu_map_seq_show_elem,
2518 .map_set_for_each_callback_args = map_set_for_each_callback_args,
2519 .map_for_each_callback = bpf_for_each_hash_elem,
2520 .map_mem_usage = htab_map_mem_usage,
2521 BATCH_OPS(htab_lru_percpu),
2522 .map_btf_id = &htab_map_btf_ids[0],
2523 .iter_seq_info = &iter_seq_info,
2524 };
2525
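/* fd-based hash map (hash of maps): user space supplies a map fd as the
 * value, so value_size must be exactly sizeof(u32); internally the value
 * stores a pointer to the inner map.
 */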
2526 static int fd_htab_map_alloc_check(union bpf_attr *attr)
2527 {
2528 if (attr->value_size != sizeof(u32))
2529 return -EINVAL;
2530 return htab_map_alloc_check(attr);
2531 }
2532
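/* Drop the references held on all inner map pointers before freeing the
 * hash table itself.
 */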
2533 static void fd_htab_map_free(struct bpf_map *map)
2534 {
2535 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2536 struct hlist_nulls_node *n;
2537 struct hlist_nulls_head *head;
2538 struct htab_elem *l;
2539 int i;
2540
2541 for (i = 0; i < htab->n_buckets; i++) {
2542 head = select_bucket(htab, i);
2543
2544 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
2545 void *ptr = fd_htab_map_get_ptr(map, l);
2546
2547 map->ops->map_fd_put_ptr(map, ptr, false);
2548 }
2549 }
2550
2551 htab_map_free(map);
2552 }
2553
2554 /* only called from syscall */
2555 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
2556 {
2557 void **ptr;
2558 int ret = 0;
2559
2560 if (!map->ops->map_fd_sys_lookup_elem)
2561 return -ENOTSUPP;
2562
2563 rcu_read_lock();
2564 ptr = htab_map_lookup_elem(map, key);
2565 if (ptr)
2566 *value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
2567 else
2568 ret = -ENOENT;
2569 rcu_read_unlock();
2570
2571 return ret;
2572 }
2573
2574 /* only called from syscall */
2575 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2576 void *key, void *value, u64 map_flags)
2577 {
2578 void *ptr;
2579 int ret;
2580 u32 ufd = *(u32 *)value;
2581
2582 ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
2583 if (IS_ERR(ptr))
2584 return PTR_ERR(ptr);
2585
2586 /* The htab bucket lock is always held during update operations in fd
2587 * htab map, and the following rcu_read_lock() is only used to avoid
2588 * the WARN_ON_ONCE in htab_map_update_elem().
2589 */
2590 rcu_read_lock();
2591 ret = htab_map_update_elem(map, key, &ptr, map_flags);
2592 rcu_read_unlock();
2593 if (ret)
2594 map->ops->map_fd_put_ptr(map, ptr, false);
2595
2596 return ret;
2597 }
2598
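/* Allocate a hash-of-maps: the inner map meta data is captured from
 * attr->inner_map_fd and used to check that inner maps added later are
 * compatible.
 */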
2599 static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
2600 {
2601 struct bpf_map *map, *inner_map_meta;
2602
2603 inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
2604 if (IS_ERR(inner_map_meta))
2605 return inner_map_meta;
2606
2607 map = htab_map_alloc(attr);
2608 if (IS_ERR(map)) {
2609 bpf_map_meta_free(inner_map_meta);
2610 return map;
2611 }
2612
2613 map->inner_map_meta = inner_map_meta;
2614
2615 return map;
2616 }
2617
2618 static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
2619 {
2620 struct bpf_map **inner_map = htab_map_lookup_elem(map, key);
2621
2622 if (!inner_map)
2623 return NULL;
2624
2625 return READ_ONCE(*inner_map);
2626 }
2627
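/* Inlined lookup for hash-of-maps: unlike the plain htab lookup, the
 * generated code loads the inner map pointer stored in the element
 * instead of returning the address of the value.
 */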
2628 static int htab_of_map_gen_lookup(struct bpf_map *map,
2629 struct bpf_insn *insn_buf)
2630 {
2631 struct bpf_insn *insn = insn_buf;
2632 const int ret = BPF_REG_0;
2633
2634 BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
2635 (void *(*)(struct bpf_map *map, void *key))NULL));
2636 *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
2637 *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
2638 *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
2639 offsetof(struct htab_elem, key) +
2640 round_up(map->key_size, 8));
2641 *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
2642
2643 return insn - insn_buf;
2644 }
2645
2646 static void htab_of_map_free(struct bpf_map *map)
2647 {
2648 bpf_map_meta_free(map->inner_map_meta);
2649 fd_htab_map_free(map);
2650 }
2651
2652 const struct bpf_map_ops htab_of_maps_map_ops = {
2653 .map_alloc_check = fd_htab_map_alloc_check,
2654 .map_alloc = htab_of_map_alloc,
2655 .map_free = htab_of_map_free,
2656 .map_get_next_key = htab_map_get_next_key,
2657 .map_lookup_elem = htab_of_map_lookup_elem,
2658 .map_delete_elem = htab_map_delete_elem,
2659 .map_fd_get_ptr = bpf_map_fd_get_ptr,
2660 .map_fd_put_ptr = bpf_map_fd_put_ptr,
2661 .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
2662 .map_gen_lookup = htab_of_map_gen_lookup,
2663 .map_check_btf = map_check_no_btf,
2664 .map_mem_usage = htab_map_mem_usage,
2665 BATCH_OPS(htab),
2666 .map_btf_id = &htab_map_btf_ids[0],
2667 };
2668