xref: /linux/kernel/bpf/memalloc.c (revision 0e685c3e7158d35626d6d76b9f859eae806d87fa)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
3 #include <linux/mm.h>
4 #include <linux/llist.h>
5 #include <linux/bpf.h>
6 #include <linux/irq_work.h>
7 #include <linux/bpf_mem_alloc.h>
8 #include <linux/memcontrol.h>
9 #include <asm/local.h>
10 
11 /* Any context (including NMI) BPF specific memory allocator.
12  *
13  * Tracing BPF programs can attach to kprobe and fentry. Hence they
14  * run in an unknown context where calling plain kmalloc() might not be safe.
15  *
16  * Front-end kmalloc() with per-cpu per-bucket cache of free elements.
17  * Refill this cache asynchronously from irq_work.
18  *
19  * CPU_0 buckets
20  * 16 32 64 96 128 192 256 512 1024 2048 4096
21  * ...
22  * CPU_N buckets
23  * 16 32 64 96 128 192 256 512 1024 2048 4096
24  *
25  * The buckets are prefilled at the start.
26  * BPF programs always run with migration disabled.
27  * It's safe to allocate from cache of the current cpu with irqs disabled.
28  * Freeing is always done into the bucket of the current cpu as well.
29  * irq_work trims extra free elements from buckets with kfree
30  * and refills them with kmalloc, so global kmalloc logic takes care
31  * of freeing objects allocated by one cpu and freed on another.
32  *
33  * Every allocated object is padded with extra 8 bytes that contain
34  * a struct llist_node.
35  */
36 #define LLIST_NODE_SZ sizeof(struct llist_node)
37 
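/* A minimal sketch of the resulting (non-percpu) element layout, as implied
 * by unit_alloc()/unit_free() below, which add/subtract LLIST_NODE_SZ:
 *
 *   [ struct llist_node ][ user data .......................... ]
 *   ^                     ^
 *   llnode on free lists  pointer returned by bpf_mem_alloc()
 */
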
38 /* similar to kmalloc's size_index, but the size == 8 bucket is gone */
39 static u8 size_index[24] __ro_after_init = {
40 	3,	/* 8 */
41 	3,	/* 16 */
42 	4,	/* 24 */
43 	4,	/* 32 */
44 	5,	/* 40 */
45 	5,	/* 48 */
46 	5,	/* 56 */
47 	5,	/* 64 */
48 	1,	/* 72 */
49 	1,	/* 80 */
50 	1,	/* 88 */
51 	1,	/* 96 */
52 	6,	/* 104 */
53 	6,	/* 112 */
54 	6,	/* 120 */
55 	6,	/* 128 */
56 	2,	/* 136 */
57 	2,	/* 144 */
58 	2,	/* 152 */
59 	2,	/* 160 */
60 	2,	/* 168 */
61 	2,	/* 176 */
62 	2,	/* 184 */
63 	2	/* 192 */
64 };
65 
66 static int bpf_mem_cache_idx(size_t size)
67 {
68 	if (!size || size > 4096)
69 		return -1;
70 
71 	if (size <= 192)
72 		return size_index[(size - 1) / 8] - 1;
73 
74 	return fls(size - 1) - 2;
75 }
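
/* Worked examples of the mapping above, using the sizes[] table from
 * bpf_mem_alloc_init() below (cache[idx].unit_size == sizes[idx]):
 *
 *   size 16:   size_index[(16 - 1) / 8] - 1  == 2  -> 16-byte bucket
 *   size 100:  size_index[(100 - 1) / 8] - 1 == 5  -> 128-byte bucket
 *   size 300:  fls(300 - 1) - 2              == 7  -> 512-byte bucket
 *   size 4096: fls(4096 - 1) - 2             == 10 -> 4096-byte bucket
 */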
76 
77 #define NUM_CACHES 11
78 
79 struct bpf_mem_cache {
80 	/* per-cpu list of free objects of size 'unit_size'.
81 	 * All accesses are done with interrupts disabled and under 'active'
82 	 * counter protection via __llist_add() and __llist_del_first().
83 	 */
84 	struct llist_head free_llist;
85 	local_t active;
86 
87 	/* Operations on free_llist from unit_alloc/unit_free/bpf_mem_refill
88 	 * are sequenced by the per-cpu 'active' counter. But unit_free() cannot
89 	 * fail. When 'active' is busy, unit_free() will add an object to
90 	 * free_llist_extra.
91 	 */
92 	struct llist_head free_llist_extra;
93 
94 	struct irq_work refill_work;
95 	struct obj_cgroup *objcg;
96 	int unit_size;
97 	/* count of objects in free_llist */
98 	int free_cnt;
99 	int low_watermark, high_watermark, batch;
100 	int percpu_size;
101 
102 	struct rcu_head rcu;
103 	struct llist_head free_by_rcu;
104 	struct llist_head waiting_for_gp;
105 	atomic_t call_rcu_in_progress;
106 };
107 
108 struct bpf_mem_caches {
109 	struct bpf_mem_cache cache[NUM_CACHES];
110 };
111 
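/* Non-atomic counterpart of llist_del_first(). Callers guarantee exclusive
 * access to the list, either via the per-cpu 'active' counter with irqs
 * disabled or via irq_work serialization (see the comments in alloc_bulk()
 * and unit_alloc() below).
 */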
112 static struct llist_node notrace *__llist_del_first(struct llist_head *head)
113 {
114 	struct llist_node *entry, *next;
115 
116 	entry = head->first;
117 	if (!entry)
118 		return NULL;
119 	next = entry->next;
120 	head->first = next;
121 	return entry;
122 }
123 
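/* For the per-cpu flavor, __alloc() below builds a small two-slot box in
 * front of the actual per-cpu data (see also free_one()):
 *
 *   obj[0]: struct llist_node linking the box into the free lists
 *   obj[1]: pointer returned by __alloc_percpu_gfp() for the per-cpu data
 *
 * which is why bpf_mem_alloc_init() sets c->percpu_size to
 * LLIST_NODE_SZ + sizeof(void *).
 */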
124 static void *__alloc(struct bpf_mem_cache *c, int node)
125 {
126 	/* Allocate, but don't deplete the atomic reserves the way a typical
127 	 * GFP_ATOMIC allocation would. irq_work runs on this cpu and kmalloc
128 	 * will allocate from the current numa node, which is what we
129 	 * want here.
130 	 */
131 	gfp_t flags = GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT;
132 
133 	if (c->percpu_size) {
134 		void **obj = kmalloc_node(c->percpu_size, flags, node);
135 		void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
136 
137 		if (!obj || !pptr) {
138 			free_percpu(pptr);
139 			kfree(obj);
140 			return NULL;
141 		}
142 		obj[1] = pptr;
143 		return obj;
144 	}
145 
146 	return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
147 }
148 
149 static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
150 {
151 #ifdef CONFIG_MEMCG_KMEM
152 	if (c->objcg)
153 		return get_mem_cgroup_from_objcg(c->objcg);
154 #endif
155 
156 #ifdef CONFIG_MEMCG
157 	return root_mem_cgroup;
158 #else
159 	return NULL;
160 #endif
161 }
162 
163 /* Mostly runs from irq_work, except during the initial prefill. */
164 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
165 {
166 	struct mem_cgroup *memcg = NULL, *old_memcg;
167 	unsigned long flags;
168 	void *obj;
169 	int i;
170 
171 	memcg = get_memcg(c);
172 	old_memcg = set_active_memcg(memcg);
173 	for (i = 0; i < cnt; i++) {
174 		/*
175 		 * free_by_rcu is only manipulated by irq work refill_work().
176 		 * IRQ works on the same CPU are called sequentially, so it is
177 		 * safe to use __llist_del_first() here. If alloc_bulk() is
178 		 * invoked by the initial prefill, there will be no running
179 		 * refill_work(), so __llist_del_first() is fine as well.
180 		 *
181 		 * In most cases, objects on free_by_rcu are from the same CPU.
182 		 * If some objects come from other CPUs, it does no harm because
183 		 * NUMA_NO_NODE only expresses a preference for the current
184 		 * numa node; it is not a guarantee.
185 		 */
186 		obj = __llist_del_first(&c->free_by_rcu);
187 		if (!obj) {
188 			obj = __alloc(c, node);
189 			if (!obj)
190 				break;
191 		}
192 		if (IS_ENABLED(CONFIG_PREEMPT_RT))
193 			/* On RT, irq_work runs in a per-cpu kthread, so disable
194 			 * interrupts to avoid preemption and interrupts, which
195 			 * reduces the chance of a bpf prog executing on this cpu
196 			 * while the 'active' counter is busy.
197 			 */
198 			local_irq_save(flags);
199 		/* alloc_bulk runs from irq_work, which will not preempt a bpf
200 		 * program that does unit_alloc/unit_free since IRQs are
201 		 * disabled there, so there is no race to increment the 'active'
202 		 * counter. The counter protects free_llist from corruption in
203 		 * case an NMI bpf prog preempts this loop.
204 		 */
205 		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
206 		__llist_add(obj, &c->free_llist);
207 		c->free_cnt++;
208 		local_dec(&c->active);
209 		if (IS_ENABLED(CONFIG_PREEMPT_RT))
210 			local_irq_restore(flags);
211 	}
212 	set_active_memcg(old_memcg);
213 	mem_cgroup_put(memcg);
214 }
215 
216 static void free_one(struct bpf_mem_cache *c, void *obj)
217 {
218 	if (c->percpu_size) {
219 		free_percpu(((void **)obj)[1]);
220 		kfree(obj);
221 		return;
222 	}
223 
224 	kfree(obj);
225 }
226 
227 static void __free_rcu(struct rcu_head *head)
228 {
229 	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
230 	struct llist_node *llnode = llist_del_all(&c->waiting_for_gp);
231 	struct llist_node *pos, *t;
232 
233 	llist_for_each_safe(pos, t, llnode)
234 		free_one(c, pos);
235 	atomic_set(&c->call_rcu_in_progress, 0);
236 }
237 
238 static void __free_rcu_tasks_trace(struct rcu_head *head)
239 {
240 	/* If RCU Tasks Trace grace period implies RCU grace period,
241 	 * there is no need to invoke call_rcu().
242 	 */
243 	if (rcu_trace_implies_rcu_gp())
244 		__free_rcu(head);
245 	else
246 		call_rcu(head, __free_rcu);
247 }
248 
249 static void enque_to_free(struct bpf_mem_cache *c, void *obj)
250 {
251 	struct llist_node *llnode = obj;
252 
253 	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
254 	 * Nothing races to add to free_by_rcu list.
255 	 */
256 	__llist_add(llnode, &c->free_by_rcu);
257 }
258 
259 static void do_call_rcu(struct bpf_mem_cache *c)
260 {
261 	struct llist_node *llnode, *t;
262 
263 	if (atomic_xchg(&c->call_rcu_in_progress, 1))
264 		return;
265 
266 	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
267 	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
268 		/* There is no concurrent __llist_add(waiting_for_gp) access.
269 		 * It doesn't race with llist_del_all either.
270 		 * But there could be two concurrent llist_del_all(waiting_for_gp):
271 		 * from __free_rcu() and from drain_mem_cache().
272 		 */
273 		__llist_add(llnode, &c->waiting_for_gp);
274 	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
275 	 * If RCU Tasks Trace grace period implies RCU grace period, free
276 	 * these elements directly, else use call_rcu() to wait for normal
277 	 * progs to finish and finally do free_one() on each element.
278 	 */
279 	call_rcu_tasks_trace(&c->rcu, __free_rcu_tasks_trace);
280 }
281 
282 static void free_bulk(struct bpf_mem_cache *c)
283 {
284 	struct llist_node *llnode, *t;
285 	unsigned long flags;
286 	int cnt;
287 
288 	do {
289 		if (IS_ENABLED(CONFIG_PREEMPT_RT))
290 			local_irq_save(flags);
291 		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
292 		llnode = __llist_del_first(&c->free_llist);
293 		if (llnode)
294 			cnt = --c->free_cnt;
295 		else
296 			cnt = 0;
297 		local_dec(&c->active);
298 		if (IS_ENABLED(CONFIG_PREEMPT_RT))
299 			local_irq_restore(flags);
300 		if (llnode)
301 			enque_to_free(c, llnode);
302 	} while (cnt > (c->high_watermark + c->low_watermark) / 2);
303 
304 	/* and drain free_llist_extra */
305 	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
306 		enque_to_free(c, llnode);
307 	do_call_rcu(c);
308 }
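
/* To summarize the free path driven by free_bulk() above: elements drained
 * from free_llist/free_llist_extra are parked on free_by_rcu, moved to
 * waiting_for_gp by do_call_rcu(), and finally released via free_one() from
 * __free_rcu() once an RCU Tasks Trace grace period (and, if needed, a
 * regular RCU grace period) has elapsed. alloc_bulk() may reuse elements
 * still sitting on free_by_rcu before they reach waiting_for_gp.
 */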
309 
310 static void bpf_mem_refill(struct irq_work *work)
311 {
312 	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
313 	int cnt;
314 
315 	/* Racy access to free_cnt. It doesn't need to be 100% accurate */
316 	cnt = c->free_cnt;
317 	if (cnt < c->low_watermark)
318 		/* irq_work runs on this cpu and kmalloc will allocate
319 		 * from the current numa node which is what we want here.
320 		 */
321 		alloc_bulk(c, c->batch, NUMA_NO_NODE);
322 	else if (cnt > c->high_watermark)
323 		free_bulk(c);
324 }
325 
326 static void notrace irq_work_raise(struct bpf_mem_cache *c)
327 {
328 	irq_work_queue(&c->refill_work);
329 }
330 
331 /* For the typical bpf map case that uses bpf_mem_cache_alloc and a single
332  * bucket, the freelist cache will be elem_size * 64 (or less) on each cpu.
333  *
334  * For bpf programs that don't have statically known allocation sizes,
335  * assuming (low_mark + high_mark) / 2 as the average number of elements per
336  * bucket and that all buckets are used, the total amount of memory in the
337  * freelists on each cpu will be:
338  * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*192 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
339  * == ~ 116 Kbyte using the below heuristic.
340  * An initialized but unused bpf allocator (not the bpf map specific one) will
341  * consume ~ 11 Kbyte per cpu.
342  * The typical case will be between 11K and 116K, closer to 11K.
343  * bpf progs can and should share bpf_mem_cache when possible.
344  */
345 
346 static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
347 {
348 	init_irq_work(&c->refill_work, bpf_mem_refill);
349 	if (c->unit_size <= 256) {
350 		c->low_watermark = 32;
351 		c->high_watermark = 96;
352 	} else {
353 		/* When page_size == 4k, order-0 cache will have low_mark == 2
354 		 * and high_mark == 6 with batch alloc of 3 individual pages at
355 		 * a time.
356 		 * For 8k allocs and above: low == 1, high == 3, batch == 1.
357 		 */
358 		c->low_watermark = max(32 * 256 / c->unit_size, 1);
359 		c->high_watermark = max(96 * 256 / c->unit_size, 3);
360 	}
361 	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);
362 
363 	/* To avoid consuming memory, assume that the 1st run of a bpf
364 	 * prog won't be doing more than 4 map_update_elem calls from an
365 	 * irq-disabled region.
366 	 */
367 	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu));
368 }
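
/* Worked examples of the watermark heuristic above:
 *
 *   unit_size <= 256:  low == 32, high == 96, batch == 48
 *   unit_size == 4096: low == max(32 * 256 / 4096, 1) == 2,
 *                      high == max(96 * 256 / 4096, 3) == 6, batch == 3
 *   unit_size == 8192: low == 1, high == 3, batch == 1
 */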
369 
370 /* When size != 0 allocate a single bpf_mem_cache for each cpu.
371  * This is the typical bpf hash map use case when all elements have equal size.
372  *
373  * When size == 0 allocate 11 bpf_mem_cache-s for each cpu, then rely on
374  * kmalloc/kfree. Max allocation size is 4096 in this case.
375  * This is the bpf_dynptr and bpf_kptr use case.
376  */
377 int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
378 {
379 	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
380 	struct bpf_mem_caches *cc, __percpu *pcc;
381 	struct bpf_mem_cache *c, __percpu *pc;
382 	struct obj_cgroup *objcg = NULL;
383 	int cpu, i, unit_size, percpu_size = 0;
384 
385 	if (size) {
386 		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
387 		if (!pc)
388 			return -ENOMEM;
389 
390 		if (percpu)
391 			/* room for llist_node and per-cpu pointer */
392 			percpu_size = LLIST_NODE_SZ + sizeof(void *);
393 		else
394 			size += LLIST_NODE_SZ; /* room for llist_node */
395 		unit_size = size;
396 
397 #ifdef CONFIG_MEMCG_KMEM
398 		if (memcg_bpf_enabled())
399 			objcg = get_obj_cgroup_from_current();
400 #endif
401 		for_each_possible_cpu(cpu) {
402 			c = per_cpu_ptr(pc, cpu);
403 			c->unit_size = unit_size;
404 			c->objcg = objcg;
405 			c->percpu_size = percpu_size;
406 			prefill_mem_cache(c, cpu);
407 		}
408 		ma->cache = pc;
409 		return 0;
410 	}
411 
412 	/* size == 0 && percpu is an invalid combination */
413 	if (WARN_ON_ONCE(percpu))
414 		return -EINVAL;
415 
416 	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
417 	if (!pcc)
418 		return -ENOMEM;
419 #ifdef CONFIG_MEMCG_KMEM
420 	objcg = get_obj_cgroup_from_current();
421 #endif
422 	for_each_possible_cpu(cpu) {
423 		cc = per_cpu_ptr(pcc, cpu);
424 		for (i = 0; i < NUM_CACHES; i++) {
425 			c = &cc->cache[i];
426 			c->unit_size = sizes[i];
427 			c->objcg = objcg;
428 			prefill_mem_cache(c, cpu);
429 		}
430 	}
431 	ma->caches = pcc;
432 	return 0;
433 }
434 
435 static void drain_mem_cache(struct bpf_mem_cache *c)
436 {
437 	struct llist_node *llnode, *t;
438 
439 	/* No progs are using this bpf_mem_cache, but htab_map_free() called
440 	 * bpf_mem_cache_free() for all remaining elements and they can be in
441 	 * free_by_rcu or in waiting_for_gp lists, so drain those lists now.
442 	 *
443 	 * Except for waiting_for_gp list, there are no concurrent operations
444 	 * on these lists, so it is safe to use __llist_del_all().
445 	 */
446 	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
447 		free_one(c, llnode);
448 	llist_for_each_safe(llnode, t, llist_del_all(&c->waiting_for_gp))
449 		free_one(c, llnode);
450 	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist))
451 		free_one(c, llnode);
452 	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist_extra))
453 		free_one(c, llnode);
454 }
455 
456 static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
457 {
458 	free_percpu(ma->cache);
459 	free_percpu(ma->caches);
460 	ma->cache = NULL;
461 	ma->caches = NULL;
462 }
463 
464 static void free_mem_alloc(struct bpf_mem_alloc *ma)
465 {
466 	/* The waiting_for_gp lists were drained, but __free_rcu might
467 	 * still be executing. Wait for it now, before freeing the percpu caches.
468 	 *
469 	 * rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
470 	 * but rcu_barrier_tasks_trace() and rcu_barrier() below are only used
471 	 * to wait for the pending __free_rcu_tasks_trace() and __free_rcu(),
472 	 * so if call_rcu(head, __free_rcu) is skipped due to
473 	 * rcu_trace_implies_rcu_gp(), it will be OK to skip rcu_barrier() by
474 	 * using rcu_trace_implies_rcu_gp() as well.
475 	 */
476 	rcu_barrier_tasks_trace();
477 	if (!rcu_trace_implies_rcu_gp())
478 		rcu_barrier();
479 	free_mem_alloc_no_barrier(ma);
480 }
481 
482 static void free_mem_alloc_deferred(struct work_struct *work)
483 {
484 	struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);
485 
486 	free_mem_alloc(ma);
487 	kfree(ma);
488 }
489 
490 static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
491 {
492 	struct bpf_mem_alloc *copy;
493 
494 	if (!rcu_in_progress) {
495 		/* Fast path. No callbacks are pending, hence there is no need
496 		 * to run the rcu_barrier()s.
497 		 */
498 		free_mem_alloc_no_barrier(ma);
499 		return;
500 	}
501 
502 	copy = kmalloc(sizeof(*ma), GFP_KERNEL);
503 	if (!copy) {
504 		/* Slow path with inline barriers */
505 		free_mem_alloc(ma);
506 		return;
507 	}
508 
509 	/* Defer barriers into the worker to let the rest of the map memory be freed */
510 	copy->cache = ma->cache;
511 	ma->cache = NULL;
512 	copy->caches = ma->caches;
513 	ma->caches = NULL;
514 	INIT_WORK(&copy->work, free_mem_alloc_deferred);
515 	queue_work(system_unbound_wq, &copy->work);
516 }
517 
518 void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
519 {
520 	struct bpf_mem_caches *cc;
521 	struct bpf_mem_cache *c;
522 	int cpu, i, rcu_in_progress;
523 
524 	if (ma->cache) {
525 		rcu_in_progress = 0;
526 		for_each_possible_cpu(cpu) {
527 			c = per_cpu_ptr(ma->cache, cpu);
528 			/*
529 			 * refill_work may be unfinished on a PREEMPT_RT kernel,
530 			 * where irq work is invoked in a per-CPU RT thread. It is
531 			 * also possible on a kernel where
532 			 * arch_irq_work_has_interrupt() is false and irq work is
533 			 * invoked from the timer interrupt. So wait for the
534 			 * completion of the irq work to ease the handling of
535 			 * concurrency.
536 			 */
537 			irq_work_sync(&c->refill_work);
538 			drain_mem_cache(c);
539 			rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
540 		}
541 		/* objcg is the same across cpus */
542 		if (c->objcg)
543 			obj_cgroup_put(c->objcg);
544 		destroy_mem_alloc(ma, rcu_in_progress);
545 	}
546 	if (ma->caches) {
547 		rcu_in_progress = 0;
548 		for_each_possible_cpu(cpu) {
549 			cc = per_cpu_ptr(ma->caches, cpu);
550 			for (i = 0; i < NUM_CACHES; i++) {
551 				c = &cc->cache[i];
552 				irq_work_sync(&c->refill_work);
553 				drain_mem_cache(c);
554 				rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
555 			}
556 		}
557 		if (c->objcg)
558 			obj_cgroup_put(c->objcg);
559 		destroy_mem_alloc(ma, rcu_in_progress);
560 	}
561 }
562 
563 /* notrace is necessary here and in other functions to make sure
564  * bpf programs cannot attach to them and cause llist corruptions.
565  */
566 static void notrace *unit_alloc(struct bpf_mem_cache *c)
567 {
568 	struct llist_node *llnode = NULL;
569 	unsigned long flags;
570 	int cnt = 0;
571 
572 	/* Disable irqs to prevent the following race for the majority of prog types:
573 	 * prog_A
574 	 *   bpf_mem_alloc
575 	 *      preemption or irq -> prog_B
576 	 *        bpf_mem_alloc
577 	 *
578 	 * but prog_B could be a perf_event NMI prog.
579 	 * Use the per-cpu 'active' counter to order free_llist access between
580 	 * unit_alloc/unit_free/bpf_mem_refill.
581 	 */
582 	local_irq_save(flags);
583 	if (local_inc_return(&c->active) == 1) {
584 		llnode = __llist_del_first(&c->free_llist);
585 		if (llnode)
586 			cnt = --c->free_cnt;
587 	}
588 	local_dec(&c->active);
589 	local_irq_restore(flags);
590 
591 	WARN_ON(cnt < 0);
592 
593 	if (cnt < c->low_watermark)
594 		irq_work_raise(c);
595 	return llnode;
596 }
597 
598 /* Though the 'ptr' object could have been allocated on a different cpu,
599  * add it to the free_llist of the current cpu.
600  * Let the kfree() logic deal with it when it's later called from irq_work.
601  */
602 static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
603 {
604 	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
605 	unsigned long flags;
606 	int cnt = 0;
607 
608 	BUILD_BUG_ON(LLIST_NODE_SZ > 8);
609 
610 	local_irq_save(flags);
611 	if (local_inc_return(&c->active) == 1) {
612 		__llist_add(llnode, &c->free_llist);
613 		cnt = ++c->free_cnt;
614 	} else {
615 		/* unit_free() cannot fail. Therefore add the object to the atomic
616 		 * llist; free_bulk() will drain it. Though free_llist_extra is
617 		 * a per-cpu list, we have to use the atomic llist_add() here, since
618 		 * this code can also be interrupted by a bpf NMI prog that does
619 		 * another unit_free() into the same free_llist_extra.
620 		 */
621 		llist_add(llnode, &c->free_llist_extra);
622 	}
623 	local_dec(&c->active);
624 	local_irq_restore(flags);
625 
626 	if (cnt > c->high_watermark)
627 		/* free a few objects from the current cpu into the global kmalloc pool */
628 		irq_work_raise(c);
629 }
630 
631 /* Called from BPF program or from sys_bpf syscall.
632  * In both cases migration is disabled.
633  */
634 void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
635 {
636 	int idx;
637 	void *ret;
638 
639 	if (!size)
640 		return ZERO_SIZE_PTR;
641 
642 	idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
643 	if (idx < 0)
644 		return NULL;
645 
646 	ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
647 	return !ret ? NULL : ret + LLIST_NODE_SZ;
648 }
649 
650 void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
651 {
652 	int idx;
653 
654 	if (!ptr)
655 		return;
656 
657 	idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
658 	if (idx < 0)
659 		return;
660 
661 	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
662 }
663 
664 void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
665 {
666 	void *ret;
667 
668 	ret = unit_alloc(this_cpu_ptr(ma->cache));
669 	return !ret ? NULL : ret + LLIST_NODE_SZ;
670 }
671 
672 void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
673 {
674 	if (!ptr)
675 		return;
676 
677 	unit_free(this_cpu_ptr(ma->cache), ptr);
678 }
679
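
/* A minimal usage sketch of the fixed-size API above. This is illustrative
 * only: 'my_elem' and 'my_elem_pool_example' are hypothetical names, not part
 * of any existing user of this allocator.
 *
 *	struct my_elem {
 *		int key;
 *		int value;
 *	};
 *
 *	static int my_elem_pool_example(void)
 *	{
 *		struct bpf_mem_alloc ma;
 *		struct my_elem *e;
 *		int err;
 *
 *		// Process context: bpf_mem_alloc_init() allocates with GFP_KERNEL.
 *		err = bpf_mem_alloc_init(&ma, sizeof(struct my_elem), false);
 *		if (err)
 *			return err;
 *
 *		// Alloc/free work in any context with migration disabled.
 *		migrate_disable();
 *		e = bpf_mem_cache_alloc(&ma);
 *		if (e) {
 *			e->key = 1;
 *			e->value = 2;
 *			bpf_mem_cache_free(&ma, e);
 *		}
 *		migrate_enable();
 *
 *		bpf_mem_alloc_destroy(&ma);
 *		return 0;
 *	}
 *
 * The size == 0 variant (bpf_mem_alloc_init(&ma, 0, false)) serves
 * variable-size requests up to 4096 bytes via bpf_mem_alloc()/bpf_mem_free().
 */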