Lines matching refs: c — cross-reference listing for the identifier c in the BPF memory allocator (kernel/bpf/memalloc.c). Each entry shows the source line number, the matching line, and the enclosing function; the trailing "argument"/"local" tag says how c is bound there. Only lines that reference c are listed, so consecutive entries may skip over intervening code.
142 static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags) in __alloc() argument
144 if (c->percpu_size) { in __alloc()
145 void __percpu **obj = kmalloc_node(c->percpu_size, flags, node); in __alloc()
146 void __percpu *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags); in __alloc()
157 return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node); in __alloc()
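
Lines 142-157 are the backing allocator. __alloc() takes one of two paths: a per-CPU cache (c->percpu_size != 0) allocates a small holder with kmalloc_node() plus the real per-CPU storage with __alloc_percpu_gfp(), while a regular cache returns a single zeroed kmalloc_node() chunk. A minimal userspace model of that two-path shape; the struct name, the calloc()/free() stand-ins, and the failure handling (elided from the listing) are assumptions:

    #include <stdlib.h>

    struct my_cache { size_t unit_size; size_t percpu_size; };  /* stand-in type */

    /* Model of the two-path allocation: a per-CPU cache allocates a small
     * holder plus separate storage and links them; a regular cache returns
     * one zeroed chunk of unit_size bytes. */
    static void *model_alloc(struct my_cache *c)
    {
        if (c->percpu_size) {
            /* holder, like kmalloc_node(); the kernel sizes percpu_size so
             * that word 0 can hold the llist node and word 1 the storage */
            void **obj = calloc(1, c->percpu_size);
            void *pptr = calloc(1, c->unit_size);   /* like __alloc_percpu_gfp() */

            if (!obj || !pptr) {
                free(obj);
                free(pptr);
                return NULL;
            }
            obj[1] = pptr;
            return obj;
        }
        return calloc(1, c->unit_size);             /* flags | __GFP_ZERO */
    }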
160 static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c) in get_memcg() argument
163 if (c->objcg) in get_memcg()
164 return get_mem_cgroup_from_objcg(c->objcg); in get_memcg()
171 static void inc_active(struct bpf_mem_cache *c, unsigned long *flags) in inc_active() argument
186 WARN_ON_ONCE(local_inc_return(&c->active) != 1); in inc_active()
189 static void dec_active(struct bpf_mem_cache *c, unsigned long *flags) in dec_active() argument
191 local_dec(&c->active); in dec_active()
196 static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj) in add_obj_to_free_list() argument
200 inc_active(c, &flags); in add_obj_to_free_list()
201 __llist_add(obj, &c->free_llist); in add_obj_to_free_list()
202 c->free_cnt++; in add_obj_to_free_list()
203 dec_active(c, &flags); in add_obj_to_free_list()
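
inc_active()/dec_active() bracket every touch of the per-CPU free list with a local counter that must go 0 -> 1 -> 0; the WARN on line 186 catches re-entry, and on some configurations the flags argument also carries saved IRQ state (those lines are elided here). add_obj_to_free_list() is the canonical user. A userspace model with a plain int standing in for both the counter and the IRQ bracketing:

    #include <assert.h>

    struct node { struct node *next; };

    struct guard_cache {
        int active;                /* local_t in the kernel */
        int free_cnt;
        int low_watermark, high_watermark;
        struct node *free_llist;   /* per-CPU free list, newest first */
    };

    static void inc_active_model(struct guard_cache *c)
    {
        /* The kernel WARNs (line 186) if the section is ever entered twice. */
        assert(++c->active == 1);
    }

    static void dec_active_model(struct guard_cache *c)
    {
        c->active--;
    }

    static void add_obj_model(struct guard_cache *c, struct node *obj)
    {
        inc_active_model(c);
        obj->next = c->free_llist;     /* __llist_add(obj, &c->free_llist) */
        c->free_llist = obj;
        c->free_cnt++;
        dec_active_model(c);
    }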
207 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic) in alloc_bulk() argument
223 obj = llist_del_first(&c->free_by_rcu_ttrace); in alloc_bulk()
226 add_obj_to_free_list(c, obj); in alloc_bulk()
232 obj = llist_del_first(&c->waiting_for_gp_ttrace); in alloc_bulk()
235 add_obj_to_free_list(c, obj); in alloc_bulk()
240 memcg = get_memcg(c); in alloc_bulk()
248 obj = __alloc(c, node, gfp); in alloc_bulk()
251 add_obj_to_free_list(c, obj); in alloc_bulk()
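
alloc_bulk() refills cnt objects, and lines 223-251 show its preference order: recycle from free_by_rcu_ttrace first, then from waiting_for_gp_ttrace, and only then pay for fresh __alloc() calls charged to the cache's memcg. The kernel runs three sequential loops; the sketch below, building on the model above, condenses them into one per-item fallback chain (pop() is a hypothetical single-item remove):

    /* Hypothetical helper: pop the first node off a plain list head. */
    static struct node *pop(struct node **head)
    {
        struct node *n = *head;

        if (n)
            *head = n->next;
        return n;
    }

    static void alloc_bulk_model(struct guard_cache *c, int cnt,
                                 struct node **free_by_rcu_ttrace,
                                 struct node **waiting_for_gp_ttrace)
    {
        for (int i = 0; i < cnt; i++) {
            /* cheapest: objects whose grace periods already elapsed */
            struct node *obj = pop(free_by_rcu_ttrace);

            if (!obj)   /* second choice: the batch parked for the trace GP */
                obj = pop(waiting_for_gp_ttrace);
            if (!obj)   /* last resort: fresh allocation, like __alloc() */
                obj = calloc(1, sizeof(struct node));
            if (!obj)
                break;
            add_obj_model(c, obj);
        }
    }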
265 static int free_all(struct bpf_mem_cache *c, struct llist_node *llnode, bool percpu) in free_all() argument
271 if (c->dtor) in free_all()
272 c->dtor((void *)pos + LLIST_NODE_SZ, c->dtor_ctx); in free_all()
281 struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace); in __free_rcu() local
283 free_all(c, llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size); in __free_rcu()
284 atomic_set(&c->call_rcu_ttrace_in_progress, 0); in __free_rcu()
298 static void enque_to_free(struct bpf_mem_cache *c, void *obj) in enque_to_free() argument
305 llist_add(llnode, &c->free_by_rcu_ttrace); in enque_to_free()
308 static void do_call_rcu_ttrace(struct bpf_mem_cache *c) in do_call_rcu_ttrace() argument
312 if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) { in do_call_rcu_ttrace()
313 if (unlikely(READ_ONCE(c->draining))) { in do_call_rcu_ttrace()
314 llnode = llist_del_all(&c->free_by_rcu_ttrace); in do_call_rcu_ttrace()
315 free_all(c, llnode, !!c->percpu_size); in do_call_rcu_ttrace()
320 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace)); in do_call_rcu_ttrace()
321 llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace)) in do_call_rcu_ttrace()
322 llist_add(llnode, &c->waiting_for_gp_ttrace); in do_call_rcu_ttrace()
324 if (unlikely(READ_ONCE(c->draining))) { in do_call_rcu_ttrace()
325 __free_rcu(&c->rcu_ttrace); in do_call_rcu_ttrace()
334 call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace); in do_call_rcu_ttrace()
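
do_call_rcu_ttrace() (lines 308-334) is a single-flight batcher: the atomic_xchg() on line 312 admits only one outstanding tasks-trace callback per cache. The winner moves everything from free_by_rcu_ttrace onto waiting_for_gp_ttrace and schedules the callback; losers simply return, except that while draining both paths free synchronously. A model of the single-flight shape with C11 atomics (the list moves are reduced to comments):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct gp_batcher {
        atomic_int in_progress;    /* call_rcu_ttrace_in_progress */
        bool draining;
        /* free_by_rcu_ttrace / waiting_for_gp_ttrace elided */
    };

    static void do_call_gp_model(struct gp_batcher *b)
    {
        if (atomic_exchange(&b->in_progress, 1)) {
            /* someone else owns the in-flight batch; when draining, the
             * kernel frees free_by_rcu_ttrace right here (lines 313-315) */
            return;
        }
        /* move free_by_rcu_ttrace -> waiting_for_gp_ttrace (lines 321-322) */
        if (b->draining) {
            /* no readers left: __free_rcu() runs immediately (line 325) */
            atomic_store(&b->in_progress, 0);
            return;
        }
        /* call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace):
         * the callback frees the batch, then clears in_progress (line 284) */
    }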
337 static void free_bulk(struct bpf_mem_cache *c) in free_bulk() argument
339 struct bpf_mem_cache *tgt = c->tgt; in free_bulk()
344 WARN_ON_ONCE(tgt->unit_size != c->unit_size); in free_bulk()
345 WARN_ON_ONCE(tgt->percpu_size != c->percpu_size); in free_bulk()
348 inc_active(c, &flags); in free_bulk()
349 llnode = __llist_del_first(&c->free_llist); in free_bulk()
351 cnt = --c->free_cnt; in free_bulk()
354 dec_active(c, &flags); in free_bulk()
357 } while (cnt > (c->high_watermark + c->low_watermark) / 2); in free_bulk()
360 llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra)) in free_bulk()
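
free_bulk() (lines 337-360) trims the cache when unit_free() pushed it above the high watermark: it pops objects under the active guard and hands each to enque_to_free() on c->tgt (the object's owner), stopping once the count falls to the midpoint (high + low) / 2 so the next burst of frees does not immediately re-trigger it; the side list free_llist_extra is then drained the same way. Continuing the model (enque_to_free_model() is a hypothetical stand-in):

    /* Hypothetical stand-in for enque_to_free(): park on the ttrace stage.
     * The kernel queues onto c->tgt, not necessarily c itself. */
    static struct node *free_by_rcu_ttrace_model;

    static void enque_to_free_model(struct guard_cache *c, struct node *n)
    {
        (void)c;
        n->next = free_by_rcu_ttrace_model;
        free_by_rcu_ttrace_model = n;
    }

    static void free_bulk_model(struct guard_cache *c)
    {
        int cnt;

        do {
            struct node *n;

            inc_active_model(c);
            n = pop(&c->free_llist);
            cnt = n ? --c->free_cnt : 0;
            dec_active_model(c);
            if (n)
                enque_to_free_model(c, n);
        } while (cnt > (c->high_watermark + c->low_watermark) / 2);
        /* then splice free_llist_extra the same way and kick
         * do_call_rcu_ttrace() on c->tgt */
    }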
367 struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu); in __free_by_rcu() local
368 struct bpf_mem_cache *tgt = c->tgt; in __free_by_rcu()
371 WARN_ON_ONCE(tgt->unit_size != c->unit_size); in __free_by_rcu()
372 WARN_ON_ONCE(tgt->percpu_size != c->percpu_size); in __free_by_rcu()
374 llnode = llist_del_all(&c->waiting_for_gp); in __free_by_rcu()
378 llist_add_batch(llnode, c->waiting_for_gp_tail, &tgt->free_by_rcu_ttrace); in __free_by_rcu()
383 atomic_set(&c->call_rcu_in_progress, 0); in __free_by_rcu()
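
__free_by_rcu() runs after a regular RCU grace period and forwards the whole waiting_for_gp batch to tgt->free_by_rcu_ttrace in a single llist_add_batch() using the tail pointer saved earlier (line 420), then lets the tasks-trace stage start before clearing call_rcu_in_progress. The batch splice is the interesting primitive: attaching a ready-made chain to a lock-free list with one CAS loop. A self-contained C11 model of that splice:

    #include <stdatomic.h>

    struct lnode { struct lnode *next; };

    /* Model of llist_add_batch(): splice the chain head..tail onto a
     * lock-free list.  tail->next is rewritten on each retry, so the chain
     * must not yet be visible to other producers. */
    static void add_batch_model(struct lnode *head, struct lnode *tail,
                                struct lnode *_Atomic *list)
    {
        struct lnode *first = atomic_load(list);

        do {
            tail->next = first;
        } while (!atomic_compare_exchange_weak(list, &first, head));
    }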
386 static void check_free_by_rcu(struct bpf_mem_cache *c) in check_free_by_rcu() argument
392 if (unlikely(!llist_empty(&c->free_llist_extra_rcu))) { in check_free_by_rcu()
393 inc_active(c, &flags); in check_free_by_rcu()
394 llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra_rcu)) in check_free_by_rcu()
395 if (__llist_add(llnode, &c->free_by_rcu)) in check_free_by_rcu()
396 c->free_by_rcu_tail = llnode; in check_free_by_rcu()
397 dec_active(c, &flags); in check_free_by_rcu()
400 if (llist_empty(&c->free_by_rcu)) in check_free_by_rcu()
403 if (atomic_xchg(&c->call_rcu_in_progress, 1)) { in check_free_by_rcu()
416 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp)); in check_free_by_rcu()
418 inc_active(c, &flags); in check_free_by_rcu()
419 WRITE_ONCE(c->waiting_for_gp.first, __llist_del_all(&c->free_by_rcu)); in check_free_by_rcu()
420 c->waiting_for_gp_tail = c->free_by_rcu_tail; in check_free_by_rcu()
421 dec_active(c, &flags); in check_free_by_rcu()
423 if (unlikely(READ_ONCE(c->draining))) { in check_free_by_rcu()
424 free_all(c, llist_del_all(&c->waiting_for_gp), !!c->percpu_size); in check_free_by_rcu()
425 atomic_set(&c->call_rcu_in_progress, 0); in check_free_by_rcu()
427 call_rcu_hurry(&c->rcu, __free_by_rcu); in check_free_by_rcu()
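
check_free_by_rcu() (lines 386-427) advances the reuse-after-RCU pipeline one stage: splice the lock-free side list free_llist_extra_rcu into free_by_rcu under the active guard, and, if no callback is already in flight (the atomic_xchg() on line 403), promote the whole free_by_rcu chain to waiting_for_gp and schedule __free_by_rcu() with call_rcu_hurry(). End to end an object travels: unit_free_rcu() -> free_by_rcu -> waiting_for_gp -(RCU GP)-> tgt->free_by_rcu_ttrace -(tasks-trace GP)-> reusable or freed. A model of the stage advance:

    #include <stdatomic.h>
    #include <stddef.h>

    struct pnode { struct pnode *next; };

    struct pipeline {
        struct pnode *free_by_rcu;     /* filled by unit_free_rcu() */
        struct pnode *waiting_for_gp;  /* owned by the in-flight callback */
        atomic_int call_rcu_in_progress;
    };

    static void check_free_model(struct pipeline *c)
    {
        /* splice free_llist_extra_rcu into free_by_rcu here (lines 392-397) */
        if (!c->free_by_rcu)
            return;                                 /* nothing queued */
        if (atomic_exchange(&c->call_rcu_in_progress, 1))
            return;                                 /* batch already in flight */
        c->waiting_for_gp = c->free_by_rcu;         /* promote the whole chain */
        c->free_by_rcu = NULL;
        /* call_rcu_hurry(&c->rcu, __free_by_rcu): after a grace period the
         * callback forwards the chain to tgt->free_by_rcu_ttrace and clears
         * call_rcu_in_progress (lines 374-383) */
    }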
433 struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work); in bpf_mem_refill() local
437 cnt = c->free_cnt; in bpf_mem_refill()
438 if (cnt < c->low_watermark) in bpf_mem_refill()
442 alloc_bulk(c, c->batch, NUMA_NO_NODE, true); in bpf_mem_refill()
443 else if (cnt > c->high_watermark) in bpf_mem_refill()
444 free_bulk(c); in bpf_mem_refill()
446 check_free_by_rcu(c); in bpf_mem_refill()
449 static void notrace irq_work_raise(struct bpf_mem_cache *c) in irq_work_raise() argument
451 irq_work_queue(&c->refill_work); in irq_work_raise()
471 static void init_refill_work(struct bpf_mem_cache *c) in init_refill_work() argument
473 init_irq_work(&c->refill_work, bpf_mem_refill); in init_refill_work()
474 if (c->percpu_size) { in init_refill_work()
475 c->low_watermark = 1; in init_refill_work()
476 c->high_watermark = 3; in init_refill_work()
477 } else if (c->unit_size <= 256) { in init_refill_work()
478 c->low_watermark = 32; in init_refill_work()
479 c->high_watermark = 96; in init_refill_work()
486 c->low_watermark = max(32 * 256 / c->unit_size, 1); in init_refill_work()
487 c->high_watermark = max(96 * 256 / c->unit_size, 3); in init_refill_work()
489 c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1); in init_refill_work()
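
The watermarks scale inversely with unit_size so every size class caches roughly the same number of bytes per CPU: per-CPU caches get the tiny fixed pair 1/3, units up to 256 bytes the fixed pair 32/96, and larger units 32*256/unit_size and 96*256/unit_size with floors of 1 and 3; batch is then 3/4 of the gap, at least 1. For unit_size = 4096 that gives low = max(2, 1) = 2, high = max(6, 3) = 6, batch = max(3, 1) = 3. A tiny standalone program reproducing the arithmetic for the non-per-CPU case:

    #include <stdio.h>

    /* Reproduce the watermark scaling of init_refill_work() for a few
     * unit sizes (per-CPU caches use the fixed pair 1/3 instead). */
    static void watermarks(int unit_size)
    {
        int low, high, batch;

        if (unit_size <= 256) {
            low = 32; high = 96;
        } else {
            low  = 32 * 256 / unit_size; if (low < 1)  low = 1;
            high = 96 * 256 / unit_size; if (high < 3) high = 3;
        }
        batch = (high - low) / 4 * 3; if (batch < 1) batch = 1;
        printf("unit %5d: low=%2d high=%2d batch=%2d\n",
               unit_size, low, high, batch);
    }

    int main(void)
    {
        int sizes[] = { 64, 256, 1024, 4096 };

        for (int i = 0; i < 4; i++)
            watermarks(sizes[i]);
        return 0;
    }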
492 static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu) in prefill_mem_cache() argument
501 if (!c->percpu_size && c->unit_size <= 256) in prefill_mem_cache()
503 alloc_bulk(c, cnt, cpu_to_node(cpu), false); in prefill_mem_cache()
516 struct bpf_mem_cache *c; struct bpf_mem_cache __percpu *pc; in bpf_mem_alloc_init() local
544 c = per_cpu_ptr(pc, cpu); in bpf_mem_alloc_init()
545 c->unit_size = unit_size; in bpf_mem_alloc_init()
546 c->objcg = objcg; in bpf_mem_alloc_init()
547 c->percpu_size = percpu_size; in bpf_mem_alloc_init()
548 c->tgt = c; in bpf_mem_alloc_init()
549 init_refill_work(c); in bpf_mem_alloc_init()
550 prefill_mem_cache(c, cpu); in bpf_mem_alloc_init()
566 c = &cc->cache[i]; in bpf_mem_alloc_init()
567 c->unit_size = sizes[i]; in bpf_mem_alloc_init()
568 c->objcg = objcg; in bpf_mem_alloc_init()
569 c->percpu_size = percpu_size; in bpf_mem_alloc_init()
570 c->tgt = c; in bpf_mem_alloc_init()
572 init_refill_work(c); in bpf_mem_alloc_init()
573 prefill_mem_cache(c, cpu); in bpf_mem_alloc_init()
600 struct bpf_mem_cache *c; in bpf_mem_alloc_percpu_unit_init() local
615 c = &cc->cache[i]; in bpf_mem_alloc_percpu_unit_init()
616 if (c->unit_size) in bpf_mem_alloc_percpu_unit_init()
619 c->unit_size = unit_size; in bpf_mem_alloc_percpu_unit_init()
620 c->objcg = objcg; in bpf_mem_alloc_percpu_unit_init()
621 c->percpu_size = percpu_size; in bpf_mem_alloc_percpu_unit_init()
622 c->tgt = c; in bpf_mem_alloc_percpu_unit_init()
624 init_refill_work(c); in bpf_mem_alloc_percpu_unit_init()
625 prefill_mem_cache(c, cpu); in bpf_mem_alloc_percpu_unit_init()
631 static void drain_mem_cache(struct bpf_mem_cache *c) in drain_mem_cache() argument
633 bool percpu = !!c->percpu_size; in drain_mem_cache()
642 free_all(c, llist_del_all(&c->free_by_rcu_ttrace), percpu); in drain_mem_cache()
643 free_all(c, llist_del_all(&c->waiting_for_gp_ttrace), percpu); in drain_mem_cache()
644 free_all(c, __llist_del_all(&c->free_llist), percpu); in drain_mem_cache()
645 free_all(c, __llist_del_all(&c->free_llist_extra), percpu); in drain_mem_cache()
646 free_all(c, __llist_del_all(&c->free_by_rcu), percpu); in drain_mem_cache()
647 free_all(c, __llist_del_all(&c->free_llist_extra_rcu), percpu); in drain_mem_cache()
648 free_all(c, llist_del_all(&c->waiting_for_gp), percpu); in drain_mem_cache()
651 static void check_mem_cache(struct bpf_mem_cache *c) in check_mem_cache() argument
653 WARN_ON_ONCE(!llist_empty(&c->free_by_rcu_ttrace)); in check_mem_cache()
654 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace)); in check_mem_cache()
655 WARN_ON_ONCE(!llist_empty(&c->free_llist)); in check_mem_cache()
656 WARN_ON_ONCE(!llist_empty(&c->free_llist_extra)); in check_mem_cache()
657 WARN_ON_ONCE(!llist_empty(&c->free_by_rcu)); in check_mem_cache()
658 WARN_ON_ONCE(!llist_empty(&c->free_llist_extra_rcu)); in check_mem_cache()
659 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp)); in check_mem_cache()
665 struct bpf_mem_cache *c; in check_leaked_objs() local
670 c = per_cpu_ptr(ma->cache, cpu); in check_leaked_objs()
671 check_mem_cache(c); in check_leaked_objs()
678 c = &cc->cache[i]; in check_leaked_objs()
679 check_mem_cache(c); in check_leaked_objs()
752 struct bpf_mem_cache *c; in bpf_mem_alloc_destroy() local
758 c = per_cpu_ptr(ma->cache, cpu); in bpf_mem_alloc_destroy()
759 WRITE_ONCE(c->draining, true); in bpf_mem_alloc_destroy()
760 irq_work_sync(&c->refill_work); in bpf_mem_alloc_destroy()
761 drain_mem_cache(c); in bpf_mem_alloc_destroy()
762 rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress); in bpf_mem_alloc_destroy()
763 rcu_in_progress += atomic_read(&c->call_rcu_in_progress); in bpf_mem_alloc_destroy()
773 c = &cc->cache[i]; in bpf_mem_alloc_destroy()
774 WRITE_ONCE(c->draining, true); in bpf_mem_alloc_destroy()
775 irq_work_sync(&c->refill_work); in bpf_mem_alloc_destroy()
776 drain_mem_cache(c); in bpf_mem_alloc_destroy()
777 rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress); in bpf_mem_alloc_destroy()
778 rcu_in_progress += atomic_read(&c->call_rcu_in_progress); in bpf_mem_alloc_destroy()
789 static void notrace *unit_alloc(struct bpf_mem_cache *c) in unit_alloc() argument
806 if (local_inc_return(&c->active) == 1) { in unit_alloc()
807 llnode = __llist_del_first(&c->free_llist); in unit_alloc()
809 cnt = --c->free_cnt; in unit_alloc()
810 *(struct bpf_mem_cache **)llnode = c; in unit_alloc()
813 local_dec(&c->active); in unit_alloc()
817 if (cnt < c->low_watermark) in unit_alloc()
818 irq_work_raise(c); in unit_alloc()
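
unit_alloc() (lines 789-818) is the allocation fast path: no lock, just the active counter. Only the non-reentrant entry (local_inc_return() == 1 on line 806) may pop; the popped node's first word is stamped with the owning cache (line 810) so the free path can find its way home, and a refill irq_work is raised when the count dips below the low watermark. Continuing the guard_cache model (IRQs are additionally disabled around this section in the kernel):

    static void *unit_alloc_model(struct guard_cache *c)
    {
        struct node *n = NULL;
        int cnt = 0;

        if (++c->active == 1) {            /* local_inc_return(&c->active) */
            n = pop(&c->free_llist);
            if (n) {
                cnt = --c->free_cnt;
                *(struct guard_cache **)n = c;   /* stamp the owner word */
            }
        }
        c->active--;

        if (cnt < c->low_watermark) {
            /* irq_work_raise(c): refill runs later in bpf_mem_refill() */
        }
        return n;
    }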
832 static void notrace unit_free(struct bpf_mem_cache *c, void *ptr) in unit_free() argument
844 c->tgt = *(struct bpf_mem_cache **)llnode; in unit_free()
847 if (local_inc_return(&c->active) == 1) { in unit_free()
848 __llist_add(llnode, &c->free_llist); in unit_free()
849 cnt = ++c->free_cnt; in unit_free()
857 llist_add(llnode, &c->free_llist_extra); in unit_free()
859 local_dec(&c->active); in unit_free()
861 if (cnt > c->high_watermark) in unit_free()
863 irq_work_raise(c); in unit_free()
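
unit_free() mirrors it: line 844 first recovers the object's owner from the stamped word into c->tgt (the object may be freed on a different CPU, or into a different cache, than it came from), then pushes onto free_llist if this context owns the active section, or onto the lock-free free_llist_extra otherwise; crossing the high watermark raises the trim irq_work. Continuing the model, with the lock-free side push left as a comment:

    static void unit_free_model(struct guard_cache *c, struct node *n)
    {
        int cnt = 0;

        /* c->tgt = *(struct bpf_mem_cache **)n runs here (line 844) */
        if (++c->active == 1) {
            n->next = c->free_llist;        /* __llist_add() */
            c->free_llist = n;
            cnt = ++c->free_cnt;
        } else {
            /* reentrant (e.g. an IRQ interrupted unit_alloc()):
             * llist_add(n, &c->free_llist_extra) instead (line 857) */
        }
        c->active--;

        if (cnt > c->high_watermark) {
            /* irq_work_raise(c): free_bulk() trims later */
        }
    }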
872 static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr) in unit_free_rcu() argument
877 c->tgt = *(struct bpf_mem_cache **)llnode; in unit_free_rcu()
880 if (local_inc_return(&c->active) == 1) { in unit_free_rcu()
881 if (__llist_add(llnode, &c->free_by_rcu)) in unit_free_rcu()
882 c->free_by_rcu_tail = llnode; in unit_free_rcu()
884 llist_add(llnode, &c->free_llist_extra_rcu); in unit_free_rcu()
886 local_dec(&c->active); in unit_free_rcu()
888 if (!atomic_read(&c->call_rcu_in_progress)) in unit_free_rcu()
889 irq_work_raise(c); in unit_free_rcu()
916 struct bpf_mem_cache *c; in bpf_mem_free() local
922 c = *(void **)(ptr - LLIST_NODE_SZ); in bpf_mem_free()
923 idx = bpf_mem_cache_idx(c->unit_size); in bpf_mem_free()
932 struct bpf_mem_cache *c; in bpf_mem_free_rcu() local
938 c = *(void **)(ptr - LLIST_NODE_SZ); in bpf_mem_free_rcu()
939 idx = bpf_mem_cache_idx(c->unit_size); in bpf_mem_free_rcu()
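
Lines 916-939 show why the stamp matters: bpf_mem_free() and bpf_mem_free_rcu() read the cache pointer from the word LLIST_NODE_SZ bytes before the user pointer, then derive the size class with bpf_mem_cache_idx(c->unit_size); no lookup table and no size argument are needed. A self-contained model of the hidden-header trick (NODE_SZ and the struct are stand-ins):

    #include <stdlib.h>

    #define NODE_SZ sizeof(void *)           /* stands in for LLIST_NODE_SZ */

    struct hdr_cache { int unit_size; };

    /* Allocate with a hidden one-word header recording the owner cache. */
    static void *hdr_alloc(struct hdr_cache *c)
    {
        void **p = malloc(NODE_SZ + c->unit_size);

        if (!p)
            return NULL;
        *p = c;                              /* stamp, as unit_alloc() does */
        return (char *)p + NODE_SZ;          /* caller sees only the payload */
    }

    /* Recover the owner from a payload pointer, as bpf_mem_free() does. */
    static struct hdr_cache *hdr_owner(void *ptr)
    {
        return *(struct hdr_cache **)((char *)ptr - NODE_SZ);
    }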
994 struct bpf_mem_cache *c; in bpf_mem_cache_alloc_flags() local
997 c = this_cpu_ptr(ma->cache); in bpf_mem_cache_alloc_flags()
999 ret = unit_alloc(c); in bpf_mem_cache_alloc_flags()
1003 memcg = get_memcg(c); in bpf_mem_cache_alloc_flags()
1005 ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT); in bpf_mem_cache_alloc_flags()
1007 *(struct bpf_mem_cache **)ret = c; in bpf_mem_cache_alloc_flags()
1029 struct bpf_mem_cache *c; in bpf_mem_alloc_set_dtor() local
1037 c = per_cpu_ptr(ma->cache, cpu); in bpf_mem_alloc_set_dtor()
1038 c->dtor = dtor; in bpf_mem_alloc_set_dtor()
1039 c->dtor_ctx = ctx; in bpf_mem_alloc_set_dtor()
1046 c = &cc->cache[i]; in bpf_mem_alloc_set_dtor()
1047 c->dtor = dtor; in bpf_mem_alloc_set_dtor()
1048 c->dtor_ctx = ctx; in bpf_mem_alloc_set_dtor()