Lines matching defs of `c` (struct bpf_mem_cache *) in kernel/bpf/memalloc.c, the BPF memory allocator; the leading number on each line is its position in that file.
2 /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
140 static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
142 if (c->percpu_size) {
143 void __percpu **obj = kmalloc_node(c->percpu_size, flags, node);
144 void __percpu *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
155 return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
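__alloc() reserves one pointer-sized slot (LLIST_NODE_SZ) in front of every object: while the object is cached that slot is the llist link, and while it is live it holds the owning bpf_mem_cache (stamped in unit_alloc() and read back in bpf_mem_free() further down). For per-CPU objects the kmalloc'ed box of percpu_size bytes carries the separately allocated __percpu region of unit_size bytes. Below is a minimal userspace sketch of the hidden-header layout only, with invented toy_* names; note the kernel folds the header into unit_size at init time instead of adding it as done here.

#include <stdlib.h>

/* Illustrative stand-ins, not the kernel types. */
struct toy_cache { size_t unit_size; };

#define TOY_HDR sizeof(void *)          /* plays the role of LLIST_NODE_SZ */

static void *toy_alloc_obj(struct toy_cache *c)
{
        /* one zeroed chunk: [owner / llist slot][payload] */
        void *raw = calloc(1, TOY_HDR + c->unit_size);

        if (!raw)
                return NULL;
        *(struct toy_cache **)raw = c;  /* record the owner, as unit_alloc() does */
        return (char *)raw + TOY_HDR;   /* the caller only ever sees the payload */
}

static void toy_free_obj(void *ptr)
{
        if (ptr)
                free((char *)ptr - TOY_HDR);    /* step back over the header */
}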
158 static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
161 if (c->objcg)
162 return get_mem_cgroup_from_objcg(c->objcg);
169 static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
184 WARN_ON_ONCE(local_inc_return(&c->active) != 1);
187 static void dec_active(struct bpf_mem_cache *c, unsigned long *flags)
189 local_dec(&c->active);
194 static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
198 inc_active(c, &flags);
199 __llist_add(obj, &c->free_llist);
200 c->free_cnt++;
201 dec_active(c, &flags);
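inc_active()/dec_active() bracket every touch of free_llist from the refill/flush side: the local counter catches re-entrancy (for example an NMI-attached BPF program landing on the same CPU), and on PREEMPT_RT interrupts are disabled across the window as well. A rough userspace model of the protocol follows, with a plain int standing in for local_t and an array for the llist; toy_* names are invented and the assert mirrors the WARN_ON_ONCE in inc_active().

#include <assert.h>

struct toy_cache {
        int active;                     /* stands in for local_t c->active */
        int free_cnt;
        void *free_llist[64];           /* stands in for c->free_llist */
};

static void toy_inc_active(struct toy_cache *c)
{
        c->active++;
        assert(c->active == 1);         /* nesting here would be a bug */
}

static void toy_dec_active(struct toy_cache *c)
{
        c->active--;
}

/* Same shape as add_obj_to_free_list(): list surgery only inside the guard. */
static void toy_add_obj(struct toy_cache *c, void *obj)
{
        toy_inc_active(c);
        if (c->free_cnt < 64)
                c->free_llist[c->free_cnt++] = obj;
        toy_dec_active(c);
}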
205 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
217 * For every 'c' llist_del_first(&c->free_by_rcu_ttrace); is
221 obj = llist_del_first(&c->free_by_rcu_ttrace);
224 add_obj_to_free_list(c, obj);
230 obj = llist_del_first(&c->waiting_for_gp_ttrace);
233 add_obj_to_free_list(c, obj);
238 memcg = get_memcg(c);
246 obj = __alloc(c, node, gfp);
249 add_obj_to_free_list(c, obj);
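alloc_bulk() recycles before it allocates: it first pulls objects parked on free_by_rcu_ttrace, then whatever still sits on waiting_for_gp_ttrace, and only for the remainder asks the allocator for fresh memory accounted to the cache's memcg (get_memcg()). A simplified single-threaded sketch of that ordering, assuming invented toy_* names; the two recycle lists are collapsed into one pool and calloc stands in for __alloc().

#include <stdlib.h>

struct toy_pool {
        void *slots[32];
        int nr;
};

static void *toy_pop(struct toy_pool *p)
{
        return p->nr ? p->slots[--p->nr] : NULL;
}

/* Refill 'cache' with up to cnt objects: recycled ones first, fresh
 * zeroed allocations only when the recycle pool runs dry.
 */
static void toy_refill(struct toy_pool *cache, struct toy_pool *recycle,
                       size_t unit_size, int cnt)
{
        for (int i = 0; i < cnt && cache->nr < 32; i++) {
                void *obj = toy_pop(recycle);

                if (!obj)
                        obj = calloc(1, unit_size);
                if (!obj)
                        break;
                cache->slots[cache->nr++] = obj;
        }
}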
277 struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);
279 free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
280 atomic_set(&c->call_rcu_ttrace_in_progress, 0);
294 static void enque_to_free(struct bpf_mem_cache *c, void *obj)
301 llist_add(llnode, &c->free_by_rcu_ttrace);
304 static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
308 if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) {
309 if (unlikely(READ_ONCE(c->draining))) {
310 llnode = llist_del_all(&c->free_by_rcu_ttrace);
311 free_all(llnode, !!c->percpu_size);
316 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
317 llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
318 llist_add(llnode, &c->waiting_for_gp_ttrace);
320 if (unlikely(READ_ONCE(c->draining))) {
321 __free_rcu(&c->rcu_ttrace);
330 call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
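do_call_rcu_ttrace() allows only one tasks-trace-RCU batch in flight per cache: atomic_xchg() on call_rcu_ttrace_in_progress is both the test and the claim, the nodes collected on free_by_rcu_ttrace are moved onto waiting_for_gp_ttrace, and __free_rcu() clears the flag once the grace period has elapsed (or the list is freed immediately when the cache is draining). Here is the claim/release pattern in isolation, a sketch using C11 atomics and invented toy_* names.

#include <stdatomic.h>
#include <stdbool.h>

struct toy_batch {
        atomic_int in_progress;         /* stands in for call_rcu_ttrace_in_progress */
};

/* true: we won the right to start a new grace-period batch.
 * false: an earlier batch is still waiting, leave the lists alone.
 */
static bool toy_claim_batch(struct toy_batch *b)
{
        return atomic_exchange(&b->in_progress, 1) == 0;
}

/* Called from the grace-period callback, the way __free_rcu() clears the
 * flag after freeing waiting_for_gp_ttrace.
 */
static void toy_release_batch(struct toy_batch *b)
{
        atomic_store(&b->in_progress, 0);
}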
333 static void free_bulk(struct bpf_mem_cache *c)
335 struct bpf_mem_cache *tgt = c->tgt;
340 WARN_ON_ONCE(tgt->unit_size != c->unit_size);
341 WARN_ON_ONCE(tgt->percpu_size != c->percpu_size);
344 inc_active(c, &flags);
345 llnode = __llist_del_first(&c->free_llist);
347 cnt = --c->free_cnt;
350 dec_active(c, &flags);
353 } while (cnt > (c->high_watermark + c->low_watermark) / 2);
356 llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
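The loop condition in free_bulk() stops draining once free_cnt reaches the midpoint of the two watermarks, so a burst of frees leaves the per-CPU list half full rather than empty. A standalone check of that arithmetic using the small-object defaults from init_refill_work() below (low = 32, high = 96); everything else here is illustrative.

#include <stdio.h>

int main(void)
{
        int low = 32, high = 96;        /* defaults for unit_size <= 256 */
        int cnt = 120;                  /* pretend free_cnt after a burst of frees */

        while (cnt > (high + low) / 2)  /* same stop condition as free_bulk() */
                cnt--;                  /* stands in for freeing one object */

        printf("free_cnt settles at %d, midpoint of %d and %d\n",
               cnt, low, high);
        return 0;
}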
363 struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
364 struct bpf_mem_cache *tgt = c->tgt;
367 WARN_ON_ONCE(tgt->unit_size != c->unit_size);
368 WARN_ON_ONCE(tgt->percpu_size != c->percpu_size);
370 llnode = llist_del_all(&c->waiting_for_gp);
374 llist_add_batch(llnode, c->waiting_for_gp_tail, &tgt->free_by_rcu_ttrace);
379 atomic_set(&c->call_rcu_in_progress, 0);
382 static void check_free_by_rcu(struct bpf_mem_cache *c)
388 if (unlikely(!llist_empty(&c->free_llist_extra_rcu))) {
389 inc_active(c, &flags);
390 llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra_rcu))
391 if (__llist_add(llnode, &c->free_by_rcu))
392 c->free_by_rcu_tail = llnode;
393 dec_active(c, &flags);
396 if (llist_empty(&c->free_by_rcu))
399 if (atomic_xchg(&c->call_rcu_in_progress, 1)) {
412 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
414 inc_active(c, &flags);
415 WRITE_ONCE(c->waiting_for_gp.first, __llist_del_all(&c->free_by_rcu));
416 c->waiting_for_gp_tail = c->free_by_rcu_tail;
417 dec_active(c, &flags);
419 if (unlikely(READ_ONCE(c->draining))) {
420 free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
421 atomic_set(&c->call_rcu_in_progress, 0);
423 call_rcu_hurry(&c->rcu, __free_by_rcu);
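check_free_by_rcu() first folds free_llist_extra_rcu into free_by_rcu under the active guard, then, if no earlier callback is still pending (call_rcu_in_progress), hands the whole batch to waiting_for_gp in O(1) using the remembered free_by_rcu_tail before calling call_rcu_hurry(). The head-plus-tail handoff on its own, sketched with invented toy_* types; the kernel does the same step with llist inside inc_active()/dec_active().

#include <stddef.h>

struct toy_node { struct toy_node *next; };

struct toy_rcu_state {
        struct toy_node *free_by_rcu, *free_by_rcu_tail;
        struct toy_node *waiting_for_gp, *waiting_for_gp_tail;
};

/* Move the entire pending batch under the grace-period list: only the
 * head and tail pointers change hands, no per-node walking.
 */
static void toy_start_grace_period(struct toy_rcu_state *s)
{
        if (!s->free_by_rcu)
                return;                 /* nothing queued for reuse */
        s->waiting_for_gp = s->free_by_rcu;
        s->waiting_for_gp_tail = s->free_by_rcu_tail;
        s->free_by_rcu = NULL;
        s->free_by_rcu_tail = NULL;
}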
429 struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
433 cnt = c->free_cnt;
434 if (cnt < c->low_watermark)
438 alloc_bulk(c, c->batch, NUMA_NO_NODE, true);
439 else if (cnt > c->high_watermark)
440 free_bulk(c);
442 check_free_by_rcu(c);
445 static void notrace irq_work_raise(struct bpf_mem_cache *c)
447 irq_work_queue(&c->refill_work);
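bpf_mem_refill() is the irq_work callback and its policy is a pair of comparisons: below low_watermark it allocates c->batch more objects, above high_watermark it sheds the excess, and in either case it then runs check_free_by_rcu(). The decision on its own, with invented names; the kernel calls alloc_bulk()/free_bulk() where enum values are returned here.

enum toy_action { TOY_NOTHING, TOY_REFILL, TOY_FLUSH };

/* Mirrors the branch structure of bpf_mem_refill(). */
static enum toy_action toy_decide(int free_cnt, int low_watermark, int high_watermark)
{
        if (free_cnt < low_watermark)
                return TOY_REFILL;      /* kernel: alloc_bulk(c, c->batch, ...) */
        if (free_cnt > high_watermark)
                return TOY_FLUSH;       /* kernel: free_bulk(c) */
        return TOY_NOTHING;
}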
465 * memory consumption, set low_mark = 1 and high_mark = 3, resulting in c->batch = 1.
467 static void init_refill_work(struct bpf_mem_cache *c)
469 init_irq_work(&c->refill_work, bpf_mem_refill);
470 if (c->percpu_size) {
471 c->low_watermark = 1;
472 c->high_watermark = 3;
473 } else if (c->unit_size <= 256) {
474 c->low_watermark = 32;
475 c->high_watermark = 96;
482 c->low_watermark = max(32 * 256 / c->unit_size, 1);
483 c->high_watermark = max(96 * 256 / c->unit_size, 3);
485 c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);
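The watermark/batch arithmetic is easy to sanity-check outside the kernel. The sketch below reproduces the two non-percpu branches of init_refill_work() (the percpu case simply sets low = 1 and high = 3, which the same formula turns into batch = 1, matching the comment fragment above); the sample unit sizes are illustrative only. It prints low=32 high=96 batch=48 for 256-byte units, low=2 high=6 batch=3 for 4096, and low=1 high=3 batch=1 for 8192.

#include <stdio.h>

#define TOY_MAX(a, b) ((a) > (b) ? (a) : (b))

static void toy_watermarks(int unit_size)
{
        int low, high, batch;

        if (unit_size <= 256) {
                low = 32;
                high = 96;
        } else {
                low = TOY_MAX(32 * 256 / unit_size, 1);
                high = TOY_MAX(96 * 256 / unit_size, 3);
        }
        batch = TOY_MAX((high - low) / 4 * 3, 1);
        printf("unit=%d low=%d high=%d batch=%d\n", unit_size, low, high, batch);
}

int main(void)
{
        toy_watermarks(256);
        toy_watermarks(4096);
        toy_watermarks(8192);
        return 0;
}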
488 static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
497 if (!c->percpu_size && c->unit_size <= 256)
499 alloc_bulk(c, cnt, cpu_to_node(cpu), false);
512 struct bpf_mem_cache *c; struct bpf_mem_cache __percpu *pc;
540 c = per_cpu_ptr(pc, cpu);
541 c->unit_size = unit_size;
542 c->objcg = objcg;
543 c->percpu_size = percpu_size;
544 c->tgt = c;
545 init_refill_work(c);
546 prefill_mem_cache(c, cpu);
562 c = &cc->cache[i];
563 c->unit_size = sizes[i];
564 c->objcg = objcg;
565 c->percpu_size = percpu_size;
566 c->tgt = c;
568 init_refill_work(c);
569 prefill_mem_cache(c, cpu);
596 struct bpf_mem_cache *c;
611 c = &cc->cache[i];
612 if (c->unit_size)
615 c->unit_size = unit_size;
616 c->objcg = objcg;
617 c->percpu_size = percpu_size;
618 c->tgt = c;
620 init_refill_work(c);
621 prefill_mem_cache(c, cpu);
627 static void drain_mem_cache(struct bpf_mem_cache *c)
629 bool percpu = !!c->percpu_size;
638 free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
639 free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
640 free_all(__llist_del_all(&c->free_llist), percpu);
641 free_all(__llist_del_all(&c->free_llist_extra), percpu);
642 free_all(__llist_del_all(&c->free_by_rcu), percpu);
643 free_all(__llist_del_all(&c->free_llist_extra_rcu), percpu);
644 free_all(llist_del_all(&c->waiting_for_gp), percpu);
647 static void check_mem_cache(struct bpf_mem_cache *c)
649 WARN_ON_ONCE(!llist_empty(&c->free_by_rcu_ttrace));
650 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
651 WARN_ON_ONCE(!llist_empty(&c->free_llist));
652 WARN_ON_ONCE(!llist_empty(&c->free_llist_extra));
653 WARN_ON_ONCE(!llist_empty(&c->free_by_rcu));
654 WARN_ON_ONCE(!llist_empty(&c->free_llist_extra_rcu));
655 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
661 struct bpf_mem_cache *c;
666 c = per_cpu_ptr(ma->cache, cpu);
667 check_mem_cache(c);
674 c = &cc->cache[i];
675 check_mem_cache(c);
745 struct bpf_mem_cache *c;
751 c = per_cpu_ptr(ma->cache, cpu);
752 WRITE_ONCE(c->draining, true);
753 irq_work_sync(&c->refill_work);
754 drain_mem_cache(c);
755 rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
756 rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
766 c = &cc->cache[i];
767 WRITE_ONCE(c->draining, true);
768 irq_work_sync(&c->refill_work);
769 drain_mem_cache(c);
770 rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
771 rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
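The destroy path is the same for both layouts: mark the cache as draining so the RCU callbacks free instead of recycling, wait for any queued refill irq_work, drain every list, and sum the two in-flight flags over all CPUs; only a total of zero lets the kernel free the per-CPU storage immediately, otherwise teardown is deferred until the outstanding grace periods finish. A toy version of that final bookkeeping step, with an invented fixed CPU count.

#include <stdatomic.h>

#define TOY_NR_CPUS 4

struct toy_cache {
        atomic_int call_rcu_in_progress;
        atomic_int call_rcu_ttrace_in_progress;
};

/* Non-zero means some batches are still waiting for a grace period, so
 * the backing memory cannot be torn down synchronously yet.
 */
static int toy_rcu_in_progress(struct toy_cache caches[TOY_NR_CPUS])
{
        int total = 0;

        for (int cpu = 0; cpu < TOY_NR_CPUS; cpu++) {
                total += atomic_load(&caches[cpu].call_rcu_in_progress);
                total += atomic_load(&caches[cpu].call_rcu_ttrace_in_progress);
        }
        return total;
}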
782 static void notrace *unit_alloc(struct bpf_mem_cache *c)
799 if (local_inc_return(&c->active) == 1) {
800 llnode = __llist_del_first(&c->free_llist);
802 cnt = --c->free_cnt;
803 *(struct bpf_mem_cache **)llnode = c;
806 local_dec(&c->active);
810 if (cnt < c->low_watermark)
811 irq_work_raise(c);
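unit_alloc() is the entire fast path: take the active guard, pop one node off the per-CPU free_llist, stamp the owning cache into the node's header slot, drop the guard, and raise irq_work if the list dipped below low_watermark; the caller gets the address just past the header. A single-threaded userspace mirror of that sequence, with invented toy_* names and an array instead of the kernel's llist.

#include <stddef.h>

#define TOY_HDR sizeof(void *)          /* plays the role of LLIST_NODE_SZ */

struct toy_cache {
        int active, free_cnt, low_watermark;
        void *free_llist[64];
        int refill_requested;           /* stands in for irq_work_raise() */
};

static void *toy_unit_alloc(struct toy_cache *c)
{
        void *llnode = NULL;
        int cnt = 0;

        if (++c->active == 1) {         /* sole owner of the list right now */
                if (c->free_cnt) {
                        llnode = c->free_llist[--c->free_cnt];
                        cnt = c->free_cnt;
                        /* live objects carry their owning cache in the header */
                        *(struct toy_cache **)llnode = c;
                }
        }
        --c->active;

        if (cnt < c->low_watermark)
                c->refill_requested = 1; /* kernel queues the refill irq_work */

        return llnode ? (char *)llnode + TOY_HDR : NULL;
}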
825 static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
837 c->tgt = *(struct bpf_mem_cache **)llnode;
840 if (local_inc_return(&c->active) == 1) {
841 __llist_add(llnode, &c->free_llist);
842 cnt = ++c->free_cnt;
850 llist_add(llnode, &c->free_llist_extra);
852 local_dec(&c->active);
854 if (cnt > c->high_watermark)
856 irq_work_raise(c);
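unit_free() runs the same machinery in reverse: it first records which cache originally handed the object out (c->tgt, later used by free_bulk() and __free_by_rcu() as the destination for the recycled nodes), then pushes the node onto free_llist when it wins the active guard or onto free_llist_extra when it does not, and raises irq_work once free_cnt passes high_watermark. A userspace mirror under the same invented toy_* conventions; bounded arrays replace the kernel's unbounded llists.

#include <stddef.h>

#define TOY_HDR sizeof(void *)

struct toy_cache {
        int active, free_cnt, extra_cnt, high_watermark;
        struct toy_cache *tgt;          /* hint: cache that allocated the object */
        void *free_llist[64];
        void *free_llist_extra[64];     /* overflow list for nested callers */
        int flush_requested;            /* stands in for irq_work_raise() */
};

static void toy_unit_free(struct toy_cache *c, void *ptr)
{
        void *llnode = (char *)ptr - TOY_HDR;
        int cnt = 0;

        /* remember the allocating cache, as unit_free() does via c->tgt */
        c->tgt = *(struct toy_cache **)llnode;

        if (++c->active == 1 && c->free_cnt < 64) {
                c->free_llist[c->free_cnt++] = llnode;
                cnt = c->free_cnt;
        } else if (c->extra_cnt < 64) {
                /* nested entry (or full array here): park it on the side list */
                c->free_llist_extra[c->extra_cnt++] = llnode;
        }
        --c->active;

        if (cnt > c->high_watermark)
                c->flush_requested = 1; /* kernel queues the flush irq_work */
}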
865 static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
870 c->tgt = *(struct bpf_mem_cache **)llnode;
873 if (local_inc_return(&c->active) == 1) {
874 if (__llist_add(llnode, &c->free_by_rcu))
875 c->free_by_rcu_tail = llnode;
877 llist_add(llnode, &c->free_llist_extra_rcu);
879 local_dec(&c->active);
881 if (!atomic_read(&c->call_rcu_in_progress))
882 irq_work_raise(c);
909 struct bpf_mem_cache *c;
915 c = *(void **)(ptr - LLIST_NODE_SZ);
916 idx = bpf_mem_cache_idx(c->unit_size);
925 struct bpf_mem_cache *c;
931 c = *(void **)(ptr - LLIST_NODE_SZ);
932 idx = bpf_mem_cache_idx(c->unit_size);
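Both free paths recover everything they need from the hidden header: the pointer stored just below the payload names the allocating cache, and its unit_size is then mapped to a size-class index (bpf_mem_cache_idx(), not reproduced here). The read-back step alone, sketched with invented toy_* names.

#include <stddef.h>

#define TOY_HDR sizeof(void *)

struct toy_cache { size_t unit_size; };

/* Step back over the header to find the cache that allocated 'ptr',
 * the same trick as 'c = *(void **)(ptr - LLIST_NODE_SZ)'.
 */
static struct toy_cache *toy_owner_of(void *ptr)
{
        return *(struct toy_cache **)((char *)ptr - TOY_HDR);
}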
987 struct bpf_mem_cache *c;
990 c = this_cpu_ptr(ma->cache);
992 ret = unit_alloc(c);
996 memcg = get_memcg(c);
998 ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
1000 *(struct bpf_mem_cache **)ret = c;
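bpf_mem_cache_alloc_flags() falls back to the allocator only when the per-CPU cache comes up empty and the caller passed GFP_KERNEL: it then calls __alloc() directly and stamps the owner header by hand, exactly what unit_alloc() would have done. A sketch of that control flow under the usual invented toy_* names, with calloc in place of __alloc() and a boolean in place of the gfp check.

#include <stdlib.h>

#define TOY_HDR sizeof(void *)

struct toy_cache { size_t unit_size; }; /* unit_size includes TOY_HDR, as in the kernel */

/* Stand-in for an empty per-CPU cache; the real unit_alloc() may succeed. */
static void *toy_unit_alloc(struct toy_cache *c)
{
        (void)c;
        return NULL;
}

static void *toy_alloc_flags(struct toy_cache *c, int may_sleep)
{
        void *ret = toy_unit_alloc(c);

        if (!ret && may_sleep) {
                ret = calloc(1, c->unit_size);
                if (ret)
                        *(struct toy_cache **)ret = c;  /* stamp the owner */
        }
        return ret ? (char *)ret + TOY_HDR : NULL;      /* skip the header */
}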