Lines Matching defs:cpu — kernel/bpf/memalloc.c
16 * Front-end kmalloc() with per-cpu per-bucket cache of free elements.
27 * It's safe to allocate from the cache of the current cpu with irqs disabled.
28 * Freeing is always done into the bucket of the current cpu as well.
31 * of freeing objects allocated by one cpu and freed on another.
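Lines 16-31 sketch the overall design: a kmalloc front end where each size bucket keeps a per-cpu cache of free elements, allocation from the current cpu's cache is safe with irqs disabled, freeing always goes into the current cpu's bucket, and the global kmalloc pool levels out cross-cpu imbalance. A condensed sketch of what that front end looks like, reconstructed from these comments (unit_alloc() pops the current cpu's free list under the same 'active' protocol that unit_free(), sketched at the end, uses to push):

    /* map the requested size to a bucket, then pop from this cpu's cache */
    void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
    {
            void *ret;
            int idx;

            if (!size)
                    return NULL;
            /* every element is prefixed by its llist_node */
            idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
            if (idx < 0)
                    return NULL;
            ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
            return !ret ? NULL : ret + LLIST_NODE_SZ;
    }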
82 /* per-cpu list of free objects of size 'unit_size'.
90 * are sequenced by per-cpu 'active' counter. But unit_free() cannot
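Lines 82 and 90 come from the bpf_mem_cache definition and the comment above its free lists. A minimal sketch of the fields those two comments imply (the watermark and batch fields are assumptions carried by later fragments):

    struct bpf_mem_cache {
            /* per-cpu list of free objects of size 'unit_size'; accessed
             * with irqs disabled under the 'active' counter via
             * __llist_add() and __llist_del_first()
             */
            struct llist_head free_llist;
            local_t active;
            /* unit_free() cannot fail, so when 'active' is busy (an NMI
             * prog interrupted this cpu mid-operation) objects land here
             */
            struct llist_head free_llist_extra;
            struct irq_work refill_work;
            int unit_size;
            /* non-zero for percpu allocs: llist_node + per-cpu pointer */
            int percpu_size;
            int free_cnt;
            int low_watermark, high_watermark, batch;
    };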
172 /* In RT, irq_work runs in a per-cpu kthread, so disable
174 * reduce the chance of a bpf prog executing on this cpu
242 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
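Lines 172-242 are from the refill path. Two constraints show through: on PREEMPT_RT the irq_work runs in a per-cpu kthread, so irqs are disabled around list updates, and refill allocations deliberately avoid GFP_ATOMIC so they fail fast rather than drain the atomic reserves. A hedged reconstruction of that loop (the exact gfp mix is an assumption; the 'atomic' flag matches the call on line 499):

    static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node,
                           bool atomic)
    {
            gfp_t gfp = (atomic ? GFP_NOWAIT : GFP_KERNEL) | __GFP_NOWARN;
            unsigned long flags;
            void *obj;
            int i;

            for (i = 0; i < cnt; i++) {
                    /* irq_work runs on this cpu, so kmalloc lands on the
                     * local node; GFP_NOWAIT fails fast instead of
                     * depleting atomic reserves the way GFP_ATOMIC would
                     */
                    obj = kmalloc_node(c->unit_size, gfp, node);
                    if (!obj)
                            break;
                    /* in RT this runs in a kthread: disable irqs to keep
                     * the window where 'active' is busy as small as
                     * possible
                     */
                    local_irq_save(flags);
                    WARN_ON_ONCE(local_inc_return(&c->active) != 1);
                    __llist_add(obj, &c->free_llist);
                    c->free_cnt++;
                    local_dec(&c->active);
                    local_irq_restore(flags);
            }
    }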
298 /* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
435 /* irq_work runs on this cpu and kmalloc will allocate
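Lines 298 and 435 describe the irq_work callback itself: it trims overfull buckets back to the global kmalloc pool (the free_bulk() side hinted at by line 298) and refills low ones, relying on the fact that it runs on the owning cpu so plain kmalloc allocates from the local NUMA node. A plausible shape for that callback, with watermark logic assumed from the struct sketch above:

    static void bpf_mem_refill(struct irq_work *work)
    {
            struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache,
                                                   refill_work);
            int cnt;

            /* racy read is fine: worst case we trim or refill a bit late */
            cnt = c->free_cnt;
            if (cnt < c->low_watermark)
                    /* NUMA_NO_NODE on this cpu means the local node */
                    alloc_bulk(c, c->batch, NUMA_NO_NODE, true);
            else if (cnt > c->high_watermark)
                    free_bulk(c);
    }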
451 * the freelist cache will be elem_size * 64 (or less) on each cpu.
456 * on each cpu will be:
460 * consume ~ 11 Kbyte per cpu.
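The surviving fragments of the sizing comment (lines 451-460) can be checked against the 11 size classes listed at the top of the file (16 32 64 96 128 196 256 512 1024 2048 4096), assuming 4 prefilled elements per bucket up to 256 bytes and 1 per larger bucket, as in prefill_mem_cache() below:

    4 * (16 + 32 + 64 + 96 + 128 + 196 + 256) = 3152 bytes
    1 * (512 + 1024 + 2048 + 4096)            = 7680 bytes
    total                                     = 10832 bytes ~= 11 Kbyte per cpu

The 4-vs-1 split is an assumption; only the ~11 Kbyte total is stated in the matched line.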
488 static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
499 alloc_bulk(c, cnt, cpu_to_node(cpu), false);
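Line 499 is the lone statement of prefill_mem_cache(); restoring its surroundings (the 'cnt' heuristic is an assumption consistent with the ~11 Kbyte budget above):

    static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
    {
            int cnt = 1;

            /* assume the first run of a bpf prog won't do more than a few
             * map updates from an irq-disabled region; prefill small
             * buckets deeper, and on the cpu's home NUMA node
             */
            if (!c->percpu_size && c->unit_size <= 256)
                    cnt = 4;
            alloc_bulk(c, cnt, cpu_to_node(cpu), false);
    }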
502 /* When size != 0 allocate a bpf_mem_cache for each cpu.
505 * When size == 0 allocate 11 bpf_mem_cache-s (one per size class) for each cpu, then rely on
514 int cpu, i, unit_size, percpu_size = 0;
519 /* room for llist_node and per-cpu pointer */
539 for_each_possible_cpu(cpu) {
540 c = per_cpu_ptr(pc, cpu);
546 prefill_mem_cache(c, cpu);
559 for_each_possible_cpu(cpu) {
560 cc = per_cpu_ptr(pcc, cpu);
569 prefill_mem_cache(c, cpu);
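Lines 502-569 cover bpf_mem_alloc_init(): the fixed-size path builds one bpf_mem_cache per cpu, the size == 0 path builds all 11 buckets per cpu. A condensed sketch of the two loops (sizes[] and NUM_CACHES are the file's size-class table; error unwinding is omitted):

    int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
    {
            struct bpf_mem_caches __percpu *pcc;
            struct bpf_mem_cache __percpu *pc;
            struct bpf_mem_cache *c;
            int cpu, i, percpu_size = 0;

            if (size) {
                    pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
                    if (!pc)
                            return -ENOMEM;
                    if (percpu)
                            /* room for llist_node and per-cpu pointer */
                            percpu_size = LLIST_NODE_SZ + sizeof(void *);
                    else
                            size += LLIST_NODE_SZ; /* room for llist_node */
                    for_each_possible_cpu(cpu) {
                            c = per_cpu_ptr(pc, cpu);
                            c->unit_size = size;
                            c->percpu_size = percpu_size;
                            prefill_mem_cache(c, cpu);
                    }
                    ma->cache = pc;
                    return 0;
            }

            /* size == 0: one bucket per kmalloc size class, 11 per cpu */
            pcc = __alloc_percpu_gfp(sizeof(*pcc), 8, GFP_KERNEL);
            if (!pcc)
                    return -ENOMEM;
            for_each_possible_cpu(cpu) {
                    struct bpf_mem_caches *cc = per_cpu_ptr(pcc, cpu);

                    for (i = 0; i < NUM_CACHES; i++) {
                            c = &cc->cache[i];
                            c->unit_size = sizes[i];
                            prefill_mem_cache(c, cpu);
                    }
            }
            ma->caches = pcc;
            return 0;
    }

A fixed-size caller (e.g. a hash map whose elements are all equal) passes its element size; bpf_dynptr/bpf_kptr-style callers pass 0 and get the whole bucket array.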
594 int cpu, i, unit_size, percpu_size;
602 /* room for llist_node and per-cpu pointer */
609 for_each_possible_cpu(cpu) {
610 cc = per_cpu_ptr(pcc, cpu);
621 prefill_mem_cache(c, cpu);
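Lines 594-621 are a second init path with the same per-cpu prefill loop shape. A hypothetical caller-side sketch of how these initializers pair with teardown ('htab' and 'elem_size' are illustrative names, not from this file):

    err = bpf_mem_alloc_init(&htab->ma, elem_size, false);
    if (err)
            return err;
    /* ... allocate/free elements via bpf_mem_cache_alloc/free ... */
    bpf_mem_alloc_destroy(&htab->ma);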
662 int cpu, i;
665 for_each_possible_cpu(cpu) {
666 c = per_cpu_ptr(ma->cache, cpu);
671 for_each_possible_cpu(cpu) {
672 cc = per_cpu_ptr(ma->caches, cpu);
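Lines 662-672 walk every possible cpu, first over the single-bucket cache and then over all 11 buckets, to drain them. A minimal sketch of that double loop (drain_mem_cache() is the file's per-bucket teardown helper):

    if (ma->cache) {
            for_each_possible_cpu(cpu) {
                    c = per_cpu_ptr(ma->cache, cpu);
                    drain_mem_cache(c);
            }
    }
    if (ma->caches) {
            for_each_possible_cpu(cpu) {
                    cc = per_cpu_ptr(ma->caches, cpu);
                    for (i = 0; i < NUM_CACHES; i++)
                            drain_mem_cache(&cc->cache[i]);
            }
    }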
746 int cpu, i, rcu_in_progress;
750 for_each_possible_cpu(cpu) {
751 c = per_cpu_ptr(ma->cache, cpu);
763 for_each_possible_cpu(cpu) {
764 cc = per_cpu_ptr(ma->caches, cpu);
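Lines 746-764 layer RCU accounting on top of the same loop shape: destruction must wait until no call_rcu() callbacks are still in flight for any cpu's cache. A hedged sketch of the fixed-size half (the call_rcu_in_progress and destroy_mem_alloc() names follow the file's style but are assumptions here):

    if (ma->cache) {
            rcu_in_progress = 0;
            for_each_possible_cpu(cpu) {
                    c = per_cpu_ptr(ma->cache, cpu);
                    irq_work_sync(&c->refill_work);
                    drain_mem_cache(c);
                    rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
            }
            /* objects may still sit on RCU-delayed free lists; defer the
             * final free_percpu() until those callbacks have finished
             */
            destroy_mem_alloc(ma, rcu_in_progress);
    }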
795 * Use per-cpu 'active' counter to order free_list access between
821 /* Though the 'ptr' object could have been allocated on a different cpu,
822 * add it to the free_llist of the current cpu.
846 * a per-cpu list we have to use atomic llist_add here, since
855 /* free a few objects from the current cpu into the global kmalloc pool */
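Lines 795-855 describe the free path end to end: the per-cpu 'active' counter orders free_list access, objects are always pushed onto the current cpu's bucket regardless of where they were allocated, a reentrant hit falls back to the atomic llist_add() onto free_llist_extra, and crossing the high watermark kicks the irq_work that returns surplus objects to the global kmalloc pool. A condensed sketch of unit_free() under those rules (irq_work_raise() is the helper that queues the refill/trim work):

    static void unit_free(struct bpf_mem_cache *c, void *ptr)
    {
            struct llist_node *llnode = ptr - LLIST_NODE_SZ;
            unsigned long flags;
            int cnt = 0;

            local_irq_save(flags);
            if (local_inc_return(&c->active) == 1) {
                    /* this cpu owns the list, so a non-atomic push is
                     * enough, even though 'ptr' may have been allocated
                     * on a different cpu
                     */
                    __llist_add(llnode, &c->free_llist);
                    cnt = ++c->free_cnt;
            } else {
                    /* an NMI-level prog interrupted us while 'active' was
                     * held; unit_free() cannot fail, so fall back to the
                     * lock-free extra list with an atomic llist_add()
                     */
                    llist_add(llnode, &c->free_llist_extra);
            }
            local_dec(&c->active);
            local_irq_restore(flags);

            if (cnt > c->high_watermark)
                    /* return surplus objects to the global kmalloc pool */
                    irq_work_raise(c);
    }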