Lines matching full:rcpu — every use of the struct bpf_cpu_map_entry pointer rcpu in the kernel's cpumap implementation (kernel/bpf/cpumap.c)
139 static u32 cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu, in cpu_map_bpf_prog_run_skb() argument
150 act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog); in cpu_map_bpf_prog_run_skb()
157 rcpu->prog); in cpu_map_bpf_prog_run_skb()
166 bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act); in cpu_map_bpf_prog_run_skb()
169 trace_xdp_exception(skb->dev, rcpu->prog, act); in cpu_map_bpf_prog_run_skb()
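The matches above are the skb (generic-XDP) path: each queued skb is run through the cpumap-attached program and dispatched on its verdict. A minimal sketch of the per-skb switch these lines sit in, reconstructed around the calls shown; the stats field names and the redirect branch follow the usual XDP verdict handling and are assumptions, not verbatim source:

	act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog);
	switch (act) {
	case XDP_PASS:
		break;				/* kept; handed to GRO later */
	case XDP_REDIRECT:
		if (xdp_do_generic_redirect(skb->dev, skb, &xdp, rcpu->prog))
			stats->drop++;		/* redirect failed */
		else
			stats->redirect++;	/* skb consumed */
		break;
	default:
		bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(skb->dev, rcpu->prog, act);
		fallthrough;
	case XDP_DROP:
		kfree_skb(skb);			/* drop */
		stats->drop++;
		break;
	}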
183 static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, in cpu_map_bpf_prog_run_xdp() argument
204 act = bpf_prog_run_xdp(rcpu->prog, &xdp); in cpu_map_bpf_prog_run_xdp()
217 rcpu->prog); in cpu_map_bpf_prog_run_xdp()
226 bpf_warn_invalid_xdp_action(xdpf->dev_rx, rcpu->prog, act); in cpu_map_bpf_prog_run_xdp()
229 trace_xdp_exception(xdpf->dev_rx, rcpu->prog, act); in cpu_map_bpf_prog_run_xdp()
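The frame path is the same dispatch, but it starts from an xdp_frame that must be converted back to a live xdp_buff before bpf_prog_run_xdp() can run, and written back afterwards if the program passed it. A sketch around the calls above, with error handling abbreviated (the conversion helpers are the standard net/xdp ones):

	struct xdp_buff xdp;
	u32 act;

	xdp_convert_frame_to_buff(xdpf, &xdp);	/* rebuild the buff view */
	act = bpf_prog_run_xdp(rcpu->prog, &xdp);
	switch (act) {
	case XDP_PASS:
		/* the program may have moved head/tail: write it back */
		if (xdp_update_frame_from_buff(&xdp, xdpf) < 0)
			xdp_return_frame(xdpf);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(xdpf->dev_rx, &xdp, rcpu->prog))
			xdp_return_frame(xdpf);
		break;
	default:
		bpf_warn_invalid_xdp_action(xdpf->dev_rx, rcpu->prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(xdpf->dev_rx, rcpu->prog, act);
		fallthrough;
	case XDP_DROP:
		xdp_return_frame(xdpf);
		break;
	}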
250 static void cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames, in cpu_map_bpf_prog_run() argument
256 if (!rcpu->prog) in cpu_map_bpf_prog_run()
263 ret->xdp_n = cpu_map_bpf_prog_run_xdp(rcpu, frames, ret->xdp_n, stats); in cpu_map_bpf_prog_run()
265 ret->skb_n = cpu_map_bpf_prog_run_skb(rcpu, skbs, ret->skb_n, in cpu_map_bpf_prog_run()
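Both paths hang off this one dispatcher, and the !rcpu->prog early return is what keeps a plain queue-only cpumap entry cheap. The shape, with locking and direct-return setup simplified:

	if (!rcpu->prog)
		return;

	rcu_read_lock();
	ret->xdp_n = cpu_map_bpf_prog_run_xdp(rcpu, frames, ret->xdp_n, stats);
	if (ret->skb_n)
		ret->skb_n = cpu_map_bpf_prog_run_skb(rcpu, skbs, ret->skb_n,
						      stats);
	rcu_read_unlock();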
280 static void cpu_map_gro_flush(struct bpf_cpu_map_entry *rcpu, bool empty) in cpu_map_gro_flush() argument
290 gro_flush_normal(&rcpu->gro, !empty && HZ >= 1000); in cpu_map_gro_flush()
295 struct bpf_cpu_map_entry *rcpu = data; in cpu_map_kthread_run() local
299 complete(&rcpu->kthread_running); in cpu_map_kthread_run()
302 /* When the kthread gets the stop order, rcpu has been disconnected in cpu_map_kthread_run()
307 while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
317 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
320 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
337 n = __ptr_ring_consume_batched(rcpu->queue, frames, in cpu_map_kthread_run()
364 cpu_map_bpf_prog_run(rcpu, frames, skbs, &ret, &stats); in cpu_map_kthread_run()
392 trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops, in cpu_map_kthread_run()
396 gro_receive_skb(&rcpu->gro, skbs[i]); in cpu_map_kthread_run()
400 empty = __ptr_ring_empty(rcpu->queue); in cpu_map_kthread_run()
402 cpu_map_gro_flush(rcpu, empty); in cpu_map_kthread_run()
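Stitched together, the kthread is a textbook consumer loop, and the doubled __ptr_ring_empty() test at lines 317 and 320 is the race-free sleep idiom: set the task state first, re-check, and only then schedule(), so a producer's wake_up_process() cannot be lost. A condensed skeleton of the matches above (stats accounting, skb allocation, and the frame/skb split are elided or marked):

	complete(&rcpu->kthread_running);

	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
		struct xdp_cpumap_stats stats = {};
		struct cpu_map_ret ret = {};
		void *frames[CPUMAP_BATCH];
		void *skbs[CPUMAP_BATCH];
		bool empty;
		int i, n;

		if (__ptr_ring_empty(rcpu->queue)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* re-check after setting state: avoids losing a
			 * concurrent wake-up from a producer */
			if (__ptr_ring_empty(rcpu->queue))
				schedule();
			else
				__set_current_state(TASK_RUNNING);
			continue;
		}

		n = __ptr_ring_consume_batched(rcpu->queue, frames,
					       CPUMAP_BATCH);
		/* the n consumed pointers get split into xdp frames vs
		 * tagged skbs, filling ret.xdp_n / ret.skb_n (elided) */
		cpu_map_bpf_prog_run(rcpu, frames, skbs, &ret, &stats);

		for (i = 0; i < ret.skb_n; i++)
			gro_receive_skb(&rcpu->gro, skbs[i]);

		/* flush GRO only as hard as needed: if more work is
		 * already queued, keep aggregation state alive */
		empty = __ptr_ring_empty(rcpu->queue);
		cpu_map_gro_flush(rcpu, empty);
	}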
413 static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, in __cpu_map_load_bpf_program() argument
428 rcpu->value.bpf_prog.id = prog->aux->id; in __cpu_map_load_bpf_program()
429 rcpu->prog = prog; in __cpu_map_load_bpf_program()
440 struct bpf_cpu_map_entry *rcpu; in __cpu_map_entry_alloc() local
446 rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa); in __cpu_map_entry_alloc()
447 if (!rcpu) in __cpu_map_entry_alloc()
451 rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq), in __cpu_map_entry_alloc()
453 if (!rcpu->bulkq) in __cpu_map_entry_alloc()
457 bq = per_cpu_ptr(rcpu->bulkq, i); in __cpu_map_entry_alloc()
458 bq->obj = rcpu; in __cpu_map_entry_alloc()
463 rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp, in __cpu_map_entry_alloc()
465 if (!rcpu->queue) in __cpu_map_entry_alloc()
468 err = ptr_ring_init(rcpu->queue, value->qsize, gfp); in __cpu_map_entry_alloc()
472 rcpu->cpu = cpu; in __cpu_map_entry_alloc()
473 rcpu->map_id = map->id; in __cpu_map_entry_alloc()
474 rcpu->value.qsize = value->qsize; in __cpu_map_entry_alloc()
475 gro_init(&rcpu->gro); in __cpu_map_entry_alloc()
478 err = __cpu_map_load_bpf_program(rcpu, map, fd); in __cpu_map_entry_alloc()
484 init_completion(&rcpu->kthread_running); in __cpu_map_entry_alloc()
485 rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa, in __cpu_map_entry_alloc()
488 if (IS_ERR(rcpu->kthread)) { in __cpu_map_entry_alloc()
489 err = PTR_ERR(rcpu->kthread); in __cpu_map_entry_alloc()
494 kthread_bind(rcpu->kthread, cpu); in __cpu_map_entry_alloc()
495 wake_up_process(rcpu->kthread); in __cpu_map_entry_alloc()
501 wait_for_completion(&rcpu->kthread_running); in __cpu_map_entry_alloc()
503 return rcpu; in __cpu_map_entry_alloc()
506 if (rcpu->prog) in __cpu_map_entry_alloc()
507 bpf_prog_put(rcpu->prog); in __cpu_map_entry_alloc()
509 gro_cleanup(&rcpu->gro); in __cpu_map_entry_alloc()
510 ptr_ring_cleanup(rcpu->queue, NULL); in __cpu_map_entry_alloc()
512 kfree(rcpu->queue); in __cpu_map_entry_alloc()
514 free_percpu(rcpu->bulkq); in __cpu_map_entry_alloc()
516 kfree(rcpu); in __cpu_map_entry_alloc()
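Two constructor details worth noting. The completion handshake: the kthread is created, bound to the target CPU, woken, and then the allocator blocks until the thread has really started, which is what lets the free path rely on kthread_stop() unconditionally. And on failure, everything set up so far is unwound in reverse. A sketch of the tail of __cpu_map_entry_alloc() assembled from the matches; the single flat error label is an approximation of the real unwind ladder:

	init_completion(&rcpu->kthread_running);
	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu,
					       numa, "cpumap/%d/map:%d",
					       cpu, map->id);
	if (IS_ERR(rcpu->kthread)) {
		err = PTR_ERR(rcpu->kthread);
		goto free_all;
	}

	kthread_bind(rcpu->kthread, cpu);	/* pin to the target CPU */
	wake_up_process(rcpu->kthread);
	/* make sure the kthread runs before anyone can ask to stop it */
	wait_for_completion(&rcpu->kthread_running);
	return rcpu;

free_all:					/* label approximated */
	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
	gro_cleanup(&rcpu->gro);
	ptr_ring_cleanup(rcpu->queue, NULL);
	kfree(rcpu->queue);
	free_percpu(rcpu->bulkq);
	kfree(rcpu);
	return ERR_PTR(err);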
522 struct bpf_cpu_map_entry *rcpu; in __cpu_map_entry_free() local
529 rcpu = container_of(to_rcu_work(work), struct bpf_cpu_map_entry, free_work); in __cpu_map_entry_free()
535 kthread_stop(rcpu->kthread); in __cpu_map_entry_free()
537 if (rcpu->prog) in __cpu_map_entry_free()
538 bpf_prog_put(rcpu->prog); in __cpu_map_entry_free()
539 gro_cleanup(&rcpu->gro); in __cpu_map_entry_free()
541 __cpu_map_ring_cleanup(rcpu->queue); in __cpu_map_entry_free()
542 ptr_ring_cleanup(rcpu->queue, NULL); in __cpu_map_entry_free()
543 kfree(rcpu->queue); in __cpu_map_entry_free()
544 free_percpu(rcpu->bulkq); in __cpu_map_entry_free()
545 kfree(rcpu); in __cpu_map_entry_free()
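Teardown order is load-bearing here: kthread_stop() only returns once the kthread has left its loop, and the loop condition keeps it running until the ring is drained, so the prog reference, GRO state, and ring are torn down with no consumer left. __cpu_map_ring_cleanup() then returns the memory of anything that still slipped into the ring; its body is not in this listing, but it has to do roughly this:

	static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
	{
		void *ptr;

		while ((ptr = ptr_ring_consume(ring))) {
			/* entries can also be tagged skbs rather than
			 * xdp_frames; the tag check is elided here */
			xdp_return_frame(ptr);
		}
	}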
557 u32 key_cpu, struct bpf_cpu_map_entry *rcpu) in __cpu_map_entry_replace() argument
561 old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu))); in __cpu_map_entry_replace()
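Replacement is one atomic pointer swap, but readers that fetched the old entry under RCU may still be using it, so it is retired via deferred work instead of being freed inline. The retire step that follows the xchg() above, reconstructed (the free_work field name comes from the free path at line 529):

	old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu],
				      RCU_INITIALIZER(rcpu)));
	if (old_rcpu) {
		INIT_RCU_WORK(&old_rcpu->free_work, __cpu_map_entry_free);
		queue_rcu_work(system_wq, &old_rcpu->free_work);
	}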
586 struct bpf_cpu_map_entry *rcpu; in cpu_map_update_elem() local
606 rcpu = NULL; /* Same as deleting */ in cpu_map_update_elem()
609 rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu); in cpu_map_update_elem()
610 if (IS_ERR(rcpu)) in cpu_map_update_elem()
611 return PTR_ERR(rcpu); in cpu_map_update_elem()
614 __cpu_map_entry_replace(cmap, key_cpu, rcpu); in cpu_map_update_elem()
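The update path ties allocation and replacement together: a value with qsize 0 publishes a NULL entry (identical to delete), anything else builds a fresh entry, kthread included, and swaps it in. Roughly:

	if (cpumap_value.qsize == 0) {
		rcpu = NULL;			/* same as deleting */
	} else {
		/* an updated value means a new kthread and queue */
		rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
		if (IS_ERR(rcpu))
			return PTR_ERR(rcpu);
	}
	rcu_read_lock();
	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
	rcu_read_unlock();
	return 0;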
638 struct bpf_cpu_map_entry *rcpu; in cpu_map_free() local
640 rcpu = rcu_dereference_raw(cmap->cpu_map[i]); in cpu_map_free()
641 if (!rcpu) in cpu_map_free()
645 __cpu_map_entry_free(&rcpu->free_work.work); in cpu_map_free()
658 struct bpf_cpu_map_entry *rcpu; in __cpu_map_lookup_elem() local
663 rcpu = rcu_dereference_check(cmap->cpu_map[key], in __cpu_map_lookup_elem()
665 return rcpu; in __cpu_map_lookup_elem()
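Lookup is entirely lockless. rcu_dereference_check() both documents and, under lockdep, verifies the calling context; a sketch of the whole helper, where the check condition is assumed to be the bh variant used on the XDP fast path:

	static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
	{
		struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
		struct bpf_cpu_map_entry *rcpu;

		if (key >= map->max_entries)
			return NULL;

		rcpu = rcu_dereference_check(cmap->cpu_map[key],
					     rcu_read_lock_bh_held());
		return rcpu;
	}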
670 struct bpf_cpu_map_entry *rcpu = in cpu_map_lookup_elem() local
673 return rcpu ? &rcpu->value : NULL; in cpu_map_lookup_elem()
725 struct bpf_cpu_map_entry *rcpu = bq->obj; in bq_flush_to_queue() local
727 const int to_cpu = rcpu->cpu; in bq_flush_to_queue()
736 q = rcpu->queue; in bq_flush_to_queue()
756 trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu); in bq_flush_to_queue()
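The flush takes the remote ring's producer lock once per batch rather than once per frame; entries that no longer fit are dropped with their frames returned, and both counts feed the tracepoint. A condensed sketch of what sits between the q = rcpu->queue assignment and the tracepoint:

	spin_lock(&q->producer_lock);
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		if (__ptr_ring_produce(q, xdpf)) {
			drops++;		/* ring full: drop frame */
			xdp_return_frame_rx_napi(xdpf);
		}
		processed++;
	}
	bq->count = 0;
	spin_unlock(&q->producer_lock);

	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);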
763 static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf) in bq_enqueue() argument
767 local_lock_nested_bh(&rcpu->bulkq->bq_lock); in bq_enqueue()
768 bq = this_cpu_ptr(rcpu->bulkq); in bq_enqueue()
790 local_unlock_nested_bh(&rcpu->bulkq->bq_lock); in bq_enqueue()
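Enqueue never touches the remote ring directly: frames are staged in this CPU's bulk queue and only flushed once a batch fills, amortizing the producer-lock cost across CPU_MAP_BULK_SIZE frames. local_lock_nested_bh() makes the per-CPU exclusivity explicit, which keeps this correct on PREEMPT_RT where softirq context alone no longer guarantees it. Between the lock and unlock above, the body is roughly (the flush-list bookkeeping is approximated):

	bq = this_cpu_ptr(rcpu->bulkq);
	if (bq->count == CPU_MAP_BULK_SIZE)
		bq_flush_to_queue(bq);		/* batch full: push it out */

	bq->q[bq->count++] = xdpf;

	/* remember non-empty bulk queues so the flush hook that runs at
	 * the end of the NAPI poll can find and flush them */
	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);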
793 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf, in cpu_map_enqueue() argument
799 bq_enqueue(rcpu, xdpf); in cpu_map_enqueue()
803 int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, in cpu_map_generic_redirect() argument
812 ret = ptr_ring_produce(rcpu->queue, skb); in cpu_map_generic_redirect()
816 wake_up_process(rcpu->kthread); in cpu_map_generic_redirect()
818 trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu); in cpu_map_generic_redirect()
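The generic (skb) redirect path skips the bulk queue entirely: one skb is produced straight into the ring, tagged so the consumer can distinguish it from an xdp_frame, and the kthread is woken at once. A sketch of the full helper; the pointer-tagging macro is internal to cpumap and assumed here:

	int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
				     struct sk_buff *skb)
	{
		int ret;

		__skb_pull(skb, skb->mac_len);
		skb_set_redirected(skb, false);
		__ptr_set_bit(0, &skb);		/* tag: entry is an skb */

		ret = ptr_ring_produce(rcpu->queue, skb);
		if (ret < 0)
			goto trace;

		wake_up_process(rcpu->kthread);
	trace:
		/* !ret counts it as processed, !!ret as dropped */
		trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu);
		return ret;
	}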