Lines matching "feedback loop" in kernel/bpf/cpumap.c:
// SPDX-License-Identifier: GPL-2.0-only

 * CPU will do SKB-allocation and call the normal network stack.
 * basically allows for 10G wirespeed pre-filtering via bpf.
 * will be stored/queued for at most one driver ->poll() call. It is

#define CPU_MAP_BULK_SIZE 8 /* 8 == one cacheline on 64-bit archs */
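The redirect path that the header comments describe is driven from an XDP program targeting a BPF_MAP_TYPE_CPUMAP map via bpf_redirect_map(). Below is a minimal BPF-side sketch; it is not part of cpumap.c, and the map/program names (cpu_map, xdp_redirect_cpu), the map size, and the fixed target CPU index are illustrative assumptions.

/* Hypothetical BPF-side sketch (not from cpumap.c): redirect every frame to
 * the cpumap entry at index 2. The entry (queue size, optional second-stage
 * program) must already have been installed from user space.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_CPUMAP);
        __uint(max_entries, 64);
        __type(key, __u32);
        __type(value, struct bpf_cpumap_val);
} cpu_map SEC(".maps");

SEC("xdp")
int xdp_redirect_cpu(struct xdp_md *ctx)
{
        __u32 cpu_idx = 2;      /* illustrative target CPU index */

        /* Returns XDP_REDIRECT on success; with flags == 0, a missing
         * entry yields XDP_ABORTED.
         */
        return bpf_redirect_map(&cpu_map, cpu_idx, 0);
}

char _license[] SEC("license") = "GPL";

The kernel-side excerpts that follow are what service such redirects: per-CPU bulk queues (CPU_MAP_BULK_SIZE) feed a per-entry ptr_ring, which a kthread bound to the target CPU drains.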
/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
/* Queue with potential multi-producers, and single-consumer kthread */
In cpu_map_alloc():
        u32 value_size = attr->value_size;

        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->map_flags & ~BPF_F_NUMA_NODE)
                return ERR_PTR(-EINVAL);

        /* Pre-limit array size based on NR_CPUS, not final CPU check */
        if (attr->max_entries > NR_CPUS)
                return ERR_PTR(-E2BIG);

                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&cmap->map, attr);

        cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
                                           sizeof(struct bpf_cpu_map_entry *),
                                           cmap->map.numa_node);
        if (!cmap->cpu_map) {
                return ERR_PTR(-ENOMEM);

        return &cmap->map;
In __cpu_map_ring_cleanup():
        /* The tear-down procedure should have made sure that queue is
         * empty. See __cpu_map_entry_replace() and work-queue
In cpu_map_bpf_prog_run_skb():
        act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog);

        err = xdp_do_generic_redirect(skb->dev, skb, &xdp,
                                      rcpu->prog);
        stats->drop++;
        stats->redirect++;

        bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
        trace_xdp_exception(skb->dev, rcpu->prog, act);
        stats->drop++;

        stats->pass += pass;
In cpu_map_bpf_prog_run_xdp():
        rxq.dev = xdpf->dev_rx;
        rxq.mem.type = xdpf->mem_type;

        act = bpf_prog_run_xdp(rcpu->prog, &xdp);

        stats->drop++;

        err = xdp_do_redirect(xdpf->dev_rx, &xdp,
                              rcpu->prog);
        stats->drop++;
        stats->redirect++;

        bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
        stats->drop++;

        stats->pass += nframes;
In cpu_map_bpf_prog_run():
        if (!rcpu->prog)

        ret->xdp_n = cpu_map_bpf_prog_run_xdp(rcpu, frames, ret->xdp_n, stats);
        if (unlikely(ret->skb_n))
                ret->skb_n = cpu_map_bpf_prog_run_skb(rcpu, skbs, ret->skb_n,
                                                      stats);
        if (stats->redirect)

        if (unlikely(ret->skb_n) && ret->xdp_n)
                memmove(&skbs[ret->xdp_n], skbs, ret->skb_n * sizeof(*skbs));
In cpu_map_gro_flush():
        gro_flush(&rcpu->gro, !empty && HZ >= 1000);
        gro_normal_list(&rcpu->gro);
In cpu_map_kthread_run():
        complete(&rcpu->kthread_running);

         * from map, thus no new packets can enter. Remaining in-flight

        while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {

                if (__ptr_ring_empty(rcpu->queue)) {
                        /* Recheck to avoid lost wake-up */
                        if (__ptr_ring_empty(rcpu->queue)) {

                 * consume side valid as no-resize allowed of queue.
                n = __ptr_ring_consume_batched(rcpu->queue, frames,

                kmem_alloc_drops += ret.xdp_n - m;

                /* Can fail only when !skb -- already handled above */
                __xdp_build_skb_from_frame(xdpf, skbs[i], xdpf->dev_rx);

                /* Feedback loop via tracepoint.
                trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,

                gro_receive_skb(&rcpu->gro, skbs[i]);

                empty = __ptr_ring_empty(rcpu->queue);
In __cpu_map_load_bpf_program():
        if (prog->expected_attach_type != BPF_XDP_CPUMAP ||
                return -EINVAL;

        rcpu->value.bpf_prog.id = prog->aux->id;
        rcpu->prog = prog;
In __cpu_map_entry_alloc():
        int numa, err, i, fd = value->bpf_prog.fd;

        /* Have map->numa_node, but choose node of redirect target CPU */

        rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
        if (!rcpu->bulkq)

        bq = per_cpu_ptr(rcpu->bulkq, i);
        bq->obj = rcpu;

        rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
        if (!rcpu->queue)

        err = ptr_ring_init(rcpu->queue, value->qsize, gfp);

        rcpu->cpu = cpu;
        rcpu->map_id = map->id;
        rcpu->value.qsize = value->qsize;
        gro_init(&rcpu->gro);

        init_completion(&rcpu->kthread_running);
        rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
                                               map->id);
        if (IS_ERR(rcpu->kthread))

        kthread_bind(rcpu->kthread, cpu);
        wake_up_process(rcpu->kthread);

        wait_for_completion(&rcpu->kthread_running);

        if (rcpu->prog)
                bpf_prog_put(rcpu->prog);

        gro_cleanup(&rcpu->gro);
        ptr_ring_cleanup(rcpu->queue, NULL);
        kfree(rcpu->queue);
        free_percpu(rcpu->bulkq);
In __cpu_map_entry_free():
         * RCU grace-period has elapsed. Thus, XDP cannot queue any

        kthread_stop(rcpu->kthread);

        if (rcpu->prog)
                bpf_prog_put(rcpu->prog);
        gro_cleanup(&rcpu->gro);

        __cpu_map_ring_cleanup(rcpu->queue);
        ptr_ring_cleanup(rcpu->queue, NULL);
        kfree(rcpu->queue);
        free_percpu(rcpu->bulkq);
In __cpu_map_entry_replace():
        old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu)));
                INIT_RCU_WORK(&old_rcpu->free_work, __cpu_map_entry_free);
                queue_rcu_work(system_wq, &old_rcpu->free_work);
In cpu_map_delete_elem():
        if (key_cpu >= map->max_entries)
                return -EINVAL;
In cpu_map_update_elem():
        memcpy(&cpumap_value, value, map->value_size);

                return -EINVAL;
        if (unlikely(key_cpu >= cmap->map.max_entries))
                return -E2BIG;
                return -EEXIST;
                return -EOVERFLOW;
                return -ENODEV;

        /* Updating qsize causes re-allocation of bpf_cpu_map_entry */
                return -ENOMEM;
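The update path above consumes struct bpf_cpumap_val values written from user space. A hypothetical user-space sketch using libbpf follows; the helper name install_cpumap_entry(), the map fd argument, and the queue size of 2048 are illustrative, not taken from the kernel sources.

/* Hypothetical user-space sketch (not from cpumap.c): install a cpumap
 * entry so redirected frames for this CPU get a 2048-slot ptr_ring and a
 * dedicated kthread. A qsize of 0 instead clears the entry.
 */
#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int install_cpumap_entry(int cpumap_fd, __u32 target_cpu)
{
        struct bpf_cpumap_val val = {
                .qsize = 2048,          /* ptr_ring slots for this CPU */
                .bpf_prog.fd = 0,       /* optional second-stage XDP prog; 0 = none */
        };

        /* Triggers __cpu_map_entry_alloc(): ptr_ring + kthread creation. */
        if (bpf_map_update_elem(cpumap_fd, &target_cpu, &val, 0) < 0) {
                perror("bpf_map_update_elem(cpumap)");
                return -1;
        }
        return 0;
}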
In cpu_map_free():
        /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * guarantees no further "XDP/bpf-side" reads against
         * bpf_cpu_map->cpu_map, but also ensures pending flush operations

        for (i = 0; i < cmap->map.max_entries; i++) {

                rcpu = rcu_dereference_raw(cmap->cpu_map[i]);

                __cpu_map_entry_free(&rcpu->free_work.work);

        bpf_map_area_free(cmap->cpu_map);
In __cpu_map_lookup_elem():
        if (key >= map->max_entries)

        rcpu = rcu_dereference_check(cmap->cpu_map[key],

In cpu_map_lookup_elem():
        return rcpu ? &rcpu->value : NULL;
In cpu_map_get_next_key():
        if (index >= cmap->map.max_entries) {

        if (index == cmap->map.max_entries - 1)
                return -ENOENT;
In cpu_map_mem_usage():
        usage += (u64)map->max_entries * sizeof(struct bpf_cpu_map_entry *);
In bq_flush_to_queue():
        struct bpf_cpu_map_entry *rcpu = bq->obj;
        const int to_cpu = rcpu->cpu;

        if (unlikely(!bq->count))

        q = rcpu->queue;
        spin_lock(&q->producer_lock);

        for (i = 0; i < bq->count; i++) {
                struct xdp_frame *xdpf = bq->q[i];

        bq->count = 0;
        spin_unlock(&q->producer_lock);

        __list_del_clearprev(&bq->flush_node);

        /* Feedback loop via tracepoints */
        trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
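The trace_xdp_cpumap_enqueue() call above, together with trace_xdp_cpumap_kthread() in cpu_map_kthread_run(), is the "feedback loop" these matches refer to: enqueue and kthread statistics are exported as tracepoints for user space to observe. A minimal, hypothetical observer is sketched below; it only counts how often the enqueue tracepoint fires, and the map and function names are illustrative. Reading the per-event fields (processed, drops, to_cpu) would additionally require mirroring the layout from the tracepoint's format file under /sys/kernel/tracing/events/xdp/.

/* Hypothetical observer sketch (not from cpumap.c): count firings of the
 * xdp:xdp_cpumap_enqueue tracepoint emitted by bq_flush_to_queue() above.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u64);
} enqueue_events SEC(".maps");

SEC("tracepoint/xdp/xdp_cpumap_enqueue")
int count_cpumap_enqueue(void *ctx)
{
        __u32 key = 0;
        __u64 *cnt = bpf_map_lookup_elem(&enqueue_events, &key);

        if (cnt)
                (*cnt)++;       /* per-CPU slot, so no atomics needed */
        return 0;
}

char _license[] SEC("license") = "GPL";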
In bq_enqueue():
        /* Runs under RCU-read-side, plus in softirq under NAPI protection.

        struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

        if (unlikely(bq->count == CPU_MAP_BULK_SIZE))

         * (e.g. ixgbe) recycle tricks based on page-refcnt.
         * with another CPU on page-refcnt and remaining driver code).
         * operation, when completing napi->poll call.
        bq->q[bq->count++] = xdpf;

        if (!bq->flush_node.prev) {

                list_add(&bq->flush_node, flush_list);
In cpu_map_enqueue():
        xdpf->dev_rx = dev_rx;

In cpu_map_generic_redirect():
        __skb_pull(skb, skb->mac_len);

        ret = ptr_ring_produce(rcpu->queue, skb);

        wake_up_process(rcpu->kthread);

        trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu);

In __cpu_map_flush():
        wake_up_process(bq->obj->kthread);