Lines Matching +full:pre +full:- +full:programs

1 // SPDX-License-Identifier: GPL-2.0-only
14 * CPU will do SKB-allocation and call the normal network stack.
20 * basically allows for 10G wirespeed pre-filtering via bpf.
40 * will be stored/queued for at most one driver ->poll() call. It is
46 #define CPU_MAP_BULK_SIZE 8 /* 8 == one cacheline on 64-bit archs */
62 /* XDP can run multiple RX-ring queues, need __percpu enqueue store */
65 /* Queue with potential multi-producers, and single-consumer kthread */
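The header comments above describe the cpumap model: an XDP program on the RX CPU redirects raw xdp_frames into a per-CPU multi-producer, single-consumer queue, and a kthread on the remote CPU builds the SKBs. A minimal sketch of the redirecting BPF side, assuming libbpf conventions and a kernel where the low bits of the bpf_redirect_map() flags select the fallback action (the map name and target CPU are illustrative only):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(struct bpf_cpumap_val));
	__uint(max_entries, 64);
} cpu_map SEC(".maps");

SEC("xdp")
int steer_to_cpu(struct xdp_md *ctx)
{
	__u32 target_cpu = 2;	/* illustrative; e.g. chosen per protocol or RSS hash */

	/* Enqueue the frame for the kthread on target_cpu; fall back to
	 * XDP_PASS if that cpumap entry is not populated.
	 */
	return bpf_redirect_map(&cpu_map, target_cpu, XDP_PASS);
}

char _license[] SEC("license") = "GPL";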
85 u32 value_size = attr->value_size; in cpu_map_alloc()
89 if (attr->max_entries == 0 || attr->key_size != 4 || in cpu_map_alloc()
92 attr->map_flags & ~BPF_F_NUMA_NODE) in cpu_map_alloc()
93 return ERR_PTR(-EINVAL); in cpu_map_alloc()
95 /* Pre-limit array size based on NR_CPUS, not final CPU check */ in cpu_map_alloc()
96 if (attr->max_entries > NR_CPUS) in cpu_map_alloc()
97 return ERR_PTR(-E2BIG); in cpu_map_alloc()
101 return ERR_PTR(-ENOMEM); in cpu_map_alloc()
103 bpf_map_init_from_attr(&cmap->map, attr); in cpu_map_alloc()
106 cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries * in cpu_map_alloc()
108 cmap->map.numa_node); in cpu_map_alloc()
109 if (!cmap->cpu_map) { in cpu_map_alloc()
111 return ERR_PTR(-ENOMEM); in cpu_map_alloc()
114 return &cmap->map; in cpu_map_alloc()
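The attr checks in cpu_map_alloc() above translate, from user space, into a 4-byte key (the CPU number) and a struct bpf_cpumap_val value. A sketch using libbpf's bpf_map_create() and bpf_map_update_elem(); setup_cpumap(), the qsize and the CPU number are made up for illustration:

#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

int setup_cpumap(void)
{
	struct bpf_cpumap_val val = {
		.qsize = 2048,	/* ptr_ring slots on the target CPU */
		/* .bpf_prog.fd left at 0: no per-entry program */
	};
	__u32 target_cpu = 3;	/* illustrative */
	int map_fd;

	/* key_size must be 4 and value_size sizeof(struct bpf_cpumap_val);
	 * max_entries is pre-limited to NR_CPUS by cpu_map_alloc().
	 */
	map_fd = bpf_map_create(BPF_MAP_TYPE_CPUMAP, "cpu_map",
				sizeof(__u32), sizeof(struct bpf_cpumap_val),
				64, NULL);
	if (map_fd < 0)
		return map_fd;

	/* cpu_map_update_elem() allocates the entry and spawns its kthread. */
	if (bpf_map_update_elem(map_fd, &target_cpu, &val, 0) < 0) {
		close(map_fd);
		return -1;
	}
	return map_fd;
}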
119 /* The tear-down procedure should have made sure that the queue is in __cpu_map_ring_cleanup()
120 * empty. See __cpu_map_entry_replace() and work-queue in __cpu_map_ring_cleanup()
148 act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog); in cpu_map_bpf_prog_run_skb()
154 err = xdp_do_generic_redirect(skb->dev, skb, &xdp, in cpu_map_bpf_prog_run_skb()
155 rcpu->prog); in cpu_map_bpf_prog_run_skb()
158 stats->drop++; in cpu_map_bpf_prog_run_skb()
160 stats->redirect++; in cpu_map_bpf_prog_run_skb()
164 bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act); in cpu_map_bpf_prog_run_skb()
167 trace_xdp_exception(skb->dev, rcpu->prog, act); in cpu_map_bpf_prog_run_skb()
171 stats->drop++; in cpu_map_bpf_prog_run_skb()
176 stats->pass += pass; in cpu_map_bpf_prog_run_skb()
196 rxq.dev = xdpf->dev_rx; in cpu_map_bpf_prog_run_xdp()
197 rxq.mem.type = xdpf->mem_type; in cpu_map_bpf_prog_run_xdp()
202 act = bpf_prog_run_xdp(rcpu->prog, &xdp); in cpu_map_bpf_prog_run_xdp()
208 stats->drop++; in cpu_map_bpf_prog_run_xdp()
214 err = xdp_do_redirect(xdpf->dev_rx, &xdp, in cpu_map_bpf_prog_run_xdp()
215 rcpu->prog); in cpu_map_bpf_prog_run_xdp()
218 stats->drop++; in cpu_map_bpf_prog_run_xdp()
220 stats->redirect++; in cpu_map_bpf_prog_run_xdp()
224 bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act); in cpu_map_bpf_prog_run_xdp()
228 stats->drop++; in cpu_map_bpf_prog_run_xdp()
233 stats->pass += nframes; in cpu_map_bpf_prog_run_xdp()
251 if (!rcpu->prog) in cpu_map_bpf_prog_run()
258 ret->xdp_n = cpu_map_bpf_prog_run_xdp(rcpu, frames, ret->xdp_n, stats); in cpu_map_bpf_prog_run()
259 if (unlikely(ret->skb_n)) in cpu_map_bpf_prog_run()
260 ret->skb_n = cpu_map_bpf_prog_run_skb(rcpu, skbs, ret->skb_n, in cpu_map_bpf_prog_run()
263 if (stats->redirect) in cpu_map_bpf_prog_run()
271 if (unlikely(ret->skb_n) && ret->xdp_n) in cpu_map_bpf_prog_run()
272 memmove(&skbs[ret->xdp_n], skbs, ret->skb_n * sizeof(*skbs)); in cpu_map_bpf_prog_run()
285 gro_flush_normal(&rcpu->gro, !empty && HZ >= 1000); in cpu_map_gro_flush()
294 complete(&rcpu->kthread_running); in cpu_map_kthread_run()
298 * from map, thus no new packets can enter. Remaining in-flight in cpu_map_kthread_run()
302 while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
312 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
314 /* Recheck to avoid lost wake-up */ in cpu_map_kthread_run()
315 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
330 * consume side stays valid, as resizing of the queue is not allowed. in cpu_map_kthread_run()
332 n = __ptr_ring_consume_batched(rcpu->queue, frames, in cpu_map_kthread_run()
372 kmem_alloc_drops += ret.xdp_n - m; in cpu_map_kthread_run()
379 /* Can fail only when !skb -- already handled above */ in cpu_map_kthread_run()
380 __xdp_build_skb_from_frame(xdpf, skbs[i], xdpf->dev_rx); in cpu_map_kthread_run()
387 trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops, in cpu_map_kthread_run()
391 gro_receive_skb(&rcpu->gro, skbs[i]); in cpu_map_kthread_run()
395 empty = __ptr_ring_empty(rcpu->queue); in cpu_map_kthread_run()
417 if (prog->expected_attach_type != BPF_XDP_CPUMAP || in __cpu_map_load_bpf_program()
420 return -EINVAL; in __cpu_map_load_bpf_program()
423 rcpu->value.bpf_prog.id = prog->aux->id; in __cpu_map_load_bpf_program()
424 rcpu->prog = prog; in __cpu_map_load_bpf_program()
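The checks in __cpu_map_load_bpf_program() require an XDP program loaded with expected_attach_type BPF_XDP_CPUMAP; with libbpf this is normally expressed through the section name. A sketch, assuming the "xdp/cpumap" section prefix supported by current libbpf; the program body is just a placeholder:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Runs on the remote CPU after the kthread dequeues the frame. */
SEC("xdp/cpumap")
int cpumap_filter(struct xdp_md *ctx)
{
	return XDP_DROP;	/* placeholder: drop everything */
}

char _license[] SEC("license") = "GPL";

Its fd is then written into bpf_cpumap_val.bpf_prog.fd on map update, which is the value __cpu_map_entry_alloc() passes down as fd below.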
433 int numa, err, i, fd = value->bpf_prog.fd; in __cpu_map_entry_alloc()
438 /* Have map->numa_node, but choose node of redirect target CPU */ in __cpu_map_entry_alloc()
446 rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq), in __cpu_map_entry_alloc()
448 if (!rcpu->bulkq) in __cpu_map_entry_alloc()
452 bq = per_cpu_ptr(rcpu->bulkq, i); in __cpu_map_entry_alloc()
453 bq->obj = rcpu; in __cpu_map_entry_alloc()
457 rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp, in __cpu_map_entry_alloc()
459 if (!rcpu->queue) in __cpu_map_entry_alloc()
462 err = ptr_ring_init(rcpu->queue, value->qsize, gfp); in __cpu_map_entry_alloc()
466 rcpu->cpu = cpu; in __cpu_map_entry_alloc()
467 rcpu->map_id = map->id; in __cpu_map_entry_alloc()
468 rcpu->value.qsize = value->qsize; in __cpu_map_entry_alloc()
469 gro_init(&rcpu->gro); in __cpu_map_entry_alloc()
475 init_completion(&rcpu->kthread_running); in __cpu_map_entry_alloc()
476 rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa, in __cpu_map_entry_alloc()
478 map->id); in __cpu_map_entry_alloc()
479 if (IS_ERR(rcpu->kthread)) in __cpu_map_entry_alloc()
483 kthread_bind(rcpu->kthread, cpu); in __cpu_map_entry_alloc()
484 wake_up_process(rcpu->kthread); in __cpu_map_entry_alloc()
490 wait_for_completion(&rcpu->kthread_running); in __cpu_map_entry_alloc()
495 if (rcpu->prog) in __cpu_map_entry_alloc()
496 bpf_prog_put(rcpu->prog); in __cpu_map_entry_alloc()
498 gro_cleanup(&rcpu->gro); in __cpu_map_entry_alloc()
499 ptr_ring_cleanup(rcpu->queue, NULL); in __cpu_map_entry_alloc()
501 kfree(rcpu->queue); in __cpu_map_entry_alloc()
503 free_percpu(rcpu->bulkq); in __cpu_map_entry_alloc()
514 * RCU grace-period has elapsed. Thus, XDP cannot queue any in __cpu_map_entry_free()
524 kthread_stop(rcpu->kthread); in __cpu_map_entry_free()
526 if (rcpu->prog) in __cpu_map_entry_free()
527 bpf_prog_put(rcpu->prog); in __cpu_map_entry_free()
528 gro_cleanup(&rcpu->gro); in __cpu_map_entry_free()
530 __cpu_map_ring_cleanup(rcpu->queue); in __cpu_map_entry_free()
531 ptr_ring_cleanup(rcpu->queue, NULL); in __cpu_map_entry_free()
532 kfree(rcpu->queue); in __cpu_map_entry_free()
533 free_percpu(rcpu->bulkq); in __cpu_map_entry_free()
550 old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu))); in __cpu_map_entry_replace()
552 INIT_RCU_WORK(&old_rcpu->free_work, __cpu_map_entry_free); in __cpu_map_entry_replace()
553 queue_rcu_work(system_percpu_wq, &old_rcpu->free_work); in __cpu_map_entry_replace()
562 if (key_cpu >= map->max_entries) in cpu_map_delete_elem()
563 return -EINVAL; in cpu_map_delete_elem()
579 memcpy(&cpumap_value, value, map->value_size); in cpu_map_update_elem()
582 return -EINVAL; in cpu_map_update_elem()
583 if (unlikely(key_cpu >= cmap->map.max_entries)) in cpu_map_update_elem()
584 return -E2BIG; in cpu_map_update_elem()
586 return -EEXIST; in cpu_map_update_elem()
588 return -EOVERFLOW; in cpu_map_update_elem()
592 return -ENODEV; in cpu_map_update_elem()
597 /* Updating qsize causes re-allocation of bpf_cpu_map_entry */ in cpu_map_update_elem()
600 return -ENOMEM; in cpu_map_update_elem()
613 /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, in cpu_map_free()
614 * so the bpf programs (there can be more than one using this map) were in cpu_map_free()
616 * these programs to complete. synchronize_rcu() below not only in cpu_map_free()
617 * guarantees no further "XDP/bpf-side" reads against in cpu_map_free()
618 * bpf_cpu_map->cpu_map, but also ensures pending flush operations in cpu_map_free()
626 for (i = 0; i < cmap->map.max_entries; i++) { in cpu_map_free()
629 rcpu = rcu_dereference_raw(cmap->cpu_map[i]); in cpu_map_free()
634 __cpu_map_entry_free(&rcpu->free_work.work); in cpu_map_free()
636 bpf_map_area_free(cmap->cpu_map); in cpu_map_free()
649 if (key >= map->max_entries) in __cpu_map_lookup_elem()
652 rcpu = rcu_dereference_check(cmap->cpu_map[key], in __cpu_map_lookup_elem()
662 return rcpu ? &rcpu->value : NULL; in cpu_map_lookup_elem()
671 if (index >= cmap->map.max_entries) { in cpu_map_get_next_key()
676 if (index == cmap->map.max_entries - 1) in cpu_map_get_next_key()
677 return -ENOENT; in cpu_map_get_next_key()
693 usage += (u64)map->max_entries * sizeof(struct bpf_cpu_map_entry *); in cpu_map_mem_usage()
714 struct bpf_cpu_map_entry *rcpu = bq->obj; in bq_flush_to_queue()
716 const int to_cpu = rcpu->cpu; in bq_flush_to_queue()
720 if (unlikely(!bq->count)) in bq_flush_to_queue()
723 q = rcpu->queue; in bq_flush_to_queue()
724 spin_lock(&q->producer_lock); in bq_flush_to_queue()
726 for (i = 0; i < bq->count; i++) { in bq_flush_to_queue()
727 struct xdp_frame *xdpf = bq->q[i]; in bq_flush_to_queue()
737 bq->count = 0; in bq_flush_to_queue()
738 spin_unlock(&q->producer_lock); in bq_flush_to_queue()
740 __list_del_clearprev(&bq->flush_node); in bq_flush_to_queue()
743 trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu); in bq_flush_to_queue()
746 /* Runs under RCU-read-side, plus in softirq under NAPI protection.
751 struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq); in bq_enqueue()
753 if (unlikely(bq->count == CPU_MAP_BULK_SIZE)) in bq_enqueue()
758 * (e.g. ixgbe) recycle tricks based on page-refcnt. in bq_enqueue()
761 * with another CPU on page-refcnt and remaining driver code). in bq_enqueue()
763 * operation, when completing napi->poll call. in bq_enqueue()
765 bq->q[bq->count++] = xdpf; in bq_enqueue()
767 if (!bq->flush_node.prev) { in bq_enqueue()
770 list_add(&bq->flush_node, flush_list); in bq_enqueue()
778 xdpf->dev_rx = dev_rx; in cpu_map_enqueue()
789 __skb_pull(skb, skb->mac_len); in cpu_map_generic_redirect()
793 ret = ptr_ring_produce(rcpu->queue, skb); in cpu_map_generic_redirect()
797 wake_up_process(rcpu->kthread); in cpu_map_generic_redirect()
799 trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu); in cpu_map_generic_redirect()
811 wake_up_process(bq->obj->kthread); in __cpu_map_flush()
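__cpu_map_flush() above is what drains the per-CPU bulk queues filled by bq_enqueue() and wakes the target kthreads; it is reached from xdp_do_flush(), which drivers call once at the end of their NAPI poll (see the comment above bq_enqueue()). A fragment of a hypothetical driver, where example_rx_clean() stands in for the real RX/XDP routine:

#include <linux/netdevice.h>
#include <linux/filter.h>	/* xdp_do_flush() */

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	/* hypothetical RX routine; its XDP_REDIRECT path ends up in bq_enqueue() */
	int work_done = example_rx_clean(napi, budget);

	/* Flush cpumap/devmap bulk queues exactly once per poll. */
	xdp_do_flush();

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}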