Lines matching refs:bq in kernel/bpf/devmap.c (every reference to a struct xdp_dev_bulk_queue pointer named bq)
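For orientation, every reference below manipulates the per-CPU bulk queue that devmap XDP redirect uses to batch frames per egress device. Its layout, reconstructed from the fields the matched lines touch (field order and the comments are assumptions, not a verbatim copy of the source):

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];	/* frames staged for one egress device */
	struct list_head flush_node;		/* links this queue onto the per-CPU flush list */
	struct net_device *dev;			/* egress device owning the queue */
	struct net_device *dev_rx;		/* ingress device of the queued frames */
	struct bpf_prog *xdp_prog;		/* optional devmap-entry program run at xmit time */
	unsigned int count;			/* number of frames currently staged in q[] */
	local_lock_t bq_lock;			/* nested-BH lock serializing enqueue and flush */
};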
378 static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags) in bq_xmit_all() argument
380 struct net_device *dev = bq->dev; in bq_xmit_all()
381 unsigned int cnt = bq->count; in bq_xmit_all()
386 lockdep_assert_held(&bq->bq_lock); in bq_xmit_all()
392 struct xdp_frame *xdpf = bq->q[i]; in bq_xmit_all()
397 if (bq->xdp_prog) { in bq_xmit_all()
398 to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx); in bq_xmit_all()
403 sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags); in bq_xmit_all()
416 xdp_return_frame_rx_napi(bq->q[i]); in bq_xmit_all()
419 bq->count = 0; in bq_xmit_all()
420 trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err); in bq_xmit_all()
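Read in order, devmap.c lines 378-420 trace the transmit path: bq_xmit_all() drains the staged frames, optionally filters the batch through the devmap entry's program via dev_map_bpf_prog_run(), hands the survivors to the driver's ndo_xdp_xmit(), and frees whatever the driver did not accept. A sketch of the whole function, with the unmatched glue lines (prefetch loop, error handling, labels) filled in as assumptions:

static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	unsigned int cnt = bq->count;
	int sent = 0, err = 0;
	int to_send = cnt;
	int i;

	lockdep_assert_held(&bq->bq_lock);

	if (unlikely(!cnt))
		return;

	/* Warm the cache lines of the frames about to be transmitted. */
	for (i = 0; i < cnt; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	/* Run the devmap entry's program, if any, over the whole batch;
	 * it returns how many frames survived at the front of bq->q[].
	 */
	if (bq->xdp_prog) {
		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx);
		if (!to_send)
			goto out;
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
	if (sent < 0) {
		/* An errno from the driver means no frames were sent. */
		err = sent;
		sent = 0;
	}

	/* Frames the driver did not take must be freed here. */
	for (i = sent; unlikely(i < to_send); i++)
		xdp_return_frame_rx_napi(bq->q[i]);

out:
	bq->count = 0;
	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
}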
429 struct xdp_dev_bulk_queue *bq, *tmp; in __dev_flush() local
431 list_for_each_entry_safe(bq, tmp, flush_list, flush_node) { in __dev_flush()
432 local_lock_nested_bh(&bq->dev->xdp_bulkq->bq_lock); in __dev_flush()
433 bq_xmit_all(bq, XDP_XMIT_FLUSH); in __dev_flush()
434 bq->dev_rx = NULL; in __dev_flush()
435 bq->xdp_prog = NULL; in __dev_flush()
436 __list_del_clearprev(&bq->flush_node); in __dev_flush()
437 local_unlock_nested_bh(&bq->dev->xdp_bulkq->bq_lock); in __dev_flush()
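Lines 429-437 show the flush side: __dev_flush() walks the per-CPU flush list at the end of NAPI, transmitting each device's batch under its bq_lock. Clearing dev_rx re-arms the `if (!bq->dev_rx)` test in bq_enqueue() below, and __list_del_clearprev() zeroes flush_node.prev so stale linkage cannot be mistaken for list membership. Assembled into the complete function (only the signature and braces are filled in here):

void __dev_flush(struct list_head *flush_list)
{
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		local_lock_nested_bh(&bq->dev->xdp_bulkq->bq_lock);
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
		/* Reset so the next bq_enqueue() re-adds the queue. */
		bq->dev_rx = NULL;
		bq->xdp_prog = NULL;
		__list_del_clearprev(&bq->flush_node);
		local_unlock_nested_bh(&bq->dev->xdp_bulkq->bq_lock);
	}
}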
466 struct xdp_dev_bulk_queue *bq; in bq_enqueue() local
469 bq = this_cpu_ptr(dev->xdp_bulkq); in bq_enqueue()
471 if (unlikely(bq->count == DEV_MAP_BULK_SIZE)) in bq_enqueue()
472 bq_xmit_all(bq, 0); in bq_enqueue()
481 if (!bq->dev_rx) { in bq_enqueue()
484 bq->dev_rx = dev_rx; in bq_enqueue()
485 bq->xdp_prog = xdp_prog; in bq_enqueue()
486 list_add(&bq->flush_node, flush_list); in bq_enqueue()
489 bq->q[bq->count++] = xdpf; in bq_enqueue()
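Lines 466-489 are the producer: bq_enqueue() stages one frame in the current CPU's queue for the egress device, flushing early when DEV_MAP_BULK_SIZE frames are already pending. The first frame after a flush (bq->dev_rx == NULL) records the ingress device and program and puts the queue on the flush list. A sketch with the surrounding lines filled in; the lock/unlock pair and the flush-list lookup are assumptions consistent with the lockdep_assert_held() at line 386:

static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
	struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
	struct xdp_dev_bulk_queue *bq;

	local_lock_nested_bh(&dev->xdp_bulkq->bq_lock);
	bq = this_cpu_ptr(dev->xdp_bulkq);

	/* Queue full: transmit the pending batch before staging more. */
	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* dev_rx is the same for every frame in a per-CPU batch, so the
	 * first enqueue after a flush records it and arms the flush list.
	 */
	if (!bq->dev_rx) {
		bq->dev_rx = dev_rx;
		bq->xdp_prog = xdp_prog;
		list_add(&bq->flush_node, flush_list);
	}

	bq->q[bq->count++] = xdpf;
	local_unlock_nested_bh(&dev->xdp_bulkq->bq_lock);
}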
1143 struct xdp_dev_bulk_queue *bq; in dev_map_notification() local
1145 bq = per_cpu_ptr(netdev->xdp_bulkq, cpu); in dev_map_notification()
1146 bq->dev = netdev; in dev_map_notification()
1147 local_lock_init(&bq->bq_lock); in dev_map_notification()
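Lines 1143-1147 are the setup path: dev_map_notification() allocates the per-CPU bulk queues when a device registers, pointing each CPU's queue back at its netdev and initializing the lock. A sketch of the surrounding NETDEV_REGISTER case; the guard condition and error handling are assumptions:

case NETDEV_REGISTER:
	/* Only devices that can transmit XDP frames need a bulk queue. */
	if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
		break;

	netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
	if (!netdev->xdp_bulkq)
		return NOTIFY_BAD;

	for_each_possible_cpu(cpu) {
		bq = per_cpu_ptr(netdev->xdp_bulkq, cpu);
		bq->dev = netdev;
		local_lock_init(&bq->bq_lock);
	}
	break;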