Lines Matching defs:bq
376 static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
378 struct net_device *dev = bq->dev;
379 unsigned int cnt = bq->count;
388 struct xdp_frame *xdpf = bq->q[i];
393 if (bq->xdp_prog) {
394 to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx);
399 sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
412 xdp_return_frame_rx_napi(bq->q[i]);
415 bq->count = 0;
416 trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
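The matches above come from bq_xmit_all(), the devmap bulk-transmit routine in kernel/bpf/devmap.c: if an XDP program is attached to the map entry it is run over the queued frames first (dev_map_bpf_prog_run()), the surviving frames are handed to the driver through ndo_xdp_xmit(), any frames the driver did not accept are returned with xdp_return_frame_rx_napi(), and finally the count is reset and the tracepoint is emitted. Below is a minimal userspace sketch of that flush step, assuming a made-up fake_xmit() hook in place of ndo_xdp_xmit(); all names in it (frame, bulk_queue, fake_xmit, bq_flush) are illustrative, not kernel API.

/* Userspace model of the flush step: the "device" may accept fewer frames
 * than offered, and whatever it did not take must be freed by the caller
 * before the queue count is reset.
 */
#include <stdio.h>
#include <stdlib.h>

#define BULK_SIZE 16

struct frame { int id; };

struct bulk_queue {
        struct frame *q[BULK_SIZE];
        unsigned int count;
};

/* Pretend the device only has room for 'budget' frames; it takes ownership
 * of (and frees) the frames it accepts.
 */
static int fake_xmit(struct frame **q, unsigned int n, unsigned int budget)
{
        unsigned int sent = n < budget ? n : budget;

        for (unsigned int i = 0; i < sent; i++) {
                printf("sent frame %d\n", q[i]->id);
                free(q[i]);
        }
        return (int)sent;
}

static void bq_flush(struct bulk_queue *bq, unsigned int budget)
{
        unsigned int cnt = bq->count;
        int sent;

        if (!cnt)
                return;

        sent = fake_xmit(bq->q, cnt, budget);
        if (sent < 0)
                sent = 0;       /* an error means nothing was transmitted */

        /* Frames the device did not take are still ours to free. */
        for (unsigned int i = sent; i < cnt; i++)
                free(bq->q[i]);

        bq->count = 0;          /* the queue is empty either way */
}

int main(void)
{
        struct bulk_queue bq = { .count = 0 };

        for (int i = 0; i < 10; i++) {
                struct frame *f = malloc(sizeof(*f));

                f->id = i;
                bq.q[bq.count++] = f;
        }
        bq_flush(&bq, 6);       /* 6 frames "sent", 4 freed unsent */
        return 0;
}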
425 struct xdp_dev_bulk_queue *bq, *tmp;
427 list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
428 bq_xmit_all(bq, XDP_XMIT_FLUSH);
429 bq->dev_rx = NULL;
430 bq->xdp_prog = NULL;
431 __list_del_clearprev(&bq->flush_node);
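The 425-431 group is the body of __dev_flush(): it walks the per-CPU flush list with list_for_each_entry_safe() (safe iteration, because each node is unlinked inside the loop), flushes every queue with XDP_XMIT_FLUSH, clears the per-flush state (dev_rx, xdp_prog), and removes the node with __list_del_clearprev(). A condensed sketch of the same walk, flush, and unlink pattern, assuming a simplified singly linked list in place of the kernel's struct list_head; the names (bulk_queue, bq_flush_one, dev_flush_all) are stand-ins.

#include <stddef.h>
#include <stdio.h>

struct bulk_queue {
        const char *dev_name;           /* stand-in for bq->dev */
        unsigned int count;
        struct bulk_queue *next;        /* stand-in for bq->flush_node */
};

static void bq_flush_one(struct bulk_queue *bq)
{
        printf("flushing %u frame(s) to %s\n", bq->count, bq->dev_name);
        bq->count = 0;
}

static void dev_flush_all(struct bulk_queue **flush_list)
{
        struct bulk_queue *bq = *flush_list, *next;

        /* "_safe" iteration: remember 'next' before unlinking the node. */
        for (; bq; bq = next) {
                next = bq->next;
                bq_flush_one(bq);
                bq->next = NULL;        /* unlink, like __list_del_clearprev() */
        }
        *flush_list = NULL;
}

int main(void)
{
        struct bulk_queue a = { "eth0", 3, NULL };
        struct bulk_queue b = { "eth1", 7, &a };
        struct bulk_queue *flush_list = &b;

        dev_flush_all(&flush_list);
        return 0;
}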
459 struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
461 if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
462 bq_xmit_all(bq, 0);
465 * bulk_queue, because bq is stored per-CPU and must be flushed
471 if (!bq->dev_rx) {
474 bq->dev_rx = dev_rx;
475 bq->xdp_prog = xdp_prog;
476 list_add(&bq->flush_node, flush_list);
479 bq->q[bq->count++] = xdpf;
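The last group is bq_enqueue(): the per-CPU bulk queue for the target device is looked up with this_cpu_ptr(), flushed early if it already holds DEV_MAP_BULK_SIZE frames, registered on the flush list the first time a frame arrives since the last flush (the !bq->dev_rx test; dev_rx, xdp_prog, and the flush_node are only ever set together), and then the frame is appended. The sketch below ties the three fragments together under simplified assumptions (a single thread, a plain array as the flush list, no RCU); BULK_SIZE and the other names are local stand-ins, not the kernel definitions.

#include <stdbool.h>
#include <stdio.h>

#define BULK_SIZE  16
#define MAX_QUEUES 8

struct bulk_queue {
        int q[BULK_SIZE];
        unsigned int count;
        bool on_flush_list;     /* stands in for the !bq->dev_rx check */
};

/* "Flush list": queues with frames pending since the last full flush. */
static struct bulk_queue *flush_list[MAX_QUEUES];
static unsigned int flush_cnt;

static void bq_flush(struct bulk_queue *bq)
{
        printf("flushing %u frame(s)\n", bq->count);
        bq->count = 0;
}

static void bq_enqueue(struct bulk_queue *bq, int frame)
{
        /* Queue full: transmit what is already buffered before adding more.
         * The queue stays registered on the flush list, as in the kernel.
         */
        if (bq->count == BULK_SIZE)
                bq_flush(bq);

        /* First frame since the last end-of-cycle flush: register the queue
         * so the final flush knows about it (kernel: list_add()).
         */
        if (!bq->on_flush_list) {
                bq->on_flush_list = true;
                flush_list[flush_cnt++] = bq;
        }

        bq->q[bq->count++] = frame;
}

/* End-of-cycle flush, the analogue of __dev_flush(). */
static void flush_all(void)
{
        for (unsigned int i = 0; i < flush_cnt; i++) {
                bq_flush(flush_list[i]);
                flush_list[i]->on_flush_list = false;
        }
        flush_cnt = 0;
}

int main(void)
{
        struct bulk_queue bq = { .count = 0 };

        for (int i = 0; i < 40; i++)
                bq_enqueue(&bq, i);     /* triggers two intermediate flushes */
        flush_all();                    /* flushes the remaining 8 frames */
        return 0;
}

The point of this shape is batching: the driver's transmit path is entered once per batch instead of once per frame, and the final flush is deferred to the end of the receive cycle (in the kernel, the end of the driver's NAPI poll) so that partially filled queues still get drained.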