Lines Matching full:rx — cross-reference hits for the identifier rx in the FreeBSD gve driver's receive path (gve_rx.c); each hit lists the source line number, the matching code, and the enclosing function.
38 struct gve_rx_ring *rx = &priv->rx[i]; in gve_rx_free_ring_gqi() local
40 if (rx->page_info != NULL) { in gve_rx_free_ring_gqi()
41 free(rx->page_info, M_GVE); in gve_rx_free_ring_gqi()
42 rx->page_info = NULL; in gve_rx_free_ring_gqi()
45 if (rx->data_ring != NULL) { in gve_rx_free_ring_gqi()
46 gve_dma_free_coherent(&rx->data_ring_mem); in gve_rx_free_ring_gqi()
47 rx->data_ring = NULL; in gve_rx_free_ring_gqi()
50 if (rx->desc_ring != NULL) { in gve_rx_free_ring_gqi()
51 gve_dma_free_coherent(&rx->desc_ring_mem); in gve_rx_free_ring_gqi()
52 rx->desc_ring = NULL; in gve_rx_free_ring_gqi()
59 struct gve_rx_ring *rx = &priv->rx[i]; in gve_rx_free_ring() local
60 struct gve_ring_com *com = &rx->com; in gve_rx_free_ring()
63 gve_free_counters((counter_u64_t *)&rx->stats, NUM_RX_STATS); in gve_rx_free_ring()
77 gve_prefill_rx_slots(struct gve_rx_ring *rx) in gve_prefill_rx_slots() argument
79 struct gve_ring_com *com = &rx->com; in gve_prefill_rx_slots()
84 rx->data_ring[i].qpl_offset = htobe64(PAGE_SIZE * i); in gve_prefill_rx_slots()
85 rx->page_info[i].page_offset = 0; in gve_prefill_rx_slots()
86 rx->page_info[i].page_address = com->qpl->dmas[i].cpu_addr; in gve_prefill_rx_slots()
87 rx->page_info[i].page = com->qpl->pages[i]; in gve_prefill_rx_slots()
93 bus_dmamap_sync(rx->data_ring_mem.tag, rx->data_ring_mem.map, in gve_prefill_rx_slots()
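The gve_prefill_rx_slots() hits above show the GQI prefill pattern: each page of the ring's queue page list (QPL) is paired with one data-ring slot, slot i's qpl_offset is set to byte offset PAGE_SIZE * i into the registered QPL (byte-swapped with htobe64() for the device), and page_info[i] caches the CPU address of the same page so the host can reach the buffer without another lookup. A minimal userspace sketch of that pairing follows; the struct layouts, PAGE_SIZE value, and slot count are illustrative stand-ins rather than the driver's definitions, and the bus_dmamap_sync() that publishes the ring to the device is omitted.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096           /* stand-in for the kernel's PAGE_SIZE */
#define RX_SLOTS  8              /* stand-in for priv->rx_desc_cnt */

struct data_slot { uint64_t qpl_offset; };                    /* device-visible slot (host order here) */
struct page_info { void *page_address; uint32_t page_offset; };

int main(void)
{
	struct data_slot slots[RX_SLOTS];
	struct page_info info[RX_SLOTS];
	void *qpl_pages[RX_SLOTS];

	/* Model the QPL: one buffer per slot, registered with the device up front. */
	for (int i = 0; i < RX_SLOTS; i++)
		qpl_pages[i] = malloc(PAGE_SIZE);

	/*
	 * Prefill: slot i points at byte offset PAGE_SIZE * i of the QPL,
	 * and page_info[i] caches the CPU mapping of the same page.
	 */
	for (int i = 0; i < RX_SLOTS; i++) {
		slots[i].qpl_offset = (uint64_t)PAGE_SIZE * i;
		info[i].page_offset = 0;
		info[i].page_address = qpl_pages[i];
	}

	for (int i = 0; i < RX_SLOTS; i++)
		printf("slot %d -> QPL offset %llu\n", i,
		    (unsigned long long)slots[i].qpl_offset);

	for (int i = 0; i < RX_SLOTS; i++)
		free(qpl_pages[i]);
	return (0);
}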
100 struct gve_rx_ring *rx = &priv->rx[i]; in gve_rx_alloc_ring_gqi() local
101 struct gve_ring_com *com = &rx->com; in gve_rx_alloc_ring_gqi()
106 CACHE_LINE_SIZE, &rx->desc_ring_mem); in gve_rx_alloc_ring_gqi()
109 "Failed to alloc desc ring for rx ring %d", i); in gve_rx_alloc_ring_gqi()
113 rx->mask = priv->rx_pages_per_qpl - 1; in gve_rx_alloc_ring_gqi()
114 rx->desc_ring = rx->desc_ring_mem.cpu_addr; in gve_rx_alloc_ring_gqi()
118 device_printf(priv->dev, "No QPL left for rx ring %d", i); in gve_rx_alloc_ring_gqi()
122 rx->page_info = malloc(priv->rx_desc_cnt * sizeof(*rx->page_info), in gve_rx_alloc_ring_gqi()
127 CACHE_LINE_SIZE, &rx->data_ring_mem); in gve_rx_alloc_ring_gqi()
130 "Failed to alloc data ring for rx ring %d", i); in gve_rx_alloc_ring_gqi()
133 rx->data_ring = rx->data_ring_mem.cpu_addr; in gve_rx_alloc_ring_gqi()
135 gve_prefill_rx_slots(rx); in gve_rx_alloc_ring_gqi()
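Taken together with the gve_rx_free_ring_gqi() hits at the top, the gve_rx_alloc_ring_gqi() hits show a common FreeBSD driver idiom: resources are allocated in a fixed order (descriptor ring, QPL, page_info array, data ring, then the prefill), and any failure unwinds through the same free routine, which NULL-checks and re-NULLs each member so it is safe to call on a partially built ring. Below is a hedged userspace sketch of that alloc/unwind shape, using plain calloc()/free() in place of the driver's DMA allocators and hypothetical member names.

#include <stdlib.h>

/* Illustrative ring state; the real struct gve_rx_ring also carries DMA
 * tags/maps, the QPL, stats counters, and ring bookkeeping. */
struct rx_ring {
	void *desc_ring;
	void *page_info;
	void *data_ring;
};

/* Mirrors the teardown order in the hits: every member is NULL-checked
 * and re-NULLed, so this is safe to call on a partially constructed ring. */
static void
rx_ring_free(struct rx_ring *rx)
{
	if (rx->page_info != NULL) {
		free(rx->page_info);
		rx->page_info = NULL;
	}
	if (rx->data_ring != NULL) {
		free(rx->data_ring);
		rx->data_ring = NULL;
	}
	if (rx->desc_ring != NULL) {
		free(rx->desc_ring);
		rx->desc_ring = NULL;
	}
}

/* Allocate in order; any failure unwinds through the same free routine. */
static int
rx_ring_alloc(struct rx_ring *rx, size_t desc_cnt, size_t desc_sz, size_t slot_sz)
{
	rx->desc_ring = calloc(desc_cnt, desc_sz);
	if (rx->desc_ring == NULL)
		goto abort;
	rx->page_info = calloc(desc_cnt, sizeof(void *));
	if (rx->page_info == NULL)
		goto abort;
	rx->data_ring = calloc(desc_cnt, slot_sz);
	if (rx->data_ring == NULL)
		goto abort;
	return (0);

abort:
	rx_ring_free(rx);
	return (-1);
}

int main(void)
{
	struct rx_ring rx = { 0 };

	if (rx_ring_alloc(&rx, 1024, 32, 16) == 0)
		rx_ring_free(&rx);
	return (0);
}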
146 struct gve_rx_ring *rx = &priv->rx[i]; in gve_rx_alloc_ring() local
147 struct gve_ring_com *com = &rx->com; in gve_rx_alloc_ring()
153 gve_alloc_counters((counter_u64_t *)&rx->stats, NUM_RX_STATS); in gve_rx_alloc_ring()
159 "Failed to alloc queue resources for rx ring %d", i); in gve_rx_alloc_ring()
184 priv->rx = malloc(sizeof(struct gve_rx_ring) * priv->rx_cfg.num_queues, in gve_alloc_rx_rings()
198 free(priv->rx, M_GVE); in gve_alloc_rx_rings()
210 free(priv->rx, M_GVE); in gve_free_rx_rings()
214 gve_rx_clear_data_ring(struct gve_rx_ring *rx) in gve_rx_clear_data_ring() argument
216 struct gve_priv *priv = rx->com.priv; in gve_rx_clear_data_ring()
220 * The Rx data ring has this invariant: "the networking stack is not in gve_rx_clear_data_ring()
229 rx->data_ring[i].qpl_offset = htobe64(PAGE_SIZE * i + in gve_rx_clear_data_ring()
230 rx->page_info[i].page_offset); in gve_rx_clear_data_ring()
231 rx->fill_cnt++; in gve_rx_clear_data_ring()
234 bus_dmamap_sync(rx->data_ring_mem.tag, rx->data_ring_mem.map, in gve_rx_clear_data_ring()
239 gve_rx_clear_desc_ring(struct gve_rx_ring *rx) in gve_rx_clear_desc_ring() argument
241 struct gve_priv *priv = rx->com.priv; in gve_rx_clear_desc_ring()
245 rx->desc_ring[i] = (struct gve_rx_desc){}; in gve_rx_clear_desc_ring()
247 bus_dmamap_sync(rx->desc_ring_mem.tag, rx->desc_ring_mem.map, in gve_rx_clear_desc_ring()
254 struct gve_rx_ring *rx = &priv->rx[i]; in gve_clear_rx_ring() local
261 rx->seq_no = 1; in gve_clear_rx_ring()
262 rx->cnt = 0; in gve_clear_rx_ring()
263 rx->fill_cnt = 0; in gve_clear_rx_ring()
264 rx->mask = priv->rx_desc_cnt - 1; in gve_clear_rx_ring()
266 gve_rx_clear_desc_ring(rx); in gve_clear_rx_ring()
267 gve_rx_clear_data_ring(rx); in gve_clear_rx_ring()
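gve_clear_rx_ring() resets the ring to its post-allocation state: the descriptor ring is zeroed, seq_no goes back to 1, the cnt and fill_cnt counters restart at 0, and mask is recomputed as rx_desc_cnt - 1 so the free-running counters can be reduced to ring indices with a single AND (the same cnt & mask arithmetic that shows up later in gve_rx_work_pending() and gve_rx_cleanup()). A small standalone model of that bookkeeping follows; the descriptor type and ring size are placeholders.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DESC_CNT 16u                    /* stand-in; must be a power of two */

/* Illustrative subset of the state reset by gve_clear_rx_ring(). */
struct rx_ring {
	uint32_t cnt;       /* descriptors consumed so far (free-running) */
	uint32_t fill_cnt;  /* buffers posted to the NIC (free-running)   */
	uint32_t mask;      /* desc_cnt - 1, for cheap modulo             */
	uint8_t  seq_no;    /* sequence number expected on the next desc  */
	uint64_t desc_ring[DESC_CNT];
};

static void
rx_ring_clear(struct rx_ring *rx)
{
	memset(rx->desc_ring, 0, sizeof(rx->desc_ring));
	rx->seq_no = 1;                 /* expectation restarts at 1, as in the hits */
	rx->cnt = 0;
	rx->fill_cnt = 0;
	rx->mask = DESC_CNT - 1;
}

int main(void)
{
	struct rx_ring rx;

	rx_ring_clear(&rx);
	/* Free-running counters are masked down to a ring index on use. */
	for (uint32_t consumed = 0; consumed < 20; consumed++)
		printf("cnt=%u -> ring index %u\n", consumed, consumed & rx.mask);
	return (0);
}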
273 struct gve_rx_ring *rx = &priv->rx[i]; in gve_start_rx_ring() local
274 struct gve_ring_com *com = &rx->com; in gve_start_rx_ring()
277 if (tcp_lro_init(&rx->lro) != 0) in gve_start_rx_ring()
278 device_printf(priv->dev, "Failed to init lro for rx ring %d", i); in gve_start_rx_ring()
279 rx->lro.ifp = priv->ifp; in gve_start_rx_ring()
283 NET_TASK_INIT(&com->cleanup_task, 0, gve_rx_cleanup_tq, rx); in gve_start_rx_ring()
285 NET_TASK_INIT(&com->cleanup_task, 0, gve_rx_cleanup_tq_dqo, rx); in gve_start_rx_ring()
286 com->cleanup_tq = taskqueue_create_fast("gve rx", M_WAITOK, in gve_start_rx_ring()
293 /* GQ RX bufs are prefilled at ring alloc time */ in gve_start_rx_ring()
294 gve_db_bar_write_4(priv, com->db_offset, rx->fill_cnt); in gve_start_rx_ring()
296 gve_rx_prefill_buffers_dqo(rx); in gve_start_rx_ring()
303 struct gve_rx_ring *rx; in gve_create_rx_rings() local
321 rx = &priv->rx[i]; in gve_create_rx_rings()
322 com = &rx->com; in gve_create_rx_rings()
341 struct gve_rx_ring *rx = &priv->rx[i]; in gve_stop_rx_ring() local
342 struct gve_ring_com *com = &rx->com; in gve_stop_rx_ring()
350 tcp_lro_free(&rx->lro); in gve_stop_rx_ring()
351 rx->ctx = (struct gve_rx_ctx){}; in gve_stop_rx_ring()
376 struct gve_rx_ring *rx = arg; in gve_rx_intr() local
377 struct gve_priv *priv = rx->com.priv; in gve_rx_intr()
378 struct gve_ring_com *com = &rx->com; in gve_rx_intr()
384 taskqueue_enqueue(rx->com.cleanup_tq, &rx->com.cleanup_task); in gve_rx_intr()
421 gve_rx_create_mbuf(struct gve_priv *priv, struct gve_rx_ring *rx, in gve_rx_create_mbuf() argument
425 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx_create_mbuf()
440 counter_u64_add_protected(rx->stats.rx_copybreak_cnt, 1); in gve_rx_create_mbuf()
446 KASSERT(len <= MCLBYTES, ("gve rx fragment bigger than cluster mbuf")); in gve_rx_create_mbuf()
491 counter_u64_add_protected(rx->stats.rx_frag_flip_cnt, 1); in gve_rx_create_mbuf()
504 counter_u64_add_protected(rx->stats.rx_frag_copy_cnt, 1); in gve_rx_create_mbuf()
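The three counters bumped in gve_rx_create_mbuf() mark three delivery strategies: frames at or below the copybreak threshold are copied into a freshly allocated mbuf (rx_copybreak_cnt), larger frames are preferably handed to the stack by flipping the half-page buffer (rx_frag_flip_cnt), and when the buffer cannot be given away the fragment is copied instead (rx_frag_copy_cnt). That reading is an inference from the counter names plus the equivalent Linux gve driver, not something these hits prove; the toy decision function below only illustrates the three-way split, with an invented threshold and parameter names.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RX_COPYBREAK 256        /* illustrative threshold, not the driver's value */

enum rx_mbuf_path { PATH_COPYBREAK, PATH_FRAG_FLIP, PATH_FRAG_COPY };

/*
 * Illustrative decision mirroring the three counters: short frames are
 * copied outright, longer frames either flip the buffer to the stack or
 * fall back to copying when the buffer cannot be given away.
 */
static enum rx_mbuf_path
choose_path(uint16_t len, bool buffer_can_be_flipped)
{
	if (len <= RX_COPYBREAK)
		return (PATH_COPYBREAK);
	if (buffer_can_be_flipped)
		return (PATH_FRAG_FLIP);
	return (PATH_FRAG_COPY);
}

int main(void)
{
	printf("%d %d %d\n",
	    choose_path(128, true),      /* copybreak */
	    choose_path(1500, true),     /* frag flip */
	    choose_path(1500, false));   /* frag copy */
	return (0);
}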
526 gve_rx(struct gve_priv *priv, struct gve_rx_ring *rx, struct gve_rx_desc *desc, in gve_rx() argument
532 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx()
548 counter_u64_add_protected(rx->stats.rx_dropped_pkt_desc_err, 1); in gve_rx()
549 counter_u64_add_protected(rx->stats.rx_dropped_pkt, 1); in gve_rx()
555 page_info = &rx->page_info[idx]; in gve_rx()
556 data_slot = &rx->data_ring[idx]; in gve_rx()
557 page_dma_handle = &(rx->com.qpl->dmas[idx]); in gve_rx()
565 mbuf = gve_rx_create_mbuf(priv, rx, page_info, len, data_slot, in gve_rx()
570 counter_u64_add_protected(rx->stats.rx_dropped_pkt_mbuf_alloc_fail, 1); in gve_rx()
571 counter_u64_add_protected(rx->stats.rx_dropped_pkt, 1); in gve_rx()
603 (rx->lro.lro_cnt != 0) && /* LRO resources exist */ in gve_rx()
604 (tcp_lro_rx(&rx->lro, mbuf, 0) == 0)) in gve_rx()
611 counter_u64_add_protected(rx->stats.rbytes, ctx->total_size); in gve_rx()
612 counter_u64_add_protected(rx->stats.rpackets, 1); in gve_rx()
619 rx->ctx = (struct gve_rx_ctx){}; in gve_rx()
623 gve_rx_work_pending(struct gve_rx_ring *rx) in gve_rx_work_pending() argument
629 next_idx = rx->cnt & rx->mask; in gve_rx_work_pending()
630 desc = rx->desc_ring + next_idx; in gve_rx_work_pending()
634 return (GVE_SEQNO(flags_seq) == rx->seq_no); in gve_rx_work_pending()
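gve_rx_work_pending() shows how descriptor ownership is tracked in the GQI format: instead of an owner bit, each completed descriptor carries a small sequence number in flags_seq, a slot is ready when GVE_SEQNO(desc->flags_seq) equals the ring's expected rx->seq_no, and gve_next_seqno() (visible in the cleanup loop below) advances the expectation one descriptor at a time. The toy model here assumes the field is 3 bits wide and wraps 1..7, skipping 0 so a freshly zeroed ring never looks ready; that detail is borrowed from the Linux gve driver and is an assumption, not something these hits establish.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed layout: the sequence number lives in the low 3 bits of flags_seq. */
#define SEQNO(flags_seq)  ((flags_seq) & 0x7)

struct rx_desc { uint8_t flags_seq; };

struct rx_ring {
	struct rx_desc *desc_ring;
	uint32_t cnt, mask;
	uint8_t seq_no;                 /* sequence number we expect next */
};

/* Assumed wrap: 1..7, skipping 0 so a zeroed descriptor never matches. */
static uint8_t next_seqno(uint8_t seq) { return (seq == 7) ? 1 : seq + 1; }

/* Work is pending when the next descriptor carries the expected seqno. */
static bool
rx_work_pending(struct rx_ring *rx)
{
	struct rx_desc *desc = &rx->desc_ring[rx->cnt & rx->mask];
	return (SEQNO(desc->flags_seq) == rx->seq_no);
}

int main(void)
{
	struct rx_desc ring[4] = { {1}, {2}, {0}, {0} };  /* "NIC" wrote two descs */
	struct rx_ring rx = { ring, 0, 3, 1 };

	while (rx_work_pending(&rx)) {
		printf("consuming index %u\n", rx.cnt & rx.mask);
		rx.cnt++;
		rx.seq_no = next_seqno(rx.seq_no);
	}
	return (0);
}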
644 gve_rx_cleanup(struct gve_priv *priv, struct gve_rx_ring *rx, int budget) in gve_rx_cleanup() argument
646 uint32_t idx = rx->cnt & rx->mask; in gve_rx_cleanup()
648 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx_cleanup()
653 bus_dmamap_sync(rx->desc_ring_mem.tag, rx->desc_ring_mem.map, in gve_rx_cleanup()
655 desc = &rx->desc_ring[idx]; in gve_rx_cleanup()
658 (GVE_SEQNO(desc->flags_seq) == rx->seq_no)) { in gve_rx_cleanup()
660 gve_rx(priv, rx, desc, idx); in gve_rx_cleanup()
662 rx->cnt++; in gve_rx_cleanup()
663 idx = rx->cnt & rx->mask; in gve_rx_cleanup()
664 desc = &rx->desc_ring[idx]; in gve_rx_cleanup()
665 rx->seq_no = gve_next_seqno(rx->seq_no); in gve_rx_cleanup()
672 rx->ctx = (struct gve_rx_ctx){}; in gve_rx_cleanup()
675 GVE_SEQNO(desc->flags_seq), rx->seq_no); in gve_rx_cleanup()
680 tcp_lro_flush_all(&rx->lro); in gve_rx_cleanup()
682 bus_dmamap_sync(rx->data_ring_mem.tag, rx->data_ring_mem.map, in gve_rx_cleanup()
686 rx->fill_cnt += work_done; in gve_rx_cleanup()
687 gve_db_bar_write_4(priv, rx->com.db_offset, rx->fill_cnt); in gve_rx_cleanup()
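gve_rx_cleanup() is the budgeted consume/repost loop: it syncs the descriptor ring for reading, walks descriptors while the sequence number matches and the budget lasts (handing each one to gve_rx()), flushes LRO, syncs the data ring it refilled, and finally advances fill_cnt by the work done and writes that value to the ring's doorbell so the NIC sees the reposted buffers. The sketch below keeps only the budget and doorbell accounting; the sequence-number check is replaced by a toy avail counter and the doorbell write by a printf, so it models the shape of the loop, not the driver itself.

#include <stdint.h>
#include <stdio.h>

struct rx_ring {
	uint32_t cnt;        /* descriptors consumed (free-running) */
	uint32_t fill_cnt;   /* buffers posted back to the NIC      */
	uint32_t mask;
	uint32_t avail;      /* toy stand-in for "descriptors the NIC has written" */
};

/* Stand-in for gve_db_bar_write_4(): tell the NIC how many buffers are posted. */
static void
doorbell_write(struct rx_ring *rx)
{
	printf("doorbell <- fill_cnt=%u\n", rx->fill_cnt);
}

/* Budgeted cleanup: consume at most `budget` descriptors, then repost them. */
static uint32_t
rx_cleanup(struct rx_ring *rx, int budget)
{
	uint32_t work_done = 0;

	while (rx->cnt < rx->avail && work_done < (uint32_t)budget) {
		/* gve_rx() would build an mbuf from desc_ring[rx->cnt & rx->mask] here. */
		rx->cnt++;
		work_done++;
	}
	if (work_done != 0) {
		rx->fill_cnt += work_done;   /* every consumed buffer is reposted */
		doorbell_write(rx);
	}
	return (work_done);
}

int main(void)
{
	struct rx_ring rx = { .cnt = 0, .fill_cnt = 0, .mask = 255, .avail = 300 };

	/* Two passes of a 128-descriptor budget drain 256 of the 300 descriptors. */
	rx_cleanup(&rx, 128);
	rx_cleanup(&rx, 128);
	return (0);
}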
693 struct gve_rx_ring *rx = arg; in gve_rx_cleanup_tq() local
694 struct gve_priv *priv = rx->com.priv; in gve_rx_cleanup_tq()
699 gve_rx_cleanup(priv, rx, /*budget=*/128); in gve_rx_cleanup_tq()
701 gve_db_bar_write_4(priv, rx->com.irq_db_offset, in gve_rx_cleanup_tq()
711 if (gve_rx_work_pending(rx)) { in gve_rx_cleanup_tq()
712 gve_db_bar_write_4(priv, rx->com.irq_db_offset, GVE_IRQ_MASK); in gve_rx_cleanup_tq()
713 taskqueue_enqueue(rx->com.cleanup_tq, &rx->com.cleanup_task); in gve_rx_cleanup_tq()
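The last group of hits shows the interrupt-to-taskqueue handoff: gve_rx_intr() enqueues the ring's cleanup task, gve_rx_cleanup_tq() drains with a budget of 128, re-arms the interrupt through irq_db_offset, and, if gve_rx_work_pending() reports packets that arrived during the re-arm window, writes GVE_IRQ_MASK and enqueues itself again rather than waiting for another interrupt. The toy below models that loop with plain booleans and a recursive call standing in for the taskqueue re-enqueue; exactly where the driver masks the line inside the ISR is an assumption here, and only the re-mask-and-reschedule on the race is taken from these hits.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the handoff: the ISR schedules the cleanup task; the task
 * drains with a budget, re-enables the interrupt, and reschedules itself
 * if packets slipped in while re-enabling.
 */
static int  pending_pkts = 300;          /* stand-in for descriptors the NIC wrote */
static bool irq_masked;

static bool rx_work_pending(void) { return (pending_pkts > 0); }

static void
rx_cleanup_task(void)
{
	int budget = 128;

	while (rx_work_pending() && budget-- > 0)
		pending_pkts--;              /* gve_rx_cleanup() consumes one descriptor */

	irq_masked = false;                  /* re-arm via the per-queue irq doorbell */

	if (rx_work_pending()) {             /* raced with the NIC: mask and run again */
		irq_masked = true;
		rx_cleanup_task();           /* the driver re-enqueues on its taskqueue */
	}
}

static void
rx_intr(void)
{
	irq_masked = true;                   /* assumed: line stays masked while the task runs */
	rx_cleanup_task();                   /* the driver enqueues on a per-ring taskqueue */
}

int main(void)
{
	rx_intr();
	printf("remaining=%d masked=%d\n", pending_pkts, irq_masked);
	return (0);
}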