Lines matching "rx" and "slots": excerpts from the gve driver's GQI receive path (gve_rx.c). Only the matching lines are shown, so the fragments below are not contiguous; elided code is marked with /* ... */ and fragments whose enclosing signature is not among the matches are introduced by a comment naming their function.

// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Copyright (C) 2015-2021 Google, Inc. */
/* gve_rx_free_buffer() */
	dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) &
				      GVE_DATA_SLOT_ADDR_PAGE_MASK);

	page_ref_sub(page_info->page, page_info->pagecnt_bias - 1);
	gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);

/* gve_rx_unfill_pages(..., struct gve_rx_ring *rx, ...) */
	u32 slots = rx->mask + 1;

	if (!rx->data.page_info)
		return;

	if (rx->data.raw_addressing) {
		for (i = 0; i < slots; i++)
			gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
					   &rx->data.data_ring[i]);
	} else {
		for (i = 0; i < slots; i++)
			page_ref_sub(rx->data.page_info[i].page,
				     rx->data.page_info[i].pagecnt_bias - 1);

		for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
			page_ref_sub(rx->qpl_copy_pool[i].page,
				     rx->qpl_copy_pool[i].pagecnt_bias - 1);
			put_page(rx->qpl_copy_pool[i].page);
		}
	}
	kvfree(rx->data.page_info);
	rx->data.page_info = NULL;

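The teardown above undoes the reference-count bias taken when each buffer was set up. A minimal standalone model of that arithmetic (illustrative only, not driver code; the counts are invented):

#include <assert.h>
#include <limits.h>

int main(void)
{
	int refcount = 1;		/* page_count() right after allocation */
	int bias;			/* models page_info->pagecnt_bias */
	int held_by_stack = 3;		/* frags the stack has not freed yet (example) */

	refcount += INT_MAX - 1;	/* setup: page_ref_add(page, INT_MAX - 1) */
	bias = INT_MAX;			/* setup: pagecnt_bias = INT_MAX */

	bias -= held_by_stack;		/* each frag given to the stack costs one unit of bias */

	refcount -= bias - 1;		/* teardown: page_ref_sub(page, bias - 1) */
	refcount -= 1;			/* teardown: put_page() */

	assert(refcount == held_by_stack);	/* only the stack's references remain */
	return 0;
}
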
/* gve_rx_ctx_clear() */
	ctx->skb_head = NULL;
	ctx->skb_tail = NULL;
	ctx->total_size = 0;
	ctx->frag_cnt = 0;
	ctx->drop_pkt = false;

static void gve_rx_init_ring_state_gqi(struct gve_rx_ring *rx)
{
	rx->desc.seqno = 1;
	rx->cnt = 0;
	gve_rx_ctx_clear(&rx->ctx);
}

/* gve_rx_reset_ring_gqi() */
	struct gve_rx_ring *rx = &priv->rx[idx];
	const u32 slots = priv->rx_desc_cnt;

	if (rx->desc.desc_ring) {
		size = slots * sizeof(rx->desc.desc_ring[0]);
		memset(rx->desc.desc_ring, 0, size);
	}

	if (rx->q_resources)
		memset(rx->q_resources, 0, sizeof(*rx->q_resources));

	gve_rx_init_ring_state_gqi(rx);

void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
			  /* ... */)
{
	struct device *dev = &priv->pdev->dev;
	u32 slots = rx->mask + 1;
	int idx = rx->q_num;

	if (rx->desc.desc_ring) {
		bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
		dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
		rx->desc.desc_ring = NULL;
	}

	if (rx->q_resources) {
		dma_free_coherent(dev, sizeof(*rx->q_resources),
				  rx->q_resources, rx->q_resources_bus);
		rx->q_resources = NULL;
	}

	gve_rx_unfill_pages(priv, rx, cfg);

	if (rx->data.data_ring) {
		bytes = sizeof(*rx->data.data_ring) * slots;
		dma_free_coherent(dev, bytes, rx->data.data_ring,
				  rx->data.data_bus);
		rx->data.data_ring = NULL;
	}

	kvfree(rx->qpl_copy_pool);
	rx->qpl_copy_pool = NULL;

	if (rx->data.qpl) {
		qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, idx);
		gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
		rx->data.qpl = NULL;
	}

	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}

/* gve_setup_rx_buffer() */
	page_info->page = page;
	page_info->page_offset = 0;
	page_info->page_address = page_address(page);

	page_ref_add(page, INT_MAX - 1);
	page_info->pagecnt_bias = INT_MAX;

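gve_setup_rx_buffer() grabs almost INT_MAX page references up front so that handing a fragment to the stack later only costs a local bias decrement instead of another atomic on the page. A standalone sketch of the invariant this establishes (not driver code):

#include <assert.h>
#include <limits.h>

int main(void)
{
	int page_count = 1;		/* freshly allocated page */
	int bias;

	page_count += INT_MAX - 1;	/* page_ref_add(page, INT_MAX - 1) */
	bias = INT_MAX;			/* page_info->pagecnt_bias = INT_MAX */
	assert(page_count == bias);	/* the driver owns every reference */

	bias--;				/* one frag handed to the stack */
	assert(page_count > bias);	/* not recyclable until the stack's put_page() */
	return 0;
}
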
/* gve_rx_alloc_buffer(..., struct gve_rx_ring *rx) */
	/* ... */
		u64_stats_update_begin(&rx->statss);
		rx->rx_buf_alloc_fail++;
		u64_stats_update_end(&rx->statss);
	/* ... */
	gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);

static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
				/* ... */)
{
	struct gve_priv *priv = rx->gve;
	u32 slots;

	/* Allocate one page per Rx queue slot. Each page is split into two
	 * ...
	 */
	slots = rx->mask + 1;

	rx->data.page_info = kvzalloc(slots *
				      sizeof(*rx->data.page_info), GFP_KERNEL);
	if (!rx->data.page_info)
		return -ENOMEM;

	for (i = 0; i < slots; i++) {
		if (!rx->data.raw_addressing) {
			struct page *page = rx->data.qpl->pages[i];

			gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
					    &rx->data.data_ring[i].qpl_offset);
			/* ... */
		}
		err = gve_rx_alloc_buffer(priv, &priv->pdev->dev,
					  &rx->data.page_info[i],
					  &rx->data.data_ring[i], rx);
		/* ... */
	}

	if (!rx->data.raw_addressing) {
		for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
			/* ... */
				err = -ENOMEM;
			/* ... */
			rx->qpl_copy_pool[j].page = page;
			rx->qpl_copy_pool[j].page_offset = 0;
			rx->qpl_copy_pool[j].page_address = page_address(page);

			page_ref_add(page, INT_MAX - 1);
			rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX;
		}
	}

	return slots;

	/* Error unwind, reached via goto labels not shown in the matched lines: */
	while (j--) {
		page_ref_sub(rx->qpl_copy_pool[j].page,
			     rx->qpl_copy_pool[j].pagecnt_bias - 1);
		put_page(rx->qpl_copy_pool[j].page);
	}

	/* Do not fully free QPL pages - only remove the bias added in this
	 * ...
	 */
	while (i--)
		page_ref_sub(rx->data.page_info[i].page,
			     rx->data.page_info[i].pagecnt_bias - 1);

	/* ... */
	while (i--)
		gve_rx_free_buffer(&priv->pdev->dev,
				   &rx->data.page_info[i],
				   &rx->data.data_ring[i]);
}

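The comment above notes that each slot's page is split in two; the receive path then alternates between the halves by XORing the buffer offset (see gve_rx_flip_buff() further down). A small sketch of that toggling, assuming 4 KiB pages and a half-page GVE_DEFAULT_RX_BUFFER_OFFSET (not driver code):

#include <assert.h>

int main(void)
{
	unsigned page_size = 4096;	/* assumed PAGE_SIZE */
	unsigned offset = 0;		/* models page_info->page_offset */
	unsigned flip = page_size / 2;	/* stand-in for GVE_DEFAULT_RX_BUFFER_OFFSET */

	offset ^= flip;			/* gve_rx_flip_buff(): use the second half */
	assert(offset == 2048);
	offset ^= flip;			/* flip again: back to the first half */
	assert(offset == 0);
	return 0;
}
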
/* gve_rx_alloc_ring_gqi(..., struct gve_rx_ring *rx, int idx) */
	struct device *hdev = &priv->pdev->dev;
	u32 slots = cfg->ring_size;

	netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");

	memset(rx, 0, sizeof(*rx));

	rx->gve = priv;
	rx->q_num = idx;

	rx->mask = slots - 1;
	rx->data.raw_addressing = cfg->raw_addressing;

	/* alloc rx data ring */
	bytes = sizeof(*rx->data.data_ring) * slots;
	rx->data.data_ring = dma_alloc_coherent(hdev, bytes,
						&rx->data.data_bus,
						GFP_KERNEL);
	if (!rx->data.data_ring)
		return -ENOMEM;

	rx->qpl_copy_pool_mask = min_t(u32, U32_MAX, slots * 2) - 1;
	rx->qpl_copy_pool_head = 0;
	rx->qpl_copy_pool = kvcalloc(rx->qpl_copy_pool_mask + 1,
				     sizeof(rx->qpl_copy_pool[0]),
				     GFP_KERNEL);
	if (!rx->qpl_copy_pool) {
		err = -ENOMEM;
		/* ... */
	}

	if (!rx->data.raw_addressing) {
		qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
		qpl_page_cnt = cfg->ring_size;

		rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id,
							 qpl_page_cnt);
		if (!rx->data.qpl) {
			err = -ENOMEM;
			/* ... */
		}
	}

	filled_pages = gve_rx_prefill_pages(rx, cfg);
	if (filled_pages < 0) {
		err = -ENOMEM;
		/* ... */
	}
	rx->fill_cnt = filled_pages;
	/* Ensure data ring slots (packet buffers) are visible. */
	dma_wmb();

	rx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*rx->q_resources),
				   &rx->q_resources_bus,
				   GFP_KERNEL);
	if (!rx->q_resources) {
		err = -ENOMEM;
		/* ... */
	}
	netif_dbg(priv, drv, priv->dev, "rx[%d]->data.data_bus=%lx\n", idx,
		  (unsigned long)rx->data.data_bus);

	/* alloc rx desc ring */
	bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
	rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
						GFP_KERNEL);
	if (!rx->desc.desc_ring) {
		err = -ENOMEM;
		/* ... */
	}
	rx->db_threshold = slots / 2;
	gve_rx_init_ring_state_gqi(rx);

	rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
	gve_rx_ctx_clear(&rx->ctx);

	return 0;

	/* Error unwind, reached via goto labels not shown in the matched lines: */
	dma_free_coherent(hdev, sizeof(*rx->q_resources),
			  rx->q_resources, rx->q_resources_bus);
	rx->q_resources = NULL;

	gve_rx_unfill_pages(priv, rx, cfg);

	if (!rx->data.raw_addressing) {
		gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
		rx->data.qpl = NULL;
	}

	kvfree(rx->qpl_copy_pool);
	rx->qpl_copy_pool = NULL;

	bytes = sizeof(*rx->data.data_ring) * slots;
	dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);
	rx->data.data_ring = NULL;

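Both the data ring and the copy pool above are sized to powers of two, so "mask = size - 1" turns free-running counters into ring indices with a single AND. A standalone sketch of that indexing (not driver code; the sizes are made up):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t slots = 1024;		/* assumed ring size, a power of two */
	uint32_t mask = slots - 1;	/* like rx->mask or rx->qpl_copy_pool_mask */
	uint32_t cnt = 4294967000u;	/* free-running counter about to wrap */

	for (int i = 0; i < 600; i++, cnt++)
		assert((cnt & mask) < slots);	/* indices stay in range even across wrap */
	return 0;
}
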
/* gve_rx_alloc_rings_gqi() */
	struct gve_rx_ring *rx;

	rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
		      GFP_KERNEL);
	if (!rx)
		return -ENOMEM;

	for (i = 0; i < cfg->qcfg->num_queues; i++) {
		err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc rx ring=%d: err=%d\n",
				  i, err);
			/* ... */
		}
	}

	cfg->rx = rx;

	/* Error unwind: free the rings allocated so far. */
	gve_rx_free_ring_gqi(priv, &rx[j], cfg);
	kvfree(rx);

/* gve_rx_free_rings_gqi() */
	struct gve_rx_ring *rx = cfg->rx;

	if (!rx)
		return;

	for (i = 0; i < cfg->qcfg->num_queues; i++)
		gve_rx_free_ring_gqi(priv, &rx[i], cfg);

	kvfree(rx);
	cfg->rx = NULL;

void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	u32 db_idx = be32_to_cpu(rx->q_resources->db_index);

	iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]);
}

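The doorbell tells the device how many buffers have been posted in total: the absolute fill_cnt is written big-endian into the queue's doorbell register. A userspace sketch of just the byte-order handling (not driver code; the register is modelled by a plain variable):

#include <arpa/inet.h>		/* htonl(), standing in for iowrite32be()'s byte swap */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	volatile uint32_t doorbell_reg;	/* models priv->db_bar2[db_idx] */
	uint32_t fill_cnt = 1536;	/* example: buffers posted so far */

	doorbell_reg = htonl(fill_cnt);	/* device expects the count in big-endian form */
	printf("doorbell register now holds 0x%08" PRIx32 "\n", doorbell_reg);
	return 0;
}
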
/* gve_rx_add_frags() */
	u32 offset = page_info->page_offset + page_info->pad;
	struct sk_buff *skb = ctx->skb_tail;

	/* ... */
		ctx->skb_head = skb;
		ctx->skb_tail = skb;
	/* ... */
	num_frags = skb_shinfo(ctx->skb_tail)->nr_frags;
	/* ... */
		// which is why we do not need to chain by using skb->next
		skb_shinfo(ctx->skb_tail)->frag_list = skb;

		ctx->skb_tail = skb;
	/* ... */

	if (skb != ctx->skb_head) {
		ctx->skb_head->len += len;
		ctx->skb_head->data_len += len;
		ctx->skb_head->truesize += truesize;
	}
	skb_add_rx_frag(skb, num_frags, page_info->page,
			offset, len, truesize);

	return ctx->skb_head;

/* gve_rx_flip_buff() */
	page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;

/* gve_rx_can_recycle_buffer() */
	int pagecount = page_count(page_info->page);

	/* This page is not being used by any SKBs - reuse */
	if (pagecount == page_info->pagecnt_bias)
		return 1;
	/* This page is still being used by an SKB - we can't reuse */
	else if (pagecount > page_info->pagecnt_bias)
		return 0;
	WARN(pagecount < page_info->pagecnt_bias,
	     /* ... */);
	return -1;

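A standalone sketch of the same three-way recycle decision, with the page counts reduced to plain integers (not driver code):

#include <assert.h>

static int can_recycle(int page_count, int bias)
{
	if (page_count == bias)
		return 1;	/* driver owns every reference: reuse the page */
	if (page_count > bias)
		return 0;	/* the stack still holds references: do not reuse */
	return -1;		/* accounting bug: refcount fell below the bias */
}

int main(void)
{
	assert(can_recycle(100, 100) == 1);
	assert(can_recycle(101, 100) == 0);
	assert(can_recycle(99, 100) == -1);
	return 0;
}
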
static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
					   /* ... */)
{
	u32 pool_idx = rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask;
	void *src = page_info->page_address + page_info->page_offset;
	struct gve_rx_ctx *ctx = &rx->ctx;

	copy_page_info = &rx->qpl_copy_pool[pool_idx];
	if (!copy_page_info->can_flip) {
		/* ... */
			gve_schedule_reset(rx->gve);
		/* ... */
	}

	/* Fresh-page path: the least recently used copy page is still busy. */
		/* ...
		 * on alleviates head-of-line blocking.
		 */
		rx->qpl_copy_pool_head++;
		/* ... */
		alloc_page_info.pad = page_info->pad;

		memcpy(alloc_page_info.page_address, src, page_info->pad + len);
		/* ... */

		u64_stats_update_begin(&rx->statss);
		rx->rx_frag_copy_cnt++;
		rx->rx_frag_alloc_cnt++;
		u64_stats_update_end(&rx->statss);
	/* ... */

	/* Copy-pool path: */
	dst = copy_page_info->page_address + copy_page_info->page_offset;
	memcpy(dst, src, page_info->pad + len);
	copy_page_info->pad = page_info->pad;

	skb = gve_rx_add_frags(napi, copy_page_info,
			       rx->packet_buffer_size, len, ctx);
	/* ... */
	copy_page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;

	if (copy_page_info->can_flip) {
		/* Both halves of this copy page have been used; rotate it to
		 * the back of the pool.
		 */
		copy_page_info->can_flip = false;
		rx->qpl_copy_pool_head++;
		prefetch(rx->qpl_copy_pool[rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask].page);
	} else {
		copy_page_info->can_flip = true;
	}

	u64_stats_update_begin(&rx->statss);
	rx->rx_frag_copy_cnt++;
	u64_stats_update_end(&rx->statss);

	return skb;
}

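The copy pool behaves as a power-of-two ring of pages whose head only advances once both halves of the current page have been used (can_flip toggles between the halves). A standalone model of that rotation (not driver code; pool size and half size are assumptions):

#include <stdbool.h>
#include <stdio.h>

struct copy_page { bool can_flip; unsigned offset; };

int main(void)
{
	struct copy_page pool[8] = { 0 };	/* assumed pool size, power of two */
	unsigned head = 0, mask = 8 - 1;
	unsigned half = 2048;			/* assumed half-page buffer size */

	for (int pkt = 0; pkt < 6; pkt++) {
		struct copy_page *p = &pool[head & mask];

		p->offset ^= half;		/* next copy lands in the other half */
		if (p->can_flip) {		/* both halves used: rotate the page */
			p->can_flip = false;
			head++;
		} else {
			p->can_flip = true;
		}
	}
	printf("pool head after 6 packets: %u\n", head);	/* prints 3 */
	return 0;
}
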
/* gve_rx_qpl(..., struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info, ...) */
	struct gve_rx_ctx *ctx = &rx->ctx;

	if (page_info->can_flip) {
		skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
		/* ... */
			gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
		/* ... */
	} else {
		skb = gve_rx_copy_to_pool(rx, page_info, len, napi);
	}

static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
				  /* ... */)
{
	struct net_device *netdev = priv->dev;
	struct gve_rx_ctx *ctx = &rx->ctx;

	if (len <= priv->rx_copybreak && is_only_frag) {
		/* ... */
			u64_stats_update_begin(&rx->statss);
			rx->rx_copied_pkt++;
			rx->rx_frag_copy_cnt++;
			rx->rx_copybreak_pkt++;
			u64_stats_update_end(&rx->statss);
		/* ... */
	} else {
		/* ... */
		page_info->can_flip = recycle;
		if (page_info->can_flip) {
			u64_stats_update_begin(&rx->statss);
			rx->rx_frag_flip_cnt++;
			u64_stats_update_end(&rx->statss);
		}

		if (rx->data.raw_addressing) {
			skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
						    /* ... */
						    rx->packet_buffer_size, ctx);
		} else {
			skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
					 /* ... */);
		}
	}
	return skb;
}

/* gve_xsk_pool_redirect(..., struct gve_rx_ring *rx, ...) */
	if (rx->xsk_pool->frame_len < len)
		return -E2BIG;
	xdp = xsk_buff_alloc(rx->xsk_pool);
	if (!xdp) {
		u64_stats_update_begin(&rx->statss);
		rx->xdp_alloc_fails++;
		u64_stats_update_end(&rx->statss);
		return -ENOMEM;
	}
	xdp->data_end = xdp->data + len;
	memcpy(xdp->data, data, len);
	/* ... */

static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx,
			    /* ... */)
{
	int total_len, len = orig->data_end - orig->data;

	if (rx->xsk_pool)
		return gve_xsk_pool_redirect(dev, rx, orig->data,
					     /* ... */);

	frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC);
	if (!frame) {
		u64_stats_update_begin(&rx->statss);
		rx->xdp_alloc_fails++;
		u64_stats_update_end(&rx->statss);
		return -ENOMEM;
	}
	xdp_init_buff(&new, total_len, &rx->xdp_rxq);
	/* ... */
	memcpy(new.data, orig->data, len);
	/* ... */

static void gve_xdp_done(struct gve_priv *priv, struct gve_rx_ring *rx,
			 /* ... */)
{
	/* ... */
		tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
		tx = &priv->tx[tx_qid];
		spin_lock(&tx->xdp_lock);
		err = gve_xdp_xmit_one(priv, tx, xdp->data,
				       xdp->data_end - xdp->data, NULL);
		spin_unlock(&tx->xdp_lock);

		/* ... */
			u64_stats_update_begin(&rx->statss);
			rx->xdp_tx_errors++;
			u64_stats_update_end(&rx->statss);
		/* ... */

		err = gve_xdp_redirect(priv->dev, rx, xdp, xprog);

		/* ... */
			u64_stats_update_begin(&rx->statss);
			rx->xdp_redirect_errors++;
			u64_stats_update_end(&rx->statss);
		/* ... */

	u64_stats_update_begin(&rx->statss);
	/* ... */
	rx->xdp_actions[xdp_act]++;
	u64_stats_update_end(&rx->statss);
}

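gve_xdp_done() is the driver's XDP verdict handler: XDP_TX bounces the frame out a paired XDP transmit queue, XDP_REDIRECT hands it to another target, and the chosen action is counted per ring. A generic standalone sketch of that dispatch shape (not driver code; the enum and messages are placeholders):

#include <stdio.h>

enum verdict_model { ACT_DROP, ACT_PASS, ACT_TX, ACT_REDIRECT };

static void handle_verdict(enum verdict_model act)
{
	switch (act) {
	case ACT_TX:		/* transmit on the paired XDP TX queue */
		puts("XDP_TX: send the frame back out this NIC");
		break;
	case ACT_REDIRECT:	/* hand the frame to another device or socket */
		puts("XDP_REDIRECT: e.g. to an AF_XDP socket");
		break;
	case ACT_PASS:		/* continue on the normal skb path */
		puts("XDP_PASS: build an skb and pass it up the stack");
		break;
	default:		/* XDP_DROP and anything unexpected */
		puts("drop and count");
	}
}

int main(void) { handle_verdict(ACT_TX); return 0; }
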
static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
		   /* ... */)
{
	bool is_last_frag = !GVE_PKTCONT_BIT_IS_SET(desc->flags_seq);
	u16 frag_size = be16_to_cpu(desc->len);
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct gve_priv *priv = rx->gve;
	/* ... */
	struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
	bool is_first_frag = ctx->frag_cnt == 0;

	if (unlikely(ctx->drop_pkt))
		/* ... */;

	if (desc->flags_seq & GVE_RXF_ERR) {
		ctx->drop_pkt = true;
		cnts->desc_err_pkt_cnt++;
		/* ... */
	}

	if (unlikely(frag_size > rx->packet_buffer_size)) {
		netdev_warn(priv->dev, "Unexpected frag size %d, can't exceed %d, scheduling reset",
			    frag_size, rx->packet_buffer_size);
		ctx->drop_pkt = true;
		/* ... */
		gve_schedule_reset(rx->gve);
		/* ... */
	}

	/* Prefetch the buffer two slots ahead of the one being processed. */
	page_info = &rx->data.page_info[(idx + 2) & rx->mask];
	va = page_info->page_address + page_info->page_offset;
	prefetch(page_info->page); /* Kernel page struct. */
	/* ... */

	page_info = &rx->data.page_info[idx];
	data_slot = &rx->data.data_ring[idx];
	page_bus = (rx->data.raw_addressing) ?
		   be64_to_cpu(data_slot->addr) - page_info->page_offset :
		   rx->data.qpl->page_buses[idx];
	dma_sync_single_for_cpu(&priv->pdev->dev, page_bus,
				PAGE_SIZE, DMA_FROM_DEVICE);
	page_info->pad = is_first_frag ? GVE_RX_PAD : 0;
	len -= page_info->pad;
	frag_size -= page_info->pad;

	xprog = READ_ONCE(priv->xdp_prog);
	if (xprog && is_only_frag) {
		/* ... */
		xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
		xdp_prepare_buff(&xdp, page_info->page_address +
				 page_info->page_offset, GVE_RX_PAD,
				 len, false);
		/* ... */
			gve_xdp_done(priv, rx, &xdp, xprog, xdp_act);
			ctx->total_size += frag_size;
		/* ... */

		page_info->pad += xdp.data - old_data;
		len = xdp.data_end - xdp.data;

		u64_stats_update_begin(&rx->statss);
		rx->xdp_actions[XDP_PASS]++;
		u64_stats_update_end(&rx->statss);
	}

	skb = gve_rx_skb(priv, rx, page_info, napi, len,
			 data_slot, is_only_frag);
	if (!skb) {
		u64_stats_update_begin(&rx->statss);
		rx->rx_skb_alloc_fail++;
		u64_stats_update_end(&rx->statss);
		/* ... */
		ctx->drop_pkt = true;
		/* ... */
	}
	ctx->total_size += frag_size;

	/* ... */
	if (desc->csum)
		skb->ip_summed = CHECKSUM_COMPLETE;
	else
		skb->ip_summed = CHECKSUM_NONE;
	skb->csum = csum_unfold(desc->csum);

	if (likely(feat & NETIF_F_RXHASH) &&
	    gve_needs_rss(desc->flags_seq))
		skb_set_hash(skb, be32_to_cpu(desc->rss_hash),
			     gve_rss_type(desc->flags_seq));

	skb_record_rx_queue(skb, rx->q_num);
	/* ... */

	/* Accounting for a completed packet. */
	cnts->ok_pkt_bytes += ctx->total_size;
	cnts->ok_pkt_cnt++;

	/* Per-fragment accounting. */
	ctx->frag_cnt++;
	if (is_last_frag) {
		cnts->total_pkt_cnt++;
		cnts->cont_pkt_cnt += (ctx->frag_cnt > 1);
		/* ... */
	}
}

bool gve_rx_work_pending(struct gve_rx_ring *rx)
{
	/* ... */
	next_idx = rx->cnt & rx->mask;
	desc = rx->desc.desc_ring + next_idx;

	flags_seq = desc->flags_seq;

	return (GVE_SEQNO(flags_seq) == rx->desc.seqno);
}

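Descriptor readiness is detected by comparing the descriptor's sequence field with the ring's expected seqno (which gve_rx_init_ring_state_gqi() starts at 1) rather than by an ownership bit. The sketch below assumes the counter skips zero and wraps from 7 back to 1, which matches the behaviour implied above but is an assumption about the exact field width (not driver code):

#include <assert.h>
#include <stdint.h>

static uint8_t next_seqno(uint8_t seq)
{
	return seq == 7 ? 1 : seq + 1;	/* never yields 0 */
}

int main(void)
{
	uint8_t expected = 1;	/* rx->desc.seqno starts at 1 */
	uint8_t on_wire = 0;	/* seqno of a descriptor the device has not written */

	assert(on_wire != expected);		/* no work pending yet */

	on_wire = expected;			/* device writes the descriptor */
	assert(on_wire == expected);		/* gve_rx_work_pending() is now true */
	expected = next_seqno(expected);	/* advance after consuming it */
	return 0;
}
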
static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	int refill_target = rx->mask + 1;
	u32 fill_cnt = rx->fill_cnt;

	while (fill_cnt - rx->cnt < refill_target) {
		u32 idx = fill_cnt & rx->mask;

		page_info = &rx->data.page_info[idx];
		if (page_info->can_flip) {
			/* The other half of the page is free: flip to it. */
			union gve_rx_data_slot *data_slot =
				&rx->data.data_ring[idx];

			gve_rx_flip_buff(page_info, &data_slot->addr);
			page_info->can_flip = 0;
		} else {
			/* ...
			 * Flipping is unnecessary here - if the networking stack still
			 * ...
			 */
			/* ... */
				if (!rx->data.raw_addressing)
					gve_schedule_reset(priv);
			/* ... */

			/* We can't reuse the buffer - alloc a new one*/
			union gve_rx_data_slot *data_slot =
				&rx->data.data_ring[idx];
			struct device *dev = &priv->pdev->dev;

			page_info->page = NULL;
			if (gve_rx_alloc_buffer(priv, dev, page_info,
						data_slot, rx)) {
				break;
			}
		}
		/* ... */
	}
	rx->fill_cnt = fill_cnt;
	return true;
}

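fill_cnt and cnt are free-running u32 counters, so "fill_cnt - cnt" gives the number of posted-but-unprocessed buffers even across 32-bit wraparound, which is what the refill loop above compares against the ring size. A standalone sketch of that arithmetic (not driver code; the numbers are invented):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t slots = 1024;			/* assumed ring size */
	uint32_t cnt = UINT32_MAX - 5;		/* buffers consumed, about to wrap */
	uint32_t fill_cnt = cnt + 700;		/* buffers posted, already wrapped */

	uint32_t outstanding = fill_cnt - cnt;	/* well-defined unsigned arithmetic */
	assert(outstanding == 700);
	assert(outstanding < slots);		/* room left to refill up to "slots" */
	return 0;
}
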
static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
			     netdev_features_t feat)
{
	u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
	u64 xdp_txs = rx->xdp_actions[XDP_TX];
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct gve_priv *priv = rx->gve;
	/* ... */
	u32 idx = rx->cnt & rx->mask;
	/* ... */
	struct gve_rx_desc *desc = &rx->desc.desc_ring[idx];
	/* ... */

	while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
	       (work_done < budget || ctx->frag_cnt)) {
		next_desc = &rx->desc.desc_ring[(idx + 1) & rx->mask];
		/* ... */
		gve_rx(rx, feat, desc, idx, &cnts);

		rx->cnt++;
		idx = rx->cnt & rx->mask;
		desc = &rx->desc.desc_ring[idx];
		rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
		/* ... */
	}

	/* The descriptor ring ran dry in the middle of a multi-fragment packet. */
	if (unlikely(ctx->frag_cnt)) {
		struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;

		/* ... */
		gve_rx_ctx_clear(&rx->ctx);
		netdev_warn(priv->dev, "Unexpected seq number %d with incomplete packet, expected %d, scheduling reset",
			    GVE_SEQNO(desc->flags_seq), rx->desc.seqno);
		gve_schedule_reset(rx->gve);
	}

	if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold)
		return 0;

	/* ... */
		u64_stats_update_begin(&rx->statss);
		rx->rpackets += cnts.ok_pkt_cnt;
		rx->rbytes += cnts.ok_pkt_bytes;
		rx->rx_cont_packet_cnt += cnts.cont_pkt_cnt;
		rx->rx_desc_err_dropped_pkt += cnts.desc_err_pkt_cnt;
		u64_stats_update_end(&rx->statss);
	/* ... */

	if (xdp_txs != rx->xdp_actions[XDP_TX])
		gve_xdp_tx_flush(priv, rx->q_num);

	if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
		/* ... */;

	/* restock ring slots */
	if (!rx->data.raw_addressing) {
		/* ... */
		rx->fill_cnt += work_done;
	} else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
		/* ... */
		if (!gve_rx_refill_buffers(priv, rx))
			return 0;

		/* If the buffers could not be completely refilled, ring the
		 * doorbell so this queue is scheduled again to finish refilling.
		 */
		if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
			gve_rx_write_doorbell(priv, rx);
			return budget;
		}
	}

	gve_rx_write_doorbell(priv, rx);
	/* ... */
}

/* gve_rx_poll() */
	struct gve_rx_ring *rx = block->rx;

	feat = block->napi.dev->features;
	/* ... */
	work_done = gve_clean_rx_done(rx, budget, feat);