Lines Matching +full:dma +full:safe +full:map
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2023-2024 Google LLC
38 struct gve_rx_ring *rx = &priv->rx[i]; in gve_rx_free_ring_gqi()
40 if (rx->page_info != NULL) { in gve_rx_free_ring_gqi()
41 free(rx->page_info, M_GVE); in gve_rx_free_ring_gqi()
42 rx->page_info = NULL; in gve_rx_free_ring_gqi()
45 if (rx->data_ring != NULL) { in gve_rx_free_ring_gqi()
46 gve_dma_free_coherent(&rx->data_ring_mem); in gve_rx_free_ring_gqi()
47 rx->data_ring = NULL; in gve_rx_free_ring_gqi()
50 if (rx->desc_ring != NULL) { in gve_rx_free_ring_gqi()
51 gve_dma_free_coherent(&rx->desc_ring_mem); in gve_rx_free_ring_gqi()
52 rx->desc_ring = NULL; in gve_rx_free_ring_gqi()
59 struct gve_rx_ring *rx = &priv->rx[i]; in gve_rx_free_ring()
60 struct gve_ring_com *com = &rx->com; in gve_rx_free_ring()
62 /* Safe to call even if never allocated */ in gve_rx_free_ring()
63 gve_free_counters((counter_u64_t *)&rx->stats, NUM_RX_STATS); in gve_rx_free_ring()
70 if (com->q_resources != NULL) { in gve_rx_free_ring()
71 gve_dma_free_coherent(&com->q_resources_mem); in gve_rx_free_ring()
72 com->q_resources = NULL; in gve_rx_free_ring()
79 struct gve_ring_com *com = &rx->com; in gve_prefill_rx_slots()
80 struct gve_dma_handle *dma; in gve_prefill_rx_slots() local
83 for (i = 0; i < com->priv->rx_desc_cnt; i++) { in gve_prefill_rx_slots()
84 rx->data_ring[i].qpl_offset = htobe64(PAGE_SIZE * i); in gve_prefill_rx_slots()
85 rx->page_info[i].page_offset = 0; in gve_prefill_rx_slots()
86 rx->page_info[i].page_address = com->qpl->dmas[i].cpu_addr; in gve_prefill_rx_slots()
87 rx->page_info[i].page = com->qpl->pages[i]; in gve_prefill_rx_slots()
89 dma = &com->qpl->dmas[i]; in gve_prefill_rx_slots()
90 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREREAD); in gve_prefill_rx_slots()
93 bus_dmamap_sync(rx->data_ring_mem.tag, rx->data_ring_mem.map, in gve_prefill_rx_slots()
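
The prefill loop above shows the driver's RX DMA-sync pattern: each QPL page
gets BUS_DMASYNC_PREREAD before it is posted, since the NIC will DMA packet
data into it for the CPU to read later, and the data ring holding the
big-endian qpl_offset slots is synced before the device reads it. The op
argument of that last call sits on an unmatched continuation line; the
completed call would presumably be the sketch below, an assumption rather
than something this listing confirms:

	bus_dmamap_sync(rx->data_ring_mem.tag, rx->data_ring_mem.map,
	    BUS_DMASYNC_PREWRITE);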
100 struct gve_rx_ring *rx = &priv->rx[i]; in gve_rx_alloc_ring_gqi()
101 struct gve_ring_com *com = &rx->com; in gve_rx_alloc_ring_gqi()
105 sizeof(struct gve_rx_desc) * priv->rx_desc_cnt, in gve_rx_alloc_ring_gqi()
106 CACHE_LINE_SIZE, &rx->desc_ring_mem); in gve_rx_alloc_ring_gqi()
108 device_printf(priv->dev, in gve_rx_alloc_ring_gqi()
113 rx->mask = priv->rx_pages_per_qpl - 1; in gve_rx_alloc_ring_gqi()
114 rx->desc_ring = rx->desc_ring_mem.cpu_addr; in gve_rx_alloc_ring_gqi()
116 com->qpl = &priv->qpls[priv->tx_cfg.max_queues + i]; in gve_rx_alloc_ring_gqi()
117 if (com->qpl == NULL) { in gve_rx_alloc_ring_gqi()
118 device_printf(priv->dev, "No QPL left for rx ring %d", i); in gve_rx_alloc_ring_gqi()
122 rx->page_info = malloc(priv->rx_desc_cnt * sizeof(*rx->page_info), in gve_rx_alloc_ring_gqi()
126 sizeof(union gve_rx_data_slot) * priv->rx_desc_cnt, in gve_rx_alloc_ring_gqi()
127 CACHE_LINE_SIZE, &rx->data_ring_mem); in gve_rx_alloc_ring_gqi()
129 device_printf(priv->dev, in gve_rx_alloc_ring_gqi()
133 rx->data_ring = rx->data_ring_mem.cpu_addr; in gve_rx_alloc_ring_gqi()
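
A note on the allocation above: the descriptor ring and the data-slot ring
are DMA-coherent buffers (the calls whose opening lines are not matched here
are presumably gve_dma_alloc_coherent(), the counterpart of the
gve_dma_free_coherent() calls in the free path), while the packet buffers
come from a pre-registered queue page list (QPL). RX QPLs sit after the TX
ones, hence the priv->tx_cfg.max_queues + i index; page_info[] is plain
malloc()ed bookkeeping and is not DMA-mapped.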
146 struct gve_rx_ring *rx = &priv->rx[i]; in gve_rx_alloc_ring()
147 struct gve_ring_com *com = &rx->com; in gve_rx_alloc_ring()
150 com->priv = priv; in gve_rx_alloc_ring()
151 com->id = i; in gve_rx_alloc_ring()
153 gve_alloc_counters((counter_u64_t *)&rx->stats, NUM_RX_STATS); in gve_rx_alloc_ring()
156 PAGE_SIZE, &com->q_resources_mem); in gve_rx_alloc_ring()
158 device_printf(priv->dev, in gve_rx_alloc_ring()
162 com->q_resources = com->q_resources_mem.cpu_addr; in gve_rx_alloc_ring()
184 priv->rx = malloc(sizeof(struct gve_rx_ring) * priv->rx_cfg.num_queues, in gve_alloc_rx_rings()
187 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_alloc_rx_rings()
196 while (i--) in gve_alloc_rx_rings()
198 free(priv->rx, M_GVE); in gve_alloc_rx_rings()
207 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_free_rx_rings()
210 free(priv->rx, M_GVE); in gve_free_rx_rings()
216 struct gve_priv *priv = rx->com.priv; in gve_rx_clear_data_ring()
222 * established initially by gve_prefill_rx_slots at alloc-time and is in gve_rx_clear_data_ring()
228 for (i = 0; i < priv->rx_desc_cnt; i++) { in gve_rx_clear_data_ring()
229 rx->data_ring[i].qpl_offset = htobe64(PAGE_SIZE * i + in gve_rx_clear_data_ring()
230 rx->page_info[i].page_offset); in gve_rx_clear_data_ring()
231 rx->fill_cnt++; in gve_rx_clear_data_ring()
234 bus_dmamap_sync(rx->data_ring_mem.tag, rx->data_ring_mem.map, in gve_rx_clear_data_ring()
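
The re-posted qpl_offset here adds the slot's current page_offset rather than
assuming offset 0: the page/offset ownership established by
gve_prefill_rx_slots() (and later toggled by gve_rx_flip_buff()) appears to
survive a ring reset, so honoring page_offset keeps the NIC away from the
half-page the stack may still hold. The bus_dmamap_sync() op is on an
unmatched line, presumably BUS_DMASYNC_PREWRITE, making the refreshed slots
visible to the device.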
241 struct gve_priv *priv = rx->com.priv; in gve_rx_clear_desc_ring()
244 for (i = 0; i < priv->rx_desc_cnt; i++) in gve_rx_clear_desc_ring()
245 rx->desc_ring[i] = (struct gve_rx_desc){}; in gve_rx_clear_desc_ring()
247 bus_dmamap_sync(rx->desc_ring_mem.tag, rx->desc_ring_mem.map, in gve_rx_clear_desc_ring()
254 struct gve_rx_ring *rx = &priv->rx[i]; in gve_clear_rx_ring()
261 rx->seq_no = 1; in gve_clear_rx_ring()
262 rx->cnt = 0; in gve_clear_rx_ring()
263 rx->fill_cnt = 0; in gve_clear_rx_ring()
264 rx->mask = priv->rx_desc_cnt - 1; in gve_clear_rx_ring()
273 struct gve_rx_ring *rx = &priv->rx[i]; in gve_start_rx_ring()
274 struct gve_ring_com *com = &rx->com; in gve_start_rx_ring()
276 if ((if_getcapenable(priv->ifp) & IFCAP_LRO) != 0) { in gve_start_rx_ring()
277 if (tcp_lro_init(&rx->lro) != 0) in gve_start_rx_ring()
278 device_printf(priv->dev, "Failed to init lro for rx ring %d", i); in gve_start_rx_ring()
279 rx->lro.ifp = priv->ifp; in gve_start_rx_ring()
283 NET_TASK_INIT(&com->cleanup_task, 0, gve_rx_cleanup_tq, rx); in gve_start_rx_ring()
285 NET_TASK_INIT(&com->cleanup_task, 0, gve_rx_cleanup_tq_dqo, rx); in gve_start_rx_ring()
286 com->cleanup_tq = taskqueue_create_fast("gve rx", M_WAITOK, in gve_start_rx_ring()
287 taskqueue_thread_enqueue, &com->cleanup_tq); in gve_start_rx_ring()
289 taskqueue_start_threads(&com->cleanup_tq, 1, PI_NET, in gve_start_rx_ring()
290 "%s rxq %d", device_get_nameunit(priv->dev), i); in gve_start_rx_ring()
294 gve_db_bar_write_4(priv, com->db_offset, rx->fill_cnt); in gve_start_rx_ring()
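
Ring start in the lines above: completions are handled from a per-ring fast
taskqueue (the GQI or DQO cleanup handler is chosen at init), and the ring's
doorbell is written with rx->fill_cnt so the NIC knows how many buffers have
been posted. gve_db_bar_write_4() is the driver's 4-byte write into the
doorbell BAR at the byte offset computed in gve_create_rx_rings() below.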
310 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_create_rx_rings()
313 err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues); in gve_create_rx_rings()
317 bus_dmamap_sync(priv->irqs_db_mem.tag, priv->irqs_db_mem.map, in gve_create_rx_rings()
320 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_create_rx_rings()
321 rx = &priv->rx[i]; in gve_create_rx_rings()
322 com = &rx->com; in gve_create_rx_rings()
324 com->irq_db_offset = 4 * be32toh(priv->irq_db_indices[com->ntfy_id].index); in gve_create_rx_rings()
326 bus_dmamap_sync(com->q_resources_mem.tag, com->q_resources_mem.map, in gve_create_rx_rings()
328 com->db_offset = 4 * be32toh(com->q_resources->db_index); in gve_create_rx_rings()
329 com->counter_idx = be32toh(com->q_resources->counter_index); in gve_create_rx_rings()
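
After the admin queue creates the queues, the device has written q_resources,
so the host syncs that region before reading it (the op is on an unmatched
line, presumably BUS_DMASYNC_POSTREAD). db_index and counter_index are stored
big-endian and index arrays of 32-bit doorbells and counters, which is why
the byte offsets are 4 * be32toh(...); the per-IRQ doorbell indices synced
from irqs_db_mem are handled the same way.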
341 struct gve_rx_ring *rx = &priv->rx[i]; in gve_stop_rx_ring()
342 struct gve_ring_com *com = &rx->com; in gve_stop_rx_ring()
344 if (com->cleanup_tq != NULL) { in gve_stop_rx_ring()
345 taskqueue_quiesce(com->cleanup_tq); in gve_stop_rx_ring()
346 taskqueue_free(com->cleanup_tq); in gve_stop_rx_ring()
347 com->cleanup_tq = NULL; in gve_stop_rx_ring()
350 tcp_lro_free(&rx->lro); in gve_stop_rx_ring()
351 rx->ctx = (struct gve_rx_ctx){}; in gve_stop_rx_ring()
360 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_destroy_rx_rings()
364 err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues); in gve_destroy_rx_rings()
377 struct gve_priv *priv = rx->com.priv; in gve_rx_intr()
378 struct gve_ring_com *com = &rx->com; in gve_rx_intr()
380 if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0)) in gve_rx_intr()
383 gve_db_bar_write_4(priv, com->irq_db_offset, GVE_IRQ_MASK); in gve_rx_intr()
384 taskqueue_enqueue(rx->com.cleanup_tq, &rx->com.cleanup_task); in gve_rx_intr()
416 page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET; in gve_rx_flip_buff()
425 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx_create_mbuf()
430 uint32_t offset = page_info->page_offset + page_info->pad; in gve_rx_create_mbuf()
431 void *va = (char *)page_info->page_address + offset; in gve_rx_create_mbuf()
433 if (len <= priv->rx_copybreak && is_only_frag) { in gve_rx_create_mbuf()
440 counter_u64_add_protected(rx->stats.rx_copybreak_cnt, 1); in gve_rx_create_mbuf()
442 ctx->mbuf_head = mbuf; in gve_rx_create_mbuf()
443 ctx->mbuf_tail = mbuf; in gve_rx_create_mbuf()
445 struct mbuf *mbuf_tail = ctx->mbuf_tail; in gve_rx_create_mbuf()
461 ref_count = atomic_load_int(&page_info->page->ref_count); in gve_rx_create_mbuf()
470 ctx->mbuf_head = mbuf; in gve_rx_create_mbuf()
471 ctx->mbuf_tail = mbuf; in gve_rx_create_mbuf()
478 mbuf_tail->m_next = mbuf; in gve_rx_create_mbuf()
479 ctx->mbuf_tail = mbuf; in gve_rx_create_mbuf()
487 page_info->page, page_info->page_address, in gve_rx_create_mbuf()
491 counter_u64_add_protected(rx->stats.rx_frag_flip_cnt, 1); in gve_rx_create_mbuf()
498 vm_page_wire(page_info->page); in gve_rx_create_mbuf()
500 gve_rx_flip_buff(page_info, &data_slot->qpl_offset); in gve_rx_create_mbuf()
504 counter_u64_add_protected(rx->stats.rx_frag_copy_cnt, 1); in gve_rx_create_mbuf()
509 mbuf->m_len = len; in gve_rx_create_mbuf()
510 ctx->total_size += len; in gve_rx_create_mbuf()
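
Two mbuf paths are visible above. A small single-fragment packet
(len <= priv->rx_copybreak) is copied into an ordinary mbuf and counted under
rx_copybreak_cnt. A larger fragment avoids the copy: the backing QPL page is
wired and attached to the mbuf as external storage, and gve_rx_flip_buff()
XORs page_offset with GVE_DEFAULT_RX_BUFFER_OFFSET (presumably half a page)
in both page_info and the device-visible qpl_offset, so the other half of the
page can be re-posted while the stack still holds this one. The ref_count
load appears to gate the flip: if the page's wire count shows the previous
buffer is still in use, the driver falls back to copying, as the
rx_frag_flip_cnt / rx_frag_copy_cnt counters suggest.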
532 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx()
534 if_t ifp = priv->ifp; in gve_rx()
538 bool is_first_frag = ctx->frag_cnt == 0; in gve_rx()
539 bool is_last_frag = !(GVE_RXF_PKT_CONT & desc->flags_seq); in gve_rx()
542 if (__predict_false(ctx->drop_pkt)) in gve_rx()
545 if ((desc->flags_seq & GVE_RXF_ERR) != 0) { in gve_rx()
546 ctx->drop_pkt = true; in gve_rx()
548 counter_u64_add_protected(rx->stats.rx_dropped_pkt_desc_err, 1); in gve_rx()
549 counter_u64_add_protected(rx->stats.rx_dropped_pkt, 1); in gve_rx()
551 m_freem(ctx->mbuf_head); in gve_rx()
555 page_info = &rx->page_info[idx]; in gve_rx()
556 data_slot = &rx->data_ring[idx]; in gve_rx()
557 page_dma_handle = &(rx->com.qpl->dmas[idx]); in gve_rx()
559 page_info->pad = is_first_frag ? GVE_RX_PAD : 0; in gve_rx()
560 len = be16toh(desc->len) - page_info->pad; in gve_rx()
562 bus_dmamap_sync(page_dma_handle->tag, page_dma_handle->map, in gve_rx()
568 ctx->drop_pkt = true; in gve_rx()
570 counter_u64_add_protected(rx->stats.rx_dropped_pkt_mbuf_alloc_fail, 1); in gve_rx()
571 counter_u64_add_protected(rx->stats.rx_dropped_pkt, 1); in gve_rx()
573 m_freem(ctx->mbuf_head); in gve_rx()
578 mbuf->m_pkthdr.rcvif = priv->ifp; in gve_rx()
579 ctx->is_tcp = desc->flags_seq & GVE_RXF_TCP; in gve_rx()
581 if (gve_needs_rss(desc->flags_seq)) { in gve_rx()
582 gve_set_rss_type(desc->flags_seq, mbuf); in gve_rx()
583 mbuf->m_pkthdr.flowid = be32toh(desc->rss_hash); in gve_rx()
586 if ((desc->csum != 0) && ((desc->flags_seq & GVE_RXF_FRAG) == 0)) { in gve_rx()
587 mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | in gve_rx()
591 mbuf->m_pkthdr.csum_data = 0xffff; in gve_rx()
596 mbuf = ctx->mbuf_head; in gve_rx()
597 mbuf->m_pkthdr.len = ctx->total_size; in gve_rx()
600 if (((if_getcapenable(priv->ifp) & IFCAP_LRO) != 0) && /* LRO is enabled */ in gve_rx()
601 (ctx->is_tcp) && /* pkt is a TCP pkt */ in gve_rx()
602 ((mbuf->m_pkthdr.csum_flags & CSUM_DATA_VALID) != 0) && /* NIC verified csum */ in gve_rx()
603 (rx->lro.lro_cnt != 0) && /* LRO resources exist */ in gve_rx()
604 (tcp_lro_rx(&rx->lro, mbuf, 0) == 0)) in gve_rx()
611 counter_u64_add_protected(rx->stats.rbytes, ctx->total_size); in gve_rx()
612 counter_u64_add_protected(rx->stats.rpackets, 1); in gve_rx()
617 ctx->frag_cnt++; in gve_rx()
619 rx->ctx = (struct gve_rx_ctx){}; in gve_rx()
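
The per-fragment receive path: the backing page is synced before the CPU
touches the payload (op on an unmatched line, presumably
BUS_DMASYNC_POSTREAD), the fragment becomes an mbuf via gve_rx_create_mbuf(),
and the first fragment picks up rcvif, the RSS hash/type and checksum flags
from the descriptor. On the last fragment the completed chain is fed to LRO
when IFCAP_LRO is enabled, the packet is TCP and the NIC validated the
checksum; otherwise it is handed straight to the stack. A descriptor error or
mbuf allocation failure sets ctx->drop_pkt so the packet's remaining
fragments are discarded, and the context is reset once the last fragment is
seen.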
629 next_idx = rx->cnt & rx->mask; in gve_rx_work_pending()
630 desc = rx->desc_ring + next_idx; in gve_rx_work_pending()
632 flags_seq = desc->flags_seq; in gve_rx_work_pending()
634 return (GVE_SEQNO(flags_seq) == rx->seq_no); in gve_rx_work_pending()
646 uint32_t idx = rx->cnt & rx->mask; in gve_rx_cleanup()
648 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx_cleanup()
653 bus_dmamap_sync(rx->desc_ring_mem.tag, rx->desc_ring_mem.map, in gve_rx_cleanup()
655 desc = &rx->desc_ring[idx]; in gve_rx_cleanup()
657 while ((work_done < budget || ctx->frag_cnt) && in gve_rx_cleanup()
658 (GVE_SEQNO(desc->flags_seq) == rx->seq_no)) { in gve_rx_cleanup()
662 rx->cnt++; in gve_rx_cleanup()
663 idx = rx->cnt & rx->mask; in gve_rx_cleanup()
664 desc = &rx->desc_ring[idx]; in gve_rx_cleanup()
665 rx->seq_no = gve_next_seqno(rx->seq_no); in gve_rx_cleanup()
670 if (__predict_false(ctx->frag_cnt)) { in gve_rx_cleanup()
671 m_freem(ctx->mbuf_head); in gve_rx_cleanup()
672 rx->ctx = (struct gve_rx_ctx){}; in gve_rx_cleanup()
673 device_printf(priv->dev, in gve_rx_cleanup()
675 GVE_SEQNO(desc->flags_seq), rx->seq_no); in gve_rx_cleanup()
680 tcp_lro_flush_all(&rx->lro); in gve_rx_cleanup()
682 bus_dmamap_sync(rx->data_ring_mem.tag, rx->data_ring_mem.map, in gve_rx_cleanup()
686 rx->fill_cnt += work_done; in gve_rx_cleanup()
687 gve_db_bar_write_4(priv, rx->com.db_offset, rx->fill_cnt); in gve_rx_cleanup()
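
The cleanup loop syncs the descriptor ring before reading flags_seq (op on an
unmatched line, presumably BUS_DMASYNC_POSTREAD). A descriptor belongs to the
host only while GVE_SEQNO(desc->flags_seq) matches rx->seq_no, which the
driver advances with gve_next_seqno() as it consumes completions; the loop
runs up to "budget" descriptors but always finishes a partially received
packet (the ctx->frag_cnt term). Afterwards the data ring is synced so the
re-posted slots are device-visible and the doorbell is written with the new
cumulative fill_cnt.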
694 struct gve_priv *priv = rx->com.priv; in gve_rx_cleanup_tq()
696 if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0)) in gve_rx_cleanup_tq()
701 gve_db_bar_write_4(priv, rx->com.irq_db_offset, in gve_rx_cleanup_tq()
712 gve_db_bar_write_4(priv, rx->com.irq_db_offset, GVE_IRQ_MASK); in gve_rx_cleanup_tq()
713 taskqueue_enqueue(rx->com.cleanup_tq, &rx->com.cleanup_task); in gve_rx_cleanup_tq()
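
Interrupt handoff: gve_rx_intr() masks the ring's interrupt by writing
GVE_IRQ_MASK to its irq doorbell and queues the cleanup task. When the task
has drained the ring it re-arms the interrupt with another irq-doorbell write
(the value written is on an unmatched line, presumably
GVE_IRQ_ACK | GVE_IRQ_EVENT) and, if new descriptors raced in meanwhile, it
masks again and re-enqueues itself instead of waiting for the next interrupt.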