Lines matching "txrx" in ena_netmap.c (FreeBSD ENA driver netmap support)

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
 */
/* ena_netmap_attach() */
    ena_log_nm(adapter->pdev, INFO, "netmap attach\n");
    ...
    na.ifp = adapter->ifp;
    na.num_tx_desc = adapter->requested_tx_ring_size;
    na.num_rx_desc = adapter->requested_rx_ring_size;
    na.num_tx_rings = adapter->num_io_queues;
    na.num_rx_rings = adapter->num_io_queues;
    na.rx_buf_maxsize = adapter->buf_ring_size;
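Only the field assignments survive in the listing. As a hedged sketch, this is how they typically come together in a netmap(4) attach routine; the bzero() call, the callback wiring, and the final netmap_attach() call follow the standard netmap driver pattern rather than lines shown above (ena_netmap_reg/_txsync/_rxsync do appear as functions later in this listing):

    static int
    ena_netmap_attach_sketch(struct ena_adapter *adapter)
    {
        struct netmap_adapter na;

        bzero(&na, sizeof(na));
        na.ifp = adapter->ifp;
        na.num_tx_desc = adapter->requested_tx_ring_size;
        na.num_rx_desc = adapter->requested_rx_ring_size;
        na.num_tx_rings = adapter->num_io_queues;
        na.num_rx_rings = adapter->num_io_queues;
        na.rx_buf_maxsize = adapter->buf_ring_size;
        na.nm_register = ena_netmap_reg;    /* see fragments below */
        na.nm_txsync = ena_netmap_txsync;
        na.nm_rxsync = ena_netmap_rxsync;

        return (netmap_attach(&na));
    }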
/* ena_netmap_alloc_rx_slot() */
    struct netmap_adapter *na = NA(adapter->ifp);
    ...
    if (unlikely(rx_info->netmap_buf_idx != 0))
        ...
    qid = rx_ring->qid;
    kring = na->rx_rings[qid];
    nm_i = kring->nr_hwcur;
    head = kring->rhead;

    ena_log_nm(adapter->pdev, DBG,
        ...
        kring->nr_hwcur, kring->nr_hwtail, kring->rhead, kring->rcur,
        kring->rtail);

    if ((nm_i == head) && rx_ring->initialized) {
        ena_log_nm(adapter->pdev, ERR,
            ...
    ring = kring->ring;
    ...
        ena_log_nm(adapter->pdev, ERR, "Rx ring %d is NULL\n", qid);
    ...
    slot = &ring->slot[nm_i];
    ...
        ena_log_nm(adapter->pdev, ERR, "Bad buff in slot\n");
    ...
    rc = netmap_load_map(na, adapter->rx_buf_tag, rx_info->map, addr);
    ...
        ena_log_nm(adapter->pdev, WARN, "DMA mapping error\n");
    ...
    bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

    rx_info->ena_buf.paddr = paddr;
    rx_info->ena_buf.len = ring->nr_buf_size;
    rx_info->mbuf = NULL;
    rx_info->netmap_buf_idx = slot->buf_idx;
    ...
    slot->buf_idx = 0;

    lim = kring->nkr_num_slots - 1;
    kring->nr_hwcur = nm_next(nm_i, lim);
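What the allocation path is doing: it "steals" the buffer that userspace placed in slot nr_hwcur, remembers its index in rx_info->netmap_buf_idx, zeroes slot->buf_idx so the free path can detect ownership, and advances nr_hwcur. The wrap-around helpers come from netmap_kern.h; self-contained equivalents (hypothetical _sketch names) for reference:

    static inline uint32_t
    nm_next_sketch(uint32_t i, uint32_t lim)
    {
        /* lim == nkr_num_slots - 1; past the last slot, wrap to 0 */
        return ((i == lim) ? 0 : i + 1);
    }

    static inline uint32_t
    nm_prev_sketch(uint32_t i, uint32_t lim)
    {
        return ((i == 0) ? lim : i - 1);
    }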
/* ena_netmap_free_rx_slot() */
    na = NA(adapter->ifp);
    ...
        ena_log_nm(adapter->pdev, ERR, "netmap adapter is NULL\n");
    ...
    if (na->rx_rings == NULL) {
        ena_log_nm(adapter->pdev, ERR, "netmap rings are NULL\n");
    ...
    qid = rx_ring->qid;
    kring = na->rx_rings[qid];
    ...
        ena_log_nm(adapter->pdev, ERR,
            ...
    lim = kring->nkr_num_slots - 1;
    nm_i = nm_prev(kring->nr_hwcur, lim);

    if (kring->nr_mode != NKR_NETMAP_ON)
        ...
    bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
        BUS_DMASYNC_POSTREAD);    /* continuation elided in the listing; POSTREAD pairs with the unload */
    netmap_unload_map(na, adapter->rx_buf_tag, rx_info->map);

    KASSERT(kring->ring != NULL, ("Netmap Rx ring is NULL\n"));

    slot = &kring->ring->slot[nm_i];

    ENA_WARN(slot->buf_idx != 0, adapter->ena_dev, "Overwrite slot buf\n");
    slot->buf_idx = rx_info->netmap_buf_idx;
    slot->flags = NS_BUF_CHANGED;

    rx_info->netmap_buf_idx = 0;
    kring->nr_hwcur = nm_i;
ena_ring_in_netmap(struct ena_adapter *adapter, int qid, enum txrx x)
{
    ...
    if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
        na = NA(adapter->ifp);
        kring = (x == NR_RX) ? na->rx_rings[qid] : na->tx_rings[qid];
        if (kring->nr_mode == NKR_NETMAP_ON)
            ...
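A hypothetical caller, illustrating why this predicate exists: the regular mbuf datapath must leave rings alone while netmap owns their buffers. ena_refill_rx_bufs() and the ring fields are taken from elsewhere in this listing; the helper name and the refill budget are made up for the example:

    static void
    ena_refill_all_rx_rings_sketch(struct ena_adapter *adapter)
    {
        int i;

        for (i = 0; i < adapter->num_io_queues; i++) {
            struct ena_ring *rx_ring = &adapter->rx_ring[i];

            if (ena_ring_in_netmap(adapter, i, NR_RX))
                continue;    /* netmap owns this ring's buffers */
            ena_refill_rx_bufs(rx_ring, rx_ring->ring_size);
        }
    }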
ena_netmap_reset_ring(struct ena_adapter *adapter, int qid, enum txrx x)
{
    ...
    netmap_reset(NA(adapter->ifp), x, qid, 0);
    ena_log_nm(adapter->pdev, INFO, "%s ring %d is in netmap mode\n",
        ...
/* ena_netmap_reg() */
    if_t ifp = na->ifp;
    struct ena_adapter *adapter = if_getsoftc(ifp);    /* restored; implied by the pdev line below */
    device_t pdev = adapter->pdev;
    ...
    enum txrx t;
    ...
            kring->nr_mode = NKR_NETMAP_ON;
    ...
            kring->nr_mode = NKR_NETMAP_OFF;
    ...
    adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
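The surviving lines suggest the canonical nm_register shape: tear the interface down, flip each kring's nr_mode, bring it back up, and schedule a reset if that fails. A sketch assuming that shape; ena_down()/ena_up() exist elsewhere in the driver, the iteration macros are from netmap_kern.h, and the driver's locking is omitted:

    static int
    ena_netmap_reg_sketch(struct netmap_adapter *na, int onoff)
    {
        struct ena_adapter *adapter = if_getsoftc(na->ifp);
        struct netmap_kring *kring;
        enum txrx t;
        int rc, i;

        ena_down(adapter);
        if (onoff) {
            for_rx_tx(t) {
                for (i = 0; i <= nma_get_nrings(na, t); i++) {
                    kring = NMR(na, t)[i];
                    if (nm_kring_pending_on(kring))
                        kring->nr_mode = NKR_NETMAP_ON;
                }
            }
            nm_set_native_flags(na);
        } else {
            nm_clear_native_flags(na);
            for_rx_tx(t) {
                for (i = 0; i <= nma_get_nrings(na, t); i++) {
                    kring = NMR(na, t)[i];
                    if (nm_kring_pending_off(kring))
                        kring->nr_mode = NKR_NETMAP_OFF;
                }
            }
        }
        rc = ena_up(adapter);
        if (rc != 0) {
            /* restart failed: request a device reset, per the fragment above */
            adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
        }
        return (rc);
    }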
/* ena_netmap_txsync() */
    ena_netmap_fill_ctx(kring, &ctx, ENA_IO_TXQ_IDX(kring->ring_id));
    ctx.ring = &ctx.adapter->tx_ring[kring->ring_id];
/* ena_netmap_tx_frames() */
    struct ena_ring *tx_ring = ctx->ring;
    ...
    ctx->nm_i = ctx->kring->nr_hwcur;
    ctx->nt = ctx->ring->next_to_use;

    __builtin_prefetch(&ctx->slots[ctx->nm_i]);

    while (ctx->nm_i != ctx->kring->rhead) {
        ...
        tx_ring->acum_pkts++;
    ...
    if (likely(ctx->nm_i != ctx->kring->nr_hwcur)) {
        ...
        ctx->ring->next_to_use = ctx->nt;
        ctx->kring->nr_hwcur = ctx->nm_i;
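This is the netmap TX producer loop: slots from nr_hwcur up to (but not including) rhead belong to the kernel, each iteration consumes one frame, and only if at least one frame was queued does the driver publish next_to_use to the NIC and move nr_hwcur forward, followed by a doorbell write on the submission queue.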
/* ena_netmap_tx_frame() */
    adapter = ctx->adapter;
    if (ena_netmap_count_slots(ctx) > adapter->max_tx_sgl_size) {
        ena_log_nm(adapter->pdev, WARN, "Too many slots per packet\n");
        ...
    tx_ring = ctx->ring;
    ...
    req_id = tx_ring->free_tx_ids[ctx->nt];
    tx_info = &tx_ring->tx_buffer_info[req_id];
    tx_info->num_of_bufs = 0;
    tx_info->nm_info.sockets_used = 0;
    ...
        ena_log_nm(adapter->pdev, ERR, "Failed to map Tx slot\n");
    ...
    ena_tx_ctx.ena_bufs = tx_info->bufs;
    ...
    ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
    ...
    ena_tx_ctx.meta_valid = adapter->disable_meta_caching;
    ...
    if (tx_ring->acum_pkts == ENA_DB_THRESHOLD ||
        ena_com_is_doorbell_needed(ctx->io_sq, &ena_tx_ctx))
        ...
    rc = ena_com_prepare_tx(ctx->io_sq, &ena_tx_ctx, &nb_hw_desc);
    ...
        ena_log_nm(adapter->pdev, DBG,
            "Tx ring[%d] is out of space\n", tx_ring->que->id);
    ...
        ena_log_nm(adapter->pdev, ERR,
            ...
        counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);
    ...
    counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
    counter_u64_add_protected(tx_ring->tx_stats.bytes, packet_len);
    counter_u64_add_protected(adapter->hw_stats.tx_packets, 1);
    counter_u64_add_protected(adapter->hw_stats.tx_bytes, packet_len);
    ...
    tx_info->tx_descs = nb_hw_desc;
    ...
    ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

    for (unsigned int i = 0; i < tx_info->num_of_bufs; i++)
        bus_dmamap_sync(adapter->tx_buf_tag,
            tx_info->nm_info.map_seg[i], BUS_DMASYNC_PREWRITE);
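Per frame, the driver pulls a free req_id from free_tx_ids[nt], maps the chain of netmap slots into tx_info->bufs, and hands the descriptor set to the device with ena_com_prepare_tx(). The doorbell is rung early when ENA_DB_THRESHOLD packets have accumulated or when ena_com_is_doorbell_needed() reports the submission queue filling up; the counter_u64_add_protected() statistics updates require the counter_enter()/counter_exit() bracket that the listing elides.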
/* ena_netmap_count_slots() */
    uint16_t nm = ctx->nm_i;
    ...
    while ((ctx->slots[nm].flags & NS_MOREFRAG) != 0) {
        ...
        nm = nm_next(nm, ctx->lim);
/* ena_netmap_packet_len() */
        packet_size += nm_slot->len;
        ...
    } while ((nm_slot->flags & NS_MOREFRAG) != 0);
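Enough of both helpers survives to reconstruct them. A sketch under that assumption, using the ctx field names seen above (the _sketch suffix marks these as reconstructions, not the driver's code):

    /* Count the netmap slots occupied by the frame starting at ctx->nm_i;
     * NS_MOREFRAG is set on every fragment except the last. */
    static uint16_t
    ena_netmap_count_slots_sketch(struct ena_netmap_ctx *ctx)
    {
        uint16_t slots = 1;
        uint16_t nm = ctx->nm_i;

        while ((ctx->slots[nm].flags & NS_MOREFRAG) != 0) {
            slots++;
            nm = nm_next(nm, ctx->lim);
        }
        return (slots);
    }

    /* Sum the per-slot lengths of one multi-fragment frame. */
    static uint16_t
    ena_netmap_packet_len_sketch(struct netmap_slot *slots, u_int slot_index,
        uint16_t limit)
    {
        struct netmap_slot *nm_slot;
        uint16_t packet_size = 0;

        do {
            nm_slot = &slots[slot_index];
            packet_size += nm_slot->len;
            slot_index = nm_next(slot_index, limit);
        } while ((nm_slot->flags & NS_MOREFRAG) != 0);

        return (packet_size);
    }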
/* ena_netmap_copy_data() */
        data_amount = min_t(uint16_t, bytes_to_copy, nm_slot->len);
        ...
        bytes_to_copy -= data_amount;
        ...
    } while ((nm_slot->flags & NS_MOREFRAG) != 0 && bytes_to_copy > 0);
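Likewise for the gather helper used by the LLQ path below: it walks a fragment chain, copying up to bytes_to_copy bytes into a contiguous destination. A reconstruction; MIN() stands in for the driver's min_t() and the error value is a guess:

    static int
    ena_netmap_copy_data_sketch(struct netmap_adapter *na,
        struct netmap_slot *slots, u_int slot_index, uint16_t limit,
        uint16_t bytes_to_copy, void *destination)
    {
        struct netmap_slot *nm_slot;
        void *slot_vaddr;
        uint16_t data_amount;

        do {
            nm_slot = &slots[slot_index];
            slot_vaddr = NMB(na, nm_slot);    /* slot index -> kernel vaddr */
            if (slot_vaddr == NULL)
                return (EINVAL);

            data_amount = MIN(bytes_to_copy, nm_slot->len);
            memcpy(destination, slot_vaddr, data_amount);
            bytes_to_copy -= data_amount;
            destination = (char *)destination + data_amount;

            slot_index = nm_next(slot_index, limit);
        } while ((nm_slot->flags & NS_MOREFRAG) != 0 && bytes_to_copy > 0);

        return (0);
    }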
/* ena_netmap_map_single_slot() */
    pdev = ((struct ena_adapter *)if_getsoftc(na->ifp))->pdev;
    ...
        slot->buf_idx);
/* ena_netmap_tx_map_slots() */
    adapter = ctx->adapter;
    tx_ring = ctx->ring;
    ena_buf = tx_info->bufs;
    nm_info = &tx_info->nm_info;
    nm_maps = nm_info->map_seg;
    nm_buf_idx = nm_info->socket_buf_idx;
    slot = &ctx->slots[ctx->nm_i];

    slot_head_len = slot->len;
    *packet_len = ena_netmap_packet_len(ctx->slots, ctx->nm_i, ctx->lim);
    ...
    __builtin_prefetch(&ctx->slots[nm_next(ctx->nm_i, ctx->lim)]);
    if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
        ...
            tx_ring->tx_max_header_size);
        ...
        *push_hdr = NMB(ctx->na, slot);
        ...
            ena_log_nm(adapter->pdev, ERR,
                ...
        rc = ena_netmap_copy_data(ctx->na, ctx->slots,
            ctx->nm_i, ctx->lim, push_len,
            tx_ring->push_buf_intermediate_buf);
        ...
            ena_log_nm(adapter->pdev, ERR,
                ...
        *push_hdr = tx_ring->push_buf_intermediate_buf;
        counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);

        delta = push_len - slot_head_len;
        ...
        ena_log_nm(adapter->pdev, DBG,
            "slot: %d header_buf->vaddr: %p push_len: %d\n",
            slot->buf_idx, *push_hdr, push_len);
        ...
        rc = ena_netmap_map_single_slot(ctx->na, slot,
            adapter->tx_buf_tag, *nm_maps, &vaddr, &paddr);
        ...
            ena_log_nm(adapter->pdev, ERR,
                ...
        ena_buf->paddr = paddr + push_len;
        ena_buf->len = slot->len - push_len;
        ...
        tx_info->num_of_bufs++;
        ...
        remaining_len -= slot->len;
        ...
        *nm_buf_idx = slot->buf_idx;
        ...
        slot->buf_idx = 0;
        ...
        ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
        slot = &ctx->slots[ctx->nm_i];
        nm_info->sockets_used++;
        ...
        __builtin_prefetch(&ctx->slots[nm_next(ctx->nm_i, ctx->lim)]);
        frag_len = slot->len;
        ...
        delta -= frag_len;
        ...
        rc = ena_netmap_map_single_slot(ctx->na, slot,
            adapter->tx_buf_tag, *nm_maps, &vaddr,
            ...
            ena_log_nm(adapter->pdev, ERR,
                ...
        ena_buf->paddr = paddr + delta;
        ena_buf->len = slot->len - delta;
        ...
        tx_info->num_of_bufs++;
        ...
        remaining_len -= slot->len;
        ...
        *nm_buf_idx = slot->buf_idx;
        ...
        slot->buf_idx = 0;
        ...
        ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
        slot = &ctx->slots[ctx->nm_i];
        nm_info->sockets_used++;
    ...
    /* Map all remaining data (regular routine for non-LLQ mode) */
        __builtin_prefetch(&ctx->slots[nm_next(ctx->nm_i, ctx->lim)]);
        ...
        rc = ena_netmap_map_single_slot(ctx->na, slot,
            adapter->tx_buf_tag, *nm_maps, &vaddr, &paddr);
        ...
            ena_log_nm(adapter->pdev, ERR, "DMA mapping error\n");
        ...
        ena_buf->paddr = paddr;
        ena_buf->len = slot->len;
        ...
        tx_info->num_of_bufs++;
        ...
        remaining_len -= slot->len;
        ...
        *nm_buf_idx = slot->buf_idx;
        ...
        slot->buf_idx = 0;
        ...
        ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
        slot = &ctx->slots[ctx->nm_i];
        nm_info->sockets_used++;
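The shape of this function, as far as the fragments show: for LLQ placement (ENA_ADMIN_PLACEMENT_POLICY_DEV) the first push_len bytes of the frame travel inside the descriptor ring itself, so *push_hdr points either straight at the first netmap buffer (NMB()) or, when the header spans slots, at push_buf_intermediate_buf filled by ena_netmap_copy_data() and counted in the llq_buffer_copy statistic. delta tracks how many already-pushed bytes must be skipped in the following fragments (hence paddr + delta, len - delta), and the tail of the function maps whatever remains slot by slot. Every consumed slot has its buf_idx saved into socket_buf_idx[] and zeroed, the same ownership swap the RX path performs.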
/* ena_netmap_unmap_last_socket_chain() */
    nm_info = &tx_info->nm_info;
    ...
    n = tx_info->num_of_bufs;
    while (n--) {
        netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
            nm_info->map_seg[n]);
    }
    tx_info->num_of_bufs = 0;
    ...
    n = nm_info->sockets_used;
    while (n--) {
        ctx->nm_i = nm_prev(ctx->nm_i, ctx->lim);
        ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
        ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
        nm_info->socket_buf_idx[n] = 0;
    }
    nm_info->sockets_used = 0;
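This is the unwind path for a frame that failed mid-mapping: DMA maps are unloaded in reverse order, then nm_i is walked backwards handing each stolen buf_idx back to its slot, with NS_BUF_CHANGED set so userspace knows the buffer under the slot changed.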
/* ena_netmap_tx_cleanup() */
    struct ena_ring *tx_ring = ctx->ring;
    ...
    ctx->nm_i = ctx->kring->nr_hwtail;
    ctx->nt = tx_ring->next_to_clean;
    ...
    rc = ena_com_tx_comp_req_id_get(ctx->io_cq, &req_id);
    ...
    ctx->kring->nr_hwtail = ctx->nm_i;
    ...
    tx_ring->next_to_clean = ctx->nt;
    ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
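A sketch of the completion loop these fragments outline, omitting the req_id validation the real driver performs; ena_com_tx_comp_req_id_get() returns non-zero when no completion is pending, and ena_netmap_tx_clean_one() (next fragment) returns the descriptor count to acknowledge:

    static void
    ena_netmap_tx_cleanup_sketch(struct ena_netmap_ctx *ctx)
    {
        struct ena_ring *tx_ring = ctx->ring;
        uint16_t total_tx_descs = 0;
        uint16_t req_id;

        ctx->nm_i = ctx->kring->nr_hwtail;
        ctx->nt = tx_ring->next_to_clean;

        /* Reclaim until the device has no further completions ready. */
        while (ena_com_tx_comp_req_id_get(ctx->io_cq, &req_id) == 0)
            total_tx_descs += ena_netmap_tx_clean_one(ctx, req_id);

        ctx->kring->nr_hwtail = ctx->nm_i;

        if (total_tx_descs > 0) {
            /* Let the device reuse the acknowledged descriptors. */
            tx_ring->next_to_clean = ctx->nt;
            ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
        }
    }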
/* ena_netmap_tx_clean_one() */
    tx_info = &ctx->ring->tx_buffer_info[req_id];
    nm_info = &tx_info->nm_info;
    ...
    for (n = 0; n < tx_info->num_of_bufs; n++) {
        netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
            nm_info->map_seg[n]);
    }
    tx_info->num_of_bufs = 0;
    ...
    for (n = 0; n < nm_info->sockets_used; n++) {
        ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
        ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0,
            ctx->adapter->ena_dev, "Tx idx is not 0.\n");
        ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
        ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
        nm_info->socket_buf_idx[n] = 0;
    }
    nm_info->sockets_used = 0;

    ctx->ring->free_tx_ids[ctx->nt] = req_id;
    ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->lim);

    return tx_info->tx_descs;
/* ena_netmap_rxsync() */
    ena_netmap_fill_ctx(kring, &ctx, ENA_IO_RXQ_IDX(kring->ring_id));
    ctx.ring = &ctx.adapter->rx_ring[kring->ring_id];

    if (ctx.kring->rhead > ctx.lim) {
        ...
    if (unlikely((if_getdrvflags(ctx.na->ifp) & IFF_DRV_RUNNING) == 0))
        ...
/* ena_netmap_rx_frames() */
    ctx->nt = ctx->ring->next_to_clean;
    ctx->nm_i = ctx->kring->nr_hwtail;
    ...
        ena_log_nm(ctx->adapter->pdev, ERR,
            ...
    ctx->kring->nr_hwtail = ctx->nm_i;
    ctx->kring->nr_kflags &= ~NKR_PENDINTR;
    ctx->ring->next_to_clean = ctx->nt;
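The RX consumer is the mirror image of the TX producer: frames are pulled from the NIC ring at next_to_clean and appended to the netmap ring at nr_hwtail; when done, clearing NKR_PENDINTR tells the netmap poll logic the pending interrupt has been serviced.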
/* ena_netmap_rx_frame() */
    ena_rx_ctx.ena_bufs = ctx->ring->ena_bufs;
    ena_rx_ctx.max_bufs = ctx->adapter->max_rx_sgl_size;
    bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
        ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD);

    rc = ena_com_rx_pkt(ctx->io_cq, ctx->io_sq, &ena_rx_ctx);
    ...
        ena_log_nm(ctx->adapter->pdev, ERR,
            ...
        counter_u64_add(ctx->ring->rx_stats.bad_desc_num, 1);
    ...
        counter_u64_add(ctx->ring->rx_stats.bad_req_id, 1);
    ...
        ena_trigger_reset(ctx->adapter, reset_reason);
    ...
    ena_log_nm(ctx->adapter->pdev, DBG,
        ...
        ctx->ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
        ...
    ctx->slots[nm_prev(ctx->nm_i, ctx->lim)].flags &= ~NS_MOREFRAG;
    ...
    bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
        ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD);

    counter_u64_add_protected(ctx->ring->rx_stats.bytes, len);
    counter_u64_add_protected(ctx->adapter->hw_stats.rx_bytes, len);
    counter_u64_add_protected(ctx->ring->rx_stats.cnt, 1);
    counter_u64_add_protected(ctx->adapter->hw_stats.rx_packets, 1);
    ...
    nm = ctx->nm_i;
    ...
    while (buf--) {
        ctx->slots[nm].flags = 0;
        ctx->slots[nm].len = 0;
        nm = nm_prev(nm, ctx->lim);
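One call handles one (possibly multi-descriptor) frame: ena_com_rx_pkt() fills ena_bufs, each descriptor is loaded into a slot with NS_MOREFRAG | NS_BUF_CHANGED (see ena_netmap_rx_load_desc() below), and the final slot's NS_MOREFRAG is then cleared to terminate the chain. On a bad descriptor or req_id, the while (buf--) loop zeroes the partially filled slots back out before the device reset is triggered.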
/* ena_netmap_rx_load_desc() */
    req_id = ctx->ring->ena_bufs[buf].req_id;
    rx_info = &ctx->ring->rx_buffer_info[req_id];
    bus_dmamap_sync(ctx->adapter->rx_buf_tag, rx_info->map,
        BUS_DMASYNC_POSTREAD);    /* continuation elided in the listing; POSTREAD pairs with the unload */
    netmap_unload_map(ctx->na, ctx->adapter->rx_buf_tag, rx_info->map);

    ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0, ctx->adapter->ena_dev,
        ...
    ctx->slots[ctx->nm_i].buf_idx = rx_info->netmap_buf_idx;
    rx_info->netmap_buf_idx = 0;
    ...
    ctx->slots[ctx->nm_i].flags |= NS_MOREFRAG | NS_BUF_CHANGED;
    ctx->slots[ctx->nm_i].len = ctx->ring->ena_bufs[buf].len;
    *len += ctx->slots[ctx->nm_i].len;
    ctx->ring->free_rx_ids[ctx->nt] = req_id;
    ena_log_nm(ctx->adapter->pdev, DBG,
        ...
        ctx->slots[ctx->nm_i].buf_idx, (uintmax_t)rx_info->ena_buf.paddr,
        ctx->nm_i);

    ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
    ctx->nt = ENA_RX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);
/* ena_netmap_rx_cleanup() */
    refill_required = ctx->kring->rhead - ctx->kring->nr_hwcur;
    if (ctx->kring->nr_hwcur != ctx->kring->nr_hwtail)
        refill_required -= 1;
    ...
        refill_required += ctx->kring->nkr_num_slots;
    ...
    ena_refill_rx_bufs(ctx->ring, refill_required);
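Restated as a self-contained helper (hypothetical name). The guard around the wrap-around correction is elided in the listing; a negative intermediate result is the natural trigger, and the "- 1" keeps one slot in reserve while the kring still holds unconsumed frames:

    static int
    ena_netmap_rx_refill_budget_sketch(struct netmap_kring *kring)
    {
        int refill_required = kring->rhead - kring->nr_hwcur;

        if (kring->nr_hwcur != kring->nr_hwtail)
            refill_required -= 1;
        if (refill_required < 0)    /* rhead already wrapped past nr_hwcur */
            refill_required += kring->nkr_num_slots;

        return (refill_required);
    }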
/* ena_netmap_fill_ctx() */
    ctx->kring = kring;
    ctx->na = kring->na;
    ctx->adapter = if_getsoftc(ctx->na->ifp);
    ctx->lim = kring->nkr_num_slots - 1;
    ctx->io_cq = &ctx->adapter->ena_dev->io_cq_queues[ena_qid];
    ctx->io_sq = &ctx->adapter->ena_dev->io_sq_queues[ena_qid];
    ctx->slots = kring->ring->slot;
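The assignments above imply a per-sync context struct roughly like the following. This is a reconstruction from the field accesses throughout this listing; the real struct ena_netmap_ctx is defined in the driver and may differ in order and types:

    struct ena_netmap_ctx_sketch {
        struct netmap_kring *kring;
        struct netmap_adapter *na;
        struct ena_adapter *adapter;
        struct ena_ring *ring;          /* tx_ring or rx_ring, set by the caller */
        struct ena_com_io_cq *io_cq;
        struct ena_com_io_sq *io_sq;
        struct netmap_slot *slots;
        u_int nm_i;                     /* netmap ring index */
        uint16_t nt;                    /* NIC ring index (next_to_use/clean) */
        uint16_t lim;                   /* nkr_num_slots - 1 */
    };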
/* ena_netmap_unload() */
    struct netmap_adapter *na = NA(adapter->ifp);
    ...
    netmap_unload_map(na, adapter->tx_buf_tag, map);