/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef DEV_NETMAP

#include "ena.h"
#include "ena_netmap.h"

#define ENA_NETMAP_MORE_FRAMES		1
#define ENA_NETMAP_NO_MORE_FRAMES	0
#define ENA_MAX_FRAMES			16384

struct ena_netmap_ctx {
	struct netmap_kring *kring;
	struct ena_adapter *adapter;
	struct netmap_adapter *na;
	struct netmap_slot *slots;
	struct ena_ring *ring;
	struct ena_com_io_cq *io_cq;
	struct ena_com_io_sq *io_sq;
	u_int nm_i;
	uint16_t nt;
	uint16_t lim;
};

/* Netmap callbacks */
static int ena_netmap_reg(struct netmap_adapter *, int);
static int ena_netmap_txsync(struct netmap_kring *, int);
static int ena_netmap_rxsync(struct netmap_kring *, int);

/* Helper functions */
static int ena_netmap_tx_frames(struct ena_netmap_ctx *);
static int ena_netmap_tx_frame(struct ena_netmap_ctx *);
static inline uint16_t ena_netmap_count_slots(struct ena_netmap_ctx *);
static inline uint16_t ena_netmap_packet_len(struct netmap_slot *, u_int,
    uint16_t);
static int ena_netmap_copy_data(struct netmap_adapter *,
    struct netmap_slot *, u_int, uint16_t, uint16_t, void *);
static int ena_netmap_map_single_slot(struct netmap_adapter *,
    struct netmap_slot *, bus_dma_tag_t, bus_dmamap_t, void **, uint64_t *);
static int ena_netmap_tx_map_slots(struct ena_netmap_ctx *,
    struct ena_tx_buffer *, void **, uint16_t *, uint16_t *);
static void ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *,
    struct ena_tx_buffer *);
static void ena_netmap_tx_cleanup(struct ena_netmap_ctx *);
static uint16_t ena_netmap_tx_clean_one(struct ena_netmap_ctx *, uint16_t);
static inline int validate_tx_req_id(struct ena_ring *, uint16_t);
static int ena_netmap_rx_frames(struct ena_netmap_ctx *);
static int ena_netmap_rx_frame(struct ena_netmap_ctx *);
static int ena_netmap_rx_load_desc(struct ena_netmap_ctx *, uint16_t, int *);
static void ena_netmap_rx_cleanup(struct ena_netmap_ctx *);
static void ena_netmap_fill_ctx(struct netmap_kring *,
    struct ena_netmap_ctx *, uint16_t);
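
/*
 * Register the ENA interface with netmap(4). The temporary struct
 * netmap_adapter describes the ring geometry and the driver callbacks;
 * NAF_MOREFRAG advertises that frames may span multiple netmap slots
 * (NS_MOREFRAG chains) on both the Tx and Rx paths.
 */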
int
ena_netmap_attach(struct ena_adapter *adapter)
{
	struct netmap_adapter na;

	ena_trace(ENA_NETMAP, "netmap attach\n");

	bzero(&na, sizeof(na));
	na.na_flags = NAF_MOREFRAG;
	na.ifp = adapter->ifp;
	na.num_tx_desc = adapter->tx_ring_size;
	na.num_rx_desc = adapter->rx_ring_size;
	na.num_tx_rings = adapter->num_queues;
	na.num_rx_rings = adapter->num_queues;
	na.rx_buf_maxsize = adapter->buf_ring_size;
	na.nm_txsync = ena_netmap_txsync;
	na.nm_rxsync = ena_netmap_rxsync;
	na.nm_register = ena_netmap_reg;

	return (netmap_attach(&na));
}
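
/*
 * Attach a netmap buffer to the given Rx descriptor. Rather than
 * copying, the driver takes ownership of the buffer: its index moves
 * from the netmap slot into rx_info->netmap_buf_idx and the slot's
 * buf_idx is zeroed until the buffer is handed back.
 */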
int
ena_netmap_alloc_rx_slot(struct ena_adapter *adapter,
    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
{
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_kring *kring;
	struct netmap_ring *ring;
	struct netmap_slot *slot;
	void *addr;
	uint64_t paddr;
	int nm_i, qid, head, lim, rc;

	/* If this rx_info still holds a previously allocated, unused buffer */
	if (unlikely(rx_info->netmap_buf_idx != 0))
		return (0);

	qid = rx_ring->qid;
	kring = na->rx_rings[qid];
	nm_i = kring->nr_hwcur;
	head = kring->rhead;

	ena_trace(ENA_NETMAP | ENA_DBG, "nr_hwcur: %d, nr_hwtail: %d, "
	    "rhead: %d, rcur: %d, rtail: %d\n", kring->nr_hwcur,
	    kring->nr_hwtail, kring->rhead, kring->rcur, kring->rtail);

	if ((nm_i == head) && rx_ring->initialized) {
		ena_trace(ENA_NETMAP, "No free slots in netmap ring\n");
		return (ENOMEM);
	}

	ring = kring->ring;
	if (ring == NULL) {
		device_printf(adapter->pdev, "Rx ring %d is NULL\n", qid);
		return (EFAULT);
	}
	slot = &ring->slot[nm_i];

	addr = PNMB(na, slot, &paddr);
	if (addr == NETMAP_BUF_BASE(na)) {
		device_printf(adapter->pdev, "Bad buffer in slot\n");
		return (EFAULT);
	}

	rc = netmap_load_map(na, adapter->rx_buf_tag, rx_info->map, addr);
	if (rc != 0) {
		ena_trace(ENA_WARNING, "DMA mapping error\n");
		return (rc);
	}
	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

	rx_info->ena_buf.paddr = paddr;
	rx_info->ena_buf.len = ring->nr_buf_size;
	rx_info->mbuf = NULL;
	rx_info->netmap_buf_idx = slot->buf_idx;

	slot->buf_idx = 0;

	lim = kring->nkr_num_slots - 1;
	kring->nr_hwcur = nm_next(nm_i, lim);

	return (0);
}

void
ena_netmap_free_rx_slot(struct ena_adapter *adapter,
    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
{
	struct netmap_adapter *na;
	struct netmap_kring *kring;
	struct netmap_slot *slot;
	int nm_i, qid, lim;

	na = NA(adapter->ifp);
	if (na == NULL) {
		device_printf(adapter->pdev, "netmap adapter is NULL\n");
		return;
	}

	if (na->rx_rings == NULL) {
		device_printf(adapter->pdev, "netmap rings are NULL\n");
		return;
	}

	qid = rx_ring->qid;
	kring = na->rx_rings[qid];
	if (kring == NULL) {
		device_printf(adapter->pdev,
		    "netmap kernel ring %d is NULL\n", qid);
		return;
	}

	lim = kring->nkr_num_slots - 1;
	nm_i = nm_prev(kring->nr_hwcur, lim);

	if (kring->nr_mode != NKR_NETMAP_ON)
		return;

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	netmap_unload_map(na, adapter->rx_buf_tag, rx_info->map);

	slot = &kring->ring->slot[nm_i];

	ENA_ASSERT(slot->buf_idx == 0, "Overwriting slot buf\n");
	slot->buf_idx = rx_info->netmap_buf_idx;
	slot->flags = NS_BUF_CHANGED;

	rx_info->netmap_buf_idx = 0;
	kring->nr_hwcur = nm_i;
}

void
ena_netmap_reset_rx_ring(struct ena_adapter *adapter, int qid)
{
	if (adapter->ifp->if_capenable & IFCAP_NETMAP)
		netmap_reset(NA(adapter->ifp), NR_RX, qid, 0);
}

void
ena_netmap_reset_tx_ring(struct ena_adapter *adapter, int qid)
{
	if (adapter->ifp->if_capenable & IFCAP_NETMAP)
		netmap_reset(NA(adapter->ifp), NR_TX, qid, 0);
}
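
/*
 * netmap register/unregister callback. The interface has to be brought
 * down and back up for the new mode to take effect; if ena_up() fails
 * afterwards, the device is reset and restored in an attempt to reach
 * a working state again.
 */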
static int
ena_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct ena_adapter *adapter = ifp->if_softc;
	int rc;

	sx_xlock(&adapter->ioctl_sx);
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
	ena_down(adapter);

	if (onoff) {
		ena_trace(ENA_NETMAP, "netmap on\n");
		nm_set_native_flags(na);
	} else {
		ena_trace(ENA_NETMAP, "netmap off\n");
		nm_clear_native_flags(na);
	}

	rc = ena_up(adapter);
	if (rc != 0) {
		ena_trace(ENA_WARNING, "ena_up failed with rc=%d\n", rc);
		adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
		nm_clear_native_flags(na);
		ena_destroy_device(adapter, false);
		ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
		rc = ena_restore_device(adapter);
	}
	sx_unlock(&adapter->ioctl_sx);

	return (rc);
}

static int
ena_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct ena_netmap_ctx ctx;
	int rc = 0;

	ena_netmap_fill_ctx(kring, &ctx, ENA_IO_TXQ_IDX(kring->ring_id));
	ctx.ring = &ctx.adapter->tx_ring[kring->ring_id];

	ENA_RING_MTX_LOCK(ctx.ring);
	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, ctx.adapter)))
		goto txsync_end;

	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
		goto txsync_end;

	rc = ena_netmap_tx_frames(&ctx);
	ena_netmap_tx_cleanup(&ctx);

txsync_end:
	ENA_RING_MTX_UNLOCK(ctx.ring);
	return (rc);
}
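
/*
 * Transmit every frame between nr_hwcur and rhead: map the slots of
 * each frame and post it to the submission queue. The doorbell is
 * written once for the whole batch, after which the new ring state is
 * published to netmap and to the device.
 */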
static int
ena_netmap_tx_frames(struct ena_netmap_ctx *ctx)
{
	struct ena_ring *tx_ring = ctx->ring;
	int rc = 0;

	ctx->nm_i = ctx->kring->nr_hwcur;
	ctx->nt = ctx->ring->next_to_use;

	__builtin_prefetch(&ctx->slots[ctx->nm_i]);

	while (ctx->nm_i != ctx->kring->rhead) {
		if ((rc = ena_netmap_tx_frame(ctx)) != 0) {
			/*
			 * When the Tx ring runs out of space, an error is
			 * still returned by ena_netmap_tx_frame(). It should
			 * not be passed on to netmap, as the application can
			 * see the current ring state from the netmap ring
			 * pointers. Returning an error could cause the
			 * application to exit, yet a full Tx ring is a
			 * common condition.
			 */
			if (rc == ENA_COM_NO_MEM)
				rc = 0;
			break;
		}
		tx_ring->acum_pkts++;
	}

	/* If any packet was sent... */
	if (likely(ctx->nm_i != ctx->kring->nr_hwcur)) {
		wmb();
		/* ...send the doorbell to the device. */
		ena_com_write_sq_doorbell(ctx->io_sq);
		counter_u64_add(ctx->ring->tx_stats.doorbells, 1);
		tx_ring->acum_pkts = 0;

		ctx->ring->next_to_use = ctx->nt;
		ctx->kring->nr_hwcur = ctx->nm_i;
	}

	return (rc);
}

static int
ena_netmap_tx_frame(struct ena_netmap_ctx *ctx)
{
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_adapter *adapter;
	struct ena_ring *tx_ring;
	struct ena_tx_buffer *tx_info;
	uint16_t req_id;
	uint16_t header_len;
	uint16_t packet_len;
	int nb_hw_desc;
	int rc;
	void *push_hdr;

	adapter = ctx->adapter;
	if (ena_netmap_count_slots(ctx) > adapter->max_tx_sgl_size) {
		ena_trace(ENA_WARNING, "Too many slots per packet\n");
		return (EINVAL);
	}

	tx_ring = ctx->ring;

	req_id = tx_ring->free_tx_ids[ctx->nt];
	tx_info = &tx_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;
	tx_info->nm_info.sockets_used = 0;

	rc = ena_netmap_tx_map_slots(ctx, tx_info, &push_hdr, &header_len,
	    &packet_len);
	if (unlikely(rc != 0)) {
		device_printf(adapter->pdev, "Failed to map Tx slot\n");
		return (rc);
	}

	bzero(&ena_tx_ctx, sizeof(struct ena_com_tx_ctx));
	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = header_len;

	/* No offloads are configured, as netmap does not support them. */
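
	/*
	 * The doorbell may have to be rung before this frame is even
	 * prepared: either DB_THRESHOLD packets have accumulated since the
	 * last doorbell, or ena_com_is_doorbell_needed() reports that the
	 * queue cannot take another burst without one (LLQ mode limits how
	 * many descriptors may be pushed between doorbells).
	 */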
	if (tx_ring->acum_pkts == DB_THRESHOLD ||
	    ena_com_is_doorbell_needed(ctx->io_sq, &ena_tx_ctx)) {
		wmb();
		ena_com_write_sq_doorbell(ctx->io_sq);
		counter_u64_add(tx_ring->tx_stats.doorbells, 1);
		tx_ring->acum_pkts = 0;
	}

	rc = ena_com_prepare_tx(ctx->io_sq, &ena_tx_ctx, &nb_hw_desc);
	if (unlikely(rc != 0)) {
		if (likely(rc == ENA_COM_NO_MEM)) {
			ena_trace(ENA_NETMAP | ENA_DBG | ENA_TXPTH,
			    "Tx ring[%d] is out of space\n", tx_ring->que->id);
		} else {
			device_printf(adapter->pdev,
			    "Failed to prepare Tx bufs\n");
		}
		counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);

		ena_netmap_unmap_last_socket_chain(ctx, tx_info);
		return (rc);
	}

	counter_enter();
	counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
	counter_u64_add_protected(tx_ring->tx_stats.bytes, packet_len);
	counter_u64_add_protected(adapter->hw_stats.tx_packets, 1);
	counter_u64_add_protected(adapter->hw_stats.tx_bytes, packet_len);
	counter_exit();

	tx_info->tx_descs = nb_hw_desc;

	ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

	for (unsigned int i = 0; i < tx_info->num_of_bufs; i++)
		bus_dmamap_sync(adapter->tx_buf_tag,
		    tx_info->nm_info.map_seg[i], BUS_DMASYNC_PREWRITE);

	return (0);
}

static inline uint16_t
ena_netmap_count_slots(struct ena_netmap_ctx *ctx)
{
	uint16_t slots = 1;
	uint16_t nm = ctx->nm_i;

	while ((ctx->slots[nm].flags & NS_MOREFRAG) != 0) {
		slots++;
		nm = nm_next(nm, ctx->lim);
	}

	return (slots);
}
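
/*
 * The total length of a frame is the sum of the lengths of all slots
 * in its NS_MOREFRAG chain, starting at slot_index.
 */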
static inline uint16_t
ena_netmap_packet_len(struct netmap_slot *slots, u_int slot_index,
    uint16_t limit)
{
	struct netmap_slot *nm_slot;
	uint16_t packet_size = 0;

	do {
		nm_slot = &slots[slot_index];
		packet_size += nm_slot->len;
		slot_index = nm_next(slot_index, limit);
	} while ((nm_slot->flags & NS_MOREFRAG) != 0);

	return (packet_size);
}

static int
ena_netmap_copy_data(struct netmap_adapter *na, struct netmap_slot *slots,
    u_int slot_index, uint16_t limit, uint16_t bytes_to_copy, void *destination)
{
	struct netmap_slot *nm_slot;
	void *slot_vaddr;
	uint16_t data_amount;

	do {
		nm_slot = &slots[slot_index];
		slot_vaddr = NMB(na, nm_slot);
		if (unlikely(slot_vaddr == NULL))
			return (EINVAL);

		data_amount = min_t(uint16_t, bytes_to_copy, nm_slot->len);
		memcpy(destination, slot_vaddr, data_amount);
		bytes_to_copy -= data_amount;
		/* Advance the destination, so the next slot appends to it */
		destination = (char *)destination + data_amount;

		slot_index = nm_next(slot_index, limit);
	} while ((nm_slot->flags & NS_MOREFRAG) != 0 && bytes_to_copy > 0);

	return (0);
}

static int
ena_netmap_map_single_slot(struct netmap_adapter *na, struct netmap_slot *slot,
    bus_dma_tag_t dmatag, bus_dmamap_t dmamap, void **vaddr, uint64_t *paddr)
{
	int rc;

	*vaddr = PNMB(na, slot, paddr);
	if (unlikely(*vaddr == NULL)) {
		ena_trace(ENA_ALERT, "Slot address is NULL\n");
		return (EINVAL);
	}

	rc = netmap_load_map(na, dmatag, dmamap, *vaddr);
	if (unlikely(rc != 0)) {
		ena_trace(ENA_ALERT, "Failed to map slot %d for DMA\n",
		    slot->buf_idx);
		return (EINVAL);
	}

	return (0);
}
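
/*
 * Build the device's view of one frame. In LLQ mode the packet header
 * (up to tx_max_header_size) is pushed to device memory, linearized
 * through an intermediate buffer when it spans several slots; all
 * remaining bytes are DMA-mapped, one ena_buf per netmap slot.
 */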
static int
ena_netmap_tx_map_slots(struct ena_netmap_ctx *ctx,
    struct ena_tx_buffer *tx_info, void **push_hdr, uint16_t *header_len,
    uint16_t *packet_len)
{
	struct netmap_slot *slot;
	struct ena_com_buf *ena_buf;
	struct ena_adapter *adapter;
	struct ena_ring *tx_ring;
	struct ena_netmap_tx_info *nm_info;
	bus_dmamap_t *nm_maps;
	void *vaddr;
	uint64_t paddr;
	uint32_t *nm_buf_idx;
	uint32_t slot_head_len;
	uint32_t frag_len;
	uint32_t remaining_len;
	uint16_t push_len;
	uint16_t delta;
	int rc;

	adapter = ctx->adapter;
	tx_ring = ctx->ring;
	ena_buf = tx_info->bufs;
	nm_info = &tx_info->nm_info;
	nm_maps = nm_info->map_seg;
	nm_buf_idx = nm_info->socket_buf_idx;
	slot = &ctx->slots[ctx->nm_i];

	slot_head_len = slot->len;
	*packet_len = ena_netmap_packet_len(ctx->slots, ctx->nm_i, ctx->lim);
	remaining_len = *packet_len;
	delta = 0;

	__builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/*
		 * When the device is in LLQ mode, the driver copies the
		 * header into the device memory space. The ena_com layer
		 * assumes that the header is in a linear memory space,
		 * which might not hold, since part of the header can live
		 * in the fragmented buffers. First, check whether the
		 * header fits in the first slot. If not, copy it to a
		 * separate buffer that will hold the linearized data.
		 */
		push_len = min_t(uint32_t, *packet_len,
		    tx_ring->tx_max_header_size);
		*header_len = push_len;
		/* If the header is in linear space, just point at the slot's data. */
		if (likely(push_len <= slot_head_len)) {
			*push_hdr = NMB(ctx->na, slot);
			if (unlikely(*push_hdr == NULL)) {
				device_printf(adapter->pdev,
				    "Slot virtual address is NULL\n");
				return (EINVAL);
			}
		/*
		 * Otherwise, copy the whole header from (possibly) multiple
		 * slots into the intermediate buffer.
		 */
		} else {
			rc = ena_netmap_copy_data(ctx->na,
			    ctx->slots,
			    ctx->nm_i,
			    ctx->lim,
			    push_len,
			    tx_ring->push_buf_intermediate_buf);
			if (unlikely(rc)) {
				device_printf(adapter->pdev,
				    "Failed to copy data from slots to push_buf\n");
				return (EINVAL);
			}

			*push_hdr = tx_ring->push_buf_intermediate_buf;
			counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);

			delta = push_len - slot_head_len;
		}

		ena_trace(ENA_NETMAP | ENA_DBG | ENA_TXPTH,
		    "slot: %d header_buf->vaddr: %p push_len: %d\n",
		    slot->buf_idx, *push_hdr, push_len);

		/*
		 * If the header was fully contained in linear memory, map
		 * the rest of the data from the first slot for DMA.
		 */
		if (slot_head_len > push_len) {
			rc = ena_netmap_map_single_slot(ctx->na,
			    slot,
			    adapter->tx_buf_tag,
			    *nm_maps,
			    &vaddr,
			    &paddr);
			if (unlikely(rc != 0)) {
				device_printf(adapter->pdev,
				    "DMA mapping error\n");
				return (rc);
			}
			nm_maps++;

			ena_buf->paddr = paddr + push_len;
			ena_buf->len = slot->len - push_len;
			ena_buf++;

			tx_info->num_of_bufs++;
		}

		remaining_len -= slot->len;

		/* Save buf idx before advancing */
		*nm_buf_idx = slot->buf_idx;
		nm_buf_idx++;
		slot->buf_idx = 0;

		/* Advance to the next socket */
		ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
		slot = &ctx->slots[ctx->nm_i];
		nm_info->sockets_used++;

		/*
		 * If the header is in non-linear space (delta > 0), skip the
		 * slots that contain only header data and map the last one,
		 * which holds both the header remainder and packet data.
		 * The first slot has already been counted above.
		 */
		while (delta > 0) {
			__builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
			frag_len = slot->len;

			/*
			 * If the whole segment holds only header data, just
			 * move to the next one and reduce delta.
			 */
			if (unlikely(delta >= frag_len)) {
				delta -= frag_len;
			} else {
				/*
				 * Map the data and then assign it with the
				 * offsets.
				 */
				rc = ena_netmap_map_single_slot(ctx->na,
				    slot,
				    adapter->tx_buf_tag,
				    *nm_maps,
				    &vaddr,
				    &paddr);
				if (unlikely(rc != 0)) {
					device_printf(adapter->pdev,
					    "DMA mapping error\n");
					goto error_map;
				}
				nm_maps++;

				ena_buf->paddr = paddr + delta;
				ena_buf->len = slot->len - delta;
				ena_buf++;

				tx_info->num_of_bufs++;
				delta = 0;
			}

			remaining_len -= slot->len;

			/* Save buf idx before advancing */
			*nm_buf_idx = slot->buf_idx;
			nm_buf_idx++;
			slot->buf_idx = 0;

			/* Advance to the next socket */
			ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
			slot = &ctx->slots[ctx->nm_i];
			nm_info->sockets_used++;
		}
	} else {
		*push_hdr = NULL;
		/*
		 * header_len is just a hint for the device. Netmap gives no
		 * information about the packet header length, and it is not
		 * guaranteed that all headers will fit in the first slot, so
		 * setting header_len to 0 makes the device ignore this value
		 * and resolve the header on its own.
		 */
		*header_len = 0;
	}

	/* Map all remaining data (regular routine for non-LLQ mode) */
	while (remaining_len > 0) {
		__builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);

		rc = ena_netmap_map_single_slot(ctx->na,
		    slot,
		    adapter->tx_buf_tag,
		    *nm_maps,
		    &vaddr,
		    &paddr);
		if (unlikely(rc != 0)) {
			device_printf(adapter->pdev,
			    "DMA mapping error\n");
			goto error_map;
		}
		nm_maps++;

		ena_buf->paddr = paddr;
		ena_buf->len = slot->len;
		ena_buf++;

		tx_info->num_of_bufs++;

		remaining_len -= slot->len;

		/* Save buf idx before advancing */
		*nm_buf_idx = slot->buf_idx;
		nm_buf_idx++;
		slot->buf_idx = 0;

		/* Advance to the next socket */
		ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
		slot = &ctx->slots[ctx->nm_i];
		nm_info->sockets_used++;
	}

	return (0);

error_map:
	ena_netmap_unmap_last_socket_chain(ctx, tx_info);

	return (rc);
}
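
/*
 * Roll back a partially mapped frame: unload the DMA maps in reverse
 * order and hand the consumed buffer indices back to their netmap
 * slots, walking the ring backwards from the current position.
 */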
static void
ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *ctx,
    struct ena_tx_buffer *tx_info)
{
	struct ena_netmap_tx_info *nm_info;
	int n;

	nm_info = &tx_info->nm_info;

	/*
	 * As the number of used sockets may differ from the number of DMA
	 * buffers in LLQ mode, they must be treated separately.
	 * First, unmap the DMA maps.
	 */
	n = tx_info->num_of_bufs;
	while (n--) {
		netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
		    nm_info->map_seg[n]);
	}
	tx_info->num_of_bufs = 0;

	/* Next, return the sockets back to userspace */
	n = nm_info->sockets_used;
	while (n--) {
		ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
		ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
		nm_info->socket_buf_idx[n] = 0;
		ctx->nm_i = nm_prev(ctx->nm_i, ctx->lim);
	}
	nm_info->sockets_used = 0;
}

static void
ena_netmap_tx_cleanup(struct ena_netmap_ctx *ctx)
{
	uint16_t req_id;
	uint16_t total_tx_descs = 0;

	ctx->nm_i = ctx->kring->nr_hwtail;
	ctx->nt = ctx->ring->next_to_clean;

	/* Reclaim buffers for completed transmissions */
	while (ena_com_tx_comp_req_id_get(ctx->io_cq, &req_id) >= 0) {
		if (validate_tx_req_id(ctx->ring, req_id) != 0)
			break;
		total_tx_descs += ena_netmap_tx_clean_one(ctx, req_id);
	}

	ctx->kring->nr_hwtail = ctx->nm_i;

	if (total_tx_descs > 0) {
		/* acknowledge completion of sent packets */
		ctx->ring->next_to_clean = ctx->nt;
		ena_com_comp_ack(ctx->ring->ena_com_io_sq, total_tx_descs);
		ena_com_update_dev_comp_head(ctx->ring->ena_com_io_cq);
	}
}
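
/*
 * Process a single Tx completion: unload its DMA maps, return the
 * buffer indices to the netmap slots past nr_hwtail and recycle the
 * request id. Returns the number of device descriptors to acknowledge.
 */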
static uint16_t
ena_netmap_tx_clean_one(struct ena_netmap_ctx *ctx, uint16_t req_id)
{
	struct ena_tx_buffer *tx_info;
	struct ena_netmap_tx_info *nm_info;
	int n;

	tx_info = &ctx->ring->tx_buffer_info[req_id];
	nm_info = &tx_info->nm_info;

	/*
	 * As the number of used sockets may differ from the number of DMA
	 * buffers in LLQ mode, they must be treated separately.
	 * First, unmap the DMA maps.
	 */
	for (n = 0; n < tx_info->num_of_bufs; n++) {
		netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
		    nm_info->map_seg[n]);
	}
	tx_info->num_of_bufs = 0;

	/* Next, return the sockets back to userspace */
	for (n = 0; n < nm_info->sockets_used; n++) {
		ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
		ENA_ASSERT(ctx->slots[ctx->nm_i].buf_idx == 0,
		    "Tx idx is not 0.\n");
		ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
		ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
		nm_info->socket_buf_idx[n] = 0;
	}
	nm_info->sockets_used = 0;

	ctx->ring->free_tx_ids[ctx->nt] = req_id;
	ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->lim);

	return (tx_info->tx_descs);
}

static inline int
validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id)
{
	struct ena_adapter *adapter = tx_ring->adapter;

	if (likely(req_id < tx_ring->ring_size))
		return (0);

	ena_trace(ENA_WARNING, "Invalid req_id: %hu\n", req_id);
	counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);

	adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);

	return (EFAULT);
}

static int
ena_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct ena_netmap_ctx ctx;
	int rc;

	ena_netmap_fill_ctx(kring, &ctx, ENA_IO_RXQ_IDX(kring->ring_id));
	ctx.ring = &ctx.adapter->rx_ring[kring->ring_id];

	if (ctx.kring->rhead > ctx.lim) {
		/* Probably not needed to release slots from the RX ring. */
		return (netmap_ring_reinit(ctx.kring));
	}

	if (unlikely((if_getdrvflags(ctx.na->ifp) & IFF_DRV_RUNNING) == 0))
		return (0);

	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
		return (0);

	if ((rc = ena_netmap_rx_frames(&ctx)) != 0)
		return (rc);

	ena_netmap_rx_cleanup(&ctx);

	return (0);
}
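
/*
 * Drain completed Rx frames from the completion queue into the netmap
 * ring, advancing nr_hwtail as frames are delivered. ENA_MAX_FRAMES
 * bounds the loop in case the device keeps producing descriptors.
 */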
static inline int
ena_netmap_rx_frames(struct ena_netmap_ctx *ctx)
{
	int rc = 0;
	int frames_counter = 0;

	ctx->nt = ctx->ring->next_to_clean;
	ctx->nm_i = ctx->kring->nr_hwtail;

	while ((rc = ena_netmap_rx_frame(ctx)) == ENA_NETMAP_MORE_FRAMES) {
		frames_counter++;
		/* In case of multiple frames, it is not an error. */
		rc = 0;
		if (frames_counter > ENA_MAX_FRAMES) {
			device_printf(ctx->adapter->pdev,
			    "Driver is stuck in the Rx loop\n");
			break;
		}
	}

	ctx->kring->nr_hwtail = ctx->nm_i;
	ctx->kring->nr_kflags &= ~NKR_PENDINTR;
	ctx->ring->next_to_clean = ctx->nt;

	return (rc);
}
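
/*
 * Receive a single frame: fetch its descriptors from the device, hand
 * each buffer over to a netmap slot and update the statistics. If
 * loading a descriptor fails mid-frame, the slots filled so far are
 * cleared again.
 */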
static inline int
ena_netmap_rx_frame(struct ena_netmap_ctx *ctx)
{
	struct ena_com_rx_ctx ena_rx_ctx;
	int rc, len = 0;
	uint16_t buf, nm;

	ena_rx_ctx.ena_bufs = ctx->ring->ena_bufs;
	ena_rx_ctx.max_bufs = ctx->adapter->max_rx_sgl_size;
	bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
	    ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD);

	rc = ena_com_rx_pkt(ctx->io_cq, ctx->io_sq, &ena_rx_ctx);
	if (unlikely(rc != 0)) {
		ena_trace(ENA_ALERT, "Too many desc from the device.\n");
		counter_u64_add(ctx->ring->rx_stats.bad_desc_num, 1);
		ctx->adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
		ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, ctx->adapter);
		return (rc);
	}
	if (unlikely(ena_rx_ctx.descs == 0))
		return (ENA_NETMAP_NO_MORE_FRAMES);

	ena_trace(ENA_NETMAP | ENA_DBG, "Rx: q %d got packet from ena. descs #:"
	    " %d l3 proto %d l4 proto %d hash: %x\n", ctx->ring->qid,
	    ena_rx_ctx.descs, ena_rx_ctx.l3_proto, ena_rx_ctx.l4_proto,
	    ena_rx_ctx.hash);

	for (buf = 0; buf < ena_rx_ctx.descs; buf++)
		if ((rc = ena_netmap_rx_load_desc(ctx, buf, &len)) != 0)
			break;
	/*
	 * ena_netmap_rx_load_desc() doesn't know the total number of
	 * descriptors. It sets the NS_MOREFRAG flag on every slot; here
	 * the flag is cleared from the last slot of the frame.
	 */
	ctx->slots[nm_prev(ctx->nm_i, ctx->lim)].flags = NS_BUF_CHANGED;

	if (rc != 0) {
		goto rx_clear_desc;
	}

	bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
	    ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD);

	counter_enter();
	counter_u64_add_protected(ctx->ring->rx_stats.bytes, len);
	counter_u64_add_protected(ctx->adapter->hw_stats.rx_bytes, len);
	counter_u64_add_protected(ctx->ring->rx_stats.cnt, 1);
	counter_u64_add_protected(ctx->adapter->hw_stats.rx_packets, 1);
	counter_exit();

	return (ENA_NETMAP_MORE_FRAMES);

rx_clear_desc:
	nm = ctx->nm_i;

	/* Remove the failed packet from the ring */
	while (buf--) {
		ctx->slots[nm].flags = 0;
		ctx->slots[nm].len = 0;
		nm = nm_prev(nm, ctx->lim);
	}

	return (rc);
}
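
/*
 * Attach one received buffer to the next netmap slot: the buffer index
 * saved in rx_info is moved back into the slot and the request id is
 * recycled through free_rx_ids.
 */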
static inline int
ena_netmap_rx_load_desc(struct ena_netmap_ctx *ctx, uint16_t buf, int *len)
{
	struct ena_rx_buffer *rx_info;
	uint16_t req_id;
	int rc;

	req_id = ctx->ring->ena_bufs[buf].req_id;
	rc = validate_rx_req_id(ctx->ring, req_id);
	if (unlikely(rc != 0))
		return (rc);

	rx_info = &ctx->ring->rx_buffer_info[req_id];
	bus_dmamap_sync(ctx->adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	netmap_unload_map(ctx->na, ctx->adapter->rx_buf_tag, rx_info->map);

	ENA_ASSERT(ctx->slots[ctx->nm_i].buf_idx == 0, "Rx idx is not 0.\n");

	ctx->slots[ctx->nm_i].buf_idx = rx_info->netmap_buf_idx;
	rx_info->netmap_buf_idx = 0;
	/*
	 * Set NS_MOREFRAG on all slots.
	 * ena_netmap_rx_frame() then clears it from the last one.
	 */
	ctx->slots[ctx->nm_i].flags |= NS_MOREFRAG | NS_BUF_CHANGED;
	ctx->slots[ctx->nm_i].len = ctx->ring->ena_bufs[buf].len;
	*len += ctx->slots[ctx->nm_i].len;
	ctx->ring->free_rx_ids[ctx->nt] = req_id;
	ena_trace(ENA_DBG, "rx_info %p, buf_idx %d, paddr %jx, nm: %d\n",
	    rx_info, ctx->slots[ctx->nm_i].buf_idx,
	    (uintmax_t)rx_info->ena_buf.paddr, ctx->nm_i);

	ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
	ctx->nt = ENA_RX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

	return (0);
}

static inline void
ena_netmap_rx_cleanup(struct ena_netmap_ctx *ctx)
{
	int refill_required;

	refill_required = ctx->kring->rhead - ctx->kring->nr_hwcur;
	if (ctx->kring->nr_hwcur != ctx->kring->nr_hwtail)
		refill_required -= 1;

	if (refill_required == 0)
		return;
	else if (refill_required < 0)
		refill_required += ctx->kring->nkr_num_slots;

	ena_refill_rx_bufs(ctx->ring, refill_required);
}

static inline void
ena_netmap_fill_ctx(struct netmap_kring *kring, struct ena_netmap_ctx *ctx,
    uint16_t ena_qid)
{
	ctx->kring = kring;
	ctx->na = kring->na;
	ctx->adapter = ctx->na->ifp->if_softc;
	ctx->lim = kring->nkr_num_slots - 1;
	ctx->io_cq = &ctx->adapter->ena_dev->io_cq_queues[ena_qid];
	ctx->io_sq = &ctx->adapter->ena_dev->io_sq_queues[ena_qid];
	ctx->slots = kring->ring->slot;
}

void
ena_netmap_unload(struct ena_adapter *adapter, bus_dmamap_t map)
{
	struct netmap_adapter *na = NA(adapter->ifp);

	netmap_unload_map(na, adapter->tx_buf_tag, map);
}

#endif /* DEV_NETMAP */