ena_netmap.c: d17b7d87eec781844a495efb818d1b6cd169cb55 -> 9a0f2079ca900f9f37806e341790e2f3ed4cb19f
--- unchanged ---

/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
--- 21 unchanged lines hidden ---
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef DEV_NETMAP

#include "ena.h"
#include "ena_netmap.h"
--- added in 9a0f2079 ---

#define ENA_NETMAP_MORE_FRAMES		1
#define ENA_NETMAP_NO_MORE_FRAMES	0
#define ENA_MAX_FRAMES			16384

struct ena_netmap_ctx {
	struct netmap_kring *kring;
	struct ena_adapter *adapter;
	struct netmap_adapter *na;
	struct netmap_slot *slots;
	struct ena_ring *ring;
	struct ena_com_io_cq *io_cq;
	struct ena_com_io_sq *io_sq;
	u_int nm_i;
	uint16_t nt;
	uint16_t lim;
};

/* Netmap callbacks */
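Note: `lim` in the context structure above is `nkr_num_slots - 1`; netmap's `nm_next()`/`nm_prev()` helpers use it to wrap ring indices at the boundary. A minimal standalone sketch of that wraparound, with the two helpers reimplemented locally (as `toy_` functions, so the snippet compiles outside the kernel):

```c
#include <stdio.h>

/*
 * Local reimplementations mirroring netmap's nm_next()/nm_prev():
 * `lim` is nkr_num_slots - 1, so ring indices wrap at the boundary.
 */
static unsigned
toy_nm_next(unsigned i, unsigned lim)
{
	return ((i == lim) ? 0 : i + 1);
}

static unsigned
toy_nm_prev(unsigned i, unsigned lim)
{
	return ((i == 0) ? lim : i - 1);
}

int
main(void)
{
	unsigned lim = 7;	/* a toy 8-slot ring */

	printf("next of 6: %u\n", toy_nm_next(6, lim));	/* 7 */
	printf("next of 7: %u\n", toy_nm_next(7, lim));	/* wraps to 0 */
	printf("prev of 0: %u\n", toy_nm_prev(0, lim));	/* wraps to 7 */
	return (0);
}
```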
--- unchanged ---

static int	ena_netmap_reg(struct netmap_adapter *, int);
static int	ena_netmap_txsync(struct netmap_kring *, int);
static int	ena_netmap_rxsync(struct netmap_kring *, int);
--- added in 9a0f2079 ---

/* Helper functions */
static int	ena_netmap_rx_frames(struct ena_netmap_ctx *);
static int	ena_netmap_rx_frame(struct ena_netmap_ctx *);
static int	ena_netmap_rx_load_desc(struct ena_netmap_ctx *, uint16_t,
    int *);
static void	ena_netmap_rx_cleanup(struct ena_netmap_ctx *);
static void	ena_netmap_fill_ctx(struct netmap_kring *,
    struct ena_netmap_ctx *, uint16_t);
--- unchanged ---

int
ena_netmap_attach(struct ena_adapter *adapter)
{
	struct netmap_adapter na;

	ena_trace(ENA_NETMAP, "netmap attach\n");

	bzero(&na, sizeof(na));
--- 6 unchanged lines hidden ---
	na.rx_buf_maxsize = adapter->buf_ring_size;
	na.nm_txsync = ena_netmap_txsync;
	na.nm_rxsync = ena_netmap_rxsync;
	na.nm_register = ena_netmap_reg;

	return (netmap_attach(&na));
}
--- added in 9a0f2079 ---

int
ena_netmap_alloc_rx_slot(struct ena_adapter *adapter,
    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
{
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_kring *kring;
	struct netmap_ring *ring;
	struct netmap_slot *slot;
	void *addr;
	uint64_t paddr;
	int nm_i, qid, head, lim, rc;

	/* if previously allocated frag is not used */
	if (unlikely(rx_info->netmap_buf_idx != 0))
		return (0);

	qid = rx_ring->qid;
	kring = na->rx_rings[qid];
	nm_i = kring->nr_hwcur;
	head = kring->rhead;

	ena_trace(ENA_NETMAP | ENA_DBG, "nr_hwcur: %d, nr_hwtail: %d, "
	    "rhead: %d, rcur: %d, rtail: %d\n", kring->nr_hwcur,
	    kring->nr_hwtail, kring->rhead, kring->rcur, kring->rtail);

	if ((nm_i == head) && rx_ring->initialized) {
		ena_trace(ENA_NETMAP, "No free slots in netmap ring\n");
		return (ENOMEM);
	}

	ring = kring->ring;
	if (ring == NULL) {
		device_printf(adapter->pdev, "Rx ring %d is NULL\n", qid);
		return (EFAULT);
	}
	slot = &ring->slot[nm_i];

	addr = PNMB(na, slot, &paddr);
	if (addr == NETMAP_BUF_BASE(na)) {
		device_printf(adapter->pdev, "Bad buff in slot\n");
		return (EFAULT);
	}

	rc = netmap_load_map(na, adapter->rx_buf_tag, rx_info->map, addr);
	if (rc != 0) {
		ena_trace(ENA_WARNING, "DMA mapping error\n");
		return (rc);
	}
	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

	rx_info->ena_buf.paddr = paddr;
	rx_info->ena_buf.len = ring->nr_buf_size;
	rx_info->mbuf = NULL;
	rx_info->netmap_buf_idx = slot->buf_idx;

	slot->buf_idx = 0;

	lim = kring->nkr_num_slots - 1;
	kring->nr_hwcur = nm_next(nm_i, lim);

	return (0);
}

void
ena_netmap_free_rx_slot(struct ena_adapter *adapter,
    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
{
	struct netmap_adapter *na;
	struct netmap_kring *kring;
	struct netmap_slot *slot;
	int nm_i, qid, lim;

	na = NA(adapter->ifp);
	if (na == NULL) {
		device_printf(adapter->pdev, "netmap adapter is NULL\n");
		return;
	}

	if (na->rx_rings == NULL) {
		device_printf(adapter->pdev, "netmap rings are NULL\n");
		return;
	}

	qid = rx_ring->qid;
	kring = na->rx_rings[qid];
	if (kring == NULL) {
		device_printf(adapter->pdev,
		    "netmap kernel ring %d is NULL\n", qid);
		return;
	}

	lim = kring->nkr_num_slots - 1;
	nm_i = nm_prev(kring->nr_hwcur, lim);

	if (kring->nr_mode != NKR_NETMAP_ON)
		return;

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	netmap_unload_map(na, adapter->rx_buf_tag, rx_info->map);

	slot = &kring->ring->slot[nm_i];

	ENA_ASSERT(slot->buf_idx == 0, "Overwrite slot buf\n");
	slot->buf_idx = rx_info->netmap_buf_idx;
	slot->flags = NS_BUF_CHANGED;

	rx_info->netmap_buf_idx = 0;
	kring->nr_hwcur = nm_i;
}

void
ena_netmap_reset_rx_ring(struct ena_adapter *adapter, int qid)
{
	if (adapter->ifp->if_capenable & IFCAP_NETMAP)
		netmap_reset(NA(adapter->ifp), NR_RX, qid, 0);
}
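Note: `ena_netmap_alloc_rx_slot()` takes ownership of a netmap buffer by moving `slot->buf_idx` into `rx_info->netmap_buf_idx` and zeroing the slot, and `ena_netmap_free_rx_slot()` reverses the handoff, so index 0 consistently means "not owned by this side". A toy model of that invariant, using simplified stand-in structs rather than the driver's real types:

```c
#include <assert.h>
#include <stdint.h>

/*
 * Simplified stand-ins for struct netmap_slot and struct ena_rx_buffer;
 * only the buffer-index handoff is modeled here.
 */
struct toy_slot { uint32_t buf_idx; };
struct toy_rx_info { uint32_t netmap_buf_idx; };

/*
 * Mirrors the handoff in ena_netmap_alloc_rx_slot(): the driver takes
 * the buffer index from the slot and zeroes it, so 0 marks "not owned".
 */
static void
toy_alloc(struct toy_slot *slot, struct toy_rx_info *info)
{
	if (info->netmap_buf_idx != 0)
		return;		/* previously taken buffer is still unused */
	info->netmap_buf_idx = slot->buf_idx;
	slot->buf_idx = 0;
}

/* Mirrors ena_netmap_free_rx_slot(): the index goes back to the slot. */
static void
toy_free(struct toy_slot *slot, struct toy_rx_info *info)
{
	assert(slot->buf_idx == 0);	/* same invariant as the ENA_ASSERT */
	slot->buf_idx = info->netmap_buf_idx;
	info->netmap_buf_idx = 0;
}

int
main(void)
{
	struct toy_slot slot = { .buf_idx = 42 };
	struct toy_rx_info info = { .netmap_buf_idx = 0 };

	toy_alloc(&slot, &info);
	assert(slot.buf_idx == 0 && info.netmap_buf_idx == 42);
	toy_free(&slot, &info);
	assert(slot.buf_idx == 42 && info.netmap_buf_idx == 0);
	return (0);
}
```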
--- unchanged ---

static int
ena_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct ena_adapter* adapter = ifp->if_softc;
	int rc;

	sx_xlock(&adapter->ioctl_sx);
--- 27 unchanged lines hidden ---
{
	ena_trace(ENA_NETMAP, "netmap txsync\n");
	return (0);
}

static int
ena_netmap_rxsync(struct netmap_kring *kring, int flags)
{
--- removed in 9a0f2079 ---

	ena_trace(ENA_NETMAP, "netmap rxsync\n");

--- added in 9a0f2079 ---

	struct ena_netmap_ctx ctx;
	int rc;

	ena_netmap_fill_ctx(kring, &ctx, ENA_IO_RXQ_IDX(kring->ring_id));
	ctx.ring = &ctx.adapter->rx_ring[kring->ring_id];

	if (ctx.kring->rhead > ctx.lim) {
		/* Probably not needed to release slots from RX ring. */
		return (netmap_ring_reinit(ctx.kring));
	}

	if (unlikely((if_getdrvflags(ctx.na->ifp) & IFF_DRV_RUNNING) == 0))
		return (0);

	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
		return (0);

	if ((rc = ena_netmap_rx_frames(&ctx)) != 0)
		return (rc);

	ena_netmap_rx_cleanup(&ctx);
--- unchanged ---

	return (0);
}
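Note: the helpers added below flag every slot of a multi-descriptor frame with `NS_MOREFRAG` and clear the flag on the last slot, so a netmap consumer can reassemble a frame by walking slots until the flag drops. A hypothetical userspace-style walk, using a local stand-in constant for the flag and a simplified slot type:

```c
#include <stdio.h>

#define TOY_NS_MOREFRAG	0x20	/* local stand-in for netmap's NS_MOREFRAG */

struct toy_slot {
	unsigned len;
	unsigned flags;
};

/*
 * Sums slot lengths starting at `i` until a slot without NS_MOREFRAG
 * ends the frame; returns the index of the next frame's first slot.
 */
static unsigned
toy_frame_len(const struct toy_slot *slots, unsigned i, unsigned lim,
    unsigned *total)
{
	unsigned more;

	*total = 0;
	for (;;) {
		*total += slots[i].len;
		more = slots[i].flags & TOY_NS_MOREFRAG;
		i = (i == lim) ? 0 : i + 1;	/* same wraparound as nm_next() */
		if (!more)
			return (i);
	}
}

int
main(void)
{
	/* A 3-descriptor frame followed by a single-descriptor frame. */
	struct toy_slot ring[8] = {
		{ 2048, TOY_NS_MOREFRAG }, { 2048, TOY_NS_MOREFRAG },
		{ 500, 0 }, { 60, 0 },
	};
	unsigned total, next;

	next = toy_frame_len(ring, 0, 7, &total);
	printf("frame of %u bytes, next frame at slot %u\n", total, next);
	return (0);
}
```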
--- added in 9a0f2079 ---

static inline int
ena_netmap_rx_frames(struct ena_netmap_ctx *ctx)
{
	int rc = 0;
	int frames_counter = 0;

	ctx->nt = ctx->ring->next_to_clean;
	ctx->nm_i = ctx->kring->nr_hwtail;

	while ((rc = ena_netmap_rx_frame(ctx)) == ENA_NETMAP_MORE_FRAMES) {
		frames_counter++;
		/* In case of multiple frames, it is not an error. */
		rc = 0;
		if (frames_counter > ENA_MAX_FRAMES) {
			device_printf(ctx->adapter->pdev,
			    "Driver is stuck in the Rx loop\n");
			break;
		}
	}

	ctx->kring->nr_hwtail = ctx->nm_i;
	ctx->kring->nr_kflags &= ~NKR_PENDINTR;
	ctx->ring->next_to_clean = ctx->nt;

	return (rc);
}

static inline int
ena_netmap_rx_frame(struct ena_netmap_ctx *ctx)
{
	struct ena_com_rx_ctx ena_rx_ctx;
	int rc, len = 0;
	uint16_t buf, nm;

	ena_rx_ctx.ena_bufs = ctx->ring->ena_bufs;
	ena_rx_ctx.max_bufs = ctx->adapter->max_rx_sgl_size;
	bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
	    ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD);

	rc = ena_com_rx_pkt(ctx->io_cq, ctx->io_sq, &ena_rx_ctx);
	if (unlikely(rc != 0)) {
		ena_trace(ENA_ALERT, "Too many desc from the device.\n");
		counter_u64_add(ctx->ring->rx_stats.bad_desc_num, 1);
		ctx->adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
		ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, ctx->adapter);
		return (rc);
	}
	if (unlikely(ena_rx_ctx.descs == 0))
		return (ENA_NETMAP_NO_MORE_FRAMES);

	ena_trace(ENA_NETMAP | ENA_DBG, "Rx: q %d got packet from ena. descs #:"
	    " %d l3 proto %d l4 proto %d hash: %x\n", ctx->ring->qid,
	    ena_rx_ctx.descs, ena_rx_ctx.l3_proto, ena_rx_ctx.l4_proto,
	    ena_rx_ctx.hash);

	for (buf = 0; buf < ena_rx_ctx.descs; buf++)
		if ((rc = ena_netmap_rx_load_desc(ctx, buf, &len)) != 0)
			break;
	/*
	 * ena_netmap_rx_load_desc() doesn't know the number of descriptors.
	 * It just sets the NS_MOREFRAG flag on every slot; the flag of the
	 * last slot is then cleared here.
	 */
	ctx->slots[nm_prev(ctx->nm_i, ctx->lim)].flags = NS_BUF_CHANGED;

	if (rc != 0) {
		goto rx_clear_desc;
	}

	bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
	    ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD);

	counter_enter();
	counter_u64_add_protected(ctx->ring->rx_stats.bytes, len);
	counter_u64_add_protected(ctx->adapter->hw_stats.rx_bytes, len);
	counter_u64_add_protected(ctx->ring->rx_stats.cnt, 1);
	counter_u64_add_protected(ctx->adapter->hw_stats.rx_packets, 1);
	counter_exit();

	return (ENA_NETMAP_MORE_FRAMES);

rx_clear_desc:
	nm = ctx->nm_i;

	/* Remove failed packet from ring */
	while (buf--) {
		ctx->slots[nm].flags = 0;
		ctx->slots[nm].len = 0;
		nm = nm_prev(nm, ctx->lim);
	}

	return (rc);
}

static inline int
ena_netmap_rx_load_desc(struct ena_netmap_ctx *ctx, uint16_t buf, int *len)
{
	struct ena_rx_buffer *rx_info;
	uint16_t req_id;
	int rc;

	req_id = ctx->ring->ena_bufs[buf].req_id;
	rc = validate_rx_req_id(ctx->ring, req_id);
	if (unlikely(rc != 0))
		return (rc);

	rx_info = &ctx->ring->rx_buffer_info[req_id];
	bus_dmamap_sync(ctx->adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	netmap_unload_map(ctx->na, ctx->adapter->rx_buf_tag, rx_info->map);

	ENA_ASSERT(ctx->slots[ctx->nm_i].buf_idx == 0, "Rx idx is not 0.\n");

	ctx->slots[ctx->nm_i].buf_idx = rx_info->netmap_buf_idx;
	rx_info->netmap_buf_idx = 0;
	/*
	 * Set NS_MOREFRAG on all slots; ena_netmap_rx_frame() then clears
	 * it on the last one.
	 */
	ctx->slots[ctx->nm_i].flags |= NS_MOREFRAG | NS_BUF_CHANGED;
	ctx->slots[ctx->nm_i].len = ctx->ring->ena_bufs[buf].len;
	*len += ctx->slots[ctx->nm_i].len;
	ctx->ring->free_rx_ids[ctx->nt] = req_id;
	ena_trace(ENA_DBG, "rx_info %p, buf_idx %d, paddr %jx, nm: %d\n",
	    rx_info, ctx->slots[ctx->nm_i].buf_idx,
	    (uintmax_t)rx_info->ena_buf.paddr, ctx->nm_i);

	ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
	ctx->nt = ENA_RX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

	return (0);
}

static inline void
ena_netmap_rx_cleanup(struct ena_netmap_ctx *ctx)
{
	int refill_required;

	refill_required = ctx->kring->rhead - ctx->kring->nr_hwcur;
	if (ctx->kring->nr_hwcur != ctx->kring->nr_hwtail)
		refill_required -= 1;

	if (refill_required == 0)
		return;
	else if (refill_required < 0)
		refill_required += ctx->kring->nkr_num_slots;

	ena_refill_rx_bufs(ctx->ring, refill_required);
}

static inline void
ena_netmap_fill_ctx(struct netmap_kring *kring, struct ena_netmap_ctx *ctx,
    uint16_t ena_qid)
{
	ctx->kring = kring;
	ctx->na = kring->na;
	ctx->adapter = ctx->na->ifp->if_softc;
	ctx->lim = kring->nkr_num_slots - 1;
	ctx->io_cq = &ctx->adapter->ena_dev->io_cq_queues[ena_qid];
	ctx->io_sq = &ctx->adapter->ena_dev->io_sq_queues[ena_qid];
	ctx->slots = kring->ring->slot;
}
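Note: `ena_netmap_rx_cleanup()` refills as many slots as the application has released: the distance from `nr_hwcur` to `rhead`, holding one slot back while the ring is non-empty and wrapping a negative difference by `nkr_num_slots`. A small worked sketch of that arithmetic under hypothetical index values:

```c
#include <stdio.h>

/*
 * Mirrors the arithmetic in ena_netmap_rx_cleanup(): slots between
 * nr_hwcur and rhead were returned by the application and can be
 * refilled; one slot is held back whenever the ring is non-empty
 * (nr_hwcur != nr_hwtail), and a negative difference means rhead
 * has already wrapped around the ring.
 */
static int
refill_count(int rhead, int hwcur, int hwtail, int num_slots)
{
	int refill = rhead - hwcur;

	if (hwcur != hwtail)
		refill -= 1;
	if (refill < 0)
		refill += num_slots;
	return (refill);
}

int
main(void)
{
	/* Hypothetical 1024-slot ring where rhead wrapped past hwcur:
	 * 5 - 1020 - 1 + 1024 = 8 slots to refill. */
	printf("%d\n", refill_count(5, 1020, 100, 1024));
	/* Empty ring (hwcur == hwtail): no slot held back, 10 - 2 = 8. */
	printf("%d\n", refill_count(10, 2, 2, 1024));
	return (0);
}
```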
--- unchanged ---

#endif /* DEV_NETMAP */