ena_datapath.c: diff between revisions b40dd828bdc96959cf5fa90d02fc00d96c54fbe4 (old) and 9eb1615f33e9174fa5f1ca46954c35b3f026a98a (new). Lines removed in the new revision are prefixed with "-", lines added with "+"; unchanged context is shown once, with the old revision's line numbers.
1/*-
2 * BSD LICENSE
3 *
4 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 62 unchanged lines hidden (view full) ---

71 struct ena_com_io_cq* io_cq;
72 struct ena_eth_io_intr_reg intr_reg;
73 int qid, ena_qid;
74 int txc, rxc, i;
75
76 if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
77 return;
78
- 79 ena_trace(ENA_DBG, "MSI-X TX/RX routine\n");
+ 79 ena_trace(NULL, ENA_DBG, "MSI-X TX/RX routine\n");
80
81 tx_ring = que->tx_ring;
82 rx_ring = que->rx_ring;
83 qid = que->id;
84 ena_qid = ENA_IO_TXQ_IDX(qid);
85 io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
86
87 tx_ring->first_interrupt = true;

--- 174 unchanged lines hidden (view full) ---

262 tx_info->mbuf = NULL;
263 bintime_clear(&tx_info->timestamp);
264
265 bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
266 BUS_DMASYNC_POSTWRITE);
267 bus_dmamap_unload(adapter->tx_buf_tag,
268 tx_info->dmamap);
269
- 270 ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d mbuf %p completed\n",
+ 270 ena_trace(NULL, ENA_DBG | ENA_TXPTH, "tx: q %d mbuf %p completed\n",
271 tx_ring->qid, mbuf);
272
273 m_freem(mbuf);
274
275 total_done += tx_info->tx_descs;
276
277 tx_ring->free_tx_ids[next_to_clean] = req_id;
278 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,

--- 8 unchanged lines hidden (view full) ---

287 total_done);
288 ena_com_update_dev_comp_head(io_cq);
289 total_done = 0;
290 }
291 } while (likely(--budget));
292
293 work_done = TX_BUDGET - budget;
294
- 295 ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d done. total pkts: %d\n",
+ 295 ena_trace(NULL, ENA_DBG | ENA_TXPTH, "tx: q %d done. total pkts: %d\n",
296 tx_ring->qid, work_done);
297
298 /* If there is still something to commit update ring state */
299 if (likely(commit != TX_COMMIT)) {
300 tx_ring->next_to_clean = next_to_clean;
301 ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid],
302 total_done);
303 ena_com_update_dev_comp_head(io_cq);

--- 100 unchanged lines hidden (view full) ---

404static struct mbuf*
405ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs,
406 struct ena_com_rx_ctx *ena_rx_ctx, uint16_t *next_to_clean)
407{
408 struct mbuf *mbuf;
409 struct ena_rx_buffer *rx_info;
410 struct ena_adapter *adapter;
411 unsigned int descs = ena_rx_ctx->descs;
- 412 int rc;
413 uint16_t ntc, len, req_id, buf = 0;
414
415 ntc = *next_to_clean;
416 adapter = rx_ring->adapter;
417
418 len = ena_bufs[buf].len;
419 req_id = ena_bufs[buf].req_id;
- 420 rc = validate_rx_req_id(rx_ring, req_id);
- 421 if (unlikely(rc != 0))
- 422 return (NULL);
- 423
424 rx_info = &rx_ring->rx_buffer_info[req_id];
425 if (unlikely(rx_info->mbuf == NULL)) {
426 device_printf(adapter->pdev, "NULL mbuf in rx_info");
427 return (NULL);
428 }
429
- 430 ena_trace(ENA_DBG | ENA_RXPTH, "rx_info %p, mbuf %p, paddr %jx\n",
+ 425 ena_trace(NULL, ENA_DBG | ENA_RXPTH, "rx_info %p, mbuf %p, paddr %jx\n",
431 rx_info, rx_info->mbuf, (uintmax_t)rx_info->ena_buf.paddr);
432
433 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
434 BUS_DMASYNC_POSTREAD);
435 mbuf = rx_info->mbuf;
436 mbuf->m_flags |= M_PKTHDR;
437 mbuf->m_pkthdr.len = len;
438 mbuf->m_len = len;
439 mbuf->m_pkthdr.rcvif = rx_ring->que->adapter->ifp;
440
441 /* Fill mbuf with hash key and it's interpretation for optimization */
442 ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf);
443
- 444 ena_trace(ENA_DBG | ENA_RXPTH, "rx mbuf 0x%p, flags=0x%x, len: %d\n",
+ 439 ena_trace(NULL, ENA_DBG | ENA_RXPTH, "rx mbuf 0x%p, flags=0x%x, len: %d\n",
445 mbuf, mbuf->m_flags, mbuf->m_pkthdr.len);
446
447 /* DMA address is not needed anymore, unmap it */
448 bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map);
449
450 rx_info->mbuf = NULL;
451 rx_ring->free_rx_ids[ntc] = req_id;
452 ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size);
453
454 /*
455 * While we have more than 1 descriptors for one rcvd packet, append
456 * other mbufs to the main one
457 */
458 while (--descs) {
459 ++buf;
460 len = ena_bufs[buf].len;
461 req_id = ena_bufs[buf].req_id;
- 462 rc = validate_rx_req_id(rx_ring, req_id);
- 463 if (unlikely(rc != 0)) {
- 464 /*
- 465 * If the req_id is invalid, then the device will be
- 466 * reset. In that case we must free all mbufs that
- 467 * were already gathered.
- 468 */
- 469 m_freem(mbuf);
- 470 return (NULL);
- 471 }
472 rx_info = &rx_ring->rx_buffer_info[req_id];
473
474 if (unlikely(rx_info->mbuf == NULL)) {
475 device_printf(adapter->pdev, "NULL mbuf in rx_info");
476 /*
477 * If one of the required mbufs was not allocated yet,
478 * we can break there.
479 * All earlier used descriptors will be reallocated

--- 6 unchanged lines hidden (view full) ---

486 m_freem(mbuf);
487 return (NULL);
488 }
489
490 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
491 BUS_DMASYNC_POSTREAD);
492 if (unlikely(m_append(mbuf, len, rx_info->mbuf->m_data) == 0)) {
493 counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
- 494 ena_trace(ENA_WARNING, "Failed to append Rx mbuf %p\n",
+ 479 ena_trace(NULL, ENA_WARNING, "Failed to append Rx mbuf %p\n",
495 mbuf);
496 }
497
- 498 ena_trace(ENA_DBG | ENA_RXPTH,
+ 483 ena_trace(NULL, ENA_DBG | ENA_RXPTH,
499 "rx mbuf updated. len %d\n", mbuf->m_pkthdr.len);
500
501 /* Free already appended mbuf, it won't be useful anymore */
502 bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map);
503 m_freem(rx_info->mbuf);
504 rx_info->mbuf = NULL;
505
506 rx_ring->free_rx_ids[ntc] = req_id;

--- 14 unchanged lines hidden (view full) ---

521{
522
523 /* if IP and error */
524 if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
525 ena_rx_ctx->l3_csum_err)) {
526 /* ipv4 checksum error */
527 mbuf->m_pkthdr.csum_flags = 0;
528 counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
484 "rx mbuf updated. len %d\n", mbuf->m_pkthdr.len);
485
486 /* Free already appended mbuf, it won't be useful anymore */
487 bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map);
488 m_freem(rx_info->mbuf);
489 rx_info->mbuf = NULL;
490
491 rx_ring->free_rx_ids[ntc] = req_id;

--- 14 unchanged lines hidden (view full) ---

506{
507
508 /* if IP and error */
509 if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
510 ena_rx_ctx->l3_csum_err)) {
511 /* ipv4 checksum error */
512 mbuf->m_pkthdr.csum_flags = 0;
513 counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
- 529 ena_trace(ENA_DBG, "RX IPv4 header checksum error\n");
+ 514 ena_trace(NULL, ENA_DBG, "RX IPv4 header checksum error\n");
530 return;
531 }
532
533 /* if TCP/UDP */
534 if ((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
535 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)) {
536 if (ena_rx_ctx->l4_csum_err) {
537 /* TCP/UDP checksum error */
538 mbuf->m_pkthdr.csum_flags = 0;
539 counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
- 540 ena_trace(ENA_DBG, "RX L4 checksum error\n");
+ 525 ena_trace(NULL, ENA_DBG, "RX L4 checksum error\n");
541 } else {
542 mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
543 mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
544 }
545 }
546}
547
548/**
549 * ena_rx_cleanup - handle rx irq
550 * @arg: ring for which irq is being handled
551 **/
552static int
553ena_rx_cleanup(struct ena_ring *rx_ring)
554{
555 struct ena_adapter *adapter;
556 struct mbuf *mbuf;
557 struct ena_com_rx_ctx ena_rx_ctx;
558 struct ena_com_io_cq* io_cq;
559 struct ena_com_io_sq* io_sq;
+ 545 enum ena_regs_reset_reason_types reset_reason;
560 if_t ifp;
561 uint16_t ena_qid;
562 uint16_t next_to_clean;
563 uint32_t refill_required;
564 uint32_t refill_threshold;
565 uint32_t do_if_input = 0;
566 unsigned int qid;
567 int rc, i;

--- 10 unchanged lines hidden (view full) ---

578 io_sq = &adapter->ena_dev->io_sq_queues[ena_qid];
579 next_to_clean = rx_ring->next_to_clean;
580
581#ifdef DEV_NETMAP
582 if (netmap_rx_irq(adapter->ifp, rx_ring->qid, &done) != NM_IRQ_PASS)
583 return (0);
584#endif /* DEV_NETMAP */
585
- 586 ena_trace(ENA_DBG, "rx: qid %d\n", qid);
+ 572 ena_trace(NULL, ENA_DBG, "rx: qid %d\n", qid);
587
588 do {
589 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
590 ena_rx_ctx.max_bufs = adapter->max_rx_sgl_size;
591 ena_rx_ctx.descs = 0;
592 bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag,
593 io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD);
594 rc = ena_com_rx_pkt(io_cq, io_sq, &ena_rx_ctx);
- 595
- 596 if (unlikely(rc != 0))
- 597 goto error;
+ 581 if (unlikely(rc != 0)) {
+ 582 if (rc == ENA_COM_NO_SPACE) {
+ 583 counter_u64_add(rx_ring->rx_stats.bad_desc_num,
+ 584 1);
+ 585 reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
+ 586 } else {
+ 587 counter_u64_add(rx_ring->rx_stats.bad_req_id,
+ 588 1);
+ 589 reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+ 590 }
+ 591 ena_trigger_reset(adapter, reset_reason);
+ 592 return (0);
+ 593 }
598
599 if (unlikely(ena_rx_ctx.descs == 0))
600 break;
601
- 602 ena_trace(ENA_DBG | ENA_RXPTH, "rx: q %d got packet from ena. "
+ 598 ena_trace(NULL, ENA_DBG | ENA_RXPTH, "rx: q %d got packet from ena. "
603 "descs #: %d l3 proto %d l4 proto %d hash: %x\n",
604 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
605 ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
606
607 /* Receive mbuf from the ring */
608 mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs,
609 &ena_rx_ctx, &next_to_clean);
610 bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag,

--- 36 unchanged lines hidden (view full) ---

647 * - no LRO resources, or
648 * - lro enqueue fails
649 */
650 if ((rx_ring->lro.lro_cnt != 0) &&
651 (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0))
652 do_if_input = 0;
653 }
654 if (do_if_input != 0) {
599 "descs #: %d l3 proto %d l4 proto %d hash: %x\n",
600 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
601 ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
602
603 /* Receive mbuf from the ring */
604 mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs,
605 &ena_rx_ctx, &next_to_clean);
606 bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag,

--- 36 unchanged lines hidden (view full) ---

643 * - no LRO resources, or
644 * - lro enqueue fails
645 */
646 if ((rx_ring->lro.lro_cnt != 0) &&
647 (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0))
648 do_if_input = 0;
649 }
650 if (do_if_input != 0) {
- 655 ena_trace(ENA_DBG | ENA_RXPTH,
+ 651 ena_trace(NULL, ENA_DBG | ENA_RXPTH,
656 "calling if_input() with mbuf %p\n", mbuf);
657 (*ifp->if_input)(ifp, mbuf);
658 }
659
660 counter_enter();
661 counter_u64_add_protected(rx_ring->rx_stats.cnt, 1);
662 counter_u64_add_protected(adapter->hw_stats.rx_packets, 1);
663 counter_exit();

--- 9 unchanged lines hidden (view full) ---

673 if (refill_required > refill_threshold) {
674 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
675 ena_refill_rx_bufs(rx_ring, refill_required);
676 }
677
678 tcp_lro_flush_all(&rx_ring->lro);
679
680 return (RX_BUDGET - budget);
652 "calling if_input() with mbuf %p\n", mbuf);
653 (*ifp->if_input)(ifp, mbuf);
654 }
655
656 counter_enter();
657 counter_u64_add_protected(rx_ring->rx_stats.cnt, 1);
658 counter_u64_add_protected(adapter->hw_stats.rx_packets, 1);
659 counter_exit();

--- 9 unchanged lines hidden (view full) ---

669 if (refill_required > refill_threshold) {
670 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
671 ena_refill_rx_bufs(rx_ring, refill_required);
672 }
673
674 tcp_lro_flush_all(&rx_ring->lro);
675
676 return (RX_BUDGET - budget);
- 681
- 682 error:
- 683 counter_u64_add(rx_ring->rx_stats.bad_desc_num, 1);
- 684
- 685 /* Too many desc from the device. Trigger reset */
- 686 ena_trigger_reset(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
- 687
- 688 return (0);
689}
690
691static void
692ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct mbuf *mbuf,
693 bool disable_meta_caching)
694{
695 struct ena_com_tx_meta *ena_meta;
696 struct ether_vlan_header *eh;

--- 139 unchanged lines hidden (view full) ---

836
837 /*
838 * For easier maintaining of the DMA map, map the whole mbuf even if
839 * the LLQ is used. The descriptors will be filled using the segments.
840 */
841 rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->dmamap, mbuf,
842 segs, &nsegs, BUS_DMA_NOWAIT);
843 if (unlikely((rc != 0) || (nsegs == 0))) {
- 844 ena_trace(ENA_WARNING,
+ 832 ena_trace(NULL, ENA_WARNING,
845 "dmamap load failed! err: %d nsegs: %d\n", rc, nsegs);
846 goto dma_error;
847 }
848
849 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
850 /*
851 * When the device is LLQ mode, the driver will copy
852 * the header into the device memory space.

--- 15 unchanged lines hidden (view full) ---

868 */
869 } else {
870 m_copydata(mbuf, 0, *header_len, tx_ring->push_buf_intermediate_buf);
871 *push_hdr = tx_ring->push_buf_intermediate_buf;
872
873 counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);
874 }
875
833 "dmamap load failed! err: %d nsegs: %d\n", rc, nsegs);
834 goto dma_error;
835 }
836
837 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
838 /*
839 * When the device is LLQ mode, the driver will copy
840 * the header into the device memory space.

--- 15 unchanged lines hidden (view full) ---

856 */
857 } else {
858 m_copydata(mbuf, 0, *header_len, tx_ring->push_buf_intermediate_buf);
859 *push_hdr = tx_ring->push_buf_intermediate_buf;
860
861 counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);
862 }
863
- 876 ena_trace(ENA_DBG | ENA_TXPTH,
+ 864 ena_trace(NULL, ENA_DBG | ENA_TXPTH,
877 "mbuf: %p header_buf->vaddr: %p push_len: %d\n",
878 mbuf, *push_hdr, *header_len);
879
880 /* If packet is fitted in LLQ header, no need for DMA segments. */
881 if (mbuf->m_pkthdr.len <= tx_ring->tx_max_header_size) {
882 return (0);
883 } else {
884 offset = tx_ring->tx_max_header_size;

--- 61 unchanged lines hidden (view full) ---

946
947 ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
948 adapter = tx_ring->que->adapter;
949 ena_dev = adapter->ena_dev;
950 io_sq = &ena_dev->io_sq_queues[ena_qid];
951
952 rc = ena_check_and_collapse_mbuf(tx_ring, mbuf);
953 if (unlikely(rc != 0)) {
865 "mbuf: %p header_buf->vaddr: %p push_len: %d\n",
866 mbuf, *push_hdr, *header_len);
867
868 /* If packet is fitted in LLQ header, no need for DMA segments. */
869 if (mbuf->m_pkthdr.len <= tx_ring->tx_max_header_size) {
870 return (0);
871 } else {
872 offset = tx_ring->tx_max_header_size;

--- 61 unchanged lines hidden (view full) ---

934
935 ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
936 adapter = tx_ring->que->adapter;
937 ena_dev = adapter->ena_dev;
938 io_sq = &ena_dev->io_sq_queues[ena_qid];
939
940 rc = ena_check_and_collapse_mbuf(tx_ring, mbuf);
941 if (unlikely(rc != 0)) {
- 954 ena_trace(ENA_WARNING,
+ 942 ena_trace(NULL, ENA_WARNING,
955 "Failed to collapse mbuf! err: %d\n", rc);
956 return (rc);
957 }
958
943 "Failed to collapse mbuf! err: %d\n", rc);
944 return (rc);
945 }
946
- 959 ena_trace(ENA_DBG | ENA_TXPTH, "Tx: %d bytes\n", (*mbuf)->m_pkthdr.len);
+ 947 ena_trace(NULL, ENA_DBG | ENA_TXPTH, "Tx: %d bytes\n", (*mbuf)->m_pkthdr.len);
960
961 next_to_use = tx_ring->next_to_use;
962 req_id = tx_ring->free_tx_ids[next_to_use];
963 tx_info = &tx_ring->tx_buffer_info[req_id];
964 tx_info->num_of_bufs = 0;
965
966 rc = ena_tx_map_mbuf(tx_ring, tx_info, *mbuf, &push_hdr, &header_len);
967 if (unlikely(rc != 0)) {
- 968 ena_trace(ENA_WARNING, "Failed to map TX mbuf\n");
+ 956 ena_trace(NULL, ENA_WARNING, "Failed to map TX mbuf\n");
969 return (rc);
970 }
971 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
972 ena_tx_ctx.ena_bufs = tx_info->bufs;
973 ena_tx_ctx.push_header = push_hdr;
974 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
975 ena_tx_ctx.req_id = req_id;
976 ena_tx_ctx.header_len = header_len;
977
978 /* Set flags and meta data */
979 ena_tx_csum(&ena_tx_ctx, *mbuf, adapter->disable_meta_caching);
980
981 if (tx_ring->acum_pkts == DB_THRESHOLD ||
982 ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx)) {
- 983 ena_trace(ENA_DBG | ENA_TXPTH,
+ 971 ena_trace(NULL, ENA_DBG | ENA_TXPTH,
984 "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
985 tx_ring->que->id);
986 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
987 counter_u64_add(tx_ring->tx_stats.doorbells, 1);
988 tx_ring->acum_pkts = 0;
989 }
990
991 /* Prepare the packet's descriptors and send them to device */
992 rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc);
993 if (unlikely(rc != 0)) {
994 if (likely(rc == ENA_COM_NO_MEM)) {
972 "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
973 tx_ring->que->id);
974 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
975 counter_u64_add(tx_ring->tx_stats.doorbells, 1);
976 tx_ring->acum_pkts = 0;
977 }
978
979 /* Prepare the packet's descriptors and send them to device */
980 rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc);
981 if (unlikely(rc != 0)) {
982 if (likely(rc == ENA_COM_NO_MEM)) {
- 995 ena_trace(ENA_DBG | ENA_TXPTH,
+ 983 ena_trace(NULL, ENA_DBG | ENA_TXPTH,
996 "tx ring[%d] if out of space\n", tx_ring->que->id);
997 } else {
998 device_printf(adapter->pdev,
999 "failed to prepare tx bufs\n");
1000 }
1001 counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);
1002 goto dma_error;
1003 }

--- 16 unchanged lines hidden (view full) ---

1020 tx_ring->ring_size);
1021
1022 /* stop the queue when no more space available, the packet can have up
1023 * to sgl_size + 2. one for the meta descriptor and one for header
1024 * (if the header is larger than tx_max_header_size).
1025 */
1026 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
1027 adapter->max_tx_sgl_size + 2))) {
984 "tx ring[%d] if out of space\n", tx_ring->que->id);
985 } else {
986 device_printf(adapter->pdev,
987 "failed to prepare tx bufs\n");
988 }
989 counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);
990 goto dma_error;
991 }

--- 16 unchanged lines hidden (view full) ---

1008 tx_ring->ring_size);
1009
1010 /* stop the queue when no more space available, the packet can have up
1011 * to sgl_size + 2. one for the meta descriptor and one for header
1012 * (if the header is larger than tx_max_header_size).
1013 */
1014 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
1015 adapter->max_tx_sgl_size + 2))) {
- 1028 ena_trace(ENA_DBG | ENA_TXPTH, "Stop queue %d\n",
+ 1016 ena_trace(NULL, ENA_DBG | ENA_TXPTH, "Stop queue %d\n",
1029 tx_ring->que->id);
1030
1031 tx_ring->running = false;
1032 counter_u64_add(tx_ring->tx_stats.queue_stop, 1);
1033
1034 /* There is a rare condition where this function decides to
1035 * stop the queue but meanwhile tx_cleanup() updates
1036 * next_to_completion and terminates.

--- 36 unchanged lines hidden (view full) ---

1073
1074 if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)))
1075 return;
1076
1077 ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
1078 io_sq = &adapter->ena_dev->io_sq_queues[ena_qid];
1079
1080 while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) {
- 1081 ena_trace(ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and"
+ 1069 ena_trace(NULL, ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and"
1082 " header csum flags %#jx\n",
1083 mbuf, mbuf->m_flags, (uint64_t)mbuf->m_pkthdr.csum_flags);
1084
1085 if (unlikely(!tx_ring->running)) {
1086 drbr_putback(adapter->ifp, tx_ring->br, mbuf);
1087 break;
1088 }
1089

--- 35 unchanged lines hidden ---
1070 " header csum flags %#jx\n",
1071 mbuf, mbuf->m_flags, (uint64_t)mbuf->m_pkthdr.csum_flags);
1072
1073 if (unlikely(!tx_ring->running)) {
1074 drbr_putback(adapter->ifp, tx_ring->br, mbuf);
1075 break;
1076 }
1077

--- 35 unchanged lines hidden ---
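
Note: the recurring change in this revision is that every ena_trace() call gains a leading pointer argument, passed as NULL at all of the call sites shown above. Below is a minimal, self-contained userspace sketch of that call shape only; demo_trace, DEMO_DBG, DEMO_WARNING, and demo_trace_mask are illustrative stand-ins, not the driver's actual macros or their implementation.

#include <stdarg.h>
#include <stdio.h>

#define DEMO_DBG	0x1
#define DEMO_WARNING	0x2

/* Stand-in for a compile/run-time trace mask. */
static unsigned int demo_trace_mask = DEMO_DBG | DEMO_WARNING;

static void
demo_trace(void *ctx, unsigned int level, const char *fmt, ...)
{
	va_list ap;

	/* Drop messages whose level is not enabled. */
	if ((demo_trace_mask & level) == 0)
		return;

	/* A real implementation might derive a prefix from ctx; NULL is accepted. */
	printf("%s: ", (ctx != NULL) ? "dev" : "ena");

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int
main(void)
{
	/* Mirrors the call shape used by the new revision above. */
	demo_trace(NULL, DEMO_DBG, "MSI-X TX/RX routine\n");
	demo_trace(NULL, DEMO_WARNING, "Failed to map TX mbuf\n");
	return (0);
}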