Lines matching refs: tx_ring
399 txr = &adapter->tx_ring[i]; in ena_init_io_rings_basic()
413 que->tx_ring = txr; in ena_init_io_rings_basic()
431 txr = &adapter->tx_ring[i]; in ena_init_io_rings_advanced()
474 struct ena_ring *txr = &adapter->tx_ring[qid]; in ena_free_io_ring_resources()
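The four matches above come from the ring setup/teardown path: the adapter keeps a flat array of TX rings, one per I/O queue, and each queue structure caches a pointer into it. A minimal sketch of that layout, with illustrative field names and an arbitrary queue count (the real struct ena_ring lives in the driver's headers and carries far more state):

struct ring_model {
        int qid;
        int ring_size;
};

struct adapter_model {
        struct ring_model tx_ring[32];          /* one TX ring per I/O queue */
};

/* The &adapter->tx_ring[i] idiom seen in the init and free routines. */
static struct ring_model *
tx_ring_of(struct adapter_model *adapter, int qid)
{
        return (&adapter->tx_ring[qid]);
}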
569 validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id, int tx_req_id_rc) in validate_tx_req_id() argument
571 struct ena_adapter *adapter = tx_ring->adapter; in validate_tx_req_id()
579 req_id, tx_ring->qid); in validate_tx_req_id()
583 req_id, tx_ring->qid); in validate_tx_req_id()
584 counter_u64_add(tx_ring->tx_stats.bad_req_id, 1); in validate_tx_req_id()
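validate_tx_req_id() guards the completion path: a request id handed back by the device is only trusted if it indexes inside the ring; otherwise the driver bumps tx_stats.bad_req_id and asks for a device reset. A user-space model of just the bounds check, with the stats counter and reset trigger reduced to a comment:

#include <stdint.h>
#include <stdio.h>

static int
validate_req_id(uint16_t req_id, uint16_t ring_size)
{
        if (req_id < ring_size)
                return (0);
        /* the driver bumps tx_stats.bad_req_id here and requests a reset */
        fprintf(stderr, "invalid req_id %u\n", req_id);
        return (-1);
}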
595 ena_release_all_tx_dmamap(struct ena_ring *tx_ring) in ena_release_all_tx_dmamap() argument
597 struct ena_adapter *adapter = tx_ring->adapter; in ena_release_all_tx_dmamap()
606 for (i = 0; i < tx_ring->ring_size; ++i) { in ena_release_all_tx_dmamap()
607 tx_info = &tx_ring->tx_buffer_info[i]; in ena_release_all_tx_dmamap()
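ena_release_all_tx_dmamap() walks every slot of tx_buffer_info and tears down its DMA map. A model of the loop shape, with free(3) standing in for bus_dmamap_destroy():

#include <stdlib.h>

struct tx_buffer_model {
        void *dmamap;                           /* stand-in for bus_dmamap_t */
};

static void
release_all_dmamaps(struct tx_buffer_model *info, int ring_size)
{
        for (int i = 0; i < ring_size; ++i) {
                free(info[i].dmamap);           /* bus_dmamap_destroy() in-kernel */
                info[i].dmamap = NULL;
        }
}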
640 struct ena_ring *tx_ring = que->tx_ring; in ena_setup_tx_resources() local
650 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; in ena_setup_tx_resources()
652 tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); in ena_setup_tx_resources()
653 if (unlikely(tx_ring->tx_buffer_info == NULL)) in ena_setup_tx_resources()
656 size = sizeof(uint16_t) * tx_ring->ring_size; in ena_setup_tx_resources()
657 tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); in ena_setup_tx_resources()
658 if (unlikely(tx_ring->free_tx_ids == NULL)) in ena_setup_tx_resources()
661 size = tx_ring->tx_max_header_size; in ena_setup_tx_resources()
662 tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF, in ena_setup_tx_resources()
664 if (unlikely(tx_ring->push_buf_intermediate_buf == NULL)) in ena_setup_tx_resources()
668 for (i = 0; i < tx_ring->ring_size; i++) in ena_setup_tx_resources()
669 tx_ring->free_tx_ids[i] = i; in ena_setup_tx_resources()
672 ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats, in ena_setup_tx_resources()
673 sizeof(tx_ring->tx_stats)); in ena_setup_tx_resources()
675 tx_ring->next_to_use = 0; in ena_setup_tx_resources()
676 tx_ring->next_to_clean = 0; in ena_setup_tx_resources()
677 tx_ring->acum_pkts = 0; in ena_setup_tx_resources()
680 ENA_RING_MTX_LOCK(tx_ring); in ena_setup_tx_resources()
681 drbr_flush(adapter->ifp, tx_ring->br); in ena_setup_tx_resources()
682 ENA_RING_MTX_UNLOCK(tx_ring); in ena_setup_tx_resources()
685 for (i = 0; i < tx_ring->ring_size; i++) { in ena_setup_tx_resources()
687 &tx_ring->tx_buffer_info[i].dmamap); in ena_setup_tx_resources()
696 map = tx_ring->tx_buffer_info[i].nm_info.map_seg; in ena_setup_tx_resources()
712 TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring); in ena_setup_tx_resources()
713 tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT, in ena_setup_tx_resources()
714 taskqueue_thread_enqueue, &tx_ring->enqueue_tq); in ena_setup_tx_resources()
715 if (unlikely(tx_ring->enqueue_tq == NULL)) { in ena_setup_tx_resources()
718 i = tx_ring->ring_size; in ena_setup_tx_resources()
722 tx_ring->running = true; in ena_setup_tx_resources()
732 taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET, in ena_setup_tx_resources()
738 ena_release_all_tx_dmamap(tx_ring); in ena_setup_tx_resources()
740 free(tx_ring->free_tx_ids, M_DEVBUF); in ena_setup_tx_resources()
741 tx_ring->free_tx_ids = NULL; in ena_setup_tx_resources()
743 free(tx_ring->tx_buffer_info, M_DEVBUF); in ena_setup_tx_resources()
744 tx_ring->tx_buffer_info = NULL; in ena_setup_tx_resources()
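The ena_setup_tx_resources() matches show the allocate-then-unwind shape of the setup path: three allocations (the buffer-info array, the free_tx_ids array seeded 0..ring_size-1, and the push-mode intermediate buffer), counters reset, ring indices zeroed, taskqueue creation, and error labels that free in reverse order of allocation. A user-space sketch of that skeleton, with calloc/malloc standing in for malloc(9) and the DMA-map and taskqueue steps elided:

#include <stdint.h>
#include <stdlib.h>

struct tx_ring_model {
        int ring_size;
        struct { void *mbuf; void *dmamap; } *tx_buffer_info;
        uint16_t *free_tx_ids;
        void *push_buf_intermediate_buf;
        uint16_t next_to_use, next_to_clean;
};

static int
setup_tx_resources(struct tx_ring_model *tx_ring, size_t max_header_size)
{
        tx_ring->tx_buffer_info = calloc(tx_ring->ring_size,
            sizeof(*tx_ring->tx_buffer_info));
        if (tx_ring->tx_buffer_info == NULL)
                return (-1);

        tx_ring->free_tx_ids = calloc(tx_ring->ring_size, sizeof(uint16_t));
        if (tx_ring->free_tx_ids == NULL)
                goto err_buf_info_free;

        tx_ring->push_buf_intermediate_buf = malloc(max_header_size);
        if (tx_ring->push_buf_intermediate_buf == NULL)
                goto err_tx_ids_free;

        /* every slot starts out free: id i lives at index i */
        for (int i = 0; i < tx_ring->ring_size; i++)
                tx_ring->free_tx_ids[i] = (uint16_t)i;

        tx_ring->next_to_use = tx_ring->next_to_clean = 0;
        return (0);

err_tx_ids_free:
        free(tx_ring->free_tx_ids);
        tx_ring->free_tx_ids = NULL;
err_buf_info_free:
        free(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;
        return (-1);
}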
759 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_free_tx_resources() local
765 while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL)) in ena_free_tx_resources()
766 taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task); in ena_free_tx_resources()
768 taskqueue_free(tx_ring->enqueue_tq); in ena_free_tx_resources()
770 ENA_RING_MTX_LOCK(tx_ring); in ena_free_tx_resources()
772 drbr_flush(adapter->ifp, tx_ring->br); in ena_free_tx_resources()
775 for (int i = 0; i < tx_ring->ring_size; i++) { in ena_free_tx_resources()
777 tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE); in ena_free_tx_resources()
779 tx_ring->tx_buffer_info[i].dmamap); in ena_free_tx_resources()
781 tx_ring->tx_buffer_info[i].dmamap); in ena_free_tx_resources()
785 nm_info = &tx_ring->tx_buffer_info[i].nm_info; in ena_free_tx_resources()
801 m_freem(tx_ring->tx_buffer_info[i].mbuf); in ena_free_tx_resources()
802 tx_ring->tx_buffer_info[i].mbuf = NULL; in ena_free_tx_resources()
804 ENA_RING_MTX_UNLOCK(tx_ring); in ena_free_tx_resources()
807 free(tx_ring->tx_buffer_info, M_DEVBUF); in ena_free_tx_resources()
808 tx_ring->tx_buffer_info = NULL; in ena_free_tx_resources()
810 free(tx_ring->free_tx_ids, M_DEVBUF); in ena_free_tx_resources()
811 tx_ring->free_tx_ids = NULL; in ena_free_tx_resources()
813 free(tx_ring->push_buf_intermediate_buf, M_DEVBUF); in ena_free_tx_resources()
814 tx_ring->push_buf_intermediate_buf = NULL; in ena_free_tx_resources()
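ena_free_tx_resources() undoes the above in a fixed order: cancel and drain the deferred enqueue task, then (under the ring lock) flush the buf ring and free each slot's mbuf after syncing and unloading its DMA map, and finally release the three arrays. A sketch of that ordering, reusing tx_ring_model from the setup sketch and reducing the taskqueue and locking steps to comments:

static void
free_tx_resources(struct tx_ring_model *tx_ring)
{
        /* 1. taskqueue_cancel()/taskqueue_drain() so no deferred start
         *    races the teardown, then taskqueue_free() */
        /* 2. under ENA_RING_MTX: drbr_flush(), then per slot sync,
         *    unload, destroy the DMA map and m_freem() the mbuf */
        for (int i = 0; i < tx_ring->ring_size; i++) {
                free(tx_ring->tx_buffer_info[i].mbuf);
                tx_ring->tx_buffer_info[i].mbuf = NULL;
        }
        /* 3. release the arrays allocated at setup, NULLing as we go */
        free(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;
        free(tx_ring->free_tx_ids);
        tx_ring->free_tx_ids = NULL;
        free(tx_ring->push_buf_intermediate_buf);
        tx_ring->push_buf_intermediate_buf = NULL;
}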
1504 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_free_tx_bufs() local
1506 ENA_RING_MTX_LOCK(tx_ring); in ena_free_tx_bufs()
1507 for (int i = 0; i < tx_ring->ring_size; i++) { in ena_free_tx_bufs()
1508 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; in ena_free_tx_bufs()
1531 ENA_RING_MTX_UNLOCK(tx_ring); in ena_free_tx_bufs()
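ena_free_tx_bufs() is the stop-path variant: it only touches slots that still hold an in-flight mbuf and skips the rest. Again modeled against tx_ring_model:

static void
free_tx_bufs(struct tx_ring_model *tx_ring)
{
        for (int i = 0; i < tx_ring->ring_size; i++) {
                if (tx_ring->tx_buffer_info[i].mbuf == NULL)
                        continue;               /* slot already completed */
                free(tx_ring->tx_buffer_info[i].mbuf);
                tx_ring->tx_buffer_info[i].mbuf = NULL;
        }
}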
1611 ring = &adapter->tx_ring[i]; in ena_create_io_queues()
2101 struct ena_ring *tx_ring; in ena_unmask_all_io_irqs() local
2110 tx_ring = &adapter->tx_ring[i]; in ena_unmask_all_io_irqs()
2111 counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1); in ena_unmask_all_io_irqs()
2147 adapter->tx_ring[i].ring_size = new_tx_size; in set_io_rings_size()
2206 cur_tx_ring_size = adapter->tx_ring[0].ring_size; in create_queues_with_size_backoff()
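set_io_rings_size() and create_queues_with_size_backoff() implement the ring-size backoff: if queue creation fails, shrink the TX ring size and try again until a floor is reached. A model of the retry loop, with MIN_RING_SIZE and try_create() as assumed stand-ins, not the driver's names:

#define MIN_RING_SIZE   256             /* assumed floor, not the driver's */

static int
create_queues_with_backoff(int *ring_size, int (*try_create)(int))
{
        int size = *ring_size;

        while (try_create(size) != 0) {
                if (size <= MIN_RING_SIZE)
                        return (-1);    /* nothing smaller left to try */
                size /= 2;              /* back off and retry */
        }
        *ring_size = size;              /* adapter->tx_ring[i].ring_size */
        return (0);
}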
3155 struct ena_ring *tx_ring) in check_cdesc_in_tx_cq() argument
3161 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id); in check_cdesc_in_tx_cq()
3166 tx_ring->qid); in check_cdesc_in_tx_cq()
3173 tx_ring->qid); in check_cdesc_in_tx_cq()
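check_cdesc_in_tx_cq() peeks at the TX completion queue via ena_com_tx_comp_req_id_get() to classify a stall: if a completion descriptor is sitting there unprocessed, the interrupt was missed; if not, the device never produced the completion. A sketch with probe_cq() as a hypothetical stand-in that returns 0 when a cdesc is pending:

#include <stdint.h>

enum reset_reason {
        RESET_MISS_INTERRUPT,           /* cdesc exists, cleanup never ran */
        RESET_MISS_TX_CMPL,             /* device produced no completion */
};

static enum reset_reason
classify_stall(int (*probe_cq)(uint16_t *))
{
        uint16_t req_id;

        if (probe_cq(&req_id) == 0)
                return (RESET_MISS_INTERRUPT);
        return (RESET_MISS_TX_CMPL);
}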
3180 struct ena_ring *tx_ring) in check_missing_comp_in_tx_queue() argument
3195 for (i = 0; i < tx_ring->ring_size; i++) { in check_missing_comp_in_tx_queue()
3196 tx_buf = &tx_ring->tx_buffer_info[i]; in check_missing_comp_in_tx_queue()
3205 if (unlikely(!atomic_load_8(&tx_ring->first_interrupt) && in check_missing_comp_in_tx_queue()
3214 tx_ring->qid); in check_missing_comp_in_tx_queue()
3225 tx_ring->tx_last_cleanup_ticks); in check_missing_comp_in_tx_queue()
3231 tx_ring->qid, i, time_since_last_cleanup, in check_missing_comp_in_tx_queue()
3254 cleanup_scheduled = !!(atomic_load_16(&tx_ring->que->cleanup_task.ta_pending)); in check_missing_comp_in_tx_queue()
3255 cleanup_running = !!(atomic_load_8((&tx_ring->cleanup_running))); in check_missing_comp_in_tx_queue()
3257 reset_reason = check_cdesc_in_tx_cq(adapter, tx_ring); in check_missing_comp_in_tx_queue()
3263 counter_u64_add(tx_ring->tx_stats.missing_tx_comp, new_missed_tx); in check_missing_comp_in_tx_queue()
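check_missing_comp_in_tx_queue() is the per-ring watchdog walk: every in-flight tx_buffer_info slot is checked against a completion timeout, the overdue ones are counted into tx_stats.missing_tx_comp, and crossing a threshold requests a reset. A simplified model using time_t where the driver keeps finer-grained timestamp bookkeeping:

#include <time.h>

struct tx_slot_model {
        void *mbuf;                     /* NULL when the slot is idle */
        time_t sent_at;
};

static int
check_missing_completions(const struct tx_slot_model *slots, int ring_size,
    time_t now, time_t timeout, int max_allowed)
{
        int missed = 0;

        for (int i = 0; i < ring_size; i++) {
                if (slots[i].mbuf == NULL)
                        continue;       /* not in flight */
                if (now - slots[i].sent_at > timeout)
                        missed++;       /* missing_tx_comp in the driver */
        }
        return (missed > max_allowed ? -1 : 0); /* -1: request a reset */
}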
3277 struct ena_ring *tx_ring; in check_for_missing_completions() local
3296 tx_ring = &adapter->tx_ring[i]; in check_for_missing_completions()
3299 rc = check_missing_comp_in_tx_queue(adapter, tx_ring); in check_for_missing_completions()
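check_for_missing_completions() then applies that per-ring check across every I/O queue, stopping once a ring reports failure. Reusing the model above:

static void
watchdog_pass(const struct tx_slot_model *const *rings, int nrings,
    int ring_size, time_t now, time_t timeout, int max_allowed)
{
        for (int i = 0; i < nrings; i++) {
                if (check_missing_completions(rings[i], ring_size, now,
                    timeout, max_allowed) != 0)
                        break;          /* a reset is scheduled; stop scanning */
        }
}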