Lines Matching refs:tx_ring (cross-reference of every tx_ring use in the FreeBSD ena(4) driver's TX datapath: interrupt cleanup, if_transmit queueing, deferred start, completion cleanup, mbuf collapse/mapping, and the transmit loop)

49 static inline int ena_get_tx_req_id(struct ena_ring *tx_ring,
58 static int ena_check_and_collapse_mbuf(struct ena_ring *tx_ring,
73 struct ena_ring *tx_ring; in ena_cleanup() local
80 tx_ring = que->tx_ring; in ena_cleanup()
86 atomic_store_8(&tx_ring->cleanup_running, 1); in ena_cleanup()
98 atomic_store_8(&tx_ring->first_interrupt, 1); in ena_cleanup()
103 txc = ena_tx_cleanup(tx_ring); in ena_cleanup()
116 counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1); in ena_cleanup()
118 atomic_store_8(&tx_ring->cleanup_running, 0); in ena_cleanup()
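The ena_cleanup() matches above outline the interrupt-cleanup handler: it flags that cleanup is in flight, records that the queue has seen its first interrupt (consulted by the watchdog), runs the TX completion pass, and accounts for the interrupt unmask. A minimal sketch of that shape, covering only the TX side shown in this listing; the field names (cleanup_running, first_interrupt, tx_stats.unmask_interrupt_num) come from the listing, while EXAMPLE_CLEANUP_BUDGET and example_cleanup() are hypothetical and the actual interrupt re-arm call is omitted:

#define EXAMPLE_CLEANUP_BUDGET 8        /* hypothetical per-interrupt cap */

static void
example_cleanup(struct ena_que *que)
{
    struct ena_ring *tx_ring = que->tx_ring;
    int budget = EXAMPLE_CLEANUP_BUDGET;
    int txc;

    atomic_store_8(&tx_ring->cleanup_running, 1);
    /* Tell the keep-alive watchdog this queue is receiving interrupts. */
    atomic_store_8(&tx_ring->first_interrupt, 1);

    do {
        txc = ena_tx_cleanup(tx_ring);  /* number of reclaimed packets */
    } while (txc > 0 && --budget > 0);

    /* The real handler re-arms the IO interrupt here; only the stat is shown. */
    counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1);
    atomic_store_8(&tx_ring->cleanup_running, 0);
}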
124 struct ena_ring *tx_ring = (struct ena_ring *)arg; in ena_deferred_mq_start() local
125 if_t ifp = tx_ring->adapter->ifp; in ena_deferred_mq_start()
127 while (!drbr_empty(ifp, tx_ring->br) && tx_ring->running && in ena_deferred_mq_start()
129 ENA_RING_MTX_LOCK(tx_ring); in ena_deferred_mq_start()
130 ena_start_xmit(tx_ring); in ena_deferred_mq_start()
131 ENA_RING_MTX_UNLOCK(tx_ring); in ena_deferred_mq_start()
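The ena_deferred_mq_start() matches show the deferred-start side of transmission: a taskqueue callback that keeps draining the ring's buf_ring under the ring mutex while the queue is running. A minimal sketch, assuming the driver's ENA_RING_MTX_* macros and ring fields from the listing; the if_getdrvflags() check and the example_* name are assumptions:

static void
example_deferred_mq_start(void *arg, int pending)
{
    struct ena_ring *tx_ring = arg;
    if_t ifp = tx_ring->adapter->ifp;

    /* Drain until the buf_ring is empty or the ring/interface stops. */
    while (!drbr_empty(ifp, tx_ring->br) && tx_ring->running &&
        (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
        ENA_RING_MTX_LOCK(tx_ring);
        ena_start_xmit(tx_ring);
        ENA_RING_MTX_UNLOCK(tx_ring);
    }
}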
139 struct ena_ring *tx_ring; in ena_mq_start() local
166 tx_ring = &adapter->tx_ring[i]; in ena_mq_start()
169 is_drbr_empty = drbr_empty(ifp, tx_ring->br); in ena_mq_start()
170 ret = drbr_enqueue(ifp, tx_ring->br, m); in ena_mq_start()
172 taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); in ena_mq_start()
176 if (is_drbr_empty && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) { in ena_mq_start()
177 ena_start_xmit(tx_ring); in ena_mq_start()
178 ENA_RING_MTX_UNLOCK(tx_ring); in ena_mq_start()
180 taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); in ena_mq_start()
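The ena_mq_start() matches show the if_transmit pattern: pick a TX ring, enqueue the mbuf on its buf_ring, then either transmit directly (the ring was idle and its mutex was free) or hand off to the per-ring taskqueue. A minimal sketch under those assumptions; the flowid/curcpu queue selection is a simplification not shown in the listing, and example_mq_start() is a placeholder name:

static int
example_mq_start(if_t ifp, struct mbuf *m)
{
    struct ena_adapter *adapter = if_getsoftc(ifp);
    struct ena_ring *tx_ring;
    bool is_drbr_empty;
    uint32_t i;
    int ret;

    /* Simplified queue selection: spread flows over the IO queues. */
    if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
        i = m->m_pkthdr.flowid % adapter->num_io_queues;
    else
        i = curcpu % adapter->num_io_queues;
    tx_ring = &adapter->tx_ring[i];

    is_drbr_empty = drbr_empty(ifp, tx_ring->br);
    ret = drbr_enqueue(ifp, tx_ring->br, m);
    if (unlikely(ret != 0)) {
        /* Ring is congested; let the taskqueue retry the backlog. */
        taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
        return (ret);
    }

    if (is_drbr_empty && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) {
        /* Fast path: nothing queued before us and the lock was free. */
        ena_start_xmit(tx_ring);
        ENA_RING_MTX_UNLOCK(tx_ring);
    } else {
        taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
    }

    return (0);
}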
190 struct ena_ring *tx_ring = adapter->tx_ring; in ena_qflush() local
193 for (i = 0; i < adapter->num_io_queues; ++i, ++tx_ring) in ena_qflush()
194 if (!drbr_empty(ifp, tx_ring->br)) { in ena_qflush()
195 ENA_RING_MTX_LOCK(tx_ring); in ena_qflush()
196 drbr_flush(ifp, tx_ring->br); in ena_qflush()
197 ENA_RING_MTX_UNLOCK(tx_ring); in ena_qflush()
208 ena_get_tx_req_id(struct ena_ring *tx_ring, struct ena_com_io_cq *io_cq, in ena_get_tx_req_id() argument
211 struct ena_adapter *adapter = tx_ring->adapter; in ena_get_tx_req_id()
217 rc = validate_tx_req_id(tx_ring, *req_id, rc); in ena_get_tx_req_id()
219 if (unlikely(tx_ring->tx_buffer_info[*req_id].mbuf == NULL)) { in ena_get_tx_req_id()
222 *req_id, tx_ring->qid); in ena_get_tx_req_id()
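The ena_get_tx_req_id() matches show how a completed request id is pulled from the IO completion queue and sanity-checked against the TX bookkeeping. A minimal sketch, reusing the ena_com helpers and the driver's validate_tx_req_id() named in the listing; example_get_tx_req_id() is a placeholder:

static inline int
example_get_tx_req_id(struct ena_ring *tx_ring, struct ena_com_io_cq *io_cq,
    uint16_t *req_id)
{
    int rc;

    rc = ena_com_tx_comp_req_id_get(io_cq, req_id);
    if (rc == ENA_COM_TRY_AGAIN)        /* no completion ready yet */
        return (EAGAIN);

    rc = validate_tx_req_id(tx_ring, *req_id, rc);
    if (unlikely(tx_ring->tx_buffer_info[*req_id].mbuf == NULL)) {
        /* A completed id must map to an in-flight mbuf. */
        rc = EFAULT;
    }

    return (rc);
}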
242 ena_tx_cleanup(struct ena_ring *tx_ring) in ena_tx_cleanup() argument
256 adapter = tx_ring->que->adapter; in ena_tx_cleanup()
257 ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); in ena_tx_cleanup()
259 next_to_clean = tx_ring->next_to_clean; in ena_tx_cleanup()
262 if (netmap_tx_irq(adapter->ifp, tx_ring->qid) != NM_IRQ_PASS) in ena_tx_cleanup()
270 rc = ena_get_tx_req_id(tx_ring, io_cq, &req_id); in ena_tx_cleanup()
274 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_tx_cleanup()
286 tx_ring->qid, mbuf); in ena_tx_cleanup()
292 tx_ring->free_tx_ids[next_to_clean] = req_id; in ena_tx_cleanup()
294 tx_ring->ring_size); in ena_tx_cleanup()
299 tx_ring->next_to_clean = next_to_clean; in ena_tx_cleanup()
310 tx_ring->qid, work_done); in ena_tx_cleanup()
314 tx_ring->next_to_clean = next_to_clean; in ena_tx_cleanup()
325 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_tx_cleanup()
327 if (unlikely(!tx_ring->running && above_thresh)) { in ena_tx_cleanup()
328 ENA_RING_MTX_LOCK(tx_ring); in ena_tx_cleanup()
330 tx_ring->ena_com_io_sq, ENA_TX_RESUME_THRESH); in ena_tx_cleanup()
331 if (!tx_ring->running && above_thresh) { in ena_tx_cleanup()
332 tx_ring->running = true; in ena_tx_cleanup()
333 counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1); in ena_tx_cleanup()
334 taskqueue_enqueue(tx_ring->enqueue_tq, in ena_tx_cleanup()
335 &tx_ring->enqueue_task); in ena_tx_cleanup()
337 ENA_RING_MTX_UNLOCK(tx_ring); in ena_tx_cleanup()
340 tx_ring->tx_last_cleanup_ticks = ticks; in ena_tx_cleanup()
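The ena_tx_cleanup() matches cover the completion loop (recycle each req_id into free_tx_ids, advance next_to_clean, record tx_last_cleanup_ticks) and, at lines 325-340, a double-checked wakeup of a stopped ring. A minimal sketch of that wakeup idiom, reusing the ring fields and ena_com helpers named in the listing (example_* is a placeholder): the cheap space check runs lockless, then is repeated under the ring mutex so the wakeup cannot race with ena_xmit_mbuf() stopping the queue.

static void
example_tx_resume_if_needed(struct ena_ring *tx_ring)
{
    bool above_thresh;

    above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
        ENA_TX_RESUME_THRESH);
    if (unlikely(!tx_ring->running && above_thresh)) {
        ENA_RING_MTX_LOCK(tx_ring);
        /* Re-check with the lock held before restarting the queue. */
        above_thresh = ena_com_sq_have_enough_space(
            tx_ring->ena_com_io_sq, ENA_TX_RESUME_THRESH);
        if (!tx_ring->running && above_thresh) {
            tx_ring->running = true;
            counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1);
            taskqueue_enqueue(tx_ring->enqueue_tq,
                &tx_ring->enqueue_task);
        }
        ENA_RING_MTX_UNLOCK(tx_ring);
    }
}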
820 ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) in ena_check_and_collapse_mbuf() argument
826 adapter = tx_ring->adapter; in ena_check_and_collapse_mbuf()
834 ((*mbuf)->m_pkthdr.len < tx_ring->tx_max_header_size)) in ena_check_and_collapse_mbuf()
837 counter_u64_add(tx_ring->tx_stats.collapse, 1); in ena_check_and_collapse_mbuf()
842 counter_u64_add(tx_ring->tx_stats.collapse_err, 1); in ena_check_and_collapse_mbuf()
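The ena_check_and_collapse_mbuf() matches show the defragmentation step: if an mbuf chain has more fragments than the device's scatter/gather list allows, it is collapsed with m_collapse() before mapping, with the collapse and collapse_err counters tracking the outcome. A minimal sketch under those assumptions; the inline fragment count replaces the driver's own helper and example_* is a placeholder:

static int
example_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
{
    struct ena_adapter *adapter = tx_ring->adapter;
    struct mbuf *collapsed, *m;
    int num_frags = 0;

    for (m = *mbuf; m != NULL; m = m->m_next)
        num_frags++;

    /* Chains that already fit the device SGL are left alone. */
    if (num_frags < adapter->max_tx_sgl_size ||
        (num_frags == adapter->max_tx_sgl_size &&
        (*mbuf)->m_pkthdr.len < tx_ring->tx_max_header_size))
        return (0);

    counter_u64_add(tx_ring->tx_stats.collapse, 1);

    /* Defragment into at most max_tx_sgl_size - 1 buffers. */
    collapsed = m_collapse(*mbuf, M_NOWAIT, adapter->max_tx_sgl_size - 1);
    if (unlikely(collapsed == NULL)) {
        /* The original chain is left untouched on failure. */
        counter_u64_add(tx_ring->tx_stats.collapse_err, 1);
        return (ENOMEM);
    }
    *mbuf = collapsed;

    return (0);
}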
853 ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info, in ena_tx_map_mbuf() argument
856 struct ena_adapter *adapter = tx_ring->adapter; in ena_tx_map_mbuf()
880 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_tx_map_mbuf()
892 tx_ring->tx_max_header_size); in ena_tx_map_mbuf()
903 tx_ring->push_buf_intermediate_buf); in ena_tx_map_mbuf()
904 *push_hdr = tx_ring->push_buf_intermediate_buf; in ena_tx_map_mbuf()
906 counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1); in ena_tx_map_mbuf()
914 if (mbuf->m_pkthdr.len <= tx_ring->tx_max_header_size) { in ena_tx_map_mbuf()
917 offset = tx_ring->tx_max_header_size; in ena_tx_map_mbuf()
962 counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1); in ena_tx_map_mbuf()
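The ena_tx_map_mbuf() matches show the mapping split: with the LLQ (device placement) policy the packet header is copied into push_buf_intermediate_buf to be pushed to the device, otherwise the header simply stays in host memory; either way the payload is DMA-mapped as a scatter/gather list, with dma_mapping_err counted on failure. A simplified sketch under those assumptions; adapter->tx_buf_tag, tx_info->dmamap, and ENA_PKT_MAX_BUFS are assumed driver definitions, and the real LLQ path has more cases than shown:

static int
example_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info,
    struct mbuf *mbuf, void **push_hdr, uint16_t *header_len)
{
    struct ena_adapter *adapter = tx_ring->adapter;
    bus_dma_segment_t segs[ENA_PKT_MAX_BUFS];   /* driver-defined cap */
    int nsegs, rc;

    if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
        /* LLQ: copy up to tx_max_header_size bytes into the push buffer. */
        *header_len = MIN(mbuf->m_pkthdr.len, tx_ring->tx_max_header_size);
        m_copydata(mbuf, 0, *header_len,
            tx_ring->push_buf_intermediate_buf);
        *push_hdr = tx_ring->push_buf_intermediate_buf;
        counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);
    } else {
        /* Host placement: no pushed header. */
        *push_hdr = NULL;
        *header_len = MIN(mbuf->m_pkthdr.len, tx_ring->tx_max_header_size);
    }

    /* DMA-map the whole mbuf chain as a scatter/gather list. */
    rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->dmamap,
        mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
    if (unlikely(rc != 0)) {
        counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1);
        return (rc);
    }
    tx_info->mbuf = mbuf;

    return (0);
}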
968 ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) in ena_xmit_mbuf() argument
984 ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); in ena_xmit_mbuf()
985 adapter = tx_ring->que->adapter; in ena_xmit_mbuf()
990 rc = ena_check_and_collapse_mbuf(tx_ring, mbuf); in ena_xmit_mbuf()
999 next_to_use = tx_ring->next_to_use; in ena_xmit_mbuf()
1000 req_id = tx_ring->free_tx_ids[next_to_use]; in ena_xmit_mbuf()
1001 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_xmit_mbuf()
1007 rc = ena_tx_map_mbuf(tx_ring, tx_info, *mbuf, &push_hdr, &header_len); in ena_xmit_mbuf()
1022 if (tx_ring->acum_pkts == ENA_DB_THRESHOLD || in ena_xmit_mbuf()
1023 ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx)) { in ena_xmit_mbuf()
1026 tx_ring->que->id); in ena_xmit_mbuf()
1027 ena_ring_tx_doorbell(tx_ring); in ena_xmit_mbuf()
1035 tx_ring->que->id); in ena_xmit_mbuf()
1041 counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1); in ena_xmit_mbuf()
1046 counter_u64_add_protected(tx_ring->tx_stats.cnt, 1); in ena_xmit_mbuf()
1047 counter_u64_add_protected(tx_ring->tx_stats.bytes, in ena_xmit_mbuf()
1059 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, in ena_xmit_mbuf()
1060 tx_ring->ring_size); in ena_xmit_mbuf()
1066 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_xmit_mbuf()
1068 ena_log_io(pdev, DBG, "Stop queue %d\n", tx_ring->que->id); in ena_xmit_mbuf()
1070 tx_ring->running = false; in ena_xmit_mbuf()
1071 counter_u64_add(tx_ring->tx_stats.queue_stop, 1); in ena_xmit_mbuf()
1082 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_xmit_mbuf()
1084 tx_ring->running = true; in ena_xmit_mbuf()
1085 counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1); in ena_xmit_mbuf()
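Besides stat accounting and advancing next_to_use, the ena_xmit_mbuf() matches show two flow-control pieces: an early doorbell when enough packets have accumulated or the hardware requires one, and stopping the ring when the submission queue cannot hold another worst-case packet, followed by a re-check so a racing completion does not leave the queue stopped. A minimal sketch of just those pieces; the worst_case_descs parameter and the example_* names are placeholders, while acum_pkts, ENA_DB_THRESHOLD and the ena_com_* helpers come from the listing:

static void
example_maybe_ring_doorbell(struct ena_ring *tx_ring,
    struct ena_com_tx_ctx *ena_tx_ctx)
{
    if (tx_ring->acum_pkts == ENA_DB_THRESHOLD ||
        ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, ena_tx_ctx))
        ena_ring_tx_doorbell(tx_ring);  /* driver helper from the listing */
}

static void
example_maybe_stop_queue(struct ena_ring *tx_ring, uint16_t worst_case_descs)
{
    if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
        worst_case_descs))) {
        tx_ring->running = false;
        counter_u64_add(tx_ring->tx_stats.queue_stop, 1);

        /*
         * The cleanup path may have freed space between the check and
         * clearing 'running'; re-check so the queue is not left stopped.
         */
        if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
            worst_case_descs)) {
            tx_ring->running = true;
            counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1);
        }
    }
}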
1102 ena_start_xmit(struct ena_ring *tx_ring) in ena_start_xmit() argument
1105 struct ena_adapter *adapter = tx_ring->adapter; in ena_start_xmit()
1108 ENA_RING_MTX_ASSERT(tx_ring); in ena_start_xmit()
1116 while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) { in ena_start_xmit()
1121 if (unlikely(!tx_ring->running)) { in ena_start_xmit()
1122 drbr_putback(adapter->ifp, tx_ring->br, mbuf); in ena_start_xmit()
1126 if (unlikely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0)) { in ena_start_xmit()
1128 drbr_putback(adapter->ifp, tx_ring->br, mbuf); in ena_start_xmit()
1130 drbr_putback(adapter->ifp, tx_ring->br, mbuf); in ena_start_xmit()
1133 drbr_advance(adapter->ifp, tx_ring->br); in ena_start_xmit()
1139 drbr_advance(adapter->ifp, tx_ring->br); in ena_start_xmit()
1144 tx_ring->acum_pkts++; in ena_start_xmit()
1149 if (likely(tx_ring->acum_pkts != 0)) { in ena_start_xmit()
1151 ena_ring_tx_doorbell(tx_ring); in ena_start_xmit()
1154 if (unlikely(!tx_ring->running)) in ena_start_xmit()
1155 taskqueue_enqueue(tx_ring->que->cleanup_tq, in ena_start_xmit()
1156 &tx_ring->que->cleanup_task); in ena_start_xmit()
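The ena_start_xmit() matches show the transmit loop driven by both ena_mq_start() and the deferred-start task: peek packets off the buf_ring, hand each to ena_xmit_mbuf(), put the packet back if the ring stopped or the error is transient, and ring one final doorbell for whatever accumulated. A minimal sketch under those assumptions; the real code distinguishes more error cases than ENA_COM_NO_MEM, and example_start_xmit() is a placeholder name:

static void
example_start_xmit(struct ena_ring *tx_ring)
{
    struct ena_adapter *adapter = tx_ring->adapter;
    struct mbuf *mbuf;
    int ret;

    ENA_RING_MTX_ASSERT(tx_ring);

    while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) {
        if (unlikely(!tx_ring->running)) {
            /* Ring stopped; retry this packet after wakeup. */
            drbr_putback(adapter->ifp, tx_ring->br, mbuf);
            break;
        }

        ret = ena_xmit_mbuf(tx_ring, &mbuf);
        if (unlikely(ret != 0)) {
            if (ret == ENA_COM_NO_MEM) {
                /* Transient: keep the packet for a later attempt. */
                drbr_putback(adapter->ifp, tx_ring->br, mbuf);
            } else {
                /* Fatal for this packet: drop it and move on. */
                m_freem(mbuf);
                drbr_advance(adapter->ifp, tx_ring->br);
            }
            break;
        }

        drbr_advance(adapter->ifp, tx_ring->br);
        tx_ring->acum_pkts++;
    }

    /* Flush any packets that did not trigger an early doorbell. */
    if (likely(tx_ring->acum_pkts != 0))
        ena_ring_tx_doorbell(tx_ring);

    /* If the ring stopped mid-loop, let the cleanup task free space. */
    if (unlikely(!tx_ring->running))
        taskqueue_enqueue(tx_ring->que->cleanup_tq,
            &tx_ring->que->cleanup_task);
}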