Lines matching "dma-queues"

1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright 2005-2006 Fen Systems Ltd.
5 * Copyright 2005-2013 Solarflare Communications Inc.
31 &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)]; in efx_tx_get_copy_buffer()
33 ((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1); in efx_tx_get_copy_buffer()
35 if (unlikely(!page_buf->addr) && in efx_tx_get_copy_buffer()
36 efx_siena_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, in efx_tx_get_copy_buffer()
39 buffer->dma_addr = page_buf->dma_addr + offset; in efx_tx_get_copy_buffer()
40 buffer->unmap_len = 0; in efx_tx_get_copy_buffer()
41 return (u8 *)page_buf->addr + offset; in efx_tx_get_copy_buffer()
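The shift arithmetic above carves each DMA-coherent page into fixed-size copy buffers: the high bits of the descriptor index select the page, and the low bits (scaled by EFX_TX_CB_ORDER, nudged by NET_IP_ALIGN) select the offset within it. Below is a minimal userspace sketch of the same index math, with assumed constants (4 KiB pages, order-6 buffers; the driver's real values may differ):

	#include <stdio.h>

	#define PAGE_SHIFT      12                /* assumed: 4 KiB pages */
	#define PAGE_SIZE       (1u << PAGE_SHIFT)
	#define EFX_TX_CB_ORDER 6                 /* assumed: 64-byte copy buffers */
	#define NET_IP_ALIGN    2                 /* typical IP header alignment pad */

	/* Same page/offset split as efx_tx_get_copy_buffer(). */
	static void cb_locate(unsigned int index, unsigned int *page,
			      unsigned int *offset)
	{
		*page   = index >> (PAGE_SHIFT - EFX_TX_CB_ORDER);
		*offset = ((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) &
			  (PAGE_SIZE - 1);
	}

	int main(void)
	{
		unsigned int page, offset;

		/* 64 buffers per 4 KiB page: index 63 is the last slot of page 0. */
		for (unsigned int index = 62; index < 66; index++) {
			cb_locate(index, &page, &offset);
			printf("index %u -> page %u, offset %u\n", index, page, offset);
		}
		return 0;
	}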
46 /* We need to consider all queues that the net core sees as one */ in efx_tx_maybe_stop_queue()
47 struct efx_nic *efx = txq1->efx; in efx_tx_maybe_stop_queue()
51 fill_level = efx_channel_tx_old_fill_level(txq1->channel); in efx_tx_maybe_stop_queue()
52 if (likely(fill_level < efx->txq_stop_thresh)) in efx_tx_maybe_stop_queue()
57 * validly be >= efx->txq_entries). Now try again using in efx_tx_maybe_stop_queue()
69 netif_tx_stop_queue(txq1->core_txq); in efx_tx_maybe_stop_queue()
71 efx_for_each_channel_tx_queue(txq2, txq1->channel) in efx_tx_maybe_stop_queue()
72 txq2->old_read_count = READ_ONCE(txq2->read_count); in efx_tx_maybe_stop_queue()
74 fill_level = efx_channel_tx_old_fill_level(txq1->channel); in efx_tx_maybe_stop_queue()
75 EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries); in efx_tx_maybe_stop_queue()
76 if (likely(fill_level < efx->txq_stop_thresh)) { in efx_tx_maybe_stop_queue()
78 if (likely(!efx->loopback_selftest)) in efx_tx_maybe_stop_queue()
79 netif_tx_start_queue(txq1->core_txq); in efx_tx_maybe_stop_queue()
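The fragment above is the classic lockless TX stop/restart dance: compute the fill level from cached counters, stop the queue if it looks full, re-read the consumer counters (old_read_count), and re-check, restarting the queue if completions raced in. Because insert/read counts are free-running unsigned values, plain subtraction stays correct across counter wrap. A compact sketch of the wrap-safe arithmetic (names and the stop threshold are illustrative, not the driver's):

	#include <stdio.h>

	/* Free-running producer/consumer counters: unsigned subtraction
	 * yields the fill level even after either counter wraps. */
	static unsigned int fill_level(unsigned int insert_count,
				       unsigned int read_count)
	{
		return insert_count - read_count;
	}

	int main(void)
	{
		unsigned int entries = 1024;              /* ring size (illustrative) */
		unsigned int stop_thresh = entries - 64;  /* illustrative headroom */
		unsigned int read = 0xfffffff0u;          /* consumer about to wrap */
		unsigned int insert = read + 1000;        /* producer wrapped past 2^32 */
		unsigned int fill = fill_level(insert, read);

		printf("fill=%u space=%u stop=%s\n",
		       fill, entries - fill,
		       fill >= stop_thresh ? "yes" : "no");
		return 0;
	}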
86 unsigned int copy_len = skb->len; in efx_enqueue_skb_copy()
97 return -ENOMEM; in efx_enqueue_skb_copy()
101 buffer->len = copy_len; in efx_enqueue_skb_copy()
103 buffer->skb = skb; in efx_enqueue_skb_copy()
104 buffer->flags = EFX_TX_BUF_SKB; in efx_enqueue_skb_copy()
106 ++tx_queue->insert_count; in efx_enqueue_skb_copy()
111 * queues for a channel, so we must check all of them.
118 if (q->xmit_pending) in efx_tx_send_pending()
126 * This maps all fragments of a socket buffer for DMA and adds them to
130 * If any DMA mapping fails, any mapped fragments will be unmapped,
134 * loopback test to direct packets via specific TX queues.
142 unsigned int old_insert_count = tx_queue->insert_count; in __efx_siena_enqueue_skb()
149 skb_len = skb->len; in __efx_siena_enqueue_skb()
150 segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0; in __efx_siena_enqueue_skb()
154 /* Handle TSO first - it's *possible* (although unlikely) that we might in __efx_siena_enqueue_skb()
160 tx_queue->tso_fallbacks++; in __efx_siena_enqueue_skb()
164 } else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) { in __efx_siena_enqueue_skb()
168 tx_queue->cb_packets++; in __efx_siena_enqueue_skb()
172 /* Map for DMA and create descriptors if we haven't done so already. */ in __efx_siena_enqueue_skb()
178 tx_queue->xmit_pending = true; in __efx_siena_enqueue_skb()
181 if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) in __efx_siena_enqueue_skb()
182 efx_tx_send_pending(tx_queue->channel); in __efx_siena_enqueue_skb()
184 tx_queue->tx_packets++; in __efx_siena_enqueue_skb()
197 efx_tx_send_pending(tx_queue->channel); in __efx_siena_enqueue_skb()
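Taken together, the fragments above show the enqueue path choosing one of three strategies per skb (GSO segmentation fallback, copy into a pre-mapped buffer for small fragmented packets, or full DMA mapping), then deferring the doorbell write while the stack signals that more packets are coming (xmit_more). A schematic model of that control flow, with stand-in types and an assumed EFX_TX_CB_SIZE; all helpers here are hypothetical:

	#include <stdbool.h>
	#include <stdio.h>

	#define EFX_TX_CB_SIZE 62   /* assumed: copy-buffer payload size */

	struct fake_skb {
		unsigned int len;        /* total length */
		unsigned int data_len;   /* bytes held in paged fragments */
		bool is_gso;             /* needs segmentation? */
	};

	/* Hypothetical strategy picker mirroring __efx_siena_enqueue_skb(). */
	static const char *pick_tx_path(const struct fake_skb *skb)
	{
		if (skb->is_gso)
			return "TSO (or software GSO fallback)";
		if (skb->data_len && skb->len <= EFX_TX_CB_SIZE)
			return "copy into a pre-mapped copy buffer";
		return "DMA-map the linear data and each fragment";
	}

	int main(void)
	{
		struct fake_skb pkts[] = {
			{ .len = 40,    .data_len = 10,    .is_gso = false },
			{ .len = 1500,  .data_len = 0,     .is_gso = false },
			{ .len = 64000, .data_len = 62000, .is_gso = true  },
		};
		bool xmit_more[] = { true, true, false };

		for (int i = 0; i < 3; i++) {
			printf("pkt %d: %s\n", i, pick_tx_path(&pkts[i]));
			/* Ring the doorbell only when batching stops
			 * (xmit_more clear) or byte queue limits force a flush. */
			if (!xmit_more[i])
				printf("  -> push descriptors (ring doorbell)\n");
		}
		return 0;
	}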
221 return -EINVAL; in efx_siena_xdp_tx_buffers()
226 if (unlikely(cpu >= efx->xdp_tx_queue_count)) in efx_siena_xdp_tx_buffers()
227 return -EINVAL; in efx_siena_xdp_tx_buffers()
229 tx_queue = efx->xdp_tx_queues[cpu]; in efx_siena_xdp_tx_buffers()
231 return -EINVAL; in efx_siena_xdp_tx_buffers()
233 if (!tx_queue->initialised) in efx_siena_xdp_tx_buffers()
234 return -EINVAL; in efx_siena_xdp_tx_buffers()
236 if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) in efx_siena_xdp_tx_buffers()
237 HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu); in efx_siena_xdp_tx_buffers()
239 /* If we're borrowing net stack queues we have to handle stop-restart in efx_siena_xdp_tx_buffers()
242 if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) { in efx_siena_xdp_tx_buffers()
243 if (netif_tx_queue_stopped(tx_queue->core_txq)) in efx_siena_xdp_tx_buffers()
251 space = efx->txq_entries + in efx_siena_xdp_tx_buffers()
252 tx_queue->read_count - tx_queue->insert_count; in efx_siena_xdp_tx_buffers()
263 len = xdpf->len; in efx_siena_xdp_tx_buffers()
265 /* Map for DMA. */ in efx_siena_xdp_tx_buffers()
266 dma_addr = dma_map_single(&efx->pci_dev->dev, in efx_siena_xdp_tx_buffers()
267 xdpf->data, len, in efx_siena_xdp_tx_buffers()
269 if (dma_mapping_error(&efx->pci_dev->dev, dma_addr)) in efx_siena_xdp_tx_buffers()
272 /* Create descriptor and set up for unmapping DMA. */ in efx_siena_xdp_tx_buffers()
274 tx_buffer->xdpf = xdpf; in efx_siena_xdp_tx_buffers()
275 tx_buffer->flags = EFX_TX_BUF_XDP | in efx_siena_xdp_tx_buffers()
277 tx_buffer->dma_offset = 0; in efx_siena_xdp_tx_buffers()
278 tx_buffer->unmap_len = len; in efx_siena_xdp_tx_buffers()
279 tx_queue->tx_packets++; in efx_siena_xdp_tx_buffers()
287 if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) in efx_siena_xdp_tx_buffers()
288 HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq); in efx_siena_xdp_tx_buffers()
290 return i == 0 ? -EIO : i; in efx_siena_xdp_tx_buffers()
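The XDP loop above maps each frame individually and, on a mapping failure, stops early rather than unwinding everything: frames already queued are still sent, and the return value is the count of queued frames, falling back to an error only when nothing at all was mapped. A self-contained model of that partial-success contract (map_frame() is a hypothetical stand-in for dma_map_single() plus dma_mapping_error()):

	#include <stdio.h>

	/* Hypothetical mapper: fails on frame 3 to exercise the error path. */
	static int map_frame(int i)
	{
		return i == 3 ? -1 : 0;
	}

	/* Queue up to n frames; return how many were queued, or an
	 * -EIO-style error only when not a single frame could be mapped. */
	static int xdp_tx_frames(int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (map_frame(i) < 0)
				break;   /* keep what we already queued */
			/* ...create descriptor, account the packet... */
		}
		return i == 0 ? -5 /* -EIO */ : i;
	}

	int main(void)
	{
		printf("queued %d of 8 frames\n", xdp_tx_frames(8)); /* prints 3 */
		return 0;
	}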
296 * Context: non-blocking.
310 if (index >= efx->n_tx_channels) { in efx_siena_hard_start_xmit()
311 index -= efx->n_tx_channels; in efx_siena_hard_start_xmit()
317 ((efx_siena_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) || in efx_siena_hard_start_xmit()
348 struct efx_nic *efx = tx_queue->efx; in efx_siena_init_tx_queue_core_txq()
351 tx_queue->core_txq = in efx_siena_init_tx_queue_core_txq()
352 netdev_get_tx_queue(efx->net_dev, in efx_siena_init_tx_queue_core_txq()
353 tx_queue->channel->channel + in efx_siena_init_tx_queue_core_txq()
354 ((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ? in efx_siena_init_tx_queue_core_txq()
355 efx->n_tx_channels : 0)); in efx_siena_init_tx_queue_core_txq()
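The index computation above lays the net-core TX queues out as two contiguous banks: queues [0, n_tx_channels) carry normal-priority traffic and [n_tx_channels, 2 * n_tx_channels) carry high-priority traffic, which is why the hard_start_xmit path subtracts n_tx_channels to recover the channel. A small sketch of the forward and reverse mapping (names illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	/* Forward map: (channel, highpri) -> net-core queue index. */
	static unsigned int core_txq_index(unsigned int channel, bool highpri,
					   unsigned int n_tx_channels)
	{
		return channel + (highpri ? n_tx_channels : 0);
	}

	/* Reverse map, as the xmit path does: index -> (channel, highpri). */
	static void decode_txq_index(unsigned int index, unsigned int n_tx_channels,
				     unsigned int *channel, bool *highpri)
	{
		*highpri = index >= n_tx_channels;
		*channel = *highpri ? index - n_tx_channels : index;
	}

	int main(void)
	{
		unsigned int n = 4, chan;
		bool hp;

		decode_txq_index(core_txq_index(2, true, n), n, &chan, &hp);
		printf("queue %u -> channel %u, highpri=%d\n",
		       core_txq_index(2, true, n), chan, hp);
		return 0;
	}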
366 return -EOPNOTSUPP; in efx_siena_setup_tc()
368 /* Only Siena supported highpri queues */ in efx_siena_setup_tc()
370 return -EOPNOTSUPP; in efx_siena_setup_tc()
372 num_tc = mqprio->num_tc; in efx_siena_setup_tc()
375 return -EINVAL; in efx_siena_setup_tc()
377 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; in efx_siena_setup_tc()
379 if (num_tc == net_dev->num_tc) in efx_siena_setup_tc()
383 net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels; in efx_siena_setup_tc()
384 net_dev->tc_to_txq[tc].count = efx->n_tx_channels; in efx_siena_setup_tc()
387 net_dev->num_tc = num_tc; in efx_siena_setup_tc()
391 efx->n_tx_channels); in efx_siena_setup_tc()
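The loop above programs the standard mqprio mapping for this two-bank layout: each traffic class tc owns the queue range [tc * n_tx_channels, (tc + 1) * n_tx_channels), so the device must expose num_tc * n_tx_channels real TX queues. A sketch that prints the resulting table (the struct models netdev_tc_txq; values illustrative):

	#include <stdio.h>

	struct tc_txq { unsigned int offset, count; };  /* models netdev_tc_txq */

	int main(void)
	{
		unsigned int n_tx_channels = 4, num_tc = 2;
		struct tc_txq tc_to_txq[2];

		for (unsigned int tc = 0; tc < num_tc; tc++) {
			tc_to_txq[tc].offset = tc * n_tx_channels;
			tc_to_txq[tc].count  = n_tx_channels;
			printf("tc %u -> queues [%u, %u)\n", tc,
			       tc_to_txq[tc].offset,
			       tc_to_txq[tc].offset + tc_to_txq[tc].count);
		}
		/* The device then needs num_tc * n_tx_channels real TX queues. */
		return 0;
	}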