Lines matching +full:channel +full:- +full:1 (fragments of drivers/net/ethernet/sfc/siena/rx.c, grouped by function)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */
In efx_rx_packet__check_len():

        struct efx_nic *efx = rx_queue->efx;
        unsigned int max_len = rx_buf->len - efx->type->rx_buffer_padding;
        ...
        rx_buf->flags |= EFX_RX_PKT_DISCARD;   /* overlength: discard */
        ...
        netif_err(efx, rx_err, efx->net_dev, ...);
        ...
        efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
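The fragment above validates the completed packet length against the buffer size minus the NIC's trailing padding, flagging overlength packets for discard. Below is a minimal standalone C model of that check; the struct, field, and flag names are illustrative stand-ins, not the driver's real definitions.

/* Minimal userspace model of the overlength check above. */
#include <stdbool.h>
#include <stdio.h>

struct rx_buf_model {
        unsigned int len;       /* usable bytes in the buffer */
        unsigned int flags;
};
#define RX_PKT_DISCARD 0x1

/* Returns true when the completed length fits the buffer once the
 * hardware's trailing padding is accounted for.
 */
static bool rx_len_ok(struct rx_buf_model *buf, unsigned int pkt_len,
                      unsigned int rx_buffer_padding)
{
        unsigned int max_len = buf->len - rx_buffer_padding;

        if (pkt_len <= max_len)
                return true;
        buf->flags |= RX_PKT_DISCARD;   /* mark for discard, as the driver does */
        return false;
}

int main(void)
{
        struct rx_buf_model buf = { .len = 2048, .flags = 0 };

        printf("1500 bytes ok? %d\n", rx_len_ok(&buf, 1500, 16)); /* 1 */
        printf("2040 bytes ok? %d\n", rx_len_ok(&buf, 2040, 16)); /* 0 */
        return 0;
}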
In efx_rx_mk_skb():

        static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
                                             struct efx_rx_buffer *rx_buf,
                                             unsigned int n_frags, u8 *eh, int hdr_len)
        {
                struct efx_nic *efx = channel->efx;
                ...
                /* Allocate an skb with room for the RX prefix and headers */
                skb = netdev_alloc_skb(efx->net_dev,
                                       efx->rx_ip_align + efx->rx_prefix_size +
                                       hdr_len);
                if (unlikely(!skb)) {
                        atomic_inc(&efx->n_rx_noskb_drops);
                        return NULL;
                }

                EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);

                /* Copy the RX prefix and packet headers into the skb head */
                memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
                       efx->rx_prefix_size + hdr_len);
                skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
                ...
                /* Attach any payload beyond the headers as page fragments */
                if (rx_buf->len > hdr_len) {
                        rx_buf->page_offset += hdr_len;
                        rx_buf->len -= hdr_len;
                        for (;;) {
                                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                                rx_buf->page, rx_buf->page_offset,
                                                rx_buf->len, efx->rx_buffer_truesize);
                                rx_buf->page = NULL;
                                if (skb_shinfo(skb)->nr_frags == n_frags)
                                        break;
                                rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
                        }
                } else {
                        __free_pages(rx_buf->page, efx->rx_buffer_order);
                        rx_buf->page = NULL;
                }
                ...
                skb->protocol = eth_type_trans(skb, efx->net_dev);
                skb_mark_napi_id(skb, &channel->napi_str);
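efx_rx_mk_skb() copies at most hdr_len bytes of headers into the skb's linear area and leaves the remaining payload in the DMA pages, attaching it as fragments. A tiny standalone sketch of that split arithmetic; names and sizes are illustrative only.

/* Userspace sketch of the header/payload split used above. */
#include <stdio.h>

static void split_packet(unsigned int buf_len, unsigned int hdr_len)
{
        unsigned int linear = buf_len < hdr_len ? buf_len : hdr_len;
        unsigned int frag = buf_len - linear;

        printf("len %u -> %u linear + %u frag bytes\n",
               buf_len, linear, frag);
}

int main(void)
{
        split_packet(64, 128);   /* small packet: all linear, page freed */
        split_packet(1514, 128); /* full frame: 128 copied, 1386 in frags */
        return 0;
}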
In efx_siena_rx_packet():

        struct efx_nic *efx = rx_queue->efx;
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        ...
        rx_queue->rx_packets++;
        ...
        rx_buf->flags |= flags;

        /* Validate the fragment count and completed length */
        if (n_frags == 1) {
                ...
        } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
                   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
                   unlikely(len > n_frags * efx->rx_dma_len) ||
                   unlikely(!efx->rx_scatter)) {
                /* Only an explicit, zero-length discard request is valid here */
                WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
                rx_buf->flags |= EFX_RX_PKT_DISCARD;
        }

        netif_vdbg(efx, rx_status, efx->net_dev,
                   "RX queue %d received ids %x-%x len %d %s%s\n",
                   efx_rx_queue_index(rx_queue), index,
                   (index + n_frags - 1) & rx_queue->ptr_mask, len,
                   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
                   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

        /* Discard if instructed, delivering any held packet first */
        if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
                efx_rx_flush_packet(channel);
                efx_siena_discard_rx_packet(channel, rx_buf, n_frags);
                return;
        }

        if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
                rx_buf->len = len;

        /* Release and/or sync the DMA mapping; this assumes all RX
         * buffers are consumed in order per RX queue.
         */
        efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
        ...
        /* Step past the hardware RX prefix to the packet data */
        rx_buf->page_offset += efx->rx_prefix_size;
        rx_buf->len -= efx->rx_prefix_size;

        if (n_frags > 1) {
                /* Sync the remaining fragments; the last one holds
                 * whatever the full-sized ones do not.
                 */
                unsigned int tail_frags = n_frags - 1;

                for (;;) {
                        rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
                        if (--tail_frags == 0)
                                break;
                        efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
                }
                rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
                efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
        }

        /* All fragments have been DMA-synced, so recycle pages. */
        ...
        efx_siena_recycle_rx_pages(channel, rx_buf, n_frags);
        ...
        /* Hold the packet so its headers prefetch into cache; the
         * previously held packet is delivered now.
         */
        efx_rx_flush_packet(channel);
        channel->rx_pkt_n_frags = n_frags;
        channel->rx_pkt_index = index;
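The scatter validation in efx_siena_rx_packet() encodes an invariant: with fragments of rx_dma_len bytes each, a credible total length satisfies (n_frags - 1) * rx_dma_len < len <= n_frags * rx_dma_len, and the tail fragment holds the remainder. A standalone model of that arithmetic; the rx_dma_len value below is illustrative.

/* Standalone model of the scatter validation and tail-length fixup. */
#include <stdbool.h>
#include <stdio.h>

static bool scatter_len_valid(unsigned int n_frags, unsigned int len,
                              unsigned int rx_dma_len)
{
        /* All fragments but the last must be completely full */
        return len > (n_frags - 1) * rx_dma_len &&
               len <= n_frags * rx_dma_len;
}

int main(void)
{
        unsigned int rx_dma_len = 1824, len = 4000, n_frags = 3;

        if (scatter_len_valid(n_frags, len, rx_dma_len))
                /* The last fragment carries what the full ones don't */
                printf("tail fragment length = %u\n",
                       len - (n_frags - 1) * rx_dma_len);  /* 352 */
        return 0;
}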
In efx_rx_deliver():

        static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
                                   struct efx_rx_buffer *rx_buf, unsigned int n_frags)
        ...
        u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
        skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
        if (unlikely(!skb)) {
                rx_queue = efx_channel_get_rx_queue(channel);
                efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
                return;
        }
        skb_record_rx_queue(skb, channel->rx_queue.core_index);
        /* Report the hardware checksum result on the skb */
        if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
        }
        efx_rx_skb_attach_timestamp(channel, skb);
        if (channel->type->receive_skb)
                if (channel->type->receive_skb(channel, skb))
                        return;
        /* Batch onto the channel's rx_list when present */
        if (channel->rx_list != NULL)
                list_add_tail(&skb->list, channel->rx_list);
        ...
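efx_rx_deliver() translates the hardware's checksum bits into skb fields: CHECKSUM_UNNECESSARY tells the stack it can skip verification, and csum_level counts how many nested (e.g. encapsulated) checksums were validated beyond the first. A standalone model of that mapping; the flag bit values here are illustrative, not the driver's real definitions.

/* Model of the checksum-flag mapping in efx_rx_deliver(). */
#include <stdio.h>

#define PKT_CSUMMED    0x1  /* hardware validated the (outer) checksum */
#define PKT_CSUM_LEVEL 0x2  /* an inner (encapsulated) checksum was also ok */

enum ip_summed { CHECKSUM_NONE = 0, CHECKSUM_UNNECESSARY = 1 };

int main(void)
{
        unsigned int flags = PKT_CSUMMED | PKT_CSUM_LEVEL;
        enum ip_summed summed = CHECKSUM_NONE;
        int csum_level = 0;

        if (flags & PKT_CSUMMED) {
                summed = CHECKSUM_UNNECESSARY;
                /* !! collapses the flag to 0 or 1 extra validated level */
                csum_level = !!(flags & PKT_CSUM_LEVEL);
        }
        printf("ip_summed=%d csum_level=%d\n", summed, csum_level);
        return 0;
}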
In efx_do_xdp():

        static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
                               struct efx_rx_buffer *rx_buf, u8 **ehp)
        {
                ...
                xdp_prog = rcu_dereference_bh(efx->xdp_prog);
                ...
                rx_queue = efx_channel_get_rx_queue(channel);
                if (unlikely(channel->rx_pkt_n_frags > 1)) {
                        /* We can't do XDP on fragmented packets - drop. */
                        efx_siena_free_rx_buffers(rx_queue, rx_buf,
                                                  channel->rx_pkt_n_frags);
                        netif_err(efx, rx_err, efx->net_dev,
                                  "XDP is not possible with multiple receive fragments (%d)\n",
                                  channel->rx_pkt_n_frags);
                        channel->n_rx_xdp_bad_drops++;
                        return false;
                }
                /* Make the frame visible to the CPU before running the program */
                dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
                                        rx_buf->len, DMA_FROM_DEVICE);
                /* Save the RX prefix so it can be restored after XDP_PASS */
                EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
                memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
                       efx->rx_prefix_size);
                xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
                xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
                                 rx_buf->len, false);
                ...
                /* How far did the program move the start of the frame? */
                offset = (u8 *)xdp.data - *ehp;
                switch (xdp_act) {
                case XDP_PASS:
                        /* Fix up the RX prefix if the head pointer moved */
                        if (offset) {
                                *ehp += offset;
                                rx_buf->page_offset += offset;
                                rx_buf->len -= offset;
                                memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
                                       efx->rx_prefix_size);
                        }
                        break;
                case XDP_TX:
                        ...
                        err = efx_siena_xdp_tx_buffers(efx, 1, &xdpf, true);
                        if (unlikely(err != 1)) {
                                efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
                                netif_err(efx, rx_err, efx->net_dev,
                                          "XDP TX failed (%d)\n", err);
                                channel->n_rx_xdp_bad_drops++;
                                trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
                        } else {
                                channel->n_rx_xdp_tx++;
                        }
                        break;
                case XDP_REDIRECT:
                        err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
                        if (unlikely(err)) {
                                efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
                                netif_err(efx, rx_err, efx->net_dev,
                                          "XDP redirect failed (%d)\n", err);
                                channel->n_rx_xdp_bad_drops++;
                                trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
                        } else {
                                channel->n_rx_xdp_redirect++;
                        }
                        break;
                default:
                        bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act);
                        efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
                        channel->n_rx_xdp_bad_drops++;
                        trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
                        break;
                case XDP_ABORTED:
                        trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
                        fallthrough;
                case XDP_DROP:
                        efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
                        channel->n_rx_xdp_drops++;
                        break;
                }
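The switch above is the driver's half of the XDP contract; the other half is a BPF program attached to the net device. A minimal sketch of such a program, assuming libbpf's bpf_helpers.h is available: it returns XDP_PASS for every frame, so only the prefix-fixup path above would run. Returning XDP_TX, XDP_REDIRECT, XDP_DROP, or XDP_ABORTED instead would exercise the other cases.

/* xdp_pass.c: build with
 *   clang -O2 -g -target bpf -c xdp_pass.c -o xdp_pass.o
 * and attach with, for example,
 *   ip link set dev <device> xdp obj xdp_pass.o sec xdp
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_pass_all(struct xdp_md *ctx)
{
        return XDP_PASS; /* deliver the frame to the normal stack */
}

char _license[] SEC("license") = "GPL";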
In __efx_siena_rx_packet():

        void __efx_siena_rx_packet(struct efx_channel *channel)
        {
                struct efx_nic *efx = channel->efx;
                struct efx_rx_buffer *rx_buf =
                        efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
                u8 *eh = efx_rx_buf_va(rx_buf);
                /* A prefix-supplied length already excludes the prefix itself */
                if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
                        rx_buf->len = le16_to_cpup((__le16 *)
                                                   (eh + efx->rx_packet_len_offset));
                /* During loopback selftest, divert the packet and free the buffers */
                if (unlikely(efx->loopback_selftest)) {
                        ...
                        efx_siena_loopback_rx_packet(efx, eh, rx_buf->len);
                        rx_queue = efx_channel_get_rx_queue(channel);
                        efx_siena_free_rx_buffers(rx_queue, rx_buf,
                                                  channel->rx_pkt_n_frags);
                        goto out;
                }
                if (!efx_do_xdp(efx, channel, rx_buf, &eh))
                        goto out;
                /* Honour the interface's RXCSUM feature setting */
                if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                        rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
                /* TCP traffic goes through GRO unless the channel wants skbs */
                if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
                        efx_siena_rx_packet_gro(channel, rx_buf,
                                                channel->rx_pkt_n_frags, eh, 0);
                else
                        efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
        out:
                channel->rx_pkt_n_frags = 0;
        }
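__efx_siena_rx_packet() reads the packet length as a little-endian 16-bit value at rx_packet_len_offset within the hardware RX prefix; le16_to_cpup handles the byte order in the kernel. A standalone model of that extraction follows; the prefix layout and offset below are made up for illustration.

/* Userspace model of the little-endian prefix length read. */
#include <stdint.h>
#include <stdio.h>

static uint16_t le16_read(const uint8_t *p)
{
        /* Assemble from bytes so the host's endianness doesn't matter */
        return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
        uint8_t prefix[14] = { 0 };
        unsigned int rx_packet_len_offset = 8;  /* illustrative offset */

        /* Pretend hardware wrote 0x05EE (1518) little-endian into the prefix */
        prefix[rx_packet_len_offset] = 0xEE;
        prefix[rx_packet_len_offset + 1] = 0x05;

        printf("packet length = %u\n",
               le16_read(prefix + rx_packet_len_offset));
        return 0;
}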