Lines Matching refs:rx_queue
38 static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) in efx_reuse_page() argument
40 struct efx_nic *efx = rx_queue->efx; in efx_reuse_page()
45 if (unlikely(!rx_queue->page_ring)) in efx_reuse_page()
47 index = rx_queue->page_remove & rx_queue->page_ptr_mask; in efx_reuse_page()
48 page = rx_queue->page_ring[index]; in efx_reuse_page()
52 rx_queue->page_ring[index] = NULL; in efx_reuse_page()
54 if (rx_queue->page_remove != rx_queue->page_add) in efx_reuse_page()
55 ++rx_queue->page_remove; in efx_reuse_page()
59 ++rx_queue->page_recycle_count; in efx_reuse_page()
67 ++rx_queue->page_recycle_failed; in efx_reuse_page()
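The block above is the consumer half of the per-queue page recycle ring: page_remove and page_add are free-running counters, page_ptr_mask (a power of two minus one) wraps them into the ring, and page_remove only advances while it still lags page_add. Below is a minimal userspace sketch of that arithmetic; struct recycle_ring, ring_take() and the field names are illustrative stand-ins, not driver types.

    #include <stddef.h>

    /* Hypothetical, simplified stand-in for the driver's recycle-ring state. */
    struct recycle_ring {
    	void **pages;            /* power-of-two array of parked pages */
    	unsigned int ptr_mask;   /* ring size - 1 */
    	unsigned int add_cnt;    /* free-running producer counter */
    	unsigned int remove_cnt; /* free-running consumer counter */
    };

    /* Consumer (shape of efx_reuse_page()): take the next recycled page, if any. */
    static void *ring_take(struct recycle_ring *r)
    {
    	unsigned int index = r->remove_cnt & r->ptr_mask;
    	void *page = r->pages[index];

    	r->pages[index] = NULL;
    	if (r->remove_cnt != r->add_cnt)  /* only advance while lagging the producer */
    		++r->remove_cnt;
    	return page;                      /* may be NULL: caller allocates a fresh page */
    }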
80 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); in efx_recycle_rx_page() local
81 struct efx_nic *efx = rx_queue->efx; in efx_recycle_rx_page()
89 index = rx_queue->page_add & rx_queue->page_ptr_mask; in efx_recycle_rx_page()
90 if (rx_queue->page_ring[index] == NULL) { in efx_recycle_rx_page()
91 unsigned int read_index = rx_queue->page_remove & in efx_recycle_rx_page()
92 rx_queue->page_ptr_mask; in efx_recycle_rx_page()
99 ++rx_queue->page_remove; in efx_recycle_rx_page()
100 rx_queue->page_ring[index] = page; in efx_recycle_rx_page()
101 ++rx_queue->page_add; in efx_recycle_rx_page()
104 ++rx_queue->page_recycle_full; in efx_recycle_rx_page()
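This is the matching producer half, continuing the simplified recycle_ring sketch above: a page is parked only if the slot at page_add & page_ptr_mask is empty, the consumer counter is nudged forward if it currently points at that slot, and otherwise page_recycle_full is bumped and the caller releases the page. Sketch (ring_give() and the full_cnt parameter are illustrative):

    /* Producer (shape of efx_recycle_rx_page()), reusing struct recycle_ring
     * from the previous sketch plus an illustrative full_cnt statistic. */
    static int ring_give(struct recycle_ring *r, unsigned int *full_cnt, void *page)
    {
    	unsigned int index = r->add_cnt & r->ptr_mask;

    	if (r->pages[index] == NULL) {
    		/* Mirror of the driver's adjustment: advance the consumer
    		 * counter if it currently points at this slot. */
    		if ((r->remove_cnt & r->ptr_mask) == index)
    			++r->remove_cnt;
    		r->pages[index] = page;
    		++r->add_cnt;
    		return 0;
    	}
    	++*full_cnt;     /* ring full: caller must release the page itself */
    	return -1;
    }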
114 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); in efx_siena_recycle_rx_pages() local
116 if (unlikely(!rx_queue->page_ring)) in efx_siena_recycle_rx_pages()
121 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); in efx_siena_recycle_rx_pages()
129 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); in efx_siena_discard_rx_packet() local
133 efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags); in efx_siena_discard_rx_packet()
136 static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue) in efx_init_rx_recycle_ring() argument
139 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_recycle_ring()
144 rx_queue->page_ring = kcalloc(page_ring_size, in efx_init_rx_recycle_ring()
145 sizeof(*rx_queue->page_ring), GFP_KERNEL); in efx_init_rx_recycle_ring()
146 if (!rx_queue->page_ring) in efx_init_rx_recycle_ring()
147 rx_queue->page_ptr_mask = 0; in efx_init_rx_recycle_ring()
149 rx_queue->page_ptr_mask = page_ring_size - 1; in efx_init_rx_recycle_ring()
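efx_init_rx_recycle_ring() sizes the ring to a power of two so the counters can be wrapped with a single AND, and on allocation failure it leaves page_ptr_mask at 0, effectively disabling recycling. A minimal sketch of that sizing rule, assuming illustrative helpers (roundup_pow2() and ring_alloc() are not driver functions):

    #include <stdlib.h>

    /* Round up to the next power of two so that (size - 1) works as an index mask. */
    static unsigned int roundup_pow2(unsigned int n)
    {
    	unsigned int size = 1;

    	while (size < n)
    		size <<= 1;
    	return size;
    }

    /* Shape of efx_init_rx_recycle_ring(): allocate the ring, or fall back to a
     * zero mask (recycling disabled) if the allocation fails. */
    static void **ring_alloc(unsigned int wanted, unsigned int *ptr_mask)
    {
    	unsigned int size = roundup_pow2(wanted);
    	void **ring = calloc(size, sizeof(*ring));

    	*ptr_mask = ring ? size - 1 : 0;
    	return ring;
    }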
152 static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue) in efx_fini_rx_recycle_ring() argument
154 struct efx_nic *efx = rx_queue->efx; in efx_fini_rx_recycle_ring()
157 if (unlikely(!rx_queue->page_ring)) in efx_fini_rx_recycle_ring()
161 for (i = 0; i <= rx_queue->page_ptr_mask; i++) { in efx_fini_rx_recycle_ring()
162 struct page *page = rx_queue->page_ring[i]; in efx_fini_rx_recycle_ring()
174 kfree(rx_queue->page_ring); in efx_fini_rx_recycle_ring()
175 rx_queue->page_ring = NULL; in efx_fini_rx_recycle_ring()
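Teardown visits every slot from 0 through page_ptr_mask inclusive, releases any page still parked there, then frees the array and clears the pointer so later paths (which test for a NULL ring) skip recycling. A simplified sketch; release_page() stands in for the driver's DMA unmap and put_page() pair:

    #include <stdlib.h>

    static void release_page(void *page)
    {
    	/* In the driver this is a DMA unmap followed by put_page(). */
    	free(page);
    }

    static void ring_free(void ***ringp, unsigned int ptr_mask)
    {
    	void **ring = *ringp;
    	unsigned int i;

    	if (!ring)
    		return;
    	for (i = 0; i <= ptr_mask; i++) {
    		if (ring[i])
    			release_page(ring[i]);
    	}
    	free(ring);
    	*ringp = NULL;   /* later checks treat a NULL ring as "no recycling" */
    }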
178 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, in efx_fini_rx_buffer() argument
187 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); in efx_fini_rx_buffer()
188 efx_siena_free_rx_buffers(rx_queue, rx_buf, 1); in efx_fini_rx_buffer()
193 int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue) in efx_siena_probe_rx_queue() argument
195 struct efx_nic *efx = rx_queue->efx; in efx_siena_probe_rx_queue()
202 rx_queue->ptr_mask = entries - 1; in efx_siena_probe_rx_queue()
206 efx_rx_queue_index(rx_queue), efx->rxq_entries, in efx_siena_probe_rx_queue()
207 rx_queue->ptr_mask); in efx_siena_probe_rx_queue()
210 rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), in efx_siena_probe_rx_queue()
212 if (!rx_queue->buffer) in efx_siena_probe_rx_queue()
215 rc = efx_nic_probe_rx(rx_queue); in efx_siena_probe_rx_queue()
217 kfree(rx_queue->buffer); in efx_siena_probe_rx_queue()
218 rx_queue->buffer = NULL; in efx_siena_probe_rx_queue()
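efx_siena_probe_rx_queue() follows the same pattern at descriptor scale: the software buffer table holds one entry per descriptor, the entry count is a power of two so ptr_mask is entries - 1, and a failure in the hardware-specific probe unwinds the allocation. A hedged sketch of that unwind shape; queue_probe(), hw_probe() and struct rxq are illustrative, not driver APIs:

    #include <stdlib.h>
    #include <errno.h>

    struct rxq {
    	void **buffer;          /* one slot per descriptor */
    	unsigned int ptr_mask;  /* entries - 1, entries is a power of two */
    };

    static int hw_probe(struct rxq *q) { (void)q; return 0; } /* stand-in */

    static int queue_probe(struct rxq *q, unsigned int entries /* power of two */)
    {
    	q->ptr_mask = entries - 1;
    	q->buffer = calloc(entries, sizeof(*q->buffer));
    	if (!q->buffer)
    		return -ENOMEM;

    	if (hw_probe(q)) {          /* unwind on backend failure */
    		free(q->buffer);
    		q->buffer = NULL;
    		return -EIO;
    	}
    	return 0;
    }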
224 void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue) in efx_siena_init_rx_queue() argument
227 struct efx_nic *efx = rx_queue->efx; in efx_siena_init_rx_queue()
230 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_siena_init_rx_queue()
231 "initialising RX queue %d\n", efx_rx_queue_index(rx_queue)); in efx_siena_init_rx_queue()
234 rx_queue->added_count = 0; in efx_siena_init_rx_queue()
235 rx_queue->notified_count = 0; in efx_siena_init_rx_queue()
236 rx_queue->removed_count = 0; in efx_siena_init_rx_queue()
237 rx_queue->min_fill = -1U; in efx_siena_init_rx_queue()
238 efx_init_rx_recycle_ring(rx_queue); in efx_siena_init_rx_queue()
240 rx_queue->page_remove = 0; in efx_siena_init_rx_queue()
241 rx_queue->page_add = rx_queue->page_ptr_mask + 1; in efx_siena_init_rx_queue()
242 rx_queue->page_recycle_count = 0; in efx_siena_init_rx_queue()
243 rx_queue->page_recycle_failed = 0; in efx_siena_init_rx_queue()
244 rx_queue->page_recycle_full = 0; in efx_siena_init_rx_queue()
258 rx_queue->max_fill = max_fill; in efx_siena_init_rx_queue()
259 rx_queue->fast_fill_trigger = trigger; in efx_siena_init_rx_queue()
260 rx_queue->refill_enabled = true; in efx_siena_init_rx_queue()
263 rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev, in efx_siena_init_rx_queue()
264 rx_queue->core_index, 0); in efx_siena_init_rx_queue()
272 rx_queue->xdp_rxq_info_valid = true; in efx_siena_init_rx_queue()
276 efx_nic_init_rx(rx_queue); in efx_siena_init_rx_queue()
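Initialisation resets the free-running counters, sets min_fill to -1U (i.e. UINT_MAX, so the first observed fill level becomes the recorded minimum), and starts the recycle producer exactly one ring length ahead of the consumer (page_add = page_ptr_mask + 1); it then sets the fill limits, enables refill and registers the XDP RX queue info. A small sketch of that counter reset, assuming the hypothetical struct below:

    /* Hypothetical mirror of the per-queue state reset in efx_siena_init_rx_queue(). */
    struct rxq_state {
    	unsigned int added_count, notified_count, removed_count;
    	unsigned int min_fill;
    	unsigned int page_add, page_remove;
    	unsigned int page_ptr_mask;
    };

    static void rxq_state_reset(struct rxq_state *q)
    {
    	q->added_count = q->notified_count = q->removed_count = 0;
    	q->min_fill = -1U;                    /* UINT_MAX: first fill level wins */
    	q->page_remove = 0;
    	q->page_add = q->page_ptr_mask + 1;   /* producer starts a full ring ahead */
    }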
279 void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue) in efx_siena_fini_rx_queue() argument
284 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_siena_fini_rx_queue()
285 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); in efx_siena_fini_rx_queue()
287 timer_delete_sync(&rx_queue->slow_fill); in efx_siena_fini_rx_queue()
290 if (rx_queue->buffer) { in efx_siena_fini_rx_queue()
291 for (i = rx_queue->removed_count; i < rx_queue->added_count; in efx_siena_fini_rx_queue()
293 unsigned int index = i & rx_queue->ptr_mask; in efx_siena_fini_rx_queue()
295 rx_buf = efx_rx_buffer(rx_queue, index); in efx_siena_fini_rx_queue()
296 efx_fini_rx_buffer(rx_queue, rx_buf); in efx_siena_fini_rx_queue()
300 efx_fini_rx_recycle_ring(rx_queue); in efx_siena_fini_rx_queue()
302 if (rx_queue->xdp_rxq_info_valid) in efx_siena_fini_rx_queue()
303 xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info); in efx_siena_fini_rx_queue()
305 rx_queue->xdp_rxq_info_valid = false; in efx_siena_fini_rx_queue()
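Shutdown first stops the slow-fill timer, then walks only the descriptors still outstanding, every free-running index from removed_count up to added_count, wrapped by ptr_mask, releasing each buffer, before tearing down the recycle ring and unregistering the XDP info. A sketch of that drain loop with illustrative names:

    #include <stddef.h>

    /* Drain descriptors still owned by the queue at shutdown: indices from
     * removed_count up to (but not including) added_count are outstanding,
     * and the mask wraps each one into the buffer array. Illustrative only. */
    static void drain_outstanding(void **buffer, unsigned int ptr_mask,
    			      unsigned int removed_count, unsigned int added_count,
    			      void (*release)(void *buf))
    {
    	unsigned int i;

    	for (i = removed_count; i < added_count; i++) {
    		unsigned int index = i & ptr_mask;

    		if (buffer[index]) {
    			release(buffer[index]);
    			buffer[index] = NULL;
    		}
    	}
    }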
308 void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue) in efx_siena_remove_rx_queue() argument
310 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_siena_remove_rx_queue()
311 "destroying RX queue %d\n", efx_rx_queue_index(rx_queue)); in efx_siena_remove_rx_queue()
313 efx_nic_remove_rx(rx_queue); in efx_siena_remove_rx_queue()
315 kfree(rx_queue->buffer); in efx_siena_remove_rx_queue()
316 rx_queue->buffer = NULL; in efx_siena_remove_rx_queue()
337 void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue, in efx_siena_free_rx_buffers() argument
346 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); in efx_siena_free_rx_buffers()
352 struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill); in efx_siena_rx_slow_fill() local
355 efx_nic_generate_fill_event(rx_queue); in efx_siena_rx_slow_fill()
356 ++rx_queue->slow_fill_count; in efx_siena_rx_slow_fill()
359 static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) in efx_schedule_slow_fill() argument
361 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10)); in efx_schedule_slow_fill()
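When a refill attempt cannot allocate buffers, the driver does not spin: efx_schedule_slow_fill() arms a one-shot timer roughly 10 ms out, and the timer callback raises a fill event and counts how often the slow path ran. A conceptual sketch in plain C; the tick-based deadline stands in for jiffies and msecs_to_jiffies():

    /* Illustrative slow-fill fallback state; not a driver type. */
    struct slow_fill {
    	unsigned long deadline;        /* absolute tick at which to retry */
    	unsigned int slow_fill_count;  /* how often the slow path was used */
    };

    /* Shape of efx_schedule_slow_fill(): retry about 10 ms from now. */
    static void schedule_slow_fill(struct slow_fill *sf, unsigned long now_ticks,
    			       unsigned long ticks_per_ms)
    {
    	sf->deadline = now_ticks + 10 * ticks_per_ms;
    }

    /* Shape of the timer callback: in the driver this raises a fill event
     * that re-runs the fast-fill path. */
    static void slow_fill_expired(struct slow_fill *sf)
    {
    	++sf->slow_fill_count;
    }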
373 static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) in efx_init_rx_buffers() argument
376 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_buffers()
384 page = efx_reuse_page(rx_queue); in efx_init_rx_buffers()
411 index = rx_queue->added_count & rx_queue->ptr_mask; in efx_init_rx_buffers()
412 rx_buf = efx_rx_buffer(rx_queue, index); in efx_init_rx_buffers()
420 ++rx_queue->added_count; in efx_init_rx_buffers()
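efx_init_rx_buffers() tries the recycle ring first and only allocates (and DMA-maps) a fresh page when nothing is available, then claims the next descriptor slot by masking the free-running added_count. A simplified sketch of that order; get_rx_page(), claim_slot() and take_recycled are illustrative:

    #include <stdlib.h>

    /* Prefer a recycled page, fall back to a fresh allocation. In the driver
     * the fallback also DMA-maps the page; here malloc() is a stand-in. */
    static void *get_rx_page(void *(*take_recycled)(void), size_t page_size)
    {
    	void *page = take_recycled();

    	return page ? page : malloc(page_size);
    }

    /* Claim the next descriptor slot with the free-running producer counter. */
    static unsigned int claim_slot(unsigned int *added_count, unsigned int ptr_mask)
    {
    	unsigned int index = *added_count & ptr_mask;

    	++*added_count;
    	return index;
    }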
457 void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, in efx_siena_fast_push_rx_descriptors() argument
460 struct efx_nic *efx = rx_queue->efx; in efx_siena_fast_push_rx_descriptors()
464 if (!rx_queue->refill_enabled) in efx_siena_fast_push_rx_descriptors()
468 fill_level = (rx_queue->added_count - rx_queue->removed_count); in efx_siena_fast_push_rx_descriptors()
469 EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries); in efx_siena_fast_push_rx_descriptors()
470 if (fill_level >= rx_queue->fast_fill_trigger) in efx_siena_fast_push_rx_descriptors()
474 if (unlikely(fill_level < rx_queue->min_fill)) { in efx_siena_fast_push_rx_descriptors()
476 rx_queue->min_fill = fill_level; in efx_siena_fast_push_rx_descriptors()
480 space = rx_queue->max_fill - fill_level; in efx_siena_fast_push_rx_descriptors()
483 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_siena_fast_push_rx_descriptors()
486 efx_rx_queue_index(rx_queue), fill_level, in efx_siena_fast_push_rx_descriptors()
487 rx_queue->max_fill); in efx_siena_fast_push_rx_descriptors()
490 rc = efx_init_rx_buffers(rx_queue, atomic); in efx_siena_fast_push_rx_descriptors()
493 efx_schedule_slow_fill(rx_queue); in efx_siena_fast_push_rx_descriptors()
498 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_siena_fast_push_rx_descriptors()
500 "to level %d\n", efx_rx_queue_index(rx_queue), in efx_siena_fast_push_rx_descriptors()
501 rx_queue->added_count - rx_queue->removed_count); in efx_siena_fast_push_rx_descriptors()
504 if (rx_queue->notified_count != rx_queue->added_count) in efx_siena_fast_push_rx_descriptors()
505 efx_nic_notify_rx_desc(rx_queue); in efx_siena_fast_push_rx_descriptors()
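The fast-fill path computes the fill level as the plain difference of the free-running counters, returns early while the queue is still above fast_fill_trigger, tracks the lowest level ever seen, refills toward max_fill (falling back to the slow-fill timer if allocation fails), and finally notifies the hardware only if new buffers were actually added. A condensed userspace sketch, assuming the illustrative struct and stub below:

    #include <stdbool.h>

    /* Illustrative stand-in for the queue's fill-tracking fields. */
    struct fill_state {
    	unsigned int added_count, removed_count, notified_count;
    	unsigned int max_fill, fast_fill_trigger, min_fill;
    };

    /* Stand-in for efx_init_rx_buffers(): pretend one buffer was added. */
    static bool refill_one(struct fill_state *q)
    {
    	++q->added_count;
    	return true;
    }

    static void fast_push(struct fill_state *q)
    {
    	unsigned int fill_level = q->added_count - q->removed_count;

    	if (fill_level >= q->fast_fill_trigger)
    		return;                          /* still above the refill trigger */

    	if (fill_level < q->min_fill)
    		q->min_fill = fill_level;        /* statistics: lowest level seen */

    	while (q->added_count - q->removed_count < q->max_fill) {
    		if (!refill_one(q))
    			break;                   /* driver arms the slow-fill timer here */
    	}

    	if (q->notified_count != q->added_count)
    		q->notified_count = q->added_count; /* driver rings the RX doorbell here */
    }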
522 struct efx_rx_queue *rx_queue; in efx_siena_rx_packet_gro() local
524 rx_queue = efx_channel_get_rx_queue(channel); in efx_siena_rx_packet_gro()
525 efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags); in efx_siena_rx_packet_gro()
550 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); in efx_siena_rx_packet_gro()
556 skb_record_rx_queue(skb, channel->rx_queue.core_index); in efx_siena_rx_packet_gro()
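In the GRO receive path the fragments of a multi-buffer packet are walked with efx_rx_buf_next(), which steps to the following descriptor and wraps at the end of the ring; the assembled skb is then tagged with the queue's core index via skb_record_rx_queue(). A tiny sketch of that wraparound step with illustrative names:

    /* Step to the next fragment's descriptor, wrapping at the end of the ring
     * (the shape of efx_rx_buf_next()); struct rx_buf here is illustrative. */
    struct rx_buf { void *page; unsigned int len; };

    static struct rx_buf *buf_next(struct rx_buf *ring, unsigned int ptr_mask,
    			       struct rx_buf *buf)
    {
    	unsigned int index = (unsigned int)(buf - ring);

    	return &ring[(index + 1) & ptr_mask];
    }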