// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
#include <net/rps.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM	(1 + EFX_RX_MAX_FRAGS)

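/* RX page recycle ring.
 *
 * Pages used for RX buffers are kept DMA-mapped and, once every buffer in a
 * page has been processed, a page may be placed on a small per-queue ring so
 * it can be reused without a fresh allocation and DMA mapping.  Each page
 * carries a struct efx_rx_page_state at its start, recording the DMA address
 * of the mapping.
 */
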
/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	unsigned int index;
	struct page *page;

	if (unlikely(!rx_queue->page_ring))
		return NULL;
	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	struct page *page = rx_buf->page;
	unsigned int index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned int read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

/* Recycle the pages that are used by buffers that have just been received. */
void efx_recycle_rx_pages(struct efx_channel *channel,
			  struct efx_rx_buffer *rx_buf,
			  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(!rx_queue->page_ring))
		return;

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

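/* Return the pages of a discarded packet to the recycle ring where possible,
 * then release the RX buffers that referenced them.
 */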
void efx_discard_rx_packet(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

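/* Allocate the page recycle ring, sized to hold roughly
 * efx_rx_recycle_ring_size() buffers' worth of pages, rounded up to a power
 * of two.  If the allocation fails the queue simply runs without recycling.
 */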
static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;
	struct efx_nic *efx = rx_queue->efx;

	bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	if (!rx_queue->page_ring)
		rx_queue->page_ptr_mask = 0;
	else
		rx_queue->page_ptr_mask = page_ring_size - 1;
}

static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	int i;

	if (unlikely(!rx_queue->page_ring))
		return;

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two sized ring that will hold the
	 * requested number of entries, subject to the minimum DMA queue size.
	 */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	unsigned int max_fill, trigger, max_trigger;
	struct efx_nic *efx = rx_queue->efx;
	int rc = 0;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->granted_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(rx_queue);

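	/* The recycle ring starts empty: the read pointer is at slot 0 and
	 * the write pointer a full ring length ahead of it.
	 */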
	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	rx_queue->old_rx_packets = rx_queue->rx_packets;
	rx_queue->old_rx_bytes = rx_queue->rx_bytes;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Initialise XDP queue information */
	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
			      rx_queue->core_index, 0);

	if (rc) {
		netif_err(efx, rx_err, efx->net_dev,
			  "Failure to initialise XDP queue information rc=%d\n",
			  rc);
		efx->xdp_rxq_info_failed = true;
	} else {
		rx_queue->xdp_rxq_info_valid = true;
	}

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_rx_buffer *rx_buf;
	int i;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	timer_delete_sync(&rx_queue->slow_fill);
	if (rx_queue->grant_credits)
		flush_work(&rx_queue->grant_work);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned int index = i & rx_queue->ptr_mask;

			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	efx_fini_rx_recycle_ring(rx_queue);

	if (rx_queue->xdp_rxq_info_valid)
		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);

	rx_queue->xdp_rxq_info_valid = false;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

/* Unmap a DMA-mapped page. This function is only called for the final RX
 * buffer in a page.
 */
void efx_unmap_rx_buffer(struct efx_nic *efx,
			 struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);

		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
			 struct efx_rx_buffer *rx_buf,
			 unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

void efx_rx_slow_fill(struct timer_list *t)
{
	struct efx_rx_queue *rx_queue = timer_container_of(rx_queue, t,
							   slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}

/* efx_init_rx_buffers - create a batch of page-based RX buffers
 *
 * @rx_queue: Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	unsigned int page_offset, index, count;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	struct efx_rx_buffer *rx_buf;
	dma_addr_t dma_addr;
	struct page *page;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

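		/* Carve the rest of the page into rx_page_buf_step-sized
		 * buffers, each of which takes its own reference on the page.
		 */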
		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
					   EFX_XDP_HEADROOM;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align +
					      EFX_XDP_HEADROOM;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

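/* Work out how RX buffer pages will be split up: the size of each buffer
 * step (data length plus headroom and tailroom, aligned), how many buffers
 * fit in a page after the efx_rx_page_state header, and how many pages make
 * up one refill batch.
 */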
void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
				      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
				  efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

/* Pass a received packet up through GRO. GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh, __wsum csum)
{
	struct napi_struct *napi = &channel->napi_str;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH &&
	    efx_rx_buf_hash_valid(efx, eh))
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	if (csum) {
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	} else {
		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
	}
	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);

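	/* Attach each buffer's page to the skb as a fragment; the page
	 * reference held by the buffer is transferred to the skb.
	 */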
	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}

struct efx_rss_context_priv *efx_find_rss_context_entry(struct efx_nic *efx,
							 u32 id)
{
	struct ethtool_rxfh_context *ctx;

	WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));

	ctx = xa_load(&efx->net_dev->ethtool->rss_ctx, id);
	if (!ctx)
		return NULL;
	return ethtool_rxfh_context_priv(ctx);
}

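/* Fill in the default RSS indirection table, spreading entries evenly across
 * the rss_spread RX queues.
 */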
void efx_set_default_rx_indir_table(struct efx_nic *efx, u32 *indir)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); i++)
		indir[i] = ethtool_rxfh_indir_default(i, efx->rss_spread);
}

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range. Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}

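/* Filter specs are compared and hashed over the tail of the structure, from
 * vport_id to the end; the match_flags and RX/TX flags are checked
 * separately.
 */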
bool efx_filter_spec_equal(const struct efx_filter_spec *left,
			   const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

	return memcmp(&left->vport_id, &right->vport_id,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, vport_id)) == 0;
}

u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3);
	return jhash2((const u32 *)&spec->vport_id,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, vport_id)) / 4,
		      0);
}

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
			bool *force)
{
	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
		/* ARFS is currently updating this entry, leave it */
		return false;
	}
	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
		/* ARFS tried and failed to update this, so it's probably out
		 * of date. Remove the filter and the ARFS rule entry.
		 */
		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
		*force = true;
		return true;
	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
		/* ARFS has moved on, so old filter is not needed. Since we did
		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
		 * not be removed by efx_rps_hash_del() subsequently.
		 */
		*force = true;
		return true;
	}
	/* Remove it iff ARFS wants to. */
	return true;
}

static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
				       const struct efx_filter_spec *spec)
{
	u32 hash = efx_filter_spec_hash(spec);

	lockdep_assert_held(&efx->rps_hash_lock);
	if (!efx->rps_hash_table)
		return NULL;
	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}

struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec))
			return rule;
	}
	return NULL;
}

struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
				       const struct efx_filter_spec *spec,
				       bool *new)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			*new = false;
			return rule;
		}
	}
	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
	*new = true;
	if (rule) {
		memcpy(&rule->spec, spec, sizeof(rule->spec));
		hlist_add_head(&rule->node, head);
	}
	return rule;
}

void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (WARN_ON(!head))
		return;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			/* Someone already reused the entry. We know that if
			 * this check doesn't fire (i.e. filter_id == REMOVING)
			 * then the REMOVING mark was put there by our caller,
			 * because caller is holding a lock on filter table and
			 * only holders of that lock set REMOVING.
			 */
			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
				return;
			hlist_del(node);
			kfree(rule);
			return;
		}
	}
	/* We didn't find it. */
	WARN_ON(1);
}
#endif

int efx_probe_filters(struct efx_nic *efx)
{
	int rc;

	mutex_lock(&efx->mac_lock);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct efx_channel *channel;
		int i, success = 1;

		efx_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
			channel->rfs_expire_index = 0;
			channel->rfs_filter_count = 0;
		}

		if (!success) {
			efx_for_each_channel(channel, efx) {
				kfree(channel->rps_flow_id);
				channel->rps_flow_id = NULL;
			}
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}
	}
#endif
out_unlock:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		cancel_delayed_work_sync(&channel->filter_work);
		kfree(channel->rps_flow_id);
		channel->rps_flow_id = NULL;
	}
#endif
	efx->type->filter_table_remove(efx);
}

#ifdef CONFIG_RFS_ACCEL

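/* Deferred work that performs the actual hardware filter insertion for an
 * RFS steering request queued by efx_filter_rfs() below.
 */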
static void efx_filter_rfs_work(struct work_struct *data)
{
	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
							      work);
	struct efx_nic *efx = efx_netdev_priv(req->net_dev);
	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
	int slot_idx = req - efx->rps_slot;
	struct efx_arfs_rule *rule;
	u16 arfs_id = 0;
	int rc;

	rc = efx->type->filter_insert(efx, &req->spec, true);
	if (rc >= 0)
		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
		rc %= efx->type->max_rx_ip_filters;
	if (efx->rps_hash_table) {
		spin_lock_bh(&efx->rps_hash_lock);
		rule = efx_rps_hash_find(efx, &req->spec);
		/* The rule might have already gone, if someone else's request
		 * for the same spec was already worked and then expired before
		 * we got around to our work. In that case we have nothing
		 * tying us to an arfs_id, meaning that as soon as the filter
		 * is considered for expiry it will be removed.
		 */
		if (rule) {
			if (rc < 0)
				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
			else
				rule->filter_id = rc;
			arfs_id = rule->arfs_id;
		}
		spin_unlock_bh(&efx->rps_hash_lock);
	}
	if (rc >= 0) {
		/* Remember this so we can check whether to expire the filter
		 * later.
		 */
		mutex_lock(&efx->rps_mutex);
		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
			channel->rfs_filter_count++;
		channel->rps_flow_id[rc] = req->flow_id;
		mutex_unlock(&efx->rps_mutex);

		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_succeeded++;
	} else {
		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_failed++;
		/* We're overloading the NIC's filter tables, so let's do a
		 * chunk of extra expiry work.
		 */
		__efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
						     100u));
	}

	/* Release references */
	clear_bit(slot_idx, &efx->rps_slot_map);
	netdev_put(req->net_dev, &req->net_dev_tracker);
}

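/* Handle an RFS steering request from the stack (the ndo_rx_flow_steer hook).
 * This runs in atomic context, so it only records the request in a free
 * rps_slot entry and defers the actual filter insertion to
 * efx_filter_rfs_work().
 */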
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct efx_async_filter_insertion *req;
	struct efx_arfs_rule *rule;
	struct flow_keys fk;
	int slot_idx;
	bool new;
	int rc;

	/* find a free slot */
	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
			break;
	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
		return -EBUSY;

	if (flow_id == RPS_FLOW_ID_INVALID) {
		rc = -EINVAL;
		goto out_clear;
	}

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	req = efx->rps_slot + slot_idx;
	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	req->spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	req->spec.ether_type = fk.basic.n_proto;
	req->spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}

	req->spec.rem_port = fk.ports.src;
	req->spec.loc_port = fk.ports.dst;

	if (efx->rps_hash_table) {
		/* Add it to ARFS hash table */
		spin_lock(&efx->rps_hash_lock);
		rule = efx_rps_hash_add(efx, &req->spec, &new);
		if (!rule) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		if (new)
			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
		rc = rule->arfs_id;
		/* Skip if existing or pending filter already does the right thing */
		if (!new && rule->rxq_index == rxq_index &&
		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
			goto out_unlock;
		rule->rxq_index = rxq_index;
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
		spin_unlock(&efx->rps_hash_lock);
	} else {
		/* Without an ARFS hash table, we just use arfs_id 0 for all
		 * filters. This means if multiple flows hash to the same
		 * flow_id, all but the most recently touched will be eligible
		 * for expiry.
		 */
		rc = 0;
	}

	/* Queue the request */
	req->net_dev = net_dev;
	netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
	INIT_WORK(&req->work, efx_filter_rfs_work);
	req->rxq_index = rxq_index;
	req->flow_id = flow_id;
	schedule_work(&req->work);
	return rc;
out_unlock:
	spin_unlock(&efx->rps_hash_lock);
out_clear:
	clear_bit(slot_idx, &efx->rps_slot_map);
	return rc;
}

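/* Scan up to @quota entries of the channel's flow ID table and expire any
 * filters that the NIC reports as no longer in use.  Returns false, without
 * scanning, if the rps_mutex could not be taken immediately.
 */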
bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	struct efx_nic *efx = channel->efx;
	unsigned int index, size, start;
	u32 flow_id;

	if (!mutex_trylock(&efx->rps_mutex))
		return false;
	expire_one = efx->type->filter_rfs_expire_one;
	index = channel->rfs_expire_index;
	start = index;
	size = efx->type->max_rx_ip_filters;
	while (quota) {
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID) {
			quota--;
			if (expire_one(efx, flow_id, index)) {
				netif_info(efx, rx_status, efx->net_dev,
					   "expired filter %d [channel %u flow %u]\n",
					   index, channel->channel, flow_id);
				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
				channel->rfs_filter_count--;
			}
		}
		if (++index == size)
			index = 0;
		/* If we were called with a quota that exceeds the total number
		 * of filters in the table (which shouldn't happen, but could
		 * if two callers race), ensure that we don't loop forever -
		 * stop when we've examined every row of the table.
		 */
		if (index == start)
			break;
	}

	channel->rfs_expire_index = index;
	mutex_unlock(&efx->rps_mutex);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */