1 // SPDX-License-Identifier: GPL-2.0-only 2 /**************************************************************************** 3 * Driver for Solarflare network controllers and boards 4 * Copyright 2018 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11 #include "net_driver.h" 12 #include <linux/module.h> 13 #include <linux/iommu.h> 14 #include <net/rps.h> 15 #include "efx.h" 16 #include "nic.h" 17 #include "rx_common.h" 18 19 /* This is the percentage fill level below which new RX descriptors 20 * will be added to the RX descriptor ring. 21 */ 22 static unsigned int rx_refill_threshold; 23 module_param(rx_refill_threshold, uint, 0444); 24 MODULE_PARM_DESC(rx_refill_threshold, 25 "RX descriptor ring refill threshold (%)"); 26 27 /* RX maximum head room required. 28 * 29 * This must be at least 1 to prevent overflow, plus one packet-worth 30 * to allow pipelined receives. 31 */ 32 #define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS) 33 34 /* Check the RX page recycle ring for a page that can be reused. */ 35 static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) 36 { 37 struct efx_nic *efx = rx_queue->efx; 38 struct efx_rx_page_state *state; 39 unsigned int index; 40 struct page *page; 41 42 if (unlikely(!rx_queue->page_ring)) 43 return NULL; 44 index = rx_queue->page_remove & rx_queue->page_ptr_mask; 45 page = rx_queue->page_ring[index]; 46 if (page == NULL) 47 return NULL; 48 49 rx_queue->page_ring[index] = NULL; 50 /* page_remove cannot exceed page_add. */ 51 if (rx_queue->page_remove != rx_queue->page_add) 52 ++rx_queue->page_remove; 53 54 /* If page_count is 1 then we hold the only reference to this page. */ 55 if (page_count(page) == 1) { 56 ++rx_queue->page_recycle_count; 57 return page; 58 } else { 59 state = page_address(page); 60 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, 61 PAGE_SIZE << efx->rx_buffer_order, 62 DMA_FROM_DEVICE); 63 put_page(page); 64 ++rx_queue->page_recycle_failed; 65 } 66 67 return NULL; 68 } 69 70 /* Attempt to recycle the page if there is an RX recycle ring; the page can 71 * only be added if this is the final RX buffer, to prevent pages being used in 72 * the descriptor ring and appearing in the recycle ring simultaneously. 73 */ 74 static void efx_recycle_rx_page(struct efx_channel *channel, 75 struct efx_rx_buffer *rx_buf) 76 { 77 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); 78 struct efx_nic *efx = rx_queue->efx; 79 struct page *page = rx_buf->page; 80 unsigned int index; 81 82 /* Only recycle the page after processing the final buffer. */ 83 if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE)) 84 return; 85 86 index = rx_queue->page_add & rx_queue->page_ptr_mask; 87 if (rx_queue->page_ring[index] == NULL) { 88 unsigned int read_index = rx_queue->page_remove & 89 rx_queue->page_ptr_mask; 90 91 /* The next slot in the recycle ring is available, but 92 * increment page_remove if the read pointer currently 93 * points here. 94 */ 95 if (read_index == index) 96 ++rx_queue->page_remove; 97 rx_queue->page_ring[index] = page; 98 ++rx_queue->page_add; 99 return; 100 } 101 ++rx_queue->page_recycle_full; 102 efx_unmap_rx_buffer(efx, rx_buf); 103 put_page(rx_buf->page); 104 } 105 106 /* Recycle the pages that are used by buffers that have just been received. */ 107 void efx_recycle_rx_pages(struct efx_channel *channel, 108 struct efx_rx_buffer *rx_buf, 109 unsigned int n_frags) 110 { 111 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); 112 113 if (unlikely(!rx_queue->page_ring)) 114 return; 115 116 do { 117 efx_recycle_rx_page(channel, rx_buf); 118 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); 119 } while (--n_frags); 120 } 121 122 void efx_discard_rx_packet(struct efx_channel *channel, 123 struct efx_rx_buffer *rx_buf, 124 unsigned int n_frags) 125 { 126 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); 127 128 efx_recycle_rx_pages(channel, rx_buf, n_frags); 129 130 efx_free_rx_buffers(rx_queue, rx_buf, n_frags); 131 } 132 133 static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue) 134 { 135 unsigned int bufs_in_recycle_ring, page_ring_size; 136 struct efx_nic *efx = rx_queue->efx; 137 138 bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx); 139 page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring / 140 efx->rx_bufs_per_page); 141 rx_queue->page_ring = kzalloc_objs(*rx_queue->page_ring, page_ring_size); 142 if (!rx_queue->page_ring) 143 rx_queue->page_ptr_mask = 0; 144 else 145 rx_queue->page_ptr_mask = page_ring_size - 1; 146 } 147 148 static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue) 149 { 150 struct efx_nic *efx = rx_queue->efx; 151 int i; 152 153 if (unlikely(!rx_queue->page_ring)) 154 return; 155 156 /* Unmap and release the pages in the recycle ring. Remove the ring. */ 157 for (i = 0; i <= rx_queue->page_ptr_mask; i++) { 158 struct page *page = rx_queue->page_ring[i]; 159 struct efx_rx_page_state *state; 160 161 if (page == NULL) 162 continue; 163 164 state = page_address(page); 165 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, 166 PAGE_SIZE << efx->rx_buffer_order, 167 DMA_FROM_DEVICE); 168 put_page(page); 169 } 170 kfree(rx_queue->page_ring); 171 rx_queue->page_ring = NULL; 172 } 173 174 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, 175 struct efx_rx_buffer *rx_buf) 176 { 177 /* Release the page reference we hold for the buffer. */ 178 if (rx_buf->page) 179 put_page(rx_buf->page); 180 181 /* If this is the last buffer in a page, unmap and free it. */ 182 if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { 183 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); 184 efx_free_rx_buffers(rx_queue, rx_buf, 1); 185 } 186 rx_buf->page = NULL; 187 } 188 189 int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) 190 { 191 struct efx_nic *efx = rx_queue->efx; 192 unsigned int entries; 193 int rc; 194 195 /* Create the smallest power-of-two aligned ring */ 196 entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE); 197 EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); 198 rx_queue->ptr_mask = entries - 1; 199 200 netif_dbg(efx, probe, efx->net_dev, 201 "creating RX queue %d size %#x mask %#x\n", 202 efx_rx_queue_index(rx_queue), efx->rxq_entries, 203 rx_queue->ptr_mask); 204 205 /* Allocate RX buffers */ 206 rx_queue->buffer = kzalloc_objs(*rx_queue->buffer, entries); 207 if (!rx_queue->buffer) 208 return -ENOMEM; 209 210 rc = efx_nic_probe_rx(rx_queue); 211 if (rc) { 212 kfree(rx_queue->buffer); 213 rx_queue->buffer = NULL; 214 } 215 216 return rc; 217 } 218 219 void efx_init_rx_queue(struct efx_rx_queue *rx_queue) 220 { 221 unsigned int max_fill, trigger, max_trigger; 222 struct efx_nic *efx = rx_queue->efx; 223 int rc = 0; 224 225 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 226 "initialising RX queue %d\n", efx_rx_queue_index(rx_queue)); 227 228 /* Initialise ptr fields */ 229 rx_queue->added_count = 0; 230 rx_queue->notified_count = 0; 231 rx_queue->granted_count = 0; 232 rx_queue->removed_count = 0; 233 rx_queue->min_fill = -1U; 234 efx_init_rx_recycle_ring(rx_queue); 235 236 rx_queue->page_remove = 0; 237 rx_queue->page_add = rx_queue->page_ptr_mask + 1; 238 rx_queue->page_recycle_count = 0; 239 rx_queue->page_recycle_failed = 0; 240 rx_queue->page_recycle_full = 0; 241 242 rx_queue->old_rx_packets = rx_queue->rx_packets; 243 rx_queue->old_rx_bytes = rx_queue->rx_bytes; 244 245 /* Initialise limit fields */ 246 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; 247 max_trigger = 248 max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page; 249 if (rx_refill_threshold != 0) { 250 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; 251 if (trigger > max_trigger) 252 trigger = max_trigger; 253 } else { 254 trigger = max_trigger; 255 } 256 257 rx_queue->max_fill = max_fill; 258 rx_queue->fast_fill_trigger = trigger; 259 rx_queue->refill_enabled = true; 260 261 /* Initialise XDP queue information */ 262 rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev, 263 rx_queue->core_index, 0); 264 265 if (rc) { 266 netif_err(efx, rx_err, efx->net_dev, 267 "Failure to initialise XDP queue information rc=%d\n", 268 rc); 269 efx->xdp_rxq_info_failed = true; 270 } 271 272 /* Set up RX descriptor ring */ 273 efx_nic_init_rx(rx_queue); 274 } 275 276 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) 277 { 278 struct efx_rx_buffer *rx_buf; 279 int i; 280 281 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 282 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); 283 284 timer_delete_sync(&rx_queue->slow_fill); 285 if (rx_queue->grant_credits) 286 flush_work(&rx_queue->grant_work); 287 288 /* Release RX buffers from the current read ptr to the write ptr */ 289 if (rx_queue->buffer) { 290 for (i = rx_queue->removed_count; i < rx_queue->added_count; 291 i++) { 292 unsigned int index = i & rx_queue->ptr_mask; 293 294 rx_buf = efx_rx_buffer(rx_queue, index); 295 efx_fini_rx_buffer(rx_queue, rx_buf); 296 } 297 } 298 299 efx_fini_rx_recycle_ring(rx_queue); 300 301 if (xdp_rxq_info_is_reg(&rx_queue->xdp_rxq_info)) 302 xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info); 303 } 304 305 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) 306 { 307 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 308 "destroying RX queue %d\n", efx_rx_queue_index(rx_queue)); 309 310 efx_nic_remove_rx(rx_queue); 311 312 kfree(rx_queue->buffer); 313 rx_queue->buffer = NULL; 314 } 315 316 /* Unmap a DMA-mapped page. This function is only called for the final RX 317 * buffer in a page. 318 */ 319 void efx_unmap_rx_buffer(struct efx_nic *efx, 320 struct efx_rx_buffer *rx_buf) 321 { 322 struct page *page = rx_buf->page; 323 324 if (page) { 325 struct efx_rx_page_state *state = page_address(page); 326 327 dma_unmap_page(&efx->pci_dev->dev, 328 state->dma_addr, 329 PAGE_SIZE << efx->rx_buffer_order, 330 DMA_FROM_DEVICE); 331 } 332 } 333 334 void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, 335 struct efx_rx_buffer *rx_buf, 336 unsigned int num_bufs) 337 { 338 do { 339 if (rx_buf->page) { 340 put_page(rx_buf->page); 341 rx_buf->page = NULL; 342 } 343 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); 344 } while (--num_bufs); 345 } 346 347 void efx_rx_slow_fill(struct timer_list *t) 348 { 349 struct efx_rx_queue *rx_queue = timer_container_of(rx_queue, t, 350 slow_fill); 351 352 /* Post an event to cause NAPI to run and refill the queue */ 353 efx_nic_generate_fill_event(rx_queue); 354 ++rx_queue->slow_fill_count; 355 } 356 357 void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) 358 { 359 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10)); 360 } 361 362 /* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers 363 * 364 * @rx_queue: Efx RX queue 365 * 366 * This allocates a batch of pages, maps them for DMA, and populates 367 * struct efx_rx_buffers for each one. Return a negative error code or 368 * 0 on success. If a single page can be used for multiple buffers, 369 * then the page will either be inserted fully, or not at all. 370 */ 371 static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) 372 { 373 unsigned int page_offset, index, count; 374 struct efx_nic *efx = rx_queue->efx; 375 struct efx_rx_page_state *state; 376 struct efx_rx_buffer *rx_buf; 377 dma_addr_t dma_addr; 378 struct page *page; 379 380 count = 0; 381 do { 382 page = efx_reuse_page(rx_queue); 383 if (page == NULL) { 384 page = alloc_pages(__GFP_COMP | 385 (atomic ? GFP_ATOMIC : GFP_KERNEL), 386 efx->rx_buffer_order); 387 if (unlikely(page == NULL)) 388 return -ENOMEM; 389 dma_addr = 390 dma_map_page(&efx->pci_dev->dev, page, 0, 391 PAGE_SIZE << efx->rx_buffer_order, 392 DMA_FROM_DEVICE); 393 if (unlikely(dma_mapping_error(&efx->pci_dev->dev, 394 dma_addr))) { 395 __free_pages(page, efx->rx_buffer_order); 396 return -EIO; 397 } 398 state = page_address(page); 399 state->dma_addr = dma_addr; 400 } else { 401 state = page_address(page); 402 dma_addr = state->dma_addr; 403 } 404 405 dma_addr += sizeof(struct efx_rx_page_state); 406 page_offset = sizeof(struct efx_rx_page_state); 407 408 do { 409 index = rx_queue->added_count & rx_queue->ptr_mask; 410 rx_buf = efx_rx_buffer(rx_queue, index); 411 rx_buf->dma_addr = dma_addr + efx->rx_ip_align + 412 EFX_XDP_HEADROOM; 413 rx_buf->page = page; 414 rx_buf->page_offset = page_offset + efx->rx_ip_align + 415 EFX_XDP_HEADROOM; 416 rx_buf->len = efx->rx_dma_len; 417 rx_buf->flags = 0; 418 ++rx_queue->added_count; 419 get_page(page); 420 dma_addr += efx->rx_page_buf_step; 421 page_offset += efx->rx_page_buf_step; 422 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE); 423 424 rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE; 425 } while (++count < efx->rx_pages_per_batch); 426 427 return 0; 428 } 429 430 void efx_rx_config_page_split(struct efx_nic *efx) 431 { 432 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align + 433 EFX_XDP_HEADROOM + EFX_XDP_TAILROOM, 434 EFX_RX_BUF_ALIGNMENT); 435 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : 436 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / 437 efx->rx_page_buf_step); 438 efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) / 439 efx->rx_bufs_per_page; 440 efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, 441 efx->rx_bufs_per_page); 442 } 443 444 /* efx_fast_push_rx_descriptors - push new RX descriptors quickly 445 * @rx_queue: RX descriptor queue 446 * 447 * This will aim to fill the RX descriptor queue up to 448 * @rx_queue->@max_fill. If there is insufficient atomic 449 * memory to do so, a slow fill will be scheduled. 450 * 451 * The caller must provide serialisation (none is used here). In practise, 452 * this means this function must run from the NAPI handler, or be called 453 * when NAPI is disabled. 454 */ 455 void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) 456 { 457 struct efx_nic *efx = rx_queue->efx; 458 unsigned int fill_level, batch_size; 459 int space, rc = 0; 460 461 if (!rx_queue->refill_enabled) 462 return; 463 464 /* Calculate current fill level, and exit if we don't need to fill */ 465 fill_level = (rx_queue->added_count - rx_queue->removed_count); 466 EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries); 467 if (fill_level >= rx_queue->fast_fill_trigger) 468 goto out; 469 470 /* Record minimum fill level */ 471 if (unlikely(fill_level < rx_queue->min_fill)) { 472 if (fill_level) 473 rx_queue->min_fill = fill_level; 474 } 475 476 batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page; 477 space = rx_queue->max_fill - fill_level; 478 EFX_WARN_ON_ONCE_PARANOID(space < batch_size); 479 480 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, 481 "RX queue %d fast-filling descriptor ring from" 482 " level %d to level %d\n", 483 efx_rx_queue_index(rx_queue), fill_level, 484 rx_queue->max_fill); 485 486 do { 487 rc = efx_init_rx_buffers(rx_queue, atomic); 488 if (unlikely(rc)) { 489 /* Ensure that we don't leave the rx queue empty */ 490 efx_schedule_slow_fill(rx_queue); 491 goto out; 492 } 493 } while ((space -= batch_size) >= batch_size); 494 495 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, 496 "RX queue %d fast-filled descriptor ring " 497 "to level %d\n", efx_rx_queue_index(rx_queue), 498 rx_queue->added_count - rx_queue->removed_count); 499 500 out: 501 if (rx_queue->notified_count != rx_queue->added_count) 502 efx_nic_notify_rx_desc(rx_queue); 503 } 504 505 /* Pass a received packet up through GRO. GRO can handle pages 506 * regardless of checksum state and skbs with a good checksum. 507 */ 508 void 509 efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, 510 unsigned int n_frags, u8 *eh, __wsum csum) 511 { 512 struct napi_struct *napi = &channel->napi_str; 513 struct efx_nic *efx = channel->efx; 514 struct sk_buff *skb; 515 516 skb = napi_get_frags(napi); 517 if (unlikely(!skb)) { 518 struct efx_rx_queue *rx_queue; 519 520 rx_queue = efx_channel_get_rx_queue(channel); 521 efx_free_rx_buffers(rx_queue, rx_buf, n_frags); 522 return; 523 } 524 525 if (efx->net_dev->features & NETIF_F_RXHASH && 526 efx_rx_buf_hash_valid(efx, eh)) 527 skb_set_hash(skb, efx_rx_buf_hash(efx, eh), 528 PKT_HASH_TYPE_L3); 529 if (csum) { 530 skb->csum = csum; 531 skb->ip_summed = CHECKSUM_COMPLETE; 532 } else { 533 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? 534 CHECKSUM_UNNECESSARY : CHECKSUM_NONE); 535 } 536 skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); 537 538 for (;;) { 539 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 540 rx_buf->page, rx_buf->page_offset, 541 rx_buf->len); 542 rx_buf->page = NULL; 543 skb->len += rx_buf->len; 544 if (skb_shinfo(skb)->nr_frags == n_frags) 545 break; 546 547 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); 548 } 549 550 skb->data_len = skb->len; 551 skb->truesize += n_frags * efx->rx_buffer_truesize; 552 553 skb_record_rx_queue(skb, channel->rx_queue.core_index); 554 555 napi_gro_frags(napi); 556 } 557 558 struct efx_rss_context_priv *efx_find_rss_context_entry(struct efx_nic *efx, 559 u32 id) 560 { 561 struct ethtool_rxfh_context *ctx; 562 563 WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock)); 564 565 ctx = xa_load(&efx->net_dev->ethtool->rss_ctx, id); 566 if (!ctx) 567 return NULL; 568 return ethtool_rxfh_context_priv(ctx); 569 } 570 571 void efx_set_default_rx_indir_table(struct efx_nic *efx, u32 *indir) 572 { 573 size_t i; 574 575 for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); i++) 576 indir[i] = ethtool_rxfh_indir_default(i, efx->rss_spread); 577 } 578 579 /** 580 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient 581 * @spec: Specification to test 582 * 583 * Return: %true if the specification is a non-drop RX filter that 584 * matches a local MAC address I/G bit value of 1 or matches a local 585 * IPv4 or IPv6 address value in the respective multicast address 586 * range. Otherwise %false. 587 */ 588 bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec) 589 { 590 if (!(spec->flags & EFX_FILTER_FLAG_RX) || 591 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) 592 return false; 593 594 if (spec->match_flags & 595 (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) && 596 is_multicast_ether_addr(spec->loc_mac)) 597 return true; 598 599 if ((spec->match_flags & 600 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == 601 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { 602 if (spec->ether_type == htons(ETH_P_IP) && 603 ipv4_is_multicast(spec->loc_host[0])) 604 return true; 605 if (spec->ether_type == htons(ETH_P_IPV6) && 606 ((const u8 *)spec->loc_host)[0] == 0xff) 607 return true; 608 } 609 610 return false; 611 } 612 613 bool efx_filter_spec_equal(const struct efx_filter_spec *left, 614 const struct efx_filter_spec *right) 615 { 616 if ((left->match_flags ^ right->match_flags) | 617 ((left->flags ^ right->flags) & 618 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) 619 return false; 620 621 return memcmp(&left->vport_id, &right->vport_id, 622 sizeof(struct efx_filter_spec) - 623 offsetof(struct efx_filter_spec, vport_id)) == 0; 624 } 625 626 u32 efx_filter_spec_hash(const struct efx_filter_spec *spec) 627 { 628 BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3); 629 return jhash2((const u32 *)&spec->vport_id, 630 (sizeof(struct efx_filter_spec) - 631 offsetof(struct efx_filter_spec, vport_id)) / 4, 632 0); 633 } 634 635 #ifdef CONFIG_RFS_ACCEL 636 bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, 637 bool *force) 638 { 639 if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) { 640 /* ARFS is currently updating this entry, leave it */ 641 return false; 642 } 643 if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) { 644 /* ARFS tried and failed to update this, so it's probably out 645 * of date. Remove the filter and the ARFS rule entry. 646 */ 647 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; 648 *force = true; 649 return true; 650 } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */ 651 /* ARFS has moved on, so old filter is not needed. Since we did 652 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will 653 * not be removed by efx_rps_hash_del() subsequently. 654 */ 655 *force = true; 656 return true; 657 } 658 /* Remove it iff ARFS wants to. */ 659 return true; 660 } 661 662 static 663 struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx, 664 const struct efx_filter_spec *spec) 665 { 666 u32 hash = efx_filter_spec_hash(spec); 667 668 lockdep_assert_held(&efx->rps_hash_lock); 669 if (!efx->rps_hash_table) 670 return NULL; 671 return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; 672 } 673 674 struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, 675 const struct efx_filter_spec *spec) 676 { 677 struct efx_arfs_rule *rule; 678 struct hlist_head *head; 679 struct hlist_node *node; 680 681 head = efx_rps_hash_bucket(efx, spec); 682 if (!head) 683 return NULL; 684 hlist_for_each(node, head) { 685 rule = container_of(node, struct efx_arfs_rule, node); 686 if (efx_filter_spec_equal(spec, &rule->spec)) 687 return rule; 688 } 689 return NULL; 690 } 691 692 struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, 693 const struct efx_filter_spec *spec, 694 bool *new) 695 { 696 struct efx_arfs_rule *rule; 697 struct hlist_head *head; 698 struct hlist_node *node; 699 700 head = efx_rps_hash_bucket(efx, spec); 701 if (!head) 702 return NULL; 703 hlist_for_each(node, head) { 704 rule = container_of(node, struct efx_arfs_rule, node); 705 if (efx_filter_spec_equal(spec, &rule->spec)) { 706 *new = false; 707 return rule; 708 } 709 } 710 rule = kmalloc_obj(*rule, GFP_ATOMIC); 711 *new = true; 712 if (rule) { 713 memcpy(&rule->spec, spec, sizeof(rule->spec)); 714 hlist_add_head(&rule->node, head); 715 } 716 return rule; 717 } 718 719 void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec) 720 { 721 struct efx_arfs_rule *rule; 722 struct hlist_head *head; 723 struct hlist_node *node; 724 725 head = efx_rps_hash_bucket(efx, spec); 726 if (WARN_ON(!head)) 727 return; 728 hlist_for_each(node, head) { 729 rule = container_of(node, struct efx_arfs_rule, node); 730 if (efx_filter_spec_equal(spec, &rule->spec)) { 731 /* Someone already reused the entry. We know that if 732 * this check doesn't fire (i.e. filter_id == REMOVING) 733 * then the REMOVING mark was put there by our caller, 734 * because caller is holding a lock on filter table and 735 * only holders of that lock set REMOVING. 736 */ 737 if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING) 738 return; 739 hlist_del(node); 740 kfree(rule); 741 return; 742 } 743 } 744 /* We didn't find it. */ 745 WARN_ON(1); 746 } 747 #endif 748 749 int efx_probe_filters(struct efx_nic *efx) 750 { 751 int rc; 752 753 mutex_lock(&efx->mac_lock); 754 rc = efx->type->filter_table_probe(efx); 755 if (rc) 756 goto out_unlock; 757 758 #ifdef CONFIG_RFS_ACCEL 759 if (efx->type->offload_features & NETIF_F_NTUPLE) { 760 struct efx_channel *channel; 761 int i, success = 1; 762 763 efx_for_each_channel(channel, efx) { 764 channel->rps_flow_id = 765 kcalloc(efx->type->max_rx_ip_filters, 766 sizeof(*channel->rps_flow_id), 767 GFP_KERNEL); 768 if (!channel->rps_flow_id) 769 success = 0; 770 else 771 for (i = 0; 772 i < efx->type->max_rx_ip_filters; 773 ++i) 774 channel->rps_flow_id[i] = 775 RPS_FLOW_ID_INVALID; 776 channel->rfs_expire_index = 0; 777 channel->rfs_filter_count = 0; 778 } 779 780 if (!success) { 781 efx_for_each_channel(channel, efx) { 782 kfree(channel->rps_flow_id); 783 channel->rps_flow_id = NULL; 784 } 785 efx->type->filter_table_remove(efx); 786 rc = -ENOMEM; 787 goto out_unlock; 788 } 789 } 790 #endif 791 out_unlock: 792 mutex_unlock(&efx->mac_lock); 793 return rc; 794 } 795 796 void efx_remove_filters(struct efx_nic *efx) 797 { 798 #ifdef CONFIG_RFS_ACCEL 799 struct efx_channel *channel; 800 801 efx_for_each_channel(channel, efx) { 802 cancel_delayed_work_sync(&channel->filter_work); 803 kfree(channel->rps_flow_id); 804 channel->rps_flow_id = NULL; 805 } 806 #endif 807 efx->type->filter_table_remove(efx); 808 } 809 810 #ifdef CONFIG_RFS_ACCEL 811 812 static void efx_filter_rfs_work(struct work_struct *data) 813 { 814 struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, 815 work); 816 struct efx_nic *efx = efx_netdev_priv(req->net_dev); 817 struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); 818 int slot_idx = req - efx->rps_slot; 819 struct efx_arfs_rule *rule; 820 u16 arfs_id = 0; 821 int rc; 822 823 rc = efx->type->filter_insert(efx, &req->spec, true); 824 if (rc >= 0) 825 /* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */ 826 rc %= efx->type->max_rx_ip_filters; 827 if (efx->rps_hash_table) { 828 spin_lock_bh(&efx->rps_hash_lock); 829 rule = efx_rps_hash_find(efx, &req->spec); 830 /* The rule might have already gone, if someone else's request 831 * for the same spec was already worked and then expired before 832 * we got around to our work. In that case we have nothing 833 * tying us to an arfs_id, meaning that as soon as the filter 834 * is considered for expiry it will be removed. 835 */ 836 if (rule) { 837 if (rc < 0) 838 rule->filter_id = EFX_ARFS_FILTER_ID_ERROR; 839 else 840 rule->filter_id = rc; 841 arfs_id = rule->arfs_id; 842 } 843 spin_unlock_bh(&efx->rps_hash_lock); 844 } 845 if (rc >= 0) { 846 /* Remember this so we can check whether to expire the filter 847 * later. 848 */ 849 mutex_lock(&efx->rps_mutex); 850 if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID) 851 channel->rfs_filter_count++; 852 channel->rps_flow_id[rc] = req->flow_id; 853 mutex_unlock(&efx->rps_mutex); 854 855 if (req->spec.ether_type == htons(ETH_P_IP)) 856 netif_info(efx, rx_status, efx->net_dev, 857 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n", 858 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 859 req->spec.rem_host, ntohs(req->spec.rem_port), 860 req->spec.loc_host, ntohs(req->spec.loc_port), 861 req->rxq_index, req->flow_id, rc, arfs_id); 862 else 863 netif_info(efx, rx_status, efx->net_dev, 864 "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n", 865 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 866 req->spec.rem_host, ntohs(req->spec.rem_port), 867 req->spec.loc_host, ntohs(req->spec.loc_port), 868 req->rxq_index, req->flow_id, rc, arfs_id); 869 channel->n_rfs_succeeded++; 870 } else { 871 if (req->spec.ether_type == htons(ETH_P_IP)) 872 netif_dbg(efx, rx_status, efx->net_dev, 873 "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n", 874 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 875 req->spec.rem_host, ntohs(req->spec.rem_port), 876 req->spec.loc_host, ntohs(req->spec.loc_port), 877 req->rxq_index, req->flow_id, rc, arfs_id); 878 else 879 netif_dbg(efx, rx_status, efx->net_dev, 880 "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n", 881 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 882 req->spec.rem_host, ntohs(req->spec.rem_port), 883 req->spec.loc_host, ntohs(req->spec.loc_port), 884 req->rxq_index, req->flow_id, rc, arfs_id); 885 channel->n_rfs_failed++; 886 /* We're overloading the NIC's filter tables, so let's do a 887 * chunk of extra expiry work. 888 */ 889 __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, 890 100u)); 891 } 892 893 /* Release references */ 894 clear_bit(slot_idx, &efx->rps_slot_map); 895 netdev_put(req->net_dev, &req->net_dev_tracker); 896 } 897 898 int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, 899 u16 rxq_index, u32 flow_id) 900 { 901 struct efx_nic *efx = efx_netdev_priv(net_dev); 902 struct efx_async_filter_insertion *req; 903 struct efx_arfs_rule *rule; 904 struct flow_keys fk; 905 int slot_idx; 906 bool new; 907 int rc; 908 909 /* find a free slot */ 910 for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++) 911 if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) 912 break; 913 if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT) 914 return -EBUSY; 915 916 if (flow_id == RPS_FLOW_ID_INVALID) { 917 rc = -EINVAL; 918 goto out_clear; 919 } 920 921 if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) { 922 rc = -EPROTONOSUPPORT; 923 goto out_clear; 924 } 925 926 if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) { 927 rc = -EPROTONOSUPPORT; 928 goto out_clear; 929 } 930 if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) { 931 rc = -EPROTONOSUPPORT; 932 goto out_clear; 933 } 934 935 req = efx->rps_slot + slot_idx; 936 efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, 937 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, 938 rxq_index); 939 req->spec.match_flags = 940 EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | 941 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | 942 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; 943 req->spec.ether_type = fk.basic.n_proto; 944 req->spec.ip_proto = fk.basic.ip_proto; 945 946 if (fk.basic.n_proto == htons(ETH_P_IP)) { 947 req->spec.rem_host[0] = fk.addrs.v4addrs.src; 948 req->spec.loc_host[0] = fk.addrs.v4addrs.dst; 949 } else { 950 memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src, 951 sizeof(struct in6_addr)); 952 memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst, 953 sizeof(struct in6_addr)); 954 } 955 956 req->spec.rem_port = fk.ports.src; 957 req->spec.loc_port = fk.ports.dst; 958 959 if (efx->rps_hash_table) { 960 /* Add it to ARFS hash table */ 961 spin_lock(&efx->rps_hash_lock); 962 rule = efx_rps_hash_add(efx, &req->spec, &new); 963 if (!rule) { 964 rc = -ENOMEM; 965 goto out_unlock; 966 } 967 if (new) 968 rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER; 969 rc = rule->arfs_id; 970 /* Skip if existing or pending filter already does the right thing */ 971 if (!new && rule->rxq_index == rxq_index && 972 rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING) 973 goto out_unlock; 974 rule->rxq_index = rxq_index; 975 rule->filter_id = EFX_ARFS_FILTER_ID_PENDING; 976 spin_unlock(&efx->rps_hash_lock); 977 } else { 978 /* Without an ARFS hash table, we just use arfs_id 0 for all 979 * filters. This means if multiple flows hash to the same 980 * flow_id, all but the most recently touched will be eligible 981 * for expiry. 982 */ 983 rc = 0; 984 } 985 986 /* Queue the request */ 987 req->net_dev = net_dev; 988 netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC); 989 INIT_WORK(&req->work, efx_filter_rfs_work); 990 req->rxq_index = rxq_index; 991 req->flow_id = flow_id; 992 schedule_work(&req->work); 993 return rc; 994 out_unlock: 995 spin_unlock(&efx->rps_hash_lock); 996 out_clear: 997 clear_bit(slot_idx, &efx->rps_slot_map); 998 return rc; 999 } 1000 1001 bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota) 1002 { 1003 bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); 1004 struct efx_nic *efx = channel->efx; 1005 unsigned int index, size, start; 1006 u32 flow_id; 1007 1008 if (!mutex_trylock(&efx->rps_mutex)) 1009 return false; 1010 expire_one = efx->type->filter_rfs_expire_one; 1011 index = channel->rfs_expire_index; 1012 start = index; 1013 size = efx->type->max_rx_ip_filters; 1014 while (quota) { 1015 flow_id = channel->rps_flow_id[index]; 1016 1017 if (flow_id != RPS_FLOW_ID_INVALID) { 1018 quota--; 1019 if (expire_one(efx, flow_id, index)) { 1020 netif_info(efx, rx_status, efx->net_dev, 1021 "expired filter %d [channel %u flow %u]\n", 1022 index, channel->channel, flow_id); 1023 channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; 1024 channel->rfs_filter_count--; 1025 } 1026 } 1027 if (++index == size) 1028 index = 0; 1029 /* If we were called with a quota that exceeds the total number 1030 * of filters in the table (which shouldn't happen, but could 1031 * if two callers race), ensure that we don't loop forever - 1032 * stop when we've examined every row of the table. 1033 */ 1034 if (index == start) 1035 break; 1036 } 1037 1038 channel->rfs_expire_index = index; 1039 mutex_unlock(&efx->rps_mutex); 1040 return true; 1041 } 1042 1043 #endif /* CONFIG_RFS_ACCEL */ 1044