1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Intel Corporation. */ 3 4 /* The driver transmit and receive code */ 5 6 #include <linux/prefetch.h> 7 #include <linux/mm.h> 8 #include <linux/bpf_trace.h> 9 #include <net/xdp.h> 10 #include "ice_txrx_lib.h" 11 #include "ice_lib.h" 12 #include "ice.h" 13 #include "ice_dcb_lib.h" 14 #include "ice_xsk.h" 15 16 #define ICE_RX_HDR_SIZE 256 17 18 /** 19 * ice_unmap_and_free_tx_buf - Release a Tx buffer 20 * @ring: the ring that owns the buffer 21 * @tx_buf: the buffer to free 22 */ 23 static void 24 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf) 25 { 26 if (tx_buf->skb) { 27 if (ice_ring_is_xdp(ring)) 28 page_frag_free(tx_buf->raw_buf); 29 else 30 dev_kfree_skb_any(tx_buf->skb); 31 if (dma_unmap_len(tx_buf, len)) 32 dma_unmap_single(ring->dev, 33 dma_unmap_addr(tx_buf, dma), 34 dma_unmap_len(tx_buf, len), 35 DMA_TO_DEVICE); 36 } else if (dma_unmap_len(tx_buf, len)) { 37 dma_unmap_page(ring->dev, 38 dma_unmap_addr(tx_buf, dma), 39 dma_unmap_len(tx_buf, len), 40 DMA_TO_DEVICE); 41 } 42 43 tx_buf->next_to_watch = NULL; 44 tx_buf->skb = NULL; 45 dma_unmap_len_set(tx_buf, len, 0); 46 /* tx_buf must be completely set up in the transmit path */ 47 } 48 49 static struct netdev_queue *txring_txq(const struct ice_ring *ring) 50 { 51 return netdev_get_tx_queue(ring->netdev, ring->q_index); 52 } 53 54 /** 55 * ice_clean_tx_ring - Free any empty Tx buffers 56 * @tx_ring: ring to be cleaned 57 */ 58 void ice_clean_tx_ring(struct ice_ring *tx_ring) 59 { 60 u16 i; 61 62 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) { 63 ice_xsk_clean_xdp_ring(tx_ring); 64 goto tx_skip_free; 65 } 66 67 /* ring already cleared, nothing to do */ 68 if (!tx_ring->tx_buf) 69 return; 70 71 /* Free all the Tx ring sk_buffs */ 72 for (i = 0; i < tx_ring->count; i++) 73 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); 74 75 tx_skip_free: 76 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count); 77 78 /* Zero out the descriptor ring */ 79 memset(tx_ring->desc, 0, tx_ring->size); 80 81 tx_ring->next_to_use = 0; 82 tx_ring->next_to_clean = 0; 83 84 if (!tx_ring->netdev) 85 return; 86 87 /* cleanup Tx queue statistics */ 88 netdev_tx_reset_queue(txring_txq(tx_ring)); 89 } 90 91 /** 92 * ice_free_tx_ring - Free Tx resources per queue 93 * @tx_ring: Tx descriptor ring for a specific queue 94 * 95 * Free all transmit software resources 96 */ 97 void ice_free_tx_ring(struct ice_ring *tx_ring) 98 { 99 ice_clean_tx_ring(tx_ring); 100 devm_kfree(tx_ring->dev, tx_ring->tx_buf); 101 tx_ring->tx_buf = NULL; 102 103 if (tx_ring->desc) { 104 dmam_free_coherent(tx_ring->dev, tx_ring->size, 105 tx_ring->desc, tx_ring->dma); 106 tx_ring->desc = NULL; 107 } 108 } 109 110 /** 111 * ice_clean_tx_irq - Reclaim resources after transmit completes 112 * @tx_ring: Tx ring to clean 113 * @napi_budget: Used to determine if we are in netpoll 114 * 115 * Returns true if there's any budget left (e.g. the clean is finished) 116 */ 117 static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) 118 { 119 unsigned int total_bytes = 0, total_pkts = 0; 120 unsigned int budget = ICE_DFLT_IRQ_WORK; 121 struct ice_vsi *vsi = tx_ring->vsi; 122 s16 i = tx_ring->next_to_clean; 123 struct ice_tx_desc *tx_desc; 124 struct ice_tx_buf *tx_buf; 125 126 tx_buf = &tx_ring->tx_buf[i]; 127 tx_desc = ICE_TX_DESC(tx_ring, i); 128 i -= tx_ring->count; 129 130 prefetch(&vsi->state); 131 132 do { 133 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; 134 135 /* if next_to_watch is not set then there is no work pending */ 136 if (!eop_desc) 137 break; 138 139 smp_rmb(); /* prevent any other reads prior to eop_desc */ 140 141 /* if the descriptor isn't done, no work yet to do */ 142 if (!(eop_desc->cmd_type_offset_bsz & 143 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) 144 break; 145 146 /* clear next_to_watch to prevent false hangs */ 147 tx_buf->next_to_watch = NULL; 148 149 /* update the statistics for this packet */ 150 total_bytes += tx_buf->bytecount; 151 total_pkts += tx_buf->gso_segs; 152 153 if (ice_ring_is_xdp(tx_ring)) 154 page_frag_free(tx_buf->raw_buf); 155 else 156 /* free the skb */ 157 napi_consume_skb(tx_buf->skb, napi_budget); 158 159 /* unmap skb header data */ 160 dma_unmap_single(tx_ring->dev, 161 dma_unmap_addr(tx_buf, dma), 162 dma_unmap_len(tx_buf, len), 163 DMA_TO_DEVICE); 164 165 /* clear tx_buf data */ 166 tx_buf->skb = NULL; 167 dma_unmap_len_set(tx_buf, len, 0); 168 169 /* unmap remaining buffers */ 170 while (tx_desc != eop_desc) { 171 tx_buf++; 172 tx_desc++; 173 i++; 174 if (unlikely(!i)) { 175 i -= tx_ring->count; 176 tx_buf = tx_ring->tx_buf; 177 tx_desc = ICE_TX_DESC(tx_ring, 0); 178 } 179 180 /* unmap any remaining paged data */ 181 if (dma_unmap_len(tx_buf, len)) { 182 dma_unmap_page(tx_ring->dev, 183 dma_unmap_addr(tx_buf, dma), 184 dma_unmap_len(tx_buf, len), 185 DMA_TO_DEVICE); 186 dma_unmap_len_set(tx_buf, len, 0); 187 } 188 } 189 190 /* move us one more past the eop_desc for start of next pkt */ 191 tx_buf++; 192 tx_desc++; 193 i++; 194 if (unlikely(!i)) { 195 i -= tx_ring->count; 196 tx_buf = tx_ring->tx_buf; 197 tx_desc = ICE_TX_DESC(tx_ring, 0); 198 } 199 200 prefetch(tx_desc); 201 202 /* update budget accounting */ 203 budget--; 204 } while (likely(budget)); 205 206 i += tx_ring->count; 207 tx_ring->next_to_clean = i; 208 209 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes); 210 211 if (ice_ring_is_xdp(tx_ring)) 212 return !!budget; 213 214 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, 215 total_bytes); 216 217 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) 218 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && 219 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 220 /* Make sure that anybody stopping the queue after this 221 * sees the new next_to_clean. 222 */ 223 smp_mb(); 224 if (__netif_subqueue_stopped(tx_ring->netdev, 225 tx_ring->q_index) && 226 !test_bit(__ICE_DOWN, vsi->state)) { 227 netif_wake_subqueue(tx_ring->netdev, 228 tx_ring->q_index); 229 ++tx_ring->tx_stats.restart_q; 230 } 231 } 232 233 return !!budget; 234 } 235 236 /** 237 * ice_setup_tx_ring - Allocate the Tx descriptors 238 * @tx_ring: the Tx ring to set up 239 * 240 * Return 0 on success, negative on error 241 */ 242 int ice_setup_tx_ring(struct ice_ring *tx_ring) 243 { 244 struct device *dev = tx_ring->dev; 245 246 if (!dev) 247 return -ENOMEM; 248 249 /* warn if we are about to overwrite the pointer */ 250 WARN_ON(tx_ring->tx_buf); 251 tx_ring->tx_buf = 252 devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count, 253 GFP_KERNEL); 254 if (!tx_ring->tx_buf) 255 return -ENOMEM; 256 257 /* round up to nearest page */ 258 tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), 259 PAGE_SIZE); 260 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, 261 GFP_KERNEL); 262 if (!tx_ring->desc) { 263 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", 264 tx_ring->size); 265 goto err; 266 } 267 268 tx_ring->next_to_use = 0; 269 tx_ring->next_to_clean = 0; 270 tx_ring->tx_stats.prev_pkt = -1; 271 return 0; 272 273 err: 274 devm_kfree(dev, tx_ring->tx_buf); 275 tx_ring->tx_buf = NULL; 276 return -ENOMEM; 277 } 278 279 /** 280 * ice_clean_rx_ring - Free Rx buffers 281 * @rx_ring: ring to be cleaned 282 */ 283 void ice_clean_rx_ring(struct ice_ring *rx_ring) 284 { 285 struct device *dev = rx_ring->dev; 286 u16 i; 287 288 /* ring already cleared, nothing to do */ 289 if (!rx_ring->rx_buf) 290 return; 291 292 if (rx_ring->xsk_umem) { 293 ice_xsk_clean_rx_ring(rx_ring); 294 goto rx_skip_free; 295 } 296 297 /* Free all the Rx ring sk_buffs */ 298 for (i = 0; i < rx_ring->count; i++) { 299 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; 300 301 if (rx_buf->skb) { 302 dev_kfree_skb(rx_buf->skb); 303 rx_buf->skb = NULL; 304 } 305 if (!rx_buf->page) 306 continue; 307 308 /* Invalidate cache lines that may have been written to by 309 * device so that we avoid corrupting memory. 310 */ 311 dma_sync_single_range_for_cpu(dev, rx_buf->dma, 312 rx_buf->page_offset, 313 rx_ring->rx_buf_len, 314 DMA_FROM_DEVICE); 315 316 /* free resources associated with mapping */ 317 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring), 318 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); 319 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); 320 321 rx_buf->page = NULL; 322 rx_buf->page_offset = 0; 323 } 324 325 rx_skip_free: 326 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count); 327 328 /* Zero out the descriptor ring */ 329 memset(rx_ring->desc, 0, rx_ring->size); 330 331 rx_ring->next_to_alloc = 0; 332 rx_ring->next_to_clean = 0; 333 rx_ring->next_to_use = 0; 334 } 335 336 /** 337 * ice_free_rx_ring - Free Rx resources 338 * @rx_ring: ring to clean the resources from 339 * 340 * Free all receive software resources 341 */ 342 void ice_free_rx_ring(struct ice_ring *rx_ring) 343 { 344 ice_clean_rx_ring(rx_ring); 345 if (rx_ring->vsi->type == ICE_VSI_PF) 346 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) 347 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 348 rx_ring->xdp_prog = NULL; 349 devm_kfree(rx_ring->dev, rx_ring->rx_buf); 350 rx_ring->rx_buf = NULL; 351 352 if (rx_ring->desc) { 353 dmam_free_coherent(rx_ring->dev, rx_ring->size, 354 rx_ring->desc, rx_ring->dma); 355 rx_ring->desc = NULL; 356 } 357 } 358 359 /** 360 * ice_setup_rx_ring - Allocate the Rx descriptors 361 * @rx_ring: the Rx ring to set up 362 * 363 * Return 0 on success, negative on error 364 */ 365 int ice_setup_rx_ring(struct ice_ring *rx_ring) 366 { 367 struct device *dev = rx_ring->dev; 368 369 if (!dev) 370 return -ENOMEM; 371 372 /* warn if we are about to overwrite the pointer */ 373 WARN_ON(rx_ring->rx_buf); 374 rx_ring->rx_buf = 375 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count, 376 GFP_KERNEL); 377 if (!rx_ring->rx_buf) 378 return -ENOMEM; 379 380 /* round up to nearest page */ 381 rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), 382 PAGE_SIZE); 383 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, 384 GFP_KERNEL); 385 if (!rx_ring->desc) { 386 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", 387 rx_ring->size); 388 goto err; 389 } 390 391 rx_ring->next_to_use = 0; 392 rx_ring->next_to_clean = 0; 393 394 if (ice_is_xdp_ena_vsi(rx_ring->vsi)) 395 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); 396 397 if (rx_ring->vsi->type == ICE_VSI_PF && 398 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) 399 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, 400 rx_ring->q_index)) 401 goto err; 402 return 0; 403 404 err: 405 devm_kfree(dev, rx_ring->rx_buf); 406 rx_ring->rx_buf = NULL; 407 return -ENOMEM; 408 } 409 410 /** 411 * ice_rx_offset - Return expected offset into page to access data 412 * @rx_ring: Ring we are requesting offset of 413 * 414 * Returns the offset value for ring into the data buffer. 415 */ 416 static unsigned int ice_rx_offset(struct ice_ring *rx_ring) 417 { 418 if (ice_ring_uses_build_skb(rx_ring)) 419 return ICE_SKB_PAD; 420 else if (ice_is_xdp_ena_vsi(rx_ring->vsi)) 421 return XDP_PACKET_HEADROOM; 422 423 return 0; 424 } 425 426 static unsigned int ice_rx_frame_truesize(struct ice_ring *rx_ring, 427 unsigned int size) 428 { 429 unsigned int truesize; 430 431 #if (PAGE_SIZE < 8192) 432 truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ 433 #else 434 truesize = ice_rx_offset(rx_ring) ? 435 SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) + 436 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : 437 SKB_DATA_ALIGN(size); 438 #endif 439 return truesize; 440 } 441 442 /** 443 * ice_run_xdp - Executes an XDP program on initialized xdp_buff 444 * @rx_ring: Rx ring 445 * @xdp: xdp_buff used as input to the XDP program 446 * @xdp_prog: XDP program to run 447 * 448 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} 449 */ 450 static int 451 ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp, 452 struct bpf_prog *xdp_prog) 453 { 454 int err, result = ICE_XDP_PASS; 455 struct ice_ring *xdp_ring; 456 u32 act; 457 458 act = bpf_prog_run_xdp(xdp_prog, xdp); 459 switch (act) { 460 case XDP_PASS: 461 break; 462 case XDP_TX: 463 xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()]; 464 result = ice_xmit_xdp_buff(xdp, xdp_ring); 465 break; 466 case XDP_REDIRECT: 467 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); 468 result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED; 469 break; 470 default: 471 bpf_warn_invalid_xdp_action(act); 472 fallthrough; 473 case XDP_ABORTED: 474 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); 475 fallthrough; 476 case XDP_DROP: 477 result = ICE_XDP_CONSUMED; 478 break; 479 } 480 481 return result; 482 } 483 484 /** 485 * ice_xdp_xmit - submit packets to XDP ring for transmission 486 * @dev: netdev 487 * @n: number of XDP frames to be transmitted 488 * @frames: XDP frames to be transmitted 489 * @flags: transmit flags 490 * 491 * Returns number of frames successfully sent. Frames that fail are 492 * free'ed via XDP return API. 493 * For error cases, a negative errno code is returned and no-frames 494 * are transmitted (caller must handle freeing frames). 495 */ 496 int 497 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, 498 u32 flags) 499 { 500 struct ice_netdev_priv *np = netdev_priv(dev); 501 unsigned int queue_index = smp_processor_id(); 502 struct ice_vsi *vsi = np->vsi; 503 struct ice_ring *xdp_ring; 504 int drops = 0, i; 505 506 if (test_bit(__ICE_DOWN, vsi->state)) 507 return -ENETDOWN; 508 509 if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq) 510 return -ENXIO; 511 512 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 513 return -EINVAL; 514 515 xdp_ring = vsi->xdp_rings[queue_index]; 516 for (i = 0; i < n; i++) { 517 struct xdp_frame *xdpf = frames[i]; 518 int err; 519 520 err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring); 521 if (err != ICE_XDP_TX) { 522 xdp_return_frame_rx_napi(xdpf); 523 drops++; 524 } 525 } 526 527 if (unlikely(flags & XDP_XMIT_FLUSH)) 528 ice_xdp_ring_update_tail(xdp_ring); 529 530 return n - drops; 531 } 532 533 /** 534 * ice_alloc_mapped_page - recycle or make a new page 535 * @rx_ring: ring to use 536 * @bi: rx_buf struct to modify 537 * 538 * Returns true if the page was successfully allocated or 539 * reused. 540 */ 541 static bool 542 ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) 543 { 544 struct page *page = bi->page; 545 dma_addr_t dma; 546 547 /* since we are recycling buffers we should seldom need to alloc */ 548 if (likely(page)) { 549 rx_ring->rx_stats.page_reuse_count++; 550 return true; 551 } 552 553 /* alloc new page for storage */ 554 page = dev_alloc_pages(ice_rx_pg_order(rx_ring)); 555 if (unlikely(!page)) { 556 rx_ring->rx_stats.alloc_page_failed++; 557 return false; 558 } 559 560 /* map page for use */ 561 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring), 562 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); 563 564 /* if mapping failed free memory back to system since 565 * there isn't much point in holding memory we can't use 566 */ 567 if (dma_mapping_error(rx_ring->dev, dma)) { 568 __free_pages(page, ice_rx_pg_order(rx_ring)); 569 rx_ring->rx_stats.alloc_page_failed++; 570 return false; 571 } 572 573 bi->dma = dma; 574 bi->page = page; 575 bi->page_offset = ice_rx_offset(rx_ring); 576 page_ref_add(page, USHRT_MAX - 1); 577 bi->pagecnt_bias = USHRT_MAX; 578 579 return true; 580 } 581 582 /** 583 * ice_alloc_rx_bufs - Replace used receive buffers 584 * @rx_ring: ring to place buffers on 585 * @cleaned_count: number of buffers to replace 586 * 587 * Returns false if all allocations were successful, true if any fail. Returning 588 * true signals to the caller that we didn't replace cleaned_count buffers and 589 * there is more work to do. 590 * 591 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx 592 * buffers. Then bump tail at most one time. Grouping like this lets us avoid 593 * multiple tail writes per call. 594 */ 595 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) 596 { 597 union ice_32b_rx_flex_desc *rx_desc; 598 u16 ntu = rx_ring->next_to_use; 599 struct ice_rx_buf *bi; 600 601 /* do nothing if no valid netdev defined */ 602 if (!rx_ring->netdev || !cleaned_count) 603 return false; 604 605 /* get the Rx descriptor and buffer based on next_to_use */ 606 rx_desc = ICE_RX_DESC(rx_ring, ntu); 607 bi = &rx_ring->rx_buf[ntu]; 608 609 do { 610 /* if we fail here, we have work remaining */ 611 if (!ice_alloc_mapped_page(rx_ring, bi)) 612 break; 613 614 /* sync the buffer for use by the device */ 615 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 616 bi->page_offset, 617 rx_ring->rx_buf_len, 618 DMA_FROM_DEVICE); 619 620 /* Refresh the desc even if buffer_addrs didn't change 621 * because each write-back erases this info. 622 */ 623 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 624 625 rx_desc++; 626 bi++; 627 ntu++; 628 if (unlikely(ntu == rx_ring->count)) { 629 rx_desc = ICE_RX_DESC(rx_ring, 0); 630 bi = rx_ring->rx_buf; 631 ntu = 0; 632 } 633 634 /* clear the status bits for the next_to_use descriptor */ 635 rx_desc->wb.status_error0 = 0; 636 637 cleaned_count--; 638 } while (cleaned_count); 639 640 if (rx_ring->next_to_use != ntu) 641 ice_release_rx_desc(rx_ring, ntu); 642 643 return !!cleaned_count; 644 } 645 646 /** 647 * ice_page_is_reserved - check if reuse is possible 648 * @page: page struct to check 649 */ 650 static bool ice_page_is_reserved(struct page *page) 651 { 652 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 653 } 654 655 /** 656 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse 657 * @rx_buf: Rx buffer to adjust 658 * @size: Size of adjustment 659 * 660 * Update the offset within page so that Rx buf will be ready to be reused. 661 * For systems with PAGE_SIZE < 8192 this function will flip the page offset 662 * so the second half of page assigned to Rx buffer will be used, otherwise 663 * the offset is moved by "size" bytes 664 */ 665 static void 666 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size) 667 { 668 #if (PAGE_SIZE < 8192) 669 /* flip page offset to other buffer */ 670 rx_buf->page_offset ^= size; 671 #else 672 /* move offset up to the next cache line */ 673 rx_buf->page_offset += size; 674 #endif 675 } 676 677 /** 678 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx 679 * @rx_buf: buffer containing the page 680 * 681 * If page is reusable, we have a green light for calling ice_reuse_rx_page, 682 * which will assign the current buffer to the buffer that next_to_alloc is 683 * pointing to; otherwise, the DMA mapping needs to be destroyed and 684 * page freed 685 */ 686 static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) 687 { 688 unsigned int pagecnt_bias = rx_buf->pagecnt_bias; 689 struct page *page = rx_buf->page; 690 691 /* avoid re-using remote pages */ 692 if (unlikely(ice_page_is_reserved(page))) 693 return false; 694 695 #if (PAGE_SIZE < 8192) 696 /* if we are only owner of page we can reuse it */ 697 if (unlikely((page_count(page) - pagecnt_bias) > 1)) 698 return false; 699 #else 700 #define ICE_LAST_OFFSET \ 701 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048) 702 if (rx_buf->page_offset > ICE_LAST_OFFSET) 703 return false; 704 #endif /* PAGE_SIZE < 8192) */ 705 706 /* If we have drained the page fragment pool we need to update 707 * the pagecnt_bias and page count so that we fully restock the 708 * number of references the driver holds. 709 */ 710 if (unlikely(pagecnt_bias == 1)) { 711 page_ref_add(page, USHRT_MAX - 1); 712 rx_buf->pagecnt_bias = USHRT_MAX; 713 } 714 715 return true; 716 } 717 718 /** 719 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag 720 * @rx_ring: Rx descriptor ring to transact packets on 721 * @rx_buf: buffer containing page to add 722 * @skb: sk_buff to place the data into 723 * @size: packet length from rx_desc 724 * 725 * This function will add the data contained in rx_buf->page to the skb. 726 * It will just attach the page as a frag to the skb. 727 * The function will then update the page offset. 728 */ 729 static void 730 ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, 731 struct sk_buff *skb, unsigned int size) 732 { 733 #if (PAGE_SIZE >= 8192) 734 unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring)); 735 #else 736 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; 737 #endif 738 739 if (!size) 740 return; 741 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, 742 rx_buf->page_offset, size, truesize); 743 744 /* page is being used so we must update the page offset */ 745 ice_rx_buf_adjust_pg_offset(rx_buf, truesize); 746 } 747 748 /** 749 * ice_reuse_rx_page - page flip buffer and store it back on the ring 750 * @rx_ring: Rx descriptor ring to store buffers on 751 * @old_buf: donor buffer to have page reused 752 * 753 * Synchronizes page for reuse by the adapter 754 */ 755 static void 756 ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) 757 { 758 u16 nta = rx_ring->next_to_alloc; 759 struct ice_rx_buf *new_buf; 760 761 new_buf = &rx_ring->rx_buf[nta]; 762 763 /* update, and store next to alloc */ 764 nta++; 765 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 766 767 /* Transfer page from old buffer to new buffer. 768 * Move each member individually to avoid possible store 769 * forwarding stalls and unnecessary copy of skb. 770 */ 771 new_buf->dma = old_buf->dma; 772 new_buf->page = old_buf->page; 773 new_buf->page_offset = old_buf->page_offset; 774 new_buf->pagecnt_bias = old_buf->pagecnt_bias; 775 } 776 777 /** 778 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use 779 * @rx_ring: Rx descriptor ring to transact packets on 780 * @skb: skb to be used 781 * @size: size of buffer to add to skb 782 * 783 * This function will pull an Rx buffer from the ring and synchronize it 784 * for use by the CPU. 785 */ 786 static struct ice_rx_buf * 787 ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, 788 const unsigned int size) 789 { 790 struct ice_rx_buf *rx_buf; 791 792 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; 793 prefetchw(rx_buf->page); 794 *skb = rx_buf->skb; 795 796 if (!size) 797 return rx_buf; 798 /* we are reusing so sync this buffer for CPU use */ 799 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, 800 rx_buf->page_offset, size, 801 DMA_FROM_DEVICE); 802 803 /* We have pulled a buffer for use, so decrement pagecnt_bias */ 804 rx_buf->pagecnt_bias--; 805 806 return rx_buf; 807 } 808 809 /** 810 * ice_build_skb - Build skb around an existing buffer 811 * @rx_ring: Rx descriptor ring to transact packets on 812 * @rx_buf: Rx buffer to pull data from 813 * @xdp: xdp_buff pointing to the data 814 * 815 * This function builds an skb around an existing Rx buffer, taking care 816 * to set up the skb correctly and avoid any memcpy overhead. 817 */ 818 static struct sk_buff * 819 ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, 820 struct xdp_buff *xdp) 821 { 822 unsigned int metasize = xdp->data - xdp->data_meta; 823 #if (PAGE_SIZE < 8192) 824 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; 825 #else 826 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 827 SKB_DATA_ALIGN(xdp->data_end - 828 xdp->data_hard_start); 829 #endif 830 struct sk_buff *skb; 831 832 /* Prefetch first cache line of first page. If xdp->data_meta 833 * is unused, this points exactly as xdp->data, otherwise we 834 * likely have a consumer accessing first few bytes of meta 835 * data, and then actual data. 836 */ 837 prefetch(xdp->data_meta); 838 #if L1_CACHE_BYTES < 128 839 prefetch((void *)(xdp->data + L1_CACHE_BYTES)); 840 #endif 841 /* build an skb around the page buffer */ 842 skb = build_skb(xdp->data_hard_start, truesize); 843 if (unlikely(!skb)) 844 return NULL; 845 846 /* must to record Rx queue, otherwise OS features such as 847 * symmetric queue won't work 848 */ 849 skb_record_rx_queue(skb, rx_ring->q_index); 850 851 /* update pointers within the skb to store the data */ 852 skb_reserve(skb, xdp->data - xdp->data_hard_start); 853 __skb_put(skb, xdp->data_end - xdp->data); 854 if (metasize) 855 skb_metadata_set(skb, metasize); 856 857 /* buffer is used by skb, update page_offset */ 858 ice_rx_buf_adjust_pg_offset(rx_buf, truesize); 859 860 return skb; 861 } 862 863 /** 864 * ice_construct_skb - Allocate skb and populate it 865 * @rx_ring: Rx descriptor ring to transact packets on 866 * @rx_buf: Rx buffer to pull data from 867 * @xdp: xdp_buff pointing to the data 868 * 869 * This function allocates an skb. It then populates it with the page 870 * data from the current receive descriptor, taking care to set up the 871 * skb correctly. 872 */ 873 static struct sk_buff * 874 ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, 875 struct xdp_buff *xdp) 876 { 877 unsigned int size = xdp->data_end - xdp->data; 878 unsigned int headlen; 879 struct sk_buff *skb; 880 881 /* prefetch first cache line of first page */ 882 prefetch(xdp->data); 883 #if L1_CACHE_BYTES < 128 884 prefetch((void *)(xdp->data + L1_CACHE_BYTES)); 885 #endif /* L1_CACHE_BYTES */ 886 887 /* allocate a skb to store the frags */ 888 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, 889 GFP_ATOMIC | __GFP_NOWARN); 890 if (unlikely(!skb)) 891 return NULL; 892 893 skb_record_rx_queue(skb, rx_ring->q_index); 894 /* Determine available headroom for copy */ 895 headlen = size; 896 if (headlen > ICE_RX_HDR_SIZE) 897 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE); 898 899 /* align pull length to size of long to optimize memcpy performance */ 900 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, 901 sizeof(long))); 902 903 /* if we exhaust the linear part then add what is left as a frag */ 904 size -= headlen; 905 if (size) { 906 #if (PAGE_SIZE >= 8192) 907 unsigned int truesize = SKB_DATA_ALIGN(size); 908 #else 909 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; 910 #endif 911 skb_add_rx_frag(skb, 0, rx_buf->page, 912 rx_buf->page_offset + headlen, size, truesize); 913 /* buffer is used by skb, update page_offset */ 914 ice_rx_buf_adjust_pg_offset(rx_buf, truesize); 915 } else { 916 /* buffer is unused, reset bias back to rx_buf; data was copied 917 * onto skb's linear part so there's no need for adjusting 918 * page offset and we can reuse this buffer as-is 919 */ 920 rx_buf->pagecnt_bias++; 921 } 922 923 return skb; 924 } 925 926 /** 927 * ice_put_rx_buf - Clean up used buffer and either recycle or free 928 * @rx_ring: Rx descriptor ring to transact packets on 929 * @rx_buf: Rx buffer to pull data from 930 * 931 * This function will update next_to_clean and then clean up the contents 932 * of the rx_buf. It will either recycle the buffer or unmap it and free 933 * the associated resources. 934 */ 935 static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) 936 { 937 u32 ntc = rx_ring->next_to_clean + 1; 938 939 /* fetch, update, and store next to clean */ 940 ntc = (ntc < rx_ring->count) ? ntc : 0; 941 rx_ring->next_to_clean = ntc; 942 943 if (!rx_buf) 944 return; 945 946 if (ice_can_reuse_rx_page(rx_buf)) { 947 /* hand second half of page back to the ring */ 948 ice_reuse_rx_page(rx_ring, rx_buf); 949 rx_ring->rx_stats.page_reuse_count++; 950 } else { 951 /* we are not reusing the buffer so unmap it */ 952 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, 953 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, 954 ICE_RX_DMA_ATTR); 955 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); 956 } 957 958 /* clear contents of buffer_info */ 959 rx_buf->page = NULL; 960 rx_buf->skb = NULL; 961 } 962 963 /** 964 * ice_is_non_eop - process handling of non-EOP buffers 965 * @rx_ring: Rx ring being processed 966 * @rx_desc: Rx descriptor for current buffer 967 * @skb: Current socket buffer containing buffer in progress 968 * 969 * If the buffer is an EOP buffer, this function exits returning false, 970 * otherwise return true indicating that this is in fact a non-EOP buffer. 971 */ 972 static bool 973 ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, 974 struct sk_buff *skb) 975 { 976 /* if we are the last buffer then there is nothing else to do */ 977 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) 978 if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF))) 979 return false; 980 981 /* place skb in next buffer to be received */ 982 rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb; 983 rx_ring->rx_stats.non_eop_descs++; 984 985 return true; 986 } 987 988 /** 989 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf 990 * @rx_ring: Rx descriptor ring to transact packets on 991 * @budget: Total limit on number of packets to process 992 * 993 * This function provides a "bounce buffer" approach to Rx interrupt 994 * processing. The advantage to this is that on systems that have 995 * expensive overhead for IOMMU access this provides a means of avoiding 996 * it by maintaining the mapping of the page to the system. 997 * 998 * Returns amount of work completed 999 */ 1000 static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) 1001 { 1002 unsigned int total_rx_bytes = 0, total_rx_pkts = 0; 1003 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); 1004 unsigned int xdp_res, xdp_xmit = 0; 1005 struct bpf_prog *xdp_prog = NULL; 1006 struct xdp_buff xdp; 1007 bool failure; 1008 1009 xdp.rxq = &rx_ring->xdp_rxq; 1010 /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ 1011 #if (PAGE_SIZE < 8192) 1012 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0); 1013 #endif 1014 1015 /* start the loop to process Rx packets bounded by 'budget' */ 1016 while (likely(total_rx_pkts < (unsigned int)budget)) { 1017 union ice_32b_rx_flex_desc *rx_desc; 1018 struct ice_rx_buf *rx_buf; 1019 struct sk_buff *skb; 1020 unsigned int size; 1021 u16 stat_err_bits; 1022 u16 vlan_tag = 0; 1023 u8 rx_ptype; 1024 1025 /* get the Rx desc from Rx ring based on 'next_to_clean' */ 1026 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); 1027 1028 /* status_error_len will always be zero for unused descriptors 1029 * because it's cleared in cleanup, and overlaps with hdr_addr 1030 * which is always zero because packet split isn't used, if the 1031 * hardware wrote DD then it will be non-zero 1032 */ 1033 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); 1034 if (!ice_test_staterr(rx_desc, stat_err_bits)) 1035 break; 1036 1037 /* This memory barrier is needed to keep us from reading 1038 * any other fields out of the rx_desc until we know the 1039 * DD bit is set. 1040 */ 1041 dma_rmb(); 1042 1043 size = le16_to_cpu(rx_desc->wb.pkt_len) & 1044 ICE_RX_FLX_DESC_PKT_LEN_M; 1045 1046 /* retrieve a buffer from the ring */ 1047 rx_buf = ice_get_rx_buf(rx_ring, &skb, size); 1048 1049 if (!size) { 1050 xdp.data = NULL; 1051 xdp.data_end = NULL; 1052 xdp.data_hard_start = NULL; 1053 xdp.data_meta = NULL; 1054 goto construct_skb; 1055 } 1056 1057 xdp.data = page_address(rx_buf->page) + rx_buf->page_offset; 1058 xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring); 1059 xdp.data_meta = xdp.data; 1060 xdp.data_end = xdp.data + size; 1061 #if (PAGE_SIZE > 4096) 1062 /* At larger PAGE_SIZE, frame_sz depend on len size */ 1063 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size); 1064 #endif 1065 1066 rcu_read_lock(); 1067 xdp_prog = READ_ONCE(rx_ring->xdp_prog); 1068 if (!xdp_prog) { 1069 rcu_read_unlock(); 1070 goto construct_skb; 1071 } 1072 1073 xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog); 1074 rcu_read_unlock(); 1075 if (!xdp_res) 1076 goto construct_skb; 1077 if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { 1078 xdp_xmit |= xdp_res; 1079 ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz); 1080 } else { 1081 rx_buf->pagecnt_bias++; 1082 } 1083 total_rx_bytes += size; 1084 total_rx_pkts++; 1085 1086 cleaned_count++; 1087 ice_put_rx_buf(rx_ring, rx_buf); 1088 continue; 1089 construct_skb: 1090 if (skb) { 1091 ice_add_rx_frag(rx_ring, rx_buf, skb, size); 1092 } else if (likely(xdp.data)) { 1093 if (ice_ring_uses_build_skb(rx_ring)) 1094 skb = ice_build_skb(rx_ring, rx_buf, &xdp); 1095 else 1096 skb = ice_construct_skb(rx_ring, rx_buf, &xdp); 1097 } 1098 /* exit if we failed to retrieve a buffer */ 1099 if (!skb) { 1100 rx_ring->rx_stats.alloc_buf_failed++; 1101 if (rx_buf) 1102 rx_buf->pagecnt_bias++; 1103 break; 1104 } 1105 1106 ice_put_rx_buf(rx_ring, rx_buf); 1107 cleaned_count++; 1108 1109 /* skip if it is NOP desc */ 1110 if (ice_is_non_eop(rx_ring, rx_desc, skb)) 1111 continue; 1112 1113 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); 1114 if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) { 1115 dev_kfree_skb_any(skb); 1116 continue; 1117 } 1118 1119 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); 1120 if (ice_test_staterr(rx_desc, stat_err_bits)) 1121 vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); 1122 1123 /* pad the skb if needed, to make a valid ethernet frame */ 1124 if (eth_skb_pad(skb)) { 1125 skb = NULL; 1126 continue; 1127 } 1128 1129 /* probably a little skewed due to removing CRC */ 1130 total_rx_bytes += skb->len; 1131 1132 /* populate checksum, VLAN, and protocol */ 1133 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & 1134 ICE_RX_FLEX_DESC_PTYPE_M; 1135 1136 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 1137 1138 /* send completed skb up the stack */ 1139 ice_receive_skb(rx_ring, skb, vlan_tag); 1140 1141 /* update budget accounting */ 1142 total_rx_pkts++; 1143 } 1144 1145 /* return up to cleaned_count buffers to hardware */ 1146 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); 1147 1148 if (xdp_prog) 1149 ice_finalize_xdp_rx(rx_ring, xdp_xmit); 1150 1151 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); 1152 1153 /* guarantee a trip back through this routine if there was a failure */ 1154 return failure ? budget : (int)total_rx_pkts; 1155 } 1156 1157 /** 1158 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic 1159 * @port_info: port_info structure containing the current link speed 1160 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine 1161 * @itr: ITR value to update 1162 * 1163 * Calculate how big of an increment should be applied to the ITR value passed 1164 * in based on wmem_default, SKB overhead, Ethernet overhead, and the current 1165 * link speed. 1166 * 1167 * The following is a calculation derived from: 1168 * wmem_default / (size + overhead) = desired_pkts_per_int 1169 * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate 1170 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value 1171 * 1172 * Assuming wmem_default is 212992 and overhead is 640 bytes per 1173 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the 1174 * formula down to: 1175 * 1176 * wmem_default * bits_per_byte * usecs_per_sec pkt_size + 24 1177 * ITR = -------------------------------------------- * -------------- 1178 * rate pkt_size + 640 1179 */ 1180 static unsigned int 1181 ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info, 1182 unsigned int avg_pkt_size, 1183 unsigned int itr) 1184 { 1185 switch (port_info->phy.link_info.link_speed) { 1186 case ICE_AQ_LINK_SPEED_100GB: 1187 itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24), 1188 avg_pkt_size + 640); 1189 break; 1190 case ICE_AQ_LINK_SPEED_50GB: 1191 itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24), 1192 avg_pkt_size + 640); 1193 break; 1194 case ICE_AQ_LINK_SPEED_40GB: 1195 itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24), 1196 avg_pkt_size + 640); 1197 break; 1198 case ICE_AQ_LINK_SPEED_25GB: 1199 itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24), 1200 avg_pkt_size + 640); 1201 break; 1202 case ICE_AQ_LINK_SPEED_20GB: 1203 itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24), 1204 avg_pkt_size + 640); 1205 break; 1206 case ICE_AQ_LINK_SPEED_10GB: 1207 default: 1208 itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24), 1209 avg_pkt_size + 640); 1210 break; 1211 } 1212 1213 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { 1214 itr &= ICE_ITR_ADAPTIVE_LATENCY; 1215 itr += ICE_ITR_ADAPTIVE_MAX_USECS; 1216 } 1217 1218 return itr; 1219 } 1220 1221 /** 1222 * ice_update_itr - update the adaptive ITR value based on statistics 1223 * @q_vector: structure containing interrupt and ring information 1224 * @rc: structure containing ring performance data 1225 * 1226 * Stores a new ITR value based on packets and byte 1227 * counts during the last interrupt. The advantage of per interrupt 1228 * computation is faster updates and more accurate ITR for the current 1229 * traffic pattern. Constants in this function were computed 1230 * based on theoretical maximum wire speed and thresholds were set based 1231 * on testing data as well as attempting to minimize response time 1232 * while increasing bulk throughput. 1233 */ 1234 static void 1235 ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) 1236 { 1237 unsigned long next_update = jiffies; 1238 unsigned int packets, bytes, itr; 1239 bool container_is_rx; 1240 1241 if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting)) 1242 return; 1243 1244 /* If itr_countdown is set it means we programmed an ITR within 1245 * the last 4 interrupt cycles. This has a side effect of us 1246 * potentially firing an early interrupt. In order to work around 1247 * this we need to throw out any data received for a few 1248 * interrupts following the update. 1249 */ 1250 if (q_vector->itr_countdown) { 1251 itr = rc->target_itr; 1252 goto clear_counts; 1253 } 1254 1255 container_is_rx = (&q_vector->rx == rc); 1256 /* For Rx we want to push the delay up and default to low latency. 1257 * for Tx we want to pull the delay down and default to high latency. 1258 */ 1259 itr = container_is_rx ? 1260 ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY : 1261 ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY; 1262 1263 /* If we didn't update within up to 1 - 2 jiffies we can assume 1264 * that either packets are coming in so slow there hasn't been 1265 * any work, or that there is so much work that NAPI is dealing 1266 * with interrupt moderation and we don't need to do anything. 1267 */ 1268 if (time_after(next_update, rc->next_update)) 1269 goto clear_counts; 1270 1271 prefetch(q_vector->vsi->port_info); 1272 1273 packets = rc->total_pkts; 1274 bytes = rc->total_bytes; 1275 1276 if (container_is_rx) { 1277 /* If Rx there are 1 to 4 packets and bytes are less than 1278 * 9000 assume insufficient data to use bulk rate limiting 1279 * approach unless Tx is already in bulk rate limiting. We 1280 * are likely latency driven. 1281 */ 1282 if (packets && packets < 4 && bytes < 9000 && 1283 (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) { 1284 itr = ICE_ITR_ADAPTIVE_LATENCY; 1285 goto adjust_by_size_and_speed; 1286 } 1287 } else if (packets < 4) { 1288 /* If we have Tx and Rx ITR maxed and Tx ITR is running in 1289 * bulk mode and we are receiving 4 or fewer packets just 1290 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so 1291 * that the Rx can relax. 1292 */ 1293 if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS && 1294 (q_vector->rx.target_itr & ICE_ITR_MASK) == 1295 ICE_ITR_ADAPTIVE_MAX_USECS) 1296 goto clear_counts; 1297 } else if (packets > 32) { 1298 /* If we have processed over 32 packets in a single interrupt 1299 * for Tx assume we need to switch over to "bulk" mode. 1300 */ 1301 rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY; 1302 } 1303 1304 /* We have no packets to actually measure against. This means 1305 * either one of the other queues on this vector is active or 1306 * we are a Tx queue doing TSO with too high of an interrupt rate. 1307 * 1308 * Between 4 and 56 we can assume that our current interrupt delay 1309 * is only slightly too low. As such we should increase it by a small 1310 * fixed amount. 1311 */ 1312 if (packets < 56) { 1313 itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC; 1314 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { 1315 itr &= ICE_ITR_ADAPTIVE_LATENCY; 1316 itr += ICE_ITR_ADAPTIVE_MAX_USECS; 1317 } 1318 goto clear_counts; 1319 } 1320 1321 if (packets <= 256) { 1322 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); 1323 itr &= ICE_ITR_MASK; 1324 1325 /* Between 56 and 112 is our "goldilocks" zone where we are 1326 * working out "just right". Just report that our current 1327 * ITR is good for us. 1328 */ 1329 if (packets <= 112) 1330 goto clear_counts; 1331 1332 /* If packet count is 128 or greater we are likely looking 1333 * at a slight overrun of the delay we want. Try halving 1334 * our delay to see if that will cut the number of packets 1335 * in half per interrupt. 1336 */ 1337 itr >>= 1; 1338 itr &= ICE_ITR_MASK; 1339 if (itr < ICE_ITR_ADAPTIVE_MIN_USECS) 1340 itr = ICE_ITR_ADAPTIVE_MIN_USECS; 1341 1342 goto clear_counts; 1343 } 1344 1345 /* The paths below assume we are dealing with a bulk ITR since 1346 * number of packets is greater than 256. We are just going to have 1347 * to compute a value and try to bring the count under control, 1348 * though for smaller packet sizes there isn't much we can do as 1349 * NAPI polling will likely be kicking in sooner rather than later. 1350 */ 1351 itr = ICE_ITR_ADAPTIVE_BULK; 1352 1353 adjust_by_size_and_speed: 1354 1355 /* based on checks above packets cannot be 0 so division is safe */ 1356 itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info, 1357 bytes / packets, itr); 1358 1359 clear_counts: 1360 /* write back value */ 1361 rc->target_itr = itr; 1362 1363 /* next update should occur within next jiffy */ 1364 rc->next_update = next_update + 1; 1365 1366 rc->total_bytes = 0; 1367 rc->total_pkts = 0; 1368 } 1369 1370 /** 1371 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register 1372 * @itr_idx: interrupt throttling index 1373 * @itr: interrupt throttling value in usecs 1374 */ 1375 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) 1376 { 1377 /* The ITR value is reported in microseconds, and the register value is 1378 * recorded in 2 microsecond units. For this reason we only need to 1379 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this 1380 * granularity as a shift instead of division. The mask makes sure the 1381 * ITR value is never odd so we don't accidentally write into the field 1382 * prior to the ITR field. 1383 */ 1384 itr &= ICE_ITR_MASK; 1385 1386 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 1387 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | 1388 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); 1389 } 1390 1391 /* The act of updating the ITR will cause it to immediately trigger. In order 1392 * to prevent this from throwing off adaptive update statistics we defer the 1393 * update so that it can only happen so often. So after either Tx or Rx are 1394 * updated we make the adaptive scheme wait until either the ITR completely 1395 * expires via the next_update expiration or we have been through at least 1396 * 3 interrupts. 1397 */ 1398 #define ITR_COUNTDOWN_START 3 1399 1400 /** 1401 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt 1402 * @q_vector: q_vector for which ITR is being updated and interrupt enabled 1403 */ 1404 static void ice_update_ena_itr(struct ice_q_vector *q_vector) 1405 { 1406 struct ice_ring_container *tx = &q_vector->tx; 1407 struct ice_ring_container *rx = &q_vector->rx; 1408 struct ice_vsi *vsi = q_vector->vsi; 1409 u32 itr_val; 1410 1411 /* when exiting WB_ON_ITR lets set a low ITR value and trigger 1412 * interrupts to expire right away in case we have more work ready to go 1413 * already 1414 */ 1415 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) { 1416 itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS); 1417 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); 1418 /* set target back to last user set value */ 1419 rx->target_itr = rx->itr_setting; 1420 /* set current to what we just wrote and dynamic if needed */ 1421 rx->current_itr = ICE_WB_ON_ITR_USECS | 1422 (rx->itr_setting & ICE_ITR_DYNAMIC); 1423 /* allow normal interrupt flow to start */ 1424 q_vector->itr_countdown = 0; 1425 return; 1426 } 1427 1428 /* This will do nothing if dynamic updates are not enabled */ 1429 ice_update_itr(q_vector, tx); 1430 ice_update_itr(q_vector, rx); 1431 1432 /* This block of logic allows us to get away with only updating 1433 * one ITR value with each interrupt. The idea is to perform a 1434 * pseudo-lazy update with the following criteria. 1435 * 1436 * 1. Rx is given higher priority than Tx if both are in same state 1437 * 2. If we must reduce an ITR that is given highest priority. 1438 * 3. We then give priority to increasing ITR based on amount. 1439 */ 1440 if (rx->target_itr < rx->current_itr) { 1441 /* Rx ITR needs to be reduced, this is highest priority */ 1442 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); 1443 rx->current_itr = rx->target_itr; 1444 q_vector->itr_countdown = ITR_COUNTDOWN_START; 1445 } else if ((tx->target_itr < tx->current_itr) || 1446 ((rx->target_itr - rx->current_itr) < 1447 (tx->target_itr - tx->current_itr))) { 1448 /* Tx ITR needs to be reduced, this is second priority 1449 * Tx ITR needs to be increased more than Rx, fourth priority 1450 */ 1451 itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr); 1452 tx->current_itr = tx->target_itr; 1453 q_vector->itr_countdown = ITR_COUNTDOWN_START; 1454 } else if (rx->current_itr != rx->target_itr) { 1455 /* Rx ITR needs to be increased, third priority */ 1456 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); 1457 rx->current_itr = rx->target_itr; 1458 q_vector->itr_countdown = ITR_COUNTDOWN_START; 1459 } else { 1460 /* Still have to re-enable the interrupts */ 1461 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); 1462 if (q_vector->itr_countdown) 1463 q_vector->itr_countdown--; 1464 } 1465 1466 if (!test_bit(__ICE_DOWN, q_vector->vsi->state)) 1467 wr32(&q_vector->vsi->back->hw, 1468 GLINT_DYN_CTL(q_vector->reg_idx), 1469 itr_val); 1470 } 1471 1472 /** 1473 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector 1474 * @q_vector: q_vector to set WB_ON_ITR on 1475 * 1476 * We need to tell hardware to write-back completed descriptors even when 1477 * interrupts are disabled. Descriptors will be written back on cache line 1478 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR 1479 * descriptors may not be written back if they don't fill a cache line until the 1480 * next interrupt. 1481 * 1482 * This sets the write-back frequency to 2 microseconds as that is the minimum 1483 * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to 1484 * make sure hardware knows we aren't meddling with the INTENA_M bit. 1485 */ 1486 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) 1487 { 1488 struct ice_vsi *vsi = q_vector->vsi; 1489 1490 /* already in WB_ON_ITR mode no need to change it */ 1491 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) 1492 return; 1493 1494 if (q_vector->num_ring_rx) 1495 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), 1496 ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS, 1497 ICE_RX_ITR)); 1498 1499 if (q_vector->num_ring_tx) 1500 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), 1501 ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS, 1502 ICE_TX_ITR)); 1503 1504 q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE; 1505 } 1506 1507 /** 1508 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine 1509 * @napi: napi struct with our devices info in it 1510 * @budget: amount of work driver is allowed to do this pass, in packets 1511 * 1512 * This function will clean all queues associated with a q_vector. 1513 * 1514 * Returns the amount of work done 1515 */ 1516 int ice_napi_poll(struct napi_struct *napi, int budget) 1517 { 1518 struct ice_q_vector *q_vector = 1519 container_of(napi, struct ice_q_vector, napi); 1520 bool clean_complete = true; 1521 struct ice_ring *ring; 1522 int budget_per_ring; 1523 int work_done = 0; 1524 1525 /* Since the actual Tx work is minimal, we can give the Tx a larger 1526 * budget and be more aggressive about cleaning up the Tx descriptors. 1527 */ 1528 ice_for_each_ring(ring, q_vector->tx) { 1529 bool wd = ring->xsk_umem ? 1530 ice_clean_tx_irq_zc(ring, budget) : 1531 ice_clean_tx_irq(ring, budget); 1532 1533 if (!wd) 1534 clean_complete = false; 1535 } 1536 1537 /* Handle case where we are called by netpoll with a budget of 0 */ 1538 if (unlikely(budget <= 0)) 1539 return budget; 1540 1541 /* normally we have 1 Rx ring per q_vector */ 1542 if (unlikely(q_vector->num_ring_rx > 1)) 1543 /* We attempt to distribute budget to each Rx queue fairly, but 1544 * don't allow the budget to go below 1 because that would exit 1545 * polling early. 1546 */ 1547 budget_per_ring = max(budget / q_vector->num_ring_rx, 1); 1548 else 1549 /* Max of 1 Rx ring in this q_vector so give it the budget */ 1550 budget_per_ring = budget; 1551 1552 ice_for_each_ring(ring, q_vector->rx) { 1553 int cleaned; 1554 1555 /* A dedicated path for zero-copy allows making a single 1556 * comparison in the irq context instead of many inside the 1557 * ice_clean_rx_irq function and makes the codebase cleaner. 1558 */ 1559 cleaned = ring->xsk_umem ? 1560 ice_clean_rx_irq_zc(ring, budget_per_ring) : 1561 ice_clean_rx_irq(ring, budget_per_ring); 1562 work_done += cleaned; 1563 /* if we clean as many as budgeted, we must not be done */ 1564 if (cleaned >= budget_per_ring) 1565 clean_complete = false; 1566 } 1567 1568 /* If work not completed, return budget and polling will return */ 1569 if (!clean_complete) 1570 return budget; 1571 1572 /* Exit the polling mode, but don't re-enable interrupts if stack might 1573 * poll us due to busy-polling 1574 */ 1575 if (likely(napi_complete_done(napi, work_done))) 1576 ice_update_ena_itr(q_vector); 1577 else 1578 ice_set_wb_on_itr(q_vector); 1579 1580 return min_t(int, work_done, budget - 1); 1581 } 1582 1583 /** 1584 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions 1585 * @tx_ring: the ring to be checked 1586 * @size: the size buffer we want to assure is available 1587 * 1588 * Returns -EBUSY if a stop is needed, else 0 1589 */ 1590 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 1591 { 1592 netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); 1593 /* Memory barrier before checking head and tail */ 1594 smp_mb(); 1595 1596 /* Check again in a case another CPU has just made room available. */ 1597 if (likely(ICE_DESC_UNUSED(tx_ring) < size)) 1598 return -EBUSY; 1599 1600 /* A reprieve! - use start_subqueue because it doesn't call schedule */ 1601 netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); 1602 ++tx_ring->tx_stats.restart_q; 1603 return 0; 1604 } 1605 1606 /** 1607 * ice_maybe_stop_tx - 1st level check for Tx stop conditions 1608 * @tx_ring: the ring to be checked 1609 * @size: the size buffer we want to assure is available 1610 * 1611 * Returns 0 if stop is not needed 1612 */ 1613 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 1614 { 1615 if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) 1616 return 0; 1617 1618 return __ice_maybe_stop_tx(tx_ring, size); 1619 } 1620 1621 /** 1622 * ice_tx_map - Build the Tx descriptor 1623 * @tx_ring: ring to send buffer on 1624 * @first: first buffer info buffer to use 1625 * @off: pointer to struct that holds offload parameters 1626 * 1627 * This function loops over the skb data pointed to by *first 1628 * and gets a physical address for each memory location and programs 1629 * it and the length into the transmit descriptor. 1630 */ 1631 static void 1632 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, 1633 struct ice_tx_offload_params *off) 1634 { 1635 u64 td_offset, td_tag, td_cmd; 1636 u16 i = tx_ring->next_to_use; 1637 unsigned int data_len, size; 1638 struct ice_tx_desc *tx_desc; 1639 struct ice_tx_buf *tx_buf; 1640 struct sk_buff *skb; 1641 skb_frag_t *frag; 1642 dma_addr_t dma; 1643 1644 td_tag = off->td_l2tag1; 1645 td_cmd = off->td_cmd; 1646 td_offset = off->td_offset; 1647 skb = first->skb; 1648 1649 data_len = skb->data_len; 1650 size = skb_headlen(skb); 1651 1652 tx_desc = ICE_TX_DESC(tx_ring, i); 1653 1654 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { 1655 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1; 1656 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> 1657 ICE_TX_FLAGS_VLAN_S; 1658 } 1659 1660 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 1661 1662 tx_buf = first; 1663 1664 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 1665 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 1666 1667 if (dma_mapping_error(tx_ring->dev, dma)) 1668 goto dma_error; 1669 1670 /* record length, and DMA address */ 1671 dma_unmap_len_set(tx_buf, len, size); 1672 dma_unmap_addr_set(tx_buf, dma, dma); 1673 1674 /* align size to end of page */ 1675 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); 1676 tx_desc->buf_addr = cpu_to_le64(dma); 1677 1678 /* account for data chunks larger than the hardware 1679 * can handle 1680 */ 1681 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) { 1682 tx_desc->cmd_type_offset_bsz = 1683 build_ctob(td_cmd, td_offset, max_data, td_tag); 1684 1685 tx_desc++; 1686 i++; 1687 1688 if (i == tx_ring->count) { 1689 tx_desc = ICE_TX_DESC(tx_ring, 0); 1690 i = 0; 1691 } 1692 1693 dma += max_data; 1694 size -= max_data; 1695 1696 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 1697 tx_desc->buf_addr = cpu_to_le64(dma); 1698 } 1699 1700 if (likely(!data_len)) 1701 break; 1702 1703 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, 1704 size, td_tag); 1705 1706 tx_desc++; 1707 i++; 1708 1709 if (i == tx_ring->count) { 1710 tx_desc = ICE_TX_DESC(tx_ring, 0); 1711 i = 0; 1712 } 1713 1714 size = skb_frag_size(frag); 1715 data_len -= size; 1716 1717 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 1718 DMA_TO_DEVICE); 1719 1720 tx_buf = &tx_ring->tx_buf[i]; 1721 } 1722 1723 /* record bytecount for BQL */ 1724 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 1725 1726 /* record SW timestamp if HW timestamp is not available */ 1727 skb_tx_timestamp(first->skb); 1728 1729 i++; 1730 if (i == tx_ring->count) 1731 i = 0; 1732 1733 /* write last descriptor with RS and EOP bits */ 1734 td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD; 1735 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, 1736 td_tag); 1737 1738 /* Force memory writes to complete before letting h/w know there 1739 * are new descriptors to fetch. 1740 * 1741 * We also use this memory barrier to make certain all of the 1742 * status bits have been updated before next_to_watch is written. 1743 */ 1744 wmb(); 1745 1746 /* set next_to_watch value indicating a packet is present */ 1747 first->next_to_watch = tx_desc; 1748 1749 tx_ring->next_to_use = i; 1750 1751 ice_maybe_stop_tx(tx_ring, DESC_NEEDED); 1752 1753 /* notify HW of packet */ 1754 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) 1755 writel(i, tx_ring->tail); 1756 1757 return; 1758 1759 dma_error: 1760 /* clear DMA mappings for failed tx_buf map */ 1761 for (;;) { 1762 tx_buf = &tx_ring->tx_buf[i]; 1763 ice_unmap_and_free_tx_buf(tx_ring, tx_buf); 1764 if (tx_buf == first) 1765 break; 1766 if (i == 0) 1767 i = tx_ring->count; 1768 i--; 1769 } 1770 1771 tx_ring->next_to_use = i; 1772 } 1773 1774 /** 1775 * ice_tx_csum - Enable Tx checksum offloads 1776 * @first: pointer to the first descriptor 1777 * @off: pointer to struct that holds offload parameters 1778 * 1779 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise. 1780 */ 1781 static 1782 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1783 { 1784 u32 l4_len = 0, l3_len = 0, l2_len = 0; 1785 struct sk_buff *skb = first->skb; 1786 union { 1787 struct iphdr *v4; 1788 struct ipv6hdr *v6; 1789 unsigned char *hdr; 1790 } ip; 1791 union { 1792 struct tcphdr *tcp; 1793 unsigned char *hdr; 1794 } l4; 1795 __be16 frag_off, protocol; 1796 unsigned char *exthdr; 1797 u32 offset, cmd = 0; 1798 u8 l4_proto = 0; 1799 1800 if (skb->ip_summed != CHECKSUM_PARTIAL) 1801 return 0; 1802 1803 ip.hdr = skb_network_header(skb); 1804 l4.hdr = skb_transport_header(skb); 1805 1806 /* compute outer L2 header size */ 1807 l2_len = ip.hdr - skb->data; 1808 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; 1809 1810 if (skb->encapsulation) 1811 return -1; 1812 1813 /* Enable IP checksum offloads */ 1814 protocol = vlan_get_protocol(skb); 1815 if (protocol == htons(ETH_P_IP)) { 1816 l4_proto = ip.v4->protocol; 1817 /* the stack computes the IP header already, the only time we 1818 * need the hardware to recompute it is in the case of TSO. 1819 */ 1820 if (first->tx_flags & ICE_TX_FLAGS_TSO) 1821 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; 1822 else 1823 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; 1824 1825 } else if (protocol == htons(ETH_P_IPV6)) { 1826 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; 1827 exthdr = ip.hdr + sizeof(*ip.v6); 1828 l4_proto = ip.v6->nexthdr; 1829 if (l4.hdr != exthdr) 1830 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, 1831 &frag_off); 1832 } else { 1833 return -1; 1834 } 1835 1836 /* compute inner L3 header size */ 1837 l3_len = l4.hdr - ip.hdr; 1838 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S; 1839 1840 /* Enable L4 checksum offloads */ 1841 switch (l4_proto) { 1842 case IPPROTO_TCP: 1843 /* enable checksum offloads */ 1844 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 1845 l4_len = l4.tcp->doff; 1846 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1847 break; 1848 case IPPROTO_UDP: 1849 /* enable UDP checksum offload */ 1850 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 1851 l4_len = (sizeof(struct udphdr) >> 2); 1852 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1853 break; 1854 case IPPROTO_SCTP: 1855 /* enable SCTP checksum offload */ 1856 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; 1857 l4_len = sizeof(struct sctphdr) >> 2; 1858 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1859 break; 1860 1861 default: 1862 if (first->tx_flags & ICE_TX_FLAGS_TSO) 1863 return -1; 1864 skb_checksum_help(skb); 1865 return 0; 1866 } 1867 1868 off->td_cmd |= cmd; 1869 off->td_offset |= offset; 1870 return 1; 1871 } 1872 1873 /** 1874 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW 1875 * @tx_ring: ring to send buffer on 1876 * @first: pointer to struct ice_tx_buf 1877 * 1878 * Checks the skb and set up correspondingly several generic transmit flags 1879 * related to VLAN tagging for the HW, such as VLAN, DCB, etc. 1880 * 1881 * Returns error code indicate the frame should be dropped upon error and the 1882 * otherwise returns 0 to indicate the flags has been set properly. 1883 */ 1884 static int 1885 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first) 1886 { 1887 struct sk_buff *skb = first->skb; 1888 __be16 protocol = skb->protocol; 1889 1890 if (protocol == htons(ETH_P_8021Q) && 1891 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { 1892 /* when HW VLAN acceleration is turned off by the user the 1893 * stack sets the protocol to 8021q so that the driver 1894 * can take any steps required to support the SW only 1895 * VLAN handling. In our case the driver doesn't need 1896 * to take any further steps so just set the protocol 1897 * to the encapsulated ethertype. 1898 */ 1899 skb->protocol = vlan_get_protocol(skb); 1900 return 0; 1901 } 1902 1903 /* if we have a HW VLAN tag being added, default to the HW one */ 1904 if (skb_vlan_tag_present(skb)) { 1905 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S; 1906 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; 1907 } else if (protocol == htons(ETH_P_8021Q)) { 1908 struct vlan_hdr *vhdr, _vhdr; 1909 1910 /* for SW VLAN, check the next protocol and store the tag */ 1911 vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN, 1912 sizeof(_vhdr), 1913 &_vhdr); 1914 if (!vhdr) 1915 return -EINVAL; 1916 1917 first->tx_flags |= ntohs(vhdr->h_vlan_TCI) << 1918 ICE_TX_FLAGS_VLAN_S; 1919 first->tx_flags |= ICE_TX_FLAGS_SW_VLAN; 1920 } 1921 1922 return ice_tx_prepare_vlan_flags_dcb(tx_ring, first); 1923 } 1924 1925 /** 1926 * ice_tso - computes mss and TSO length to prepare for TSO 1927 * @first: pointer to struct ice_tx_buf 1928 * @off: pointer to struct that holds offload parameters 1929 * 1930 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise. 1931 */ 1932 static 1933 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1934 { 1935 struct sk_buff *skb = first->skb; 1936 union { 1937 struct iphdr *v4; 1938 struct ipv6hdr *v6; 1939 unsigned char *hdr; 1940 } ip; 1941 union { 1942 struct tcphdr *tcp; 1943 struct udphdr *udp; 1944 unsigned char *hdr; 1945 } l4; 1946 u64 cd_mss, cd_tso_len; 1947 u32 paylen, l4_start; 1948 int err; 1949 1950 if (skb->ip_summed != CHECKSUM_PARTIAL) 1951 return 0; 1952 1953 if (!skb_is_gso(skb)) 1954 return 0; 1955 1956 err = skb_cow_head(skb, 0); 1957 if (err < 0) 1958 return err; 1959 1960 /* cppcheck-suppress unreadVariable */ 1961 ip.hdr = skb_network_header(skb); 1962 l4.hdr = skb_transport_header(skb); 1963 1964 /* initialize outer IP header fields */ 1965 if (ip.v4->version == 4) { 1966 ip.v4->tot_len = 0; 1967 ip.v4->check = 0; 1968 } else { 1969 ip.v6->payload_len = 0; 1970 } 1971 1972 /* determine offset of transport header */ 1973 l4_start = l4.hdr - skb->data; 1974 1975 /* remove payload length from checksum */ 1976 paylen = skb->len - l4_start; 1977 1978 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 1979 csum_replace_by_diff(&l4.udp->check, 1980 (__force __wsum)htonl(paylen)); 1981 /* compute length of UDP segmentation header */ 1982 off->header_len = sizeof(l4.udp) + l4_start; 1983 } else { 1984 csum_replace_by_diff(&l4.tcp->check, 1985 (__force __wsum)htonl(paylen)); 1986 /* compute length of TCP segmentation header */ 1987 off->header_len = (l4.tcp->doff * 4) + l4_start; 1988 } 1989 1990 /* update gso_segs and bytecount */ 1991 first->gso_segs = skb_shinfo(skb)->gso_segs; 1992 first->bytecount += (first->gso_segs - 1) * off->header_len; 1993 1994 cd_tso_len = skb->len - off->header_len; 1995 cd_mss = skb_shinfo(skb)->gso_size; 1996 1997 /* record cdesc_qw1 with TSO parameters */ 1998 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 1999 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) | 2000 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) | 2001 (cd_mss << ICE_TXD_CTX_QW1_MSS_S)); 2002 first->tx_flags |= ICE_TX_FLAGS_TSO; 2003 return 1; 2004 } 2005 2006 /** 2007 * ice_txd_use_count - estimate the number of descriptors needed for Tx 2008 * @size: transmit request size in bytes 2009 * 2010 * Due to hardware alignment restrictions (4K alignment), we need to 2011 * assume that we can have no more than 12K of data per descriptor, even 2012 * though each descriptor can take up to 16K - 1 bytes of aligned memory. 2013 * Thus, we need to divide by 12K. But division is slow! Instead, 2014 * we decompose the operation into shifts and one relatively cheap 2015 * multiply operation. 2016 * 2017 * To divide by 12K, we first divide by 4K, then divide by 3: 2018 * To divide by 4K, shift right by 12 bits 2019 * To divide by 3, multiply by 85, then divide by 256 2020 * (Divide by 256 is done by shifting right by 8 bits) 2021 * Finally, we add one to round up. Because 256 isn't an exact multiple of 2022 * 3, we'll underestimate near each multiple of 12K. This is actually more 2023 * accurate as we have 4K - 1 of wiggle room that we can fit into the last 2024 * segment. For our purposes this is accurate out to 1M which is orders of 2025 * magnitude greater than our largest possible GSO size. 2026 * 2027 * This would then be implemented as: 2028 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR; 2029 * 2030 * Since multiplication and division are commutative, we can reorder 2031 * operations into: 2032 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 2033 */ 2034 static unsigned int ice_txd_use_count(unsigned int size) 2035 { 2036 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 2037 } 2038 2039 /** 2040 * ice_xmit_desc_count - calculate number of Tx descriptors needed 2041 * @skb: send buffer 2042 * 2043 * Returns number of data descriptors needed for this skb. 2044 */ 2045 static unsigned int ice_xmit_desc_count(struct sk_buff *skb) 2046 { 2047 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; 2048 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2049 unsigned int count = 0, size = skb_headlen(skb); 2050 2051 for (;;) { 2052 count += ice_txd_use_count(size); 2053 2054 if (!nr_frags--) 2055 break; 2056 2057 size = skb_frag_size(frag++); 2058 } 2059 2060 return count; 2061 } 2062 2063 /** 2064 * __ice_chk_linearize - Check if there are more than 8 buffers per packet 2065 * @skb: send buffer 2066 * 2067 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire 2068 * and so we need to figure out the cases where we need to linearize the skb. 2069 * 2070 * For TSO we need to count the TSO header and segment payload separately. 2071 * As such we need to check cases where we have 7 fragments or more as we 2072 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 2073 * the segment payload in the first descriptor, and another 7 for the 2074 * fragments. 2075 */ 2076 static bool __ice_chk_linearize(struct sk_buff *skb) 2077 { 2078 const skb_frag_t *frag, *stale; 2079 int nr_frags, sum; 2080 2081 /* no need to check if number of frags is less than 7 */ 2082 nr_frags = skb_shinfo(skb)->nr_frags; 2083 if (nr_frags < (ICE_MAX_BUF_TXD - 1)) 2084 return false; 2085 2086 /* We need to walk through the list and validate that each group 2087 * of 6 fragments totals at least gso_size. 2088 */ 2089 nr_frags -= ICE_MAX_BUF_TXD - 2; 2090 frag = &skb_shinfo(skb)->frags[0]; 2091 2092 /* Initialize size to the negative value of gso_size minus 1. We 2093 * use this as the worst case scenario in which the frag ahead 2094 * of us only provides one byte which is why we are limited to 6 2095 * descriptors for a single transmit as the header and previous 2096 * fragment are already consuming 2 descriptors. 2097 */ 2098 sum = 1 - skb_shinfo(skb)->gso_size; 2099 2100 /* Add size of frags 0 through 4 to create our initial sum */ 2101 sum += skb_frag_size(frag++); 2102 sum += skb_frag_size(frag++); 2103 sum += skb_frag_size(frag++); 2104 sum += skb_frag_size(frag++); 2105 sum += skb_frag_size(frag++); 2106 2107 /* Walk through fragments adding latest fragment, testing it, and 2108 * then removing stale fragments from the sum. 2109 */ 2110 stale = &skb_shinfo(skb)->frags[0]; 2111 for (;;) { 2112 sum += skb_frag_size(frag++); 2113 2114 /* if sum is negative we failed to make sufficient progress */ 2115 if (sum < 0) 2116 return true; 2117 2118 if (!nr_frags--) 2119 break; 2120 2121 sum -= skb_frag_size(stale++); 2122 } 2123 2124 return false; 2125 } 2126 2127 /** 2128 * ice_chk_linearize - Check if there are more than 8 fragments per packet 2129 * @skb: send buffer 2130 * @count: number of buffers used 2131 * 2132 * Note: Our HW can't scatter-gather more than 8 fragments to build 2133 * a packet on the wire and so we need to figure out the cases where we 2134 * need to linearize the skb. 2135 */ 2136 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count) 2137 { 2138 /* Both TSO and single send will work if count is less than 8 */ 2139 if (likely(count < ICE_MAX_BUF_TXD)) 2140 return false; 2141 2142 if (skb_is_gso(skb)) 2143 return __ice_chk_linearize(skb); 2144 2145 /* we can support up to 8 data buffers for a single send */ 2146 return count != ICE_MAX_BUF_TXD; 2147 } 2148 2149 /** 2150 * ice_xmit_frame_ring - Sends buffer on Tx ring 2151 * @skb: send buffer 2152 * @tx_ring: ring to send buffer on 2153 * 2154 * Returns NETDEV_TX_OK if sent, else an error code 2155 */ 2156 static netdev_tx_t 2157 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) 2158 { 2159 struct ice_tx_offload_params offload = { 0 }; 2160 struct ice_vsi *vsi = tx_ring->vsi; 2161 struct ice_tx_buf *first; 2162 unsigned int count; 2163 int tso, csum; 2164 2165 count = ice_xmit_desc_count(skb); 2166 if (ice_chk_linearize(skb, count)) { 2167 if (__skb_linearize(skb)) 2168 goto out_drop; 2169 count = ice_txd_use_count(skb->len); 2170 tx_ring->tx_stats.tx_linearize++; 2171 } 2172 2173 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD, 2174 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD, 2175 * + 4 desc gap to avoid the cache line where head is, 2176 * + 1 desc for context descriptor, 2177 * otherwise try next time 2178 */ 2179 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + 2180 ICE_DESCS_FOR_CTX_DESC)) { 2181 tx_ring->tx_stats.tx_busy++; 2182 return NETDEV_TX_BUSY; 2183 } 2184 2185 offload.tx_ring = tx_ring; 2186 2187 /* record the location of the first descriptor for this packet */ 2188 first = &tx_ring->tx_buf[tx_ring->next_to_use]; 2189 first->skb = skb; 2190 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); 2191 first->gso_segs = 1; 2192 first->tx_flags = 0; 2193 2194 /* prepare the VLAN tagging flags for Tx */ 2195 if (ice_tx_prepare_vlan_flags(tx_ring, first)) 2196 goto out_drop; 2197 2198 /* set up TSO offload */ 2199 tso = ice_tso(first, &offload); 2200 if (tso < 0) 2201 goto out_drop; 2202 2203 /* always set up Tx checksum offload */ 2204 csum = ice_tx_csum(first, &offload); 2205 if (csum < 0) 2206 goto out_drop; 2207 2208 /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ 2209 if (unlikely(skb->priority == TC_PRIO_CONTROL && 2210 vsi->type == ICE_VSI_PF && 2211 vsi->port_info->is_sw_lldp)) 2212 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2213 ICE_TX_CTX_DESC_SWTCH_UPLINK << 2214 ICE_TXD_CTX_QW1_CMD_S); 2215 2216 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { 2217 struct ice_tx_ctx_desc *cdesc; 2218 int i = tx_ring->next_to_use; 2219 2220 /* grab the next descriptor */ 2221 cdesc = ICE_TX_CTX_DESC(tx_ring, i); 2222 i++; 2223 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 2224 2225 /* setup context descriptor */ 2226 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); 2227 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); 2228 cdesc->rsvd = cpu_to_le16(0); 2229 cdesc->qw1 = cpu_to_le64(offload.cd_qw1); 2230 } 2231 2232 ice_tx_map(tx_ring, first, &offload); 2233 return NETDEV_TX_OK; 2234 2235 out_drop: 2236 dev_kfree_skb_any(skb); 2237 return NETDEV_TX_OK; 2238 } 2239 2240 /** 2241 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer 2242 * @skb: send buffer 2243 * @netdev: network interface device structure 2244 * 2245 * Returns NETDEV_TX_OK if sent, else an error code 2246 */ 2247 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) 2248 { 2249 struct ice_netdev_priv *np = netdev_priv(netdev); 2250 struct ice_vsi *vsi = np->vsi; 2251 struct ice_ring *tx_ring; 2252 2253 tx_ring = vsi->tx_rings[skb->queue_mapping]; 2254 2255 /* hardware can't handle really short frames, hardware padding works 2256 * beyond this point 2257 */ 2258 if (skb_put_padto(skb, ICE_MIN_TX_LEN)) 2259 return NETDEV_TX_OK; 2260 2261 return ice_xmit_frame_ring(skb, tx_ring); 2262 } 2263