// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"

#define ICE_RX_HDR_SIZE		256

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;
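	/* i is tracked as a negative offset from the end of the ring so that
	 * the wrap check inside the loop below is a cheap test against zero
	 */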

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		if (ice_ring_is_xdp(tx_ring))
			page_frag_free(tx_buf->raw_buf);
		else
			/* free the skb */
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);

	if (ice_ring_is_xdp(tx_ring))
		return !!budget;
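
	/* report the completed bytes and packets to Byte Queue Limits */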
	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->xsk_umem) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index))
			goto err;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		return XDP_PACKET_HEADROOM;

	return 0;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog)
{
	int err, result = ICE_XDP_PASS;
	struct ice_ring *xdp_ring;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
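		/* XDP Tx rings are mapped one per CPU, so transmit on the
		 * ring that belongs to the current processor
		 */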
		xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fallthrough -- not supported action */
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fallthrough -- handle aborts by dropping frame */
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	}

	return result;
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Frames that fail are
 * freed via the XDP return API.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *xdp_ring;
	int drops = 0, i;

	if (test_bit(__ICE_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdp_ring = vsi->xdp_rings[queue_index];
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	return n - drops;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = ice_rx_offset(rx_ring);
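	/* take a large up-front page reference; pagecnt_bias tracks how much
	 * of that reference the driver still owns so buffer reuse does not
	 * need an atomic refcount update for every received frame
	 */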
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 */
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by @size bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192) */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @skb: skb to be used
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
	       const unsigned int size)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	prefetchw(rx_buf->page);
	*skb = rx_buf->skb;

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly as xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	prefetch(xdp->data_meta);
#if L1_CACHE_BYTES < 128
	prefetch((void *)(xdp->data + L1_CACHE_BYTES));
#endif
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* must record Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
	prefetch((void *)(xdp->data + L1_CACHE_BYTES));
#endif /* L1_CACHE_BYTES */

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function will update next_to_clean and then clean up the contents
 * of the rx_buf. It will either recycle the buffer or unmap it and free
 * the associated resources.
 */
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
}

/**
 * ice_cleanup_headers - Correct empty headers
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool ice_cleanup_headers(struct sk_buff *skb)
{
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
 */
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	       struct sk_buff *skb)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	struct bpf_prog *xdp_prog = NULL;
	struct xdp_buff xdp;
	bool failure;

	xdp.rxq = &rx_ring->xdp_rxq;

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* retrieve a buffer from the ring */
		rx_buf = ice_get_rx_buf(rx_ring, &skb, size);

		if (!size) {
			xdp.data = NULL;
			xdp.data_end = NULL;
			xdp.data_hard_start = NULL;
			xdp.data_meta = NULL;
			goto construct_skb;
		}

		xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
		xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
		xdp.data_meta = xdp.data;
		xdp.data_end = xdp.data + size;
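
		/* the XDP program pointer is RCU protected so that it can be
		 * swapped at runtime while the ring remains active
		 */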
		rcu_read_lock();
		xdp_prog = READ_ONCE(rx_ring->xdp_prog);
		if (!xdp_prog) {
			rcu_read_unlock();
			goto construct_skb;
		}

		xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
		rcu_read_unlock();
		if (!xdp_res)
			goto construct_skb;
		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
			unsigned int truesize;

#if (PAGE_SIZE < 8192)
			truesize = ice_rx_pg_size(rx_ring) / 2;
#else
			truesize = SKB_DATA_ALIGN(ice_rx_offset(rx_ring) +
						  size);
#endif
			xdp_xmit |= xdp_res;
			ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
		} else {
			rx_buf->pagecnt_bias++;
		}
		total_rx_bytes += size;
		total_rx_pkts++;

		cleaned_count++;
		ice_put_rx_buf(rx_ring, rx_buf);
		continue;
construct_skb:
		if (skb)
			ice_add_rx_frag(rx_ring, rx_buf, skb, size);
		else if (ice_ring_uses_build_skb(rx_ring))
			skb = ice_build_skb(rx_ring, rx_buf, &xdp);
		else
			skb = ice_construct_skb(rx_ring, rx_buf, &xdp);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			if (rx_buf)
				rx_buf->pagecnt_bias++;
			break;
		}

		ice_put_rx_buf(rx_ring, rx_buf);
		cleaned_count++;

		/* skip if it is NOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		/* correct empty headers and pad skb if needed (to make valid
		 * ethernet frame)
		 */
		if (ice_cleanup_headers(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);

		/* update budget accounting */
		total_rx_pkts++;
	}

	/* return up to cleaned_count buffers to hardware */
	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);

	if (xdp_prog)
		ice_finalize_xdp_rx(rx_ring, xdp_xmit);

	ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}

/**
 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
 * @port_info: port_info structure containing the current link speed
 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
 * @itr: ITR value to update
 *
 * Calculate how big of an increment should be applied to the ITR value passed
 * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
 * link speed.
 *
 * The following is a calculation derived from:
 *   wmem_default / (size + overhead) = desired_pkts_per_int
 *   rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
 *   (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
 *
 * Assuming wmem_default is 212992 and overhead is 640 bytes per
 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
 * formula down to:
 *
 *	 wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
 * ITR = -------------------------------------------- * --------------
 *			     rate			 pkt_size + 640
 */
static unsigned int
ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
				 unsigned int avg_pkt_size,
				 unsigned int itr)
{
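	/* the numerators below are the 10GB multiplier (170) scaled down in
	 * inverse proportion to the link speed
	 */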
	switch (port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		/* fall through */
	default:
		itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	}

	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
		itr &= ICE_ITR_ADAPTIVE_LATENCY;
		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
	}

	return itr;
}

/**
 * ice_update_itr - update the adaptive ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 */
static void
ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
{
	unsigned long next_update = jiffies;
	unsigned int packets, bytes, itr;
	bool container_is_rx;

	if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
		return;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	container_is_rx = (&q_vector->rx == rc);
	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = container_is_rx ?
		ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
		ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	prefetch(q_vector->vsi->port_info);

	packets = rc->total_pkts;
	bytes = rc->total_bytes;

	if (container_is_rx) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
			itr = ICE_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size_and_speed;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & ICE_ITR_MASK) ==
		    ICE_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
		if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
			itr &= ICE_ITR_ADAPTIVE_LATENCY;
			itr += ICE_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= ICE_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr >>= 1;
		itr &= ICE_ITR_MASK;
		if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
			itr = ICE_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = ICE_ITR_ADAPTIVE_BULK;

adjust_by_size_and_speed:

	/* based on checks above packets cannot be 0 so division is safe */
	itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
					       bytes / packets, itr);

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_pkts = 0;
}

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The ITR value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}

/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
#define ITR_COUNTDOWN_START 3

/**
 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
 */
static void ice_update_ena_itr(struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;
	struct ice_vsi *vsi = q_vector->vsi;
	u32 itr_val;

	/* when exiting WB_ON_ITR, let's set a low ITR value and trigger
	 * interrupts to expire right away in case we have more work ready to go
	 * already
	 */
	if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
		itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
		/* set target back to last user set value */
		rx->target_itr = rx->itr_setting;
		/* set current to what we just wrote and dynamic if needed */
		rx->current_itr = ICE_WB_ON_ITR_USECS |
			(rx->itr_setting & ICE_ITR_DYNAMIC);
		/* allow normal interrupt flow to start */
		q_vector->itr_countdown = 0;
		return;
	}

	/* This will do nothing if dynamic updates are not enabled */
	ice_update_itr(q_vector, tx);
	ice_update_itr(q_vector, rx);

	/* This block of logic allows us to get away with only updating
	 * one ITR value with each interrupt. The idea is to perform a
	 * pseudo-lazy update with the following criteria.
	 *
	 * 1. Rx is given higher priority than Tx if both are in same state
	 * 2. If we must reduce an ITR that is given highest priority.
	 * 3. We then give priority to increasing ITR based on amount.
	 */
	if (rx->target_itr < rx->current_itr) {
		/* Rx ITR needs to be reduced, this is highest priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((tx->target_itr < tx->current_itr) ||
		   ((rx->target_itr - rx->current_itr) <
		    (tx->target_itr - tx->current_itr))) {
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
		tx->current_itr = tx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (rx->current_itr != rx->target_itr) {
		/* Rx ITR needs to be increased, third priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* Still have to re-enable the interrupts */
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
		wr32(&q_vector->vsi->back->hw,
		     GLINT_DYN_CTL(q_vector->reg_idx),
		     itr_val);
}

/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write-back completed descriptors even when
 * interrupts are disabled. Descriptors will be written back on cache line
 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
 * descriptors may not be written back if they don't fill a cache line until the
 * next interrupt.
 *
 * This sets the write-back frequency to 2 microseconds as that is the minimum
 * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
 * make sure hardware knows we aren't meddling with the INTENA_M bit.
 */
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;

	/* already in WB_ON_ITR mode no need to change it */
	if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
		return;

	if (q_vector->num_ring_rx)
		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
		     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
						 ICE_RX_ITR));

	if (q_vector->num_ring_tx)
		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
		     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
						 ICE_TX_ITR));

	q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
		container_of(napi, struct ice_q_vector, napi);
	bool clean_complete = true;
	struct ice_ring *ring;
	int budget_per_ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_ring(ring, q_vector->tx) {
		bool wd = ring->xsk_umem ?
			  ice_clean_tx_irq_zc(ring, budget) :
			  ice_clean_tx_irq(ring, budget);

		if (!wd)
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ring_rx > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
		budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;

	ice_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		/* A dedicated path for zero-copy allows making a single
		 * comparison in the irq context instead of many inside the
		 * ice_clean_rx_irq function and makes the codebase cleaner.
		 */
		cleaned = ring->xsk_umem ?
			  ice_clean_rx_irq_zc(ring, budget_per_ring) :
			  ice_clean_rx_irq(ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		ice_update_ena_itr(q_vector);
	else
		ice_set_wb_on_itr(q_vector);
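
	/* napi_complete_done() was already called above, so never report the
	 * full budget back to the NAPI core
	 */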
	return min_t(int, work_done, budget - 1);
}

/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_subqueue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
	++tx_ring->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}

/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	skb_frag_t *frag;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	dma_addr_t dma;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset, max_data, td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
	tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size,
						  td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return;

dma_error:
	/* clear DMA mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
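	/* MACLEN is carried in the descriptor in 2-byte words; IPLEN and
	 * L4LEN below are carried in 4-byte words
	 */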
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (protocol == htons(ETH_P_IPV6)) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		l4_len = sizeof(struct sctphdr) >> 2;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;

	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}

/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and sets up the corresponding generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped on error,
 * otherwise returns 0 to indicate the flags have been set properly.
 */
static int
ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;
	__be16 protocol = skb->protocol;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* when HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling. In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
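		 * The tag itself was already inserted into the frame data by
		 * the stack before the skb reached the driver.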
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		/* for SW VLAN, check the next protocol and store the tag */
		vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
							     sizeof(_vhdr),
							     &_vhdr);
		if (!vhdr)
			return -EINVAL;

		first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
				   ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
	}

	return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}

/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	u32 paylen, l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* cppcheck-suppress unreadVariable */
	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	/* determine offset of transport header */
	l4_start = l4.hdr - skb->data;

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;
	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));

	/* compute length of segmentation header */
	off->header_len = (l4.tcp->doff * 4) + l4_start;

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}

/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
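 *
 * For example, a 32K transmit request spans three 12K chunks, and the
 * arithmetic below works out to (32768 * 85) >> 20 = 2, plus one to
 * round up, for the expected total of 3 descriptors.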
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}

/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
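	 *
	 * For example, with a gso_size of 9000 and seven 1500 byte fragments,
	 * every window of six fragments covers exactly 9000 bytes and the
	 * sum never goes negative; shrink any one fragment and some window
	 * comes up short, so the packet has to be linearized.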
	 */
	stale = &skb_shinfo(skb)->frags[0];
	for (;;) {
		sum += skb_frag_size(frag++);

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= skb_frag_size(stale++);
	}

	return false;
}

/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}

/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_vsi *vsi = tx_ring->vsi;
	struct ice_tx_buf *first;
	unsigned int count;
	int tso, csum;

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	if (ice_tx_prepare_vlan_flags(tx_ring, first))
		goto out_drop;

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
	if (unlikely(skb->priority == TC_PRIO_CONTROL &&
		     vsi->type == ICE_VSI_PF &&
		     vsi->port_info->is_sw_lldp))
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
					ICE_TXD_CTX_QW1_CMD_S);

	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
		struct ice_tx_ctx_desc *cdesc;
		int i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}