// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
#include "ice_eswitch.h"

#define ICE_RX_HDR_SIZE		256

#define ICE_FDIR_CLEAN_DELAY	10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->type = ICE_TX_BUF_DUMMY;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}
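
/* Usage sketch (illustrative only, not a verbatim caller): the Flow
 * Director setup path builds a struct ice_fltr_desc plus a dummy
 * training packet and hands both to ice_prgm_fdir_fltr(), roughly:
 *
 *	struct ice_fltr_desc desc = {};
 *	u8 *pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
 *
 *	// fill desc and pkt from the filter rule, then:
 *	err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
 *
 * Note the raw packet buffer must be devm-allocated:
 * ice_unmap_and_free_tx_buf() below releases ICE_TX_BUF_DUMMY buffers
 * with devm_kfree().
 */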

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (dma_unmap_len(tx_buf, len))
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);

	switch (tx_buf->type) {
	case ICE_TX_BUF_DUMMY:
		devm_kfree(ring->dev, tx_buf->raw_buf);
		break;
	case ICE_TX_BUF_SKB:
		dev_kfree_skb_any(tx_buf->skb);
		break;
	case ICE_TX_BUF_XDP_TX:
		page_frag_free(tx_buf->raw_buf);
		break;
	case ICE_TX_BUF_XDP_XMIT:
		xdp_return_frame(tx_buf->xdpf);
		break;
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->type = ICE_TX_BUF_EMPTY;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free all Tx buffers on a ring
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;

	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(tx_ring->dev, size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	/* get the bql data ready */
	netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;
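
	/* Descriptive note: i is intentionally biased negative here (it now
	 * sits in [-count, -1]) so the wrap checks below reduce to a cheap
	 * "if (unlikely(!i))" after each increment instead of a compare
	 * against tx_ring->count. E.g. with count == 512 and
	 * next_to_clean == 511, i starts at -1; one increment later i == 0
	 * and the descriptor/buffer pointers rewind to the ring base. The
	 * final "i += tx_ring->count" restores the real ring index.
	 */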

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* follow the guidelines of other drivers */
		prefetchw(&tx_buf->skb->users);

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->type = ICE_TX_BUF_EMPTY;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}
		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);

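	/* Hedged note: DESC_NEEDED (defined in ice_txrx.h) is the worst-case
	 * descriptor count for a single skb, so waking only once twice that
	 * many descriptors are free keeps the queue from flapping between
	 * stopped and started on every completion. The smp_mb() below pairs
	 * with the barrier in __ice_maybe_stop_tx() so that a concurrent
	 * sender either sees the queue awake or sees the new next_to_clean.
	 */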
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
			netif_tx_wake_queue(txring_txq(tx_ring));
			++tx_ring->ring_stats->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->ring_stats->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}
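
/* Note: both the tx_buf array and the descriptor memory are device-managed
 * (devm_kcalloc()/dmam_alloc_coherent()), so they would be released
 * automatically on driver detach; ice_free_tx_ring() simply returns them
 * earlier when a ring is torn down at runtime.
 */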

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct xdp_buff *xdp = &rx_ring->xdp;
	struct device *dev = rx_ring->dev;
	u32 size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	if (xdp->data) {
		xdp_return_buff(xdp);
		xdp->data = NULL;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	if (rx_ring->xsk_pool)
		memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
	else
		memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));

	/* Zero out the descriptor ring */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	memset(rx_ring->desc, 0, size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->first_desc = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
	u32 size;

	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	WRITE_ONCE(rx_ring->xdp_prog, NULL);
	if (rx_ring->xsk_pool) {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
	} else {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
	}

	if (rx_ring->desc) {
		size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(rx_ring->dev, size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->first_desc = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	return 0;

err:
	kfree(rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 * @eop_desc: Last descriptor in packet to read metadata from
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static u32
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
	    union ice_32b_rx_flex_desc *eop_desc)
{
	unsigned int ret = ICE_XDP_PASS;
	u32 act;

	if (!xdp_prog)
		goto exit;

	ice_xdp_meta_set_desc(xdp, eop_desc);

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
		if (ret == ICE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
			goto out_failure;
		ret = ICE_XDP_REDIR;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		ret = ICE_XDP_CONSUMED;
	}
exit:
	return ret;
}
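
/* Note on the static branch above: ice_xdp_locking_key is enabled when XDP
 * Tx rings must be shared between CPUs (fewer XDP rings than cores), in
 * which case XDP_TX and .ndo_xdp_xmit serialize on the ring's tx_lock;
 * with one XDP ring per CPU the locking is patched out of the hot path.
 */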

/**
 * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
 * @xdpf: XDP frame that will be converted to XDP buff
 * @xdp_ring: XDP ring for transmission
 */
static int ice_xmit_xdp_ring(const struct xdp_frame *xdpf,
			     struct ice_tx_ring *xdp_ring)
{
	struct xdp_buff xdp;

	xdp.data_hard_start = (void *)xdpf;
	xdp.data = xdpf->data;
	xdp.data_end = xdp.data + xdpf->len;
	xdp.frame_sz = xdpf->frame_sz;
	xdp.flags = xdpf->flags;

	return __ice_xmit_xdp_ring(&xdp, xdp_ring, true);
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Failed frames
 * will be freed by the XDP core.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *xdp_ring;
	struct ice_tx_buf *tx_buf;
	int nxmit = 0, i;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (static_branch_unlikely(&ice_xdp_locking_key)) {
		queue_index %= vsi->num_xdp_txq;
		xdp_ring = vsi->xdp_rings[queue_index];
		spin_lock(&xdp_ring->tx_lock);
	} else {
		/* Generally, should not happen */
		if (unlikely(queue_index >= vsi->num_xdp_txq))
			return -ENXIO;
		xdp_ring = vsi->xdp_rings[queue_index];
	}

	tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
	for (i = 0; i < n; i++) {
		const struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf, xdp_ring);
		if (err != ICE_XDP_TX)
			break;
		nxmit++;
	}

	tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	if (static_branch_unlikely(&ice_xdp_locking_key))
		spin_unlock(&xdp_ring->tx_lock);

	return nxmit;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->ring_stats->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->ring_stats->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = rx_ring->rx_offset;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}
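
/* Descriptive note: rather than taking one page reference per received
 * frame, the driver bulk-charges the page refcount up front
 * (page_ref_add(page, USHRT_MAX - 1)) and tracks its own share in
 * pagecnt_bias. Handing a buffer to the stack just decrements the bias;
 * recycling compares page_count() against the bias to decide whether
 * anyone else still holds the page (see ice_can_reuse_rx_page()). This
 * turns a hot atomic refcount operation into a plain local decrement.
 */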

/**
 * ice_init_ctrl_rx_descs - Initialize Rx descriptors for the control VSI
 * @rx_ring: ring to init descriptors on
 * @count: number of descriptors to initialize
 */
void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u32 ntu = rx_ring->next_to_use;

	if (!count)
		return;

	rx_desc = ICE_RX_DESC(rx_ring, ntu);

	do {
		rx_desc++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			ntu = 0;
		}

		rx_desc->wb.status_error0 = 0;
		count--;
	} while (count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}
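
/* Worked example: on a 4K-page system with a 2048-byte buffer size, the
 * XOR above toggles page_offset between 0 and 2048, so the two halves of
 * the page alternate between "posted to hardware" and "owned by the
 * stack". On larger pages the offset simply advances by the truesize
 * until ice_can_reuse_rx_page() reports the page as exhausted.
 */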

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

	/* if we are only owner of page we can reuse it */
	if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
		return false;
#if (PAGE_SIZE >= 8192)
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE >= 8192 */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
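
/* Note: the "only owner" test uses rx_buf->pgcnt, a snapshot taken in
 * ice_get_pgcnts() immediately before the XDP program runs, rather than
 * a fresh page_count() read. An XDP program may legitimately drop or
 * hold page references; comparing against the pre-run snapshot keeps the
 * recycling decision consistent with the bias bookkeeping done in
 * ice_put_rx_mbuf().
 */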

/**
 * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @xdp: xdp buff to place the data into
 * @rx_buf: buffer containing page to add
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the xdp buf.
 * It will just attach the page as a frag.
 */
static int
ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
		 struct ice_rx_buf *rx_buf, const unsigned int size)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

	if (!size)
		return 0;

	if (!xdp_buff_has_frags(xdp)) {
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(xdp);
	}

	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS))
		return -ENOMEM;

	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
				   rx_buf->page_offset, size);
	sinfo->xdp_frags_size += size;
	/* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
	 * can pop off frags but driver has to handle it on its own
	 */
	rx_ring->nr_frags = sinfo->nr_frags;

	if (page_is_pfmemalloc(rx_buf->page))
		xdp_buff_set_frag_pfmemalloc(xdp);

	return 0;
}
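
/* Note: the -ENOMEM above is not fatal to the ring; ice_clean_rx_irq()
 * responds by dropping the over-long multi-buffer frame (it calls
 * ice_put_rx_mbuf() with ICE_XDP_CONSUMED and breaks out of the poll
 * loop), so the buffers are recycled rather than leaked.
 */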

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @ntc: index of next to clean element
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
	       const unsigned int ntc)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[ntc];
	prefetchw(rx_buf->page);

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_get_pgcnts - grab page_count() for gathered fragments
 * @rx_ring: Rx descriptor ring to store the page counts on
 *
 * This function is intended to be called right before running XDP
 * program so that the page recycling mechanism will be able to take
 * a correct decision regarding underlying pages; this is done in such
 * way as XDP program can change the refcount of page
 */
static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
{
	u32 nr_frags = rx_ring->nr_frags + 1;
	u32 idx = rx_ring->first_desc;
	struct ice_rx_buf *rx_buf;
	u32 cnt = rx_ring->count;

	for (int i = 0; i < nr_frags; i++) {
		rx_buf = &rx_ring->rx_buf[idx];
		rx_buf->pgcnt = page_count(rx_buf->page);

		if (++idx == cnt)
			idx = 0;
	}
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing XDP buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead. Driver has
 * already combined frags (if any) to skb_shared_info.
 */
static struct sk_buff *
ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
	struct skb_shared_info *sinfo = NULL;
	unsigned int nr_frags;
	struct sk_buff *skb;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly as xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
	if (unlikely(!skb))
		return NULL;

	/* must record Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	if (unlikely(xdp_buff_has_frags(xdp)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdp->frame_sz,
					   xdp_buff_is_frag_pfmemalloc(xdp));

	return skb;
}
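
/* Note: this zero-copy path and the copying path below are selected per
 * ring in ice_clean_rx_irq() via ice_ring_uses_build_skb():
 * ice_build_skb() wraps an skb around the existing buffer (no memcpy),
 * while ice_construct_skb() allocates a fresh skb and copies up to
 * ICE_RX_HDR_SIZE bytes of headers into its linear area, attaching the
 * remainder as page frags.
 */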

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	struct skb_shared_info *sinfo = NULL;
	struct ice_rx_buf *rx_buf;
	unsigned int nr_frags = 0;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}

	/* allocate an skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE);
	if (unlikely(!skb))
		return NULL;

	rx_buf = &rx_ring->rx_buf[rx_ring->first_desc];
	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
		/* besides adding here a partial frag, we are going to add
		 * frags from xdp_buff, make sure there is enough space for
		 * them
		 */
		if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) {
			dev_kfree_skb(skb);
			return NULL;
		}
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size,
				xdp->frame_sz);
	} else {
		/* buffer is unused, restore biased page count in Rx buffer;
		 * data was copied onto skb's linear part so there's no
		 * need for adjusting page offset and we can reuse this buffer
		 * as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	if (unlikely(xdp_buff_has_frags(xdp))) {
		struct skb_shared_info *skinfo = skb_shinfo(skb);

		memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
		       sizeof(skb_frag_t) * nr_frags);

		xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdp->frame_sz,
					   xdp_buff_is_frag_pfmemalloc(xdp));
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buf. It will either
 * recycle the buffer or unmap it and free the associated resources.
 */
static void
ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
}

/**
 * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
 * @rx_ring: Rx ring with all the auxiliary data
 * @xdp: XDP buffer carrying linear + frags part
 * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
 * @ntc: the current next_to_clean value to be stored at rx_ring
 * @verdict: return code from XDP program execution
 *
 * Walk through the gathered fragments and satisfy the internal page
 * recycling mechanism, taking the action appropriate to the verdict
 * returned by the XDP program.
 */
static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
			    u32 *xdp_xmit, u32 ntc, u32 verdict)
{
	u32 nr_frags = rx_ring->nr_frags + 1;
	u32 idx = rx_ring->first_desc;
	u32 cnt = rx_ring->count;
	u32 post_xdp_frags = 1;
	struct ice_rx_buf *buf;
	int i;

	if (unlikely(xdp_buff_has_frags(xdp)))
		post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;

	for (i = 0; i < post_xdp_frags; i++) {
		buf = &rx_ring->rx_buf[idx];

		if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
			*xdp_xmit |= verdict;
		} else if (verdict & ICE_XDP_CONSUMED) {
			buf->pagecnt_bias++;
		} else if (verdict == ICE_XDP_PASS) {
			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
		}

		ice_put_rx_buf(rx_ring, buf);

		if (++idx == cnt)
			idx = 0;
	}
	/* handle buffers that represented frags released by XDP prog;
	 * for these we keep pagecnt_bias as-is; refcount from struct page
	 * has been decremented within XDP prog and we do not have to increase
	 * the biased refcnt
	 */
	for (; i < nr_frags; i++) {
		buf = &rx_ring->rx_buf[idx];
		ice_put_rx_buf(rx_ring, buf);
		if (++idx == cnt)
			idx = 0;
	}

	xdp->data = NULL;
	rx_ring->first_desc = ntc;
	rx_ring->nr_frags = 0;
}

/**
 * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring
 * @rx_ring: Rx descriptor ring for ctrl_vsi to transact packets on
 *
 * This function cleans Rx descriptors from the ctrl_vsi Rx ring used
 * to set flow director rules on VFs.
 */
void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean;
	unsigned int total_rx_pkts = 0;
	u32 cnt = rx_ring->count;

	while (likely(total_rx_pkts < ICE_DFLT_IRQ_WORK)) {
		struct ice_vsi *ctrl_vsi = rx_ring->vsi;
		union ice_32b_rx_flex_desc *rx_desc;
		u16 stat_err_bits;

		rx_desc = ICE_RX_DESC(rx_ring, ntc);

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
			break;

		dma_rmb();

		if (ctrl_vsi->vf)
			ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);

		if (++ntc == cnt)
			ntc = 0;
		total_rx_pkts++;
	}

	rx_ring->first_desc = ntc;
	rx_ring->next_to_clean = ntc;
	ice_init_ctrl_rx_descs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	unsigned int offset = rx_ring->rx_offset;
	struct xdp_buff *xdp = &rx_ring->xdp;
	struct ice_tx_ring *xdp_ring = NULL;
	struct bpf_prog *xdp_prog = NULL;
	u32 ntc = rx_ring->next_to_clean;
	u32 cached_ntu, xdp_verdict;
	u32 cnt = rx_ring->count;
	u32 xdp_xmit = 0;
	bool failure;

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	if (xdp_prog) {
		xdp_ring = rx_ring->xdp_ring;
		cached_ntu = xdp_ring->next_to_use;
	}

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		u16 vlan_tci;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, ntc);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		ice_trace(clean_rx_irq, rx_ring, rx_desc);

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* retrieve a buffer from the ring */
		rx_buf = ice_get_rx_buf(rx_ring, size, ntc);

		if (!xdp->data) {
			void *hard_start;

			hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
				     offset;
			xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
			xdp_buff_clear_frags_flag(xdp);
		} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
			ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
			break;
		}
		if (++ntc == cnt)
			ntc = 0;

		/* skip if it is NOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc))
			continue;

		ice_get_pgcnts(rx_ring);
		xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
		if (xdp_verdict == ICE_XDP_PASS)
			goto construct_skb;
		total_rx_bytes += xdp_get_buff_len(xdp);
		total_rx_pkts++;

		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);

		continue;
construct_skb:
		if (likely(ice_ring_uses_build_skb(rx_ring)))
			skb = ice_build_skb(rx_ring, xdp);
		else
			skb = ice_construct_skb(rx_ring, xdp);
		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->ring_stats->rx_stats.alloc_page_failed++;
			xdp_verdict = ICE_XDP_CONSUMED;
		}
		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);

		if (!skb)
			break;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
					      stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		vlan_tci = ice_get_vlan_tci(rx_desc);

		/* pad the skb if needed, to make a valid ethernet frame */
		if (eth_skb_pad(skb))
			continue;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		ice_process_skb_fields(rx_ring, rx_desc, skb);

		ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tci);

		/* update budget accounting */
		total_rx_pkts++;
	}

	rx_ring->next_to_clean = ntc;
	/* return up to cleaned_count buffers to hardware */
	failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));

	if (xdp_xmit)
		ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu);

	if (rx_ring->ring_stats)
		ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
					 total_rx_bytes);

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}

static void __ice_update_sample(struct ice_q_vector *q_vector,
				struct ice_ring_container *rc,
				struct dim_sample *sample,
				bool is_tx)
{
	u64 packets = 0, bytes = 0;

	if (is_tx) {
		struct ice_tx_ring *tx_ring;

		ice_for_each_tx_ring(tx_ring, *rc) {
			struct ice_ring_stats *ring_stats;

			ring_stats = tx_ring->ring_stats;
			if (!ring_stats)
				continue;
			packets += ring_stats->stats.pkts;
			bytes += ring_stats->stats.bytes;
		}
	} else {
		struct ice_rx_ring *rx_ring;

		ice_for_each_rx_ring(rx_ring, *rc) {
			struct ice_ring_stats *ring_stats;

			ring_stats = rx_ring->ring_stats;
			if (!ring_stats)
				continue;
			packets += ring_stats->stats.pkts;
			bytes += ring_stats->stats.bytes;
		}
	}

	dim_update_sample(q_vector->total_events, packets, bytes, sample);
	sample->comp_ctr = 0;

	/* if dim settings get stale, like when not updated for 1
	 * second or longer, force it to start again. This addresses the
	 * frequent case of an idle queue being switched to by the
	 * scheduler. The 1,000 here means 1,000 milliseconds.
	 */
	if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
		rc->dim.state = DIM_START_MEASURE;
}

/**
 * ice_net_dim - Update net DIM algorithm
 * @q_vector: the vector associated with the interrupt
 *
 * Create a DIM sample and notify net_dim() so that it can possibly decide
 * a new ITR value based on incoming packets, bytes, and interrupts.
 *
 * This function is a no-op if the ring is not configured to dynamic ITR.
 */
static void ice_net_dim(struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;

	if (ITR_IS_DYNAMIC(tx)) {
		struct dim_sample dim_sample;

		__ice_update_sample(q_vector, tx, &dim_sample, true);
		net_dim(&tx->dim, &dim_sample);
	}

	if (ITR_IS_DYNAMIC(rx)) {
		struct dim_sample dim_sample;

		__ice_update_sample(q_vector, rx, &dim_sample, false);
		net_dim(&rx->dim, &dim_sample);
	}
}

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The ITR value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}
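
/* Worked example (field positions per the ice register headers): with a
 * 2 us ITR granularity, ICE_ITR_GRAN_S is 1, so for itr = 50 us the
 * expression itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S) equals
 * (50 / 2) << GLINT_DYN_CTL_INTERVAL_S, i.e. 25 two-microsecond ticks in
 * the INTERVAL field, with INTENA and CLEARPBA set alongside in the same
 * write.
 */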

/**
 * ice_enable_interrupt - re-enable MSI-X interrupt
 * @q_vector: the vector associated with the interrupt to enable
 *
 * If the VSI is down, the interrupt will not be re-enabled. Also,
 * when enabling the interrupt always reset the wb_on_itr to false
 * and trigger a software interrupt to clean out internal state.
 */
static void ice_enable_interrupt(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;
	bool wb_en = q_vector->wb_on_itr;
	u32 itr_val;

	if (test_bit(ICE_DOWN, vsi->state))
		return;

	/* trigger an ITR delayed software interrupt when exiting busy poll, to
	 * make sure to catch any pending cleanups that might have been missed
	 * due to interrupt state transition. If busy poll or poll isn't
	 * enabled, then don't update ITR, and just enable the interrupt.
	 */
	if (!wb_en) {
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
	} else {
		q_vector->wb_on_itr = false;

		/* do two things here with a single write. Set up the third ITR
		 * index to be used for software interrupt moderation, and then
		 * trigger a software interrupt with a rate limit of 20K on
		 * software interrupts, this will help avoid high interrupt
		 * loads due to frequently polling and exiting polling.
		 */
		itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
		itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
			   ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
			   GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
	}
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
}

/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write-back completed descriptors even when
 * interrupts are disabled. Descriptors will be written back on cache line
 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
 * descriptors may not be written back if they don't fill a cache line until
 * the next interrupt.
 *
 * This sets the write-back frequency to whatever was set previously for the
 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
 * aren't meddling with the INTENA_M bit.
 */
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;

	/* already in wb_on_itr mode no need to change it */
	if (q_vector->wb_on_itr)
		return;

	/* use previously set ITR values for all of the ITR indices by
	 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
	 * be static in non-adaptive mode (user configured)
	 */
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     FIELD_PREP(GLINT_DYN_CTL_ITR_INDX_M, ICE_ITR_NONE) |
	     FIELD_PREP(GLINT_DYN_CTL_INTENA_MSK_M, 1) |
	     FIELD_PREP(GLINT_DYN_CTL_WB_ON_ITR_M, 1));

	q_vector->wb_on_itr = true;
}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
		container_of(napi, struct ice_q_vector, napi);
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	bool clean_complete = true;
	int budget_per_ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
		struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);
		bool wd;

		if (xsk_pool)
			wd = ice_xmit_zc(tx_ring, xsk_pool);
		else if (ice_ring_is_xdp(tx_ring))
			wd = true;
		else
			wd = ice_clean_tx_irq(tx_ring, budget);

		if (!wd)
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ring_rx > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;
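
	/* Worked example: with the default NAPI budget of 64 and four Rx
	 * rings on this vector, each ring may clean at most 16 packets per
	 * pass; if any ring consumes its whole share, clean_complete is
	 * left false below and the vector stays in polling mode.
	 */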

	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
		struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);
		int cleaned;

		/* A dedicated path for zero-copy allows making a single
		 * comparison in the irq context instead of many inside the
		 * ice_clean_rx_irq function and makes the codebase cleaner.
		 */
		cleaned = rx_ring->xsk_pool ?
			  ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) :
			  ice_clean_rx_irq(rx_ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
		/* Set the writeback on ITR so partial completions of
		 * cache-lines will still continue even if we're polling.
		 */
		ice_set_wb_on_itr(q_vector);
		return budget;
	}

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (napi_complete_done(napi, work_done)) {
		ice_net_dim(q_vector);
		ice_enable_interrupt(q_vector);
	} else {
		ice_set_wb_on_itr(q_vector);
	}

	return min_t(int, work_done, budget - 1);
}

/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
	netif_tx_stop_queue(txring_txq(tx_ring));
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_tx_start_queue(txring_txq(tx_ring));
	++tx_ring->ring_stats->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}

/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	skb_frag_t *frag;
	dma_addr_t dma;
	bool kick;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = first->vid;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
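		/* Descriptive note: "-dma & (ICE_MAX_READ_REQ_SIZE - 1)" is
		 * the distance from this DMA address up to the next
		 * read-request boundary, so the first chunk of an oversized
		 * buffer is sized to end exactly on that boundary; every
		 * following chunk then starts aligned and can use the full
		 * ICE_MAX_DATA_PER_TXD_ALIGNED.
		 */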
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				ice_build_ctob(td_cmd, td_offset, max_data,
					       td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
							      size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
		tx_buf->type = ICE_TX_BUF_FRAG;
	}

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
				      netdev_xmit_more());
	if (kick)
		/* notify HW of packet */
		writel(i, tx_ring->tail);

	return;

dma_error:
	/* clear DMA mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	const struct ice_tx_ring *tx_ring = off->tx_ring;
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	protocol = vlan_get_protocol(skb);

	if (eth_p_mpls(protocol)) {
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_checksum_start(skb);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
	}

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
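
	/* Worked example: the descriptor length fields are in words, not
	 * bytes. MACLEN is in 2-byte words, so a plain 14-byte Ethernet
	 * header yields 7; further down, IPLEN/L4LEN are in 4-byte dwords,
	 * so a 20-byte IPv4 header yields 5 and tcp->doff (already in
	 * dwords) is used for TCP as-is.
	 */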
/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
        const struct ice_tx_ring *tx_ring = off->tx_ring;
        u32 l4_len = 0, l3_len = 0, l2_len = 0;
        struct sk_buff *skb = first->skb;
        union {
                struct iphdr *v4;
                struct ipv6hdr *v6;
                unsigned char *hdr;
        } ip;
        union {
                struct tcphdr *tcp;
                unsigned char *hdr;
        } l4;
        __be16 frag_off, protocol;
        unsigned char *exthdr;
        u32 offset, cmd = 0;
        u8 l4_proto = 0;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        protocol = vlan_get_protocol(skb);

        if (eth_p_mpls(protocol)) {
                ip.hdr = skb_inner_network_header(skb);
                l4.hdr = skb_checksum_start(skb);
        } else {
                ip.hdr = skb_network_header(skb);
                l4.hdr = skb_transport_header(skb);
        }

        /* compute outer L2 header size */
        l2_len = ip.hdr - skb->data;
        offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

        /* set the tx_flags to indicate the IP protocol type. this is
         * required so that checksum header computation below is accurate.
         */
        if (ip.v4->version == 4)
                first->tx_flags |= ICE_TX_FLAGS_IPV4;
        else if (ip.v6->version == 6)
                first->tx_flags |= ICE_TX_FLAGS_IPV6;

        if (skb->encapsulation) {
                bool gso_ena = false;
                u32 tunnel = 0;

                /* define outer network header type */
                if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
                        tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
                                  ICE_TX_CTX_EIPT_IPV4 :
                                  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
                        l4_proto = ip.v4->protocol;
                } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
                        int ret;

                        tunnel |= ICE_TX_CTX_EIPT_IPV6;
                        exthdr = ip.hdr + sizeof(*ip.v6);
                        l4_proto = ip.v6->nexthdr;
                        ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
                                               &l4_proto, &frag_off);
                        if (ret < 0)
                                return -1;
                }

                /* define outer transport */
                switch (l4_proto) {
                case IPPROTO_UDP:
                        tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
                        first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
                        break;
                case IPPROTO_GRE:
                        tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
                        first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
                        break;
                case IPPROTO_IPIP:
                case IPPROTO_IPV6:
                        first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
                        l4.hdr = skb_inner_network_header(skb);
                        break;
                default:
                        if (first->tx_flags & ICE_TX_FLAGS_TSO)
                                return -1;

                        skb_checksum_help(skb);
                        return 0;
                }

                /* compute outer L3 header size */
                tunnel |= ((l4.hdr - ip.hdr) / 4) <<
                          ICE_TXD_CTX_QW0_EIPLEN_S;

                /* switch IP header pointer from outer to inner header */
                ip.hdr = skb_inner_network_header(skb);

                /* compute tunnel header size */
                tunnel |= ((ip.hdr - l4.hdr) / 2) <<
                          ICE_TXD_CTX_QW0_NATLEN_S;

                gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
                /* indicate if we need to offload outer UDP header */
                if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
                    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
                        tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;

                /* record tunnel offload values */
                off->cd_tunnel_params |= tunnel;

                /* set DTYP = 1 to indicate that it's a Tx context descriptor
                 * in IPsec tunnel mode with Tx offloads in Quad word 1
                 */
                off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;

                /* switch L4 header pointer from outer to inner */
                l4.hdr = skb_inner_transport_header(skb);
                l4_proto = 0;

                /* reset type as we transition from outer to inner headers */
                first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
                if (ip.v4->version == 4)
                        first->tx_flags |= ICE_TX_FLAGS_IPV4;
                if (ip.v6->version == 6)
                        first->tx_flags |= ICE_TX_FLAGS_IPV6;
        }
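        /* Example of the encoding above (illustrative values): a 20 byte
         * outer IPv4 header yields EIPLEN == 20 / 4 == 5, and an 8 byte
         * UDP header plus an 8 byte VXLAN header between the outer and
         * inner IP headers yields NATLEN == 16 / 2 == 8.
         */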
        /* Enable IP checksum offloads */
        if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
                l4_proto = ip.v4->protocol;
                /* the stack computes the IP header already, the only time we
                 * need the hardware to recompute it is in the case of TSO.
                 */
                if (first->tx_flags & ICE_TX_FLAGS_TSO)
                        cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
                else
                        cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

        } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
                cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
                exthdr = ip.hdr + sizeof(*ip.v6);
                l4_proto = ip.v6->nexthdr;
                if (l4.hdr != exthdr)
                        ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
                                         &frag_off);
        } else {
                return -1;
        }

        /* compute inner L3 header size */
        l3_len = l4.hdr - ip.hdr;
        offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

        if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) &&
            !(first->tx_flags & ICE_TX_FLAGS_TSO) &&
            !skb_csum_is_sctp(skb)) {
                /* Set GCS */
                u16 csum_start = (skb->csum_start - skb->mac_header) / 2;
                u16 csum_offset = skb->csum_offset / 2;
                u16 gcs_params;

                gcs_params = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
                             FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
                             FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M,
                                        ICE_TX_GCS_DESC_CSUM_PSH);

                /* Unlike legacy HW checksums, GCS requires a context
                 * descriptor.
                 */
                off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX;
                off->cd_gcs_params = gcs_params;
                /* Fill out CSO info in data descriptors */
                off->td_offset |= offset;
                off->td_cmd |= cmd;
                return 1;
        }

        /* Enable L4 checksum offloads */
        switch (l4_proto) {
        case IPPROTO_TCP:
                /* enable checksum offloads */
                cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
                l4_len = l4.tcp->doff;
                offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
                break;
        case IPPROTO_UDP:
                /* enable UDP checksum offload */
                cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
                l4_len = (sizeof(struct udphdr) >> 2);
                offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
                break;
        case IPPROTO_SCTP:
                /* enable SCTP checksum offload */
                cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
                l4_len = sizeof(struct sctphdr) >> 2;
                offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
                break;

        default:
                if (first->tx_flags & ICE_TX_FLAGS_TSO)
                        return -1;
                skb_checksum_help(skb);
                return 0;
        }

        off->td_cmd |= cmd;
        off->td_offset |= offset;
        return 1;
}
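/* Worked example for the GCS path in ice_tx_csum() above (illustrative):
 * for a plain IPv4/TCP frame, skb->csum_start - skb->mac_header is
 * 14 + 20 == 34 bytes and skb->csum_offset is 16, so the descriptor
 * fields become csum_start == 17 and csum_offset == 8, both counted in
 * 2-byte words as the division by 2 implies.
 */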
/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and sets up the generic transmit flags related to VLAN
 * tagging for the HW, such as VLAN, DCB, etc.
 */
static void
ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
{
        struct sk_buff *skb = first->skb;

        /* nothing left to do, software offloaded VLAN */
        if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
                return;

        /* the VLAN ethertype/tpid is determined by VSI configuration and netdev
         * feature flags; the driver allows only one of 802.1Q or 802.1ad VLAN
         * offloads at a time, so we only care about the VLAN ID here
         */
        if (skb_vlan_tag_present(skb)) {
                first->vid = skb_vlan_tag_get(skb);
                if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
                        first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
                else
                        first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
        }

        ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}
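/* Illustrative note on the two tag locations chosen above: a ring with
 * ICE_TX_FLAGS_RING_VLAN_L2TAG2 set (e.g. an 802.1ad outer tag) carries
 * the VLAN ID in L2TAG2 of a context descriptor, while the default path
 * inserts it via L2TAG1 of the data descriptor in ice_tx_map().
 */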
/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
        struct sk_buff *skb = first->skb;
        union {
                struct iphdr *v4;
                struct ipv6hdr *v6;
                unsigned char *hdr;
        } ip;
        union {
                struct tcphdr *tcp;
                struct udphdr *udp;
                unsigned char *hdr;
        } l4;
        u64 cd_mss, cd_tso_len;
        __be16 protocol;
        u32 paylen;
        u8 l4_start;
        int err;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        if (!skb_is_gso(skb))
                return 0;

        err = skb_cow_head(skb, 0);
        if (err < 0)
                return err;

        protocol = vlan_get_protocol(skb);

        if (eth_p_mpls(protocol))
                ip.hdr = skb_inner_network_header(skb);
        else
                ip.hdr = skb_network_header(skb);
        l4.hdr = skb_checksum_start(skb);

        /* initialize outer IP header fields */
        if (ip.v4->version == 4) {
                ip.v4->tot_len = 0;
                ip.v4->check = 0;
        } else {
                ip.v6->payload_len = 0;
        }

        if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
                                         SKB_GSO_GRE_CSUM |
                                         SKB_GSO_IPXIP4 |
                                         SKB_GSO_IPXIP6 |
                                         SKB_GSO_UDP_TUNNEL |
                                         SKB_GSO_UDP_TUNNEL_CSUM)) {
                if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
                    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
                        l4.udp->len = 0;

                        /* determine offset of outer transport header */
                        l4_start = (u8)(l4.hdr - skb->data);

                        /* remove payload length from outer checksum */
                        paylen = skb->len - l4_start;
                        csum_replace_by_diff(&l4.udp->check,
                                             (__force __wsum)htonl(paylen));
                }

                /* reset pointers to inner headers */
                ip.hdr = skb_inner_network_header(skb);
                l4.hdr = skb_inner_transport_header(skb);

                /* initialize inner IP header fields */
                if (ip.v4->version == 4) {
                        ip.v4->tot_len = 0;
                        ip.v4->check = 0;
                } else {
                        ip.v6->payload_len = 0;
                }
        }

        /* determine offset of transport header */
        l4_start = (u8)(l4.hdr - skb->data);

        /* remove payload length from checksum */
        paylen = skb->len - l4_start;

        if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
                csum_replace_by_diff(&l4.udp->check,
                                     (__force __wsum)htonl(paylen));
                /* compute length of UDP segmentation header */
                off->header_len = (u8)sizeof(struct udphdr) + l4_start;
        } else {
                csum_replace_by_diff(&l4.tcp->check,
                                     (__force __wsum)htonl(paylen));
                /* compute length of TCP segmentation header */
                off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
        }

        /* update gso_segs and bytecount */
        first->gso_segs = skb_shinfo(skb)->gso_segs;
        first->bytecount += (first->gso_segs - 1) * off->header_len;

        cd_tso_len = skb->len - off->header_len;
        cd_mss = skb_shinfo(skb)->gso_size;

        /* record cdesc_qw1 with TSO parameters */
        off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
                             (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
                             (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
                             (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
        first->tx_flags |= ICE_TX_FLAGS_TSO;
        return 1;
}

/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
        return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
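/* Worked example of the estimate above (illustrative): for size == 60000,
 * (60000 * 85) >> 20 == 5100000 >> 20 == 4, and adding
 * ICE_DESCS_FOR_SKB_DATA_PTR (1) gives 5 descriptors, which matches the
 * exact 60000 / 12288 == 4.88 rounded up.
 */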
/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
        const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int count = 0, size = skb_headlen(skb);

        for (;;) {
                count += ice_txd_use_count(size);

                if (!nr_frags--)
                        break;

                size = skb_frag_size(frag++);
        }

        return count;
}

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
        const skb_frag_t *frag, *stale;
        int nr_frags, sum;

        /* no need to check if number of frags is less than 7 */
        nr_frags = skb_shinfo(skb)->nr_frags;
        if (nr_frags < (ICE_MAX_BUF_TXD - 1))
                return false;

        /* We need to walk through the list and validate that each group
         * of 6 fragments totals at least gso_size.
         */
        nr_frags -= ICE_MAX_BUF_TXD - 2;
        frag = &skb_shinfo(skb)->frags[0];

        /* Initialize size to the negative value of gso_size minus 1. We
         * use this as the worst case scenario in which the frag ahead
         * of us only provides one byte which is why we are limited to 6
         * descriptors for a single transmit as the header and previous
         * fragment are already consuming 2 descriptors.
         */
        sum = 1 - skb_shinfo(skb)->gso_size;

        /* Add size of frags 0 through 4 to create our initial sum */
        sum += skb_frag_size(frag++);
        sum += skb_frag_size(frag++);
        sum += skb_frag_size(frag++);
        sum += skb_frag_size(frag++);
        sum += skb_frag_size(frag++);

        /* Walk through fragments adding latest fragment, testing it, and
         * then removing stale fragments from the sum.
         */
        for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
                int stale_size = skb_frag_size(stale);

                sum += skb_frag_size(frag++);

                /* The stale fragment may present us with a smaller
                 * descriptor than the actual fragment size. To account
                 * for that we need to remove all the data on the front and
                 * figure out what the remainder would be in the last
                 * descriptor associated with the fragment.
                 */
                if (stale_size > ICE_MAX_DATA_PER_TXD) {
                        int align_pad = -(skb_frag_off(stale)) &
                                        (ICE_MAX_READ_REQ_SIZE - 1);

                        sum -= align_pad;
                        stale_size -= align_pad;

                        do {
                                sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
                                stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
                        } while (stale_size > ICE_MAX_DATA_PER_TXD);
                }

                /* if sum is negative we failed to make sufficient progress */
                if (sum < 0)
                        return true;

                if (!nr_frags--)
                        break;

                sum -= stale_size;
        }

        return false;
}
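/* Worked example of the sliding-window check above (illustrative
 * numbers): with gso_size == 12000 and seven 1500 byte fragments, sum
 * starts at 1 - 12000 == -11999, the first five fragments bring it to
 * -4499, and adding the sixth still leaves -2999 < 0, so the function
 * returns true and the skb is linearized. With gso_size == 9000 the
 * same layout reaches 1 >= 0 at every step and no linearization is
 * needed.
 */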
/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
        /* Both TSO and single send will work if count is less than 8 */
        if (likely(count < ICE_MAX_BUF_TXD))
                return false;

        if (skb_is_gso(skb))
                return __ice_chk_linearize(skb);

        /* we can support up to 8 data buffers for a single send */
        return count != ICE_MAX_BUF_TXD;
}

/**
 * ice_tstamp - set up context descriptor for hardware timestamp
 * @tx_ring: pointer to the Tx ring to send buffer on
 * @skb: pointer to the SKB we're sending
 * @first: Tx buffer
 * @off: Tx offload parameters
 */
static void
ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
           struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
        s8 idx;

        /* only timestamp the outbound packet if the user has requested it */
        if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
                return;

        /* Tx timestamps cannot be sampled when doing TSO */
        if (first->tx_flags & ICE_TX_FLAGS_TSO)
                return;

        /* Grab an open timestamp slot */
        idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
        if (idx < 0) {
                tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++;
                return;
        }

        off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
                             (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
                             ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
        first->tx_flags |= ICE_TX_FLAGS_TSYN;
}
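/* Usage note (illustrative): the timestamp slot index returned by
 * ice_ptp_request_ts() is carried to the hardware by reusing the TSO_LEN
 * field of the context descriptor, e.g. idx == 5 is encoded as
 * (u64)5 << ICE_TXD_CTX_QW1_TSO_LEN_S alongside the TSYN command bit.
 */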
/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
{
        struct ice_tx_offload_params offload = { 0 };
        struct ice_vsi *vsi = tx_ring->vsi;
        struct ice_tx_buf *first;
        struct ethhdr *eth;
        unsigned int count;
        int tso, csum;

        ice_trace(xmit_frame_ring, tx_ring, skb);

        if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
                goto out_drop;

        count = ice_xmit_desc_count(skb);
        if (ice_chk_linearize(skb, count)) {
                if (__skb_linearize(skb))
                        goto out_drop;
                count = ice_txd_use_count(skb->len);
                tx_ring->ring_stats->tx_stats.tx_linearize++;
        }

        /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
         *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
         *       + 4 desc gap to avoid the cache line where head is,
         *       + 1 desc for context descriptor,
         * otherwise try next time
         */
        if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
                              ICE_DESCS_FOR_CTX_DESC)) {
                tx_ring->ring_stats->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
        }

        /* prefetch for bql data which is infrequently used */
        netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));

        offload.tx_ring = tx_ring;

        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_buf[tx_ring->next_to_use];
        first->skb = skb;
        first->type = ICE_TX_BUF_SKB;
        first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
        first->gso_segs = 1;
        first->tx_flags = 0;

        /* prepare the VLAN tagging flags for Tx */
        ice_tx_prepare_vlan_flags(tx_ring, first);
        if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
                offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
                                        (ICE_TX_CTX_DESC_IL2TAG2 <<
                                         ICE_TXD_CTX_QW1_CMD_S));
                offload.cd_l2tag2 = first->vid;
        }

        /* set up TSO offload */
        tso = ice_tso(first, &offload);
        if (tso < 0)
                goto out_drop;

        /* always set up Tx checksum offload */
        csum = ice_tx_csum(first, &offload);
        if (csum < 0)
                goto out_drop;

        /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
        eth = (struct ethhdr *)skb_mac_header(skb);

        if ((ice_is_switchdev_running(vsi->back) ||
             ice_lag_is_switchdev_running(vsi->back)) &&
            vsi->type != ICE_VSI_SF)
                ice_eswitch_set_target_vsi(skb, &offload);
        else if (unlikely((skb->priority == TC_PRIO_CONTROL ||
                           eth->h_proto == htons(ETH_P_LLDP)) &&
                          vsi->type == ICE_VSI_PF &&
                          vsi->port_info->qos_cfg.is_sw_lldp))
                offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
                                        ICE_TX_CTX_DESC_SWTCH_UPLINK <<
                                        ICE_TXD_CTX_QW1_CMD_S);

        ice_tstamp(tx_ring, skb, first, &offload);

        if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
                struct ice_tx_ctx_desc *cdesc;
                u16 i = tx_ring->next_to_use;

                /* grab the next descriptor */
                cdesc = ICE_TX_CTX_DESC(tx_ring, i);
                i++;
                tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

                /* setup context descriptor */
                cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
                cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
                cdesc->gcs = cpu_to_le16(offload.cd_gcs_params);
                cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
        }

        ice_tx_map(tx_ring, first, &offload);
        return NETDEV_TX_OK;

out_drop:
        ice_trace(xmit_frame_ring_drop, tx_ring, skb);
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
        struct ice_tx_ring *tx_ring;

        tx_ring = vsi->tx_rings[skb->queue_mapping];

        /* hardware can't handle really short frames, hardware padding works
         * beyond this point
         */
        if (skb_put_padto(skb, ICE_MIN_TX_LEN))
                return NETDEV_TX_OK;

        return ice_xmit_frame_ring(skb, tx_ring);
}

/**
 * ice_get_dscp_up - return the UP/TC value for a SKB
 * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
 * @skb: SKB to query for info to determine UP/TC
 *
 * This function is only to be called when the PF is in L3 DSCP PFC mode
 */
static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
{
        u8 dscp = 0;

        if (skb->protocol == htons(ETH_P_IP))
                dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
        else if (skb->protocol == htons(ETH_P_IPV6))
                dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

        return dcbcfg->dscp_map[dscp];
}

u16
ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
                 struct net_device *sb_dev)
{
        struct ice_pf *pf = ice_netdev_to_pf(netdev);
        struct ice_dcbx_cfg *dcbcfg;

        dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
        if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
                skb->priority = ice_get_dscp_up(dcbcfg, skb);

        return netdev_pick_tx(netdev, skb, sb_dev);
}
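/* Worked example for the DSCP mapping above (illustrative values): an
 * IPv4 packet with dsfield 0xb8 carries DSCP 0xb8 >> 2 == 46 (EF), so
 * skb->priority becomes whatever UP/TC value dcbcfg->dscp_map[46] holds
 * for this port.
 */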
/**
 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
 * @tx_ring: tx_ring to clean
 */
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
{
        struct ice_vsi *vsi = tx_ring->vsi;
        s16 i = tx_ring->next_to_clean;
        int budget = ICE_DFLT_IRQ_WORK;
        struct ice_tx_desc *tx_desc;
        struct ice_tx_buf *tx_buf;

        tx_buf = &tx_ring->tx_buf[i];
        tx_desc = ICE_TX_DESC(tx_ring, i);
        i -= tx_ring->count;

        do {
                struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

                /* if next_to_watch is not set then there is no pending work */
                if (!eop_desc)
                        break;

                /* prevent any other reads prior to eop_desc */
                smp_rmb();

                /* if the descriptor isn't done, no work to do */
                if (!(eop_desc->cmd_type_offset_bsz &
                      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
                        break;

                /* clear next_to_watch to prevent false hangs */
                tx_buf->next_to_watch = NULL;
                tx_desc->buf_addr = 0;
                tx_desc->cmd_type_offset_bsz = 0;

                /* move past filter desc */
                tx_buf++;
                tx_desc++;
                i++;
                if (unlikely(!i)) {
                        i -= tx_ring->count;
                        tx_buf = tx_ring->tx_buf;
                        tx_desc = ICE_TX_DESC(tx_ring, 0);
                }

                /* unmap the data header */
                if (dma_unmap_len(tx_buf, len))
                        dma_unmap_single(tx_ring->dev,
                                         dma_unmap_addr(tx_buf, dma),
                                         dma_unmap_len(tx_buf, len),
                                         DMA_TO_DEVICE);
                if (tx_buf->type == ICE_TX_BUF_DUMMY)
                        devm_kfree(tx_ring->dev, tx_buf->raw_buf);

                /* reset the buffer and clear next_to_watch to prevent
                 * false hangs
                 */
                tx_buf->type = ICE_TX_BUF_EMPTY;
                tx_buf->tx_flags = 0;
                tx_buf->next_to_watch = NULL;
                dma_unmap_len_set(tx_buf, len, 0);
                tx_desc->buf_addr = 0;
                tx_desc->cmd_type_offset_bsz = 0;

                /* move past eop_desc for start of next FD desc */
                tx_buf++;
                tx_desc++;
                i++;
                if (unlikely(!i)) {
                        i -= tx_ring->count;
                        tx_buf = tx_ring->tx_buf;
                        tx_desc = ICE_TX_DESC(tx_ring, 0);
                }

                budget--;
        } while (likely(budget));

        i += tx_ring->count;
        tx_ring->next_to_clean = i;

        /* re-enable interrupt if needed */
        ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
}
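/* Illustrative summary of the loop above: each flow director programming
 * cycle occupies two descriptors (the filter descriptor plus the dummy
 * data descriptor queued by ice_prgm_fdir_fltr()), which is why the
 * cleanup advances tx_buf/tx_desc twice per iteration and spends one
 * unit of the ICE_DFLT_IRQ_WORK budget on each completed pair.
 */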