// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"

static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
{
	return &rx_ring->xdp_buf[idx];
}

/**
 * ice_qp_reset_stats - Resets all stats for rings of given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf;

	pf = vsi->back;
	if (!pf->vsi_stats)
		return;

	vsi_stat = pf->vsi_stats[vsi->idx];
	if (!vsi_stat)
		return;

	memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
	       sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
	memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
	       sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
	if (ice_is_xdp_ena_vsi(vsi))
		memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
		       sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
}

/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
	if (ice_is_xdp_ena_vsi(vsi)) {
		synchronize_rcu();
		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
	}
	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}

/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		     bool enable)
{
	if (!vsi->netdev || !q_vector)
		return;

	if (enable)
		napi_enable(&q_vector->napi);
	else
		napi_disable(&q_vector->napi);
}

/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
		 struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u16 reg;
	u32 val;

	/* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring, so only
	 * QINT_RQCTL needs to be handled here.
	 */
	reg = rx_ring->reg_idx;
	val = rd32(hw, QINT_RQCTL(reg));
	val &= ~QINT_RQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_RQCTL(reg), val);

	if (q_vector) {
		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
		ice_flush(hw);
		synchronize_irq(q_vector->irq.virq);
	}
}
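/* A note on the quiesce sequence above: clearing CAUSE_ENA in QINT_RQCTL
 * stops the Rx queue from raising its interrupt cause, the zero write to
 * GLINT_DYN_CTL masks the vector itself, and ice_flush() posts both
 * writes before synchronize_irq() waits out any handler that is already
 * running. Once this returns, no new IRQ-driven NAPI scheduling should
 * originate from this queue pair's vector.
 */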
/**
 * ice_qvec_cfg_msix - Configure MSI-X mapping and ITR for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	u16 reg_idx = q_vector->reg_idx;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;

	ice_cfg_itr(hw, q_vector);

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
				      q_vector->tx.itr_idx);

	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
				      q_vector->rx.itr_idx);

	ice_flush(hw);
}

/**
 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	ice_irq_dynamic_ena(hw, vsi, q_vector);

	ice_flush(hw);
}

/**
 * ice_qp_dis - Disables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_txq_meta txq_meta = { };
	struct ice_q_vector *q_vector;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	int timeout = 50;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
	ice_qvec_toggle_napi(vsi, q_vector, false);

	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
	if (err)
		return err;
	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(&txq_meta, 0, sizeof(txq_meta));
		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
					   &txq_meta);
		if (err)
			return err;
	}
	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
	if (err)
		return err;

	ice_qp_clean_rings(vsi, q_idx);
	ice_qp_reset_stats(vsi, q_idx);

	return 0;
}
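/* The teardown order above matters: the vector is masked and NAPI is
 * stopped before any queue is torn down, so nothing can touch the rings
 * mid-teardown; the Tx queues (including the XDP one) are stopped before
 * the Rx queue; buffers are freed and stats cleared only once the
 * hardware queues are quiesced. Note that ICE_CFG_BUSY serializes
 * concurrent reconfiguration attempts and, on the success path, is
 * cleared only at the end of ice_qp_ena() below.
 */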
/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_q_vector *q_vector;
	int err;

	err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
	if (err)
		return err;

	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

		err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
		if (err)
			return err;
		ice_set_ring_xdp(xdp_ring);
		ice_tx_xsk_pool(vsi, q_idx);
	}

	err = ice_vsi_cfg_single_rxq(vsi, q_idx);
	if (err)
		return err;

	q_vector = vsi->rx_rings[q_idx]->q_vector;
	ice_qvec_cfg_msix(vsi, q_vector);

	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
	if (err)
		return err;

	ice_qvec_toggle_napi(vsi, q_vector, true);
	ice_qvec_ena_irq(vsi, q_vector);

	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
	clear_bit(ICE_CFG_BUSY, vsi->state);

	return 0;
}

/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

	if (!pool)
		return -EINVAL;

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);

	return 0;
}

/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	int err;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (qid >= vsi->netdev->real_num_rx_queues ||
	    qid >= vsi->netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
			       ICE_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	return 0;
}

/**
 * ice_realloc_rx_xdp_bufs - reallocate for either XSK or normal buffer
 * @rx_ring: Rx ring
 * @pool_present: is pool for XSK present
 *
 * Try to allocate a new software ring; return -ENOMEM if the allocation
 * fails. On success, substitute the current buffer array with the newly
 * allocated one.
 * Returns 0 on success, negative on failure
 */
static int
ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
{
	size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
					  sizeof(*rx_ring->rx_buf);
	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);

	if (!sw_ring)
		return -ENOMEM;

	if (pool_present) {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
		rx_ring->xdp_buf = sw_ring;
	} else {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
		rx_ring->rx_buf = sw_ring;
	}

	return 0;
}
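/* Note on the swap above: an Rx ring tracks its buffers either as
 * struct ice_rx_buf entries (regular path) or as xdp_buff pointers
 * (AF_XDP zero-copy path), and the two element types differ in size, so
 * switching modes means freeing one array and allocating the other. The
 * new array is allocated first so that an allocation failure leaves the
 * ring's current software state untouched.
 */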
/**
 * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
 * @vsi: Current VSI
 * @zc: is zero copy set
 *
 * Reallocate buffer for rx_rings that might be used by XSK.
 * XDP requires more memory than rx_buf provides.
 * Returns 0 on success, negative on failure
 */
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
{
	struct ice_rx_ring *rx_ring;
	unsigned long q;

	for_each_set_bit(q, vsi->af_xdp_zc_qps,
			 max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
		rx_ring = vsi->rx_rings[q];
		if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	bool if_running, pool_present = !!pool;
	int ret = 0, pool_failure = 0;

	if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
		netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
		pool_failure = -EINVAL;
		goto failure;
	}

	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

	if (if_running) {
		struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];

		ret = ice_qp_dis(vsi, qid);
		if (ret) {
			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
			goto xsk_pool_if_up;
		}

		ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
		if (ret)
			goto xsk_pool_if_up;
	}

	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
				      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
	if (if_running) {
		ret = ice_qp_ena(vsi, qid);
		if (!ret && pool_present)
			napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
		else if (ret)
			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
	}

failure:
	if (pool_failure) {
		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
			   pool_present ? "en" : "dis", pool_failure);
		return pool_failure;
	}

	return ret;
}

/**
 * ice_fill_rx_descs - pick buffers from XSK buffer pool and use them
 * @pool: XSK Buffer pool to pull the buffers from
 * @xdp: SW ring of xdp_buff that will hold the buffers
 * @rx_desc: Pointer to Rx descriptors that will be filled
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Note that ring wrap should be handled by caller of this function.
 *
 * Returns the number of allocated Rx descriptors
 */
static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
			     union ice_32b_rx_flex_desc *rx_desc, u16 count)
{
	dma_addr_t dma;
	u16 buffs;
	int i;

	buffs = xsk_buff_alloc_batch(pool, xdp, count);
	for (i = 0; i < buffs; i++) {
		dma = xsk_buff_xdp_get_dma(*xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->wb.status_error0 = 0;

		/* Put private info that changes on a per-packet basis
		 * into xdp_buff_xsk->cb.
		 */
		ice_xdp_meta_set_desc(*xdp, rx_desc);

		rx_desc++;
		xdp++;
	}

	return buffs;
}
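/* xsk_buff_alloc_batch() may legitimately return fewer buffers than
 * requested when the fill ring runs dry, so ice_fill_rx_descs() reports
 * how many descriptors it actually armed and the caller advances
 * next_to_use by that amount only. Clearing status_error0 is what lets
 * the cleaning path later detect descriptor write-back via the DD bit.
 */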
/**
 * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Place the @count of descriptors onto Rx ring. Handle the ring wrap
 * for case where space from next_to_use up to the end of ring is less
 * than @count. Finally do a tail bump.
 *
 * Returns true if all allocations were successful, false if any fail.
 */
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
	u32 nb_buffs_extra = 0, nb_buffs = 0;
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	u16 total_count = count;
	struct xdp_buff **xdp;

	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	xdp = ice_xdp_buf(rx_ring, ntu);

	if (ntu + count >= rx_ring->count) {
		nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
						   rx_desc,
						   rx_ring->count - ntu);
		if (nb_buffs_extra != rx_ring->count - ntu) {
			ntu += nb_buffs_extra;
			goto exit;
		}
		rx_desc = ICE_RX_DESC(rx_ring, 0);
		xdp = ice_xdp_buf(rx_ring, 0);
		ntu = 0;
		count -= nb_buffs_extra;
		ice_release_rx_desc(rx_ring, 0);
	}

	nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);

	ntu += nb_buffs;
	if (ntu == rx_ring->count)
		ntu = 0;

exit:
	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return total_count == (nb_buffs_extra + nb_buffs);
}

/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Wrapper for internal allocation routine; figure out how many tail
 * bumps should take place based on the given threshold
 *
 * Returns true if all calls to internal alloc routine succeeded
 */
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
	u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
	u16 leftover, i, tail_bumps;

	tail_bumps = count / rx_thresh;
	leftover = count - (tail_bumps * rx_thresh);

	for (i = 0; i < tail_bumps; i++)
		if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
			return false;
	return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
}
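/* Worked example of the batching above (numbers are illustrative): on a
 * 512-descriptor ring ICE_RING_QUARTER() yields 128, so a request for
 * count = 300 becomes two full passes of 128 descriptors, each followed
 * by its own tail bump inside __ice_alloc_rx_bufs_zc(), plus one final
 * pass for the 44 leftover descriptors. Bumping the tail in
 * quarter-ring chunks hands buffers to hardware sooner than a single
 * bump at the end would.
 */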
/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @xdp: Pointer to XDP buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct skb_shared_info *sinfo = NULL;
	struct sk_buff *skb;
	u32 nr_frags = 0;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}
	net_prefetch(xdp->data_meta);

	skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	for (int i = 0; i < nr_frags; i++) {
		struct skb_shared_info *skinfo = skb_shinfo(skb);
		skb_frag_t *frag = &sinfo->frags[i];
		struct page *page;
		void *addr;

		page = dev_alloc_page();
		if (!page) {
			dev_kfree_skb(skb);
			return NULL;
		}
		addr = page_to_virt(page);

		memcpy(addr, skb_frag_page(frag), skb_frag_size(frag));

		__skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
					   addr, 0, skb_frag_size(frag));
	}

out:
	xsk_buff_free(xdp);
	return skb;
}

/**
 * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
 * @xdp_ring: XDP Tx ring
 */
static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	u16 cnt = xdp_ring->count;
	struct ice_tx_buf *tx_buf;
	u16 completed_frames = 0;
	u16 xsk_frames = 0;
	u16 last_rs;
	int i;

	last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
	tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
	if (tx_desc->cmd_type_offset_bsz &
	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
		if (last_rs >= ntc)
			completed_frames = last_rs - ntc + 1;
		else
			completed_frames = last_rs + cnt - ntc + 1;
	}

	if (!completed_frames)
		return 0;

	if (likely(!xdp_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = xdp_ring->next_to_clean;
	for (i = 0; i < completed_frames; i++) {
		tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
			tx_buf->type = ICE_TX_BUF_EMPTY;
			xsk_buff_free(tx_buf->xdp);
			xdp_ring->xdp_tx_active--;
		} else {
			xsk_frames++;
		}

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}
skip:
	tx_desc->cmd_type_offset_bsz = 0;
	xdp_ring->next_to_clean += completed_frames;
	if (xdp_ring->next_to_clean >= cnt)
		xdp_ring->next_to_clean -= cnt;
	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);

	return completed_frames;
}
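/* The completion accounting above checks only the descriptor that
 * carried the most recent RS bit (the one just before next_to_use): if
 * its DD bit is set, every descriptor between next_to_clean and last_rs
 * is known to be done. The wrap arithmetic is plain modular counting;
 * e.g. with cnt = 512, ntc = 500 and last_rs = 3, the ring wrapped and
 * completed_frames = 3 + 512 - 500 + 1 = 16. Frames sent via XDP_TX
 * return their xdp_buff to the pool here, while frames from the AF_XDP
 * Tx ring are reported back through xsk_tx_completed().
 */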
/**
 * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
 * @xdp: XDP buffer to xmit
 * @xdp_ring: XDP ring to produce descriptor onto
 *
 * note that this function works directly on xdp_buff, no need to convert
 * it to xdp_frame. xdp_buff pointer is stored to ice_tx_buf so that cleaning
 * side will be able to xsk_buff_free() it.
 *
 * Returns ICE_XDP_TX for successfully produced desc, ICE_XDP_CONSUMED if there
 * was not enough space on XDP ring
 */
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
			      struct ice_tx_ring *xdp_ring)
{
	struct skb_shared_info *sinfo = NULL;
	u32 size = xdp->data_end - xdp->data;
	u32 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct xdp_buff *head;
	u32 nr_frags = 0;
	u32 free_space;
	u32 frag = 0;

	free_space = ICE_DESC_UNUSED(xdp_ring);
	if (free_space < ICE_RING_QUARTER(xdp_ring))
		free_space += ice_clean_xdp_irq_zc(xdp_ring);

	if (unlikely(!free_space))
		goto busy;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
		if (free_space < nr_frags + 1)
			goto busy;
	}

	tx_desc = ICE_TX_DESC(xdp_ring, ntu);
	tx_buf = &xdp_ring->tx_buf[ntu];
	head = xdp;

	for (;;) {
		dma_addr_t dma;

		dma = xsk_buff_xdp_get_dma(xdp);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);

		tx_buf->xdp = xdp;
		tx_buf->type = ICE_TX_BUF_XSK_TX;
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
		/* account for each xdp_buff from xsk_buff_pool */
		xdp_ring->xdp_tx_active++;

		if (++ntu == xdp_ring->count)
			ntu = 0;

		if (frag == nr_frags)
			break;

		tx_desc = ICE_TX_DESC(xdp_ring, ntu);
		tx_buf = &xdp_ring->tx_buf[ntu];

		xdp = xsk_buff_get_frag(head);
		size = skb_frag_size(&sinfo->frags[frag]);
		frag++;
	}

	xdp_ring->next_to_use = ntu;
	/* update last descriptor from a frame with EOP */
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);

	return ICE_XDP_TX;

busy:
	xdp_ring->ring_stats->tx_stats.tx_busy++;

	return ICE_XDP_CONSUMED;
}

/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	       struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
	int err, result = ICE_XDP_PASS;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return ICE_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = ICE_XDP_EXIT;
		else
			result = ICE_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
		if (result == ICE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = ICE_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		break;
	}

	return result;
}
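/* Verdict mapping used by the Rx loop further below: ICE_XDP_PASS hands
 * the frame to the stack via an skb copy, ICE_XDP_TX/ICE_XDP_REDIR
 * defer the doorbell to ice_finalize_xdp_rx(), and ICE_XDP_CONSUMED
 * means the buffer was already freed or recycled. ICE_XDP_EXIT is the
 * special case: xdp_do_redirect() returned -ENOBUFS on a need_wakeup
 * socket, so the loop stops early and lets user space catch up rather
 * than burn the rest of the NAPI budget on drops.
 */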
static int
ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
		 struct xdp_buff *xdp, const unsigned int size)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);

	if (!size)
		return 0;

	if (!xdp_buff_has_frags(first)) {
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(first);
	}

	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
		xsk_buff_free(first);
		return -ENOMEM;
	}

	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
				   virt_to_page(xdp->data_hard_start),
				   XDP_PACKET_HEADROOM, size);
	sinfo->xdp_frags_size += size;
	xsk_buff_add_frag(xdp);

	return 0;
}
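/* For multi-buffer frames each non-first xdp_buff is recorded as a
 * shared-info frag of the first buffer: the frag page is derived from
 * the buffer's data_hard_start and the payload begins after
 * XDP_PACKET_HEADROOM. Hitting MAX_SKB_FRAGS frees the whole chain via
 * xsk_buff_free(first), which should also release the frags attached so
 * far; xsk_buff_add_frag() keeps the pool's per-buffer accounting in
 * step.
 */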
/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool;
	u32 ntc = rx_ring->next_to_clean;
	u32 ntu = rx_ring->next_to_use;
	struct xdp_buff *first = NULL;
	struct ice_tx_ring *xdp_ring;
	unsigned int xdp_xmit = 0;
	struct bpf_prog *xdp_prog;
	u32 cnt = rx_ring->count;
	bool failure = false;
	int entries_to_alloc;

	/* ZC path is enabled only when XDP program is set,
	 * so here it cannot be NULL
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	xdp_ring = rx_ring->xdp_ring;

	if (ntc != rx_ring->first_desc)
		first = *ice_xdp_buf(rx_ring, rx_ring->first_desc);

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		unsigned int size, xdp_res = 0;
		struct xdp_buff *xdp;
		struct sk_buff *skb;
		u16 stat_err_bits;
		u16 vlan_tci;

		rx_desc = ICE_RX_DESC(rx_ring, ntc);

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (unlikely(ntc == ntu))
			break;

		xdp = *ice_xdp_buf(rx_ring, ntc);

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
		       ICE_RX_FLX_DESC_PKT_LEN_M;

		xsk_buff_set_size(xdp, size);
		xsk_buff_dma_sync_for_cpu(xdp);

		if (!first) {
			first = xdp;
		} else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
			break;
		}

		if (++ntc == cnt)
			ntc = 0;

		if (ice_is_non_eop(rx_ring, rx_desc))
			continue;

		xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring);
		if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
			xdp_xmit |= xdp_res;
		} else if (xdp_res == ICE_XDP_EXIT) {
			failure = true;
			first = NULL;
			rx_ring->first_desc = ntc;
			break;
		} else if (xdp_res == ICE_XDP_CONSUMED) {
			xsk_buff_free(first);
		} else if (xdp_res == ICE_XDP_PASS) {
			goto construct_skb;
		}

		total_rx_bytes += xdp_get_buff_len(first);
		total_rx_packets++;

		first = NULL;
		rx_ring->first_desc = ntc;
		continue;

construct_skb:
		/* XDP_PASS path */
		skb = ice_construct_skb_zc(rx_ring, first);
		if (!skb) {
			rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
			break;
		}

		first = NULL;
		rx_ring->first_desc = ntc;

		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		total_rx_bytes += skb->len;
		total_rx_packets++;

		vlan_tci = ice_get_vlan_tci(rx_desc);

		ice_process_skb_fields(rx_ring, rx_desc, skb);
		ice_receive_skb(rx_ring, skb, vlan_tci);
	}

	rx_ring->next_to_clean = ntc;
	entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
	if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
		failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);

	ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

	if (xsk_uses_need_wakeup(xsk_pool)) {
		/* ntu could have changed when allocating entries above, so
		 * use rx_ring value instead of stack based one
		 */
		if (failure || ntc == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(xsk_pool);
		else
			xsk_clear_rx_need_wakeup(xsk_pool);

		return (int)total_rx_packets;
	}

	return failure ? budget : (int)total_rx_packets;
}
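/* Returning the full budget on failure (in the non-need_wakeup case
 * above) tells the NAPI core that work remains, so the poll function
 * gets rescheduled and the buffer shortage is retried soon. With
 * need_wakeup enabled the stall is instead reported to user space via
 * the wakeup flag, and the real packet count is returned.
 */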
/**
 * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
 * @xdp_ring: XDP ring to produce the HW Tx descriptor on
 * @desc: AF_XDP descriptor to pull the DMA address and length from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
			 unsigned int *total_bytes)
{
	struct ice_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(desc),
						      0, desc->len, 0);

	*total_bytes += desc->len;
}

/**
 * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
			       unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma_addr_t dma;

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);

		tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(&descs[i]),
							      0, descs[i].len, 0);

		*total_bytes += descs[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

/**
 * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @nb_pkts: count of packets to be sent
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
				u32 nb_pkts, unsigned int *total_bytes)
{
	u32 batched, leftover, i;

	batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (; i < batched + leftover; i++)
		ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}
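/* The split above relies on PKTS_PER_BATCH being a power of two:
 * ALIGN_DOWN() and the (PKTS_PER_BATCH - 1) mask then partition nb_pkts
 * exactly. Assuming PKTS_PER_BATCH = 8, nb_pkts = 27 gives 24
 * descriptors produced in three unrolled batches and a tail loop for
 * the remaining 3. loop_unrolled_for lets the compiler emit
 * straight-line code for the fixed-size batch body.
 */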
/**
 * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 *
 * Returns true if there is no more work that needs to be done, false otherwise
 */
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
{
	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;
	int budget;

	ice_clean_xdp_irq_zc(xdp_ring);

	budget = ICE_DESC_UNUSED(xdp_ring);
	budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
	if (!nb_pkts)
		return true;

	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			    &total_bytes);

	ice_set_rs_bit(xdp_ring);
	ice_xdp_ring_update_tail(xdp_ring);
	ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);

	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

	return nb_pkts < budget;
}

/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
	       u32 __always_unused flags)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_q_vector *q_vector;
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *ring;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -EINVAL;

	if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
		return -EINVAL;

	ring = vsi->rx_rings[queue_id]->xdp_ring;

	if (!ring->xsk_pool)
		return -EINVAL;

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	q_vector = ring->q_vector;
	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
		ice_trigger_sw_intr(&vsi->back->hw, q_vector);

	return 0;
}

/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
	int i;

	ice_for_each_rxq(vsi, i) {
		if (xsk_get_pool_from_qid(vsi->netdev, i))
			return true;
	}

	return false;
}

/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	while (ntc != ntu) {
		struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);

		xsk_buff_free(xdp);
		ntc++;
		if (ntc >= rx_ring->count)
			ntc = 0;
	}
}

/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
			tx_buf->type = ICE_TX_BUF_EMPTY;
			xsk_buff_free(tx_buf->xdp);
		} else {
			xsk_frames++;
		}

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}