1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (C) 2023 Intel Corporation */ 3 4 #include "idpf.h" 5 6 /** 7 * idpf_buf_lifo_push - push a buffer pointer onto stack 8 * @stack: pointer to stack struct 9 * @buf: pointer to buf to push 10 * 11 * Returns 0 on success, negative on failure 12 **/ 13 static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack, 14 struct idpf_tx_stash *buf) 15 { 16 if (unlikely(stack->top == stack->size)) 17 return -ENOSPC; 18 19 stack->bufs[stack->top++] = buf; 20 21 return 0; 22 } 23 24 /** 25 * idpf_buf_lifo_pop - pop a buffer pointer from stack 26 * @stack: pointer to stack struct 27 **/ 28 static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack) 29 { 30 if (unlikely(!stack->top)) 31 return NULL; 32 33 return stack->bufs[--stack->top]; 34 } 35 36 /** 37 * idpf_tx_timeout - Respond to a Tx Hang 38 * @netdev: network interface device structure 39 * @txqueue: TX queue 40 */ 41 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue) 42 { 43 struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev); 44 45 adapter->tx_timeout_count++; 46 47 netdev_err(netdev, "Detected Tx timeout: Count %d, Queue %d\n", 48 adapter->tx_timeout_count, txqueue); 49 if (!idpf_is_reset_in_prog(adapter)) { 50 set_bit(IDPF_HR_FUNC_RESET, adapter->flags); 51 queue_delayed_work(adapter->vc_event_wq, 52 &adapter->vc_event_task, 53 msecs_to_jiffies(10)); 54 } 55 } 56 57 /** 58 * idpf_tx_buf_rel - Release a Tx buffer 59 * @tx_q: the queue that owns the buffer 60 * @tx_buf: the buffer to free 61 */ 62 static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf) 63 { 64 if (tx_buf->skb) { 65 if (dma_unmap_len(tx_buf, len)) 66 dma_unmap_single(tx_q->dev, 67 dma_unmap_addr(tx_buf, dma), 68 dma_unmap_len(tx_buf, len), 69 DMA_TO_DEVICE); 70 dev_kfree_skb_any(tx_buf->skb); 71 } else if (dma_unmap_len(tx_buf, len)) { 72 dma_unmap_page(tx_q->dev, 73 dma_unmap_addr(tx_buf, dma), 74 dma_unmap_len(tx_buf, len), 75 DMA_TO_DEVICE); 76 } 77 78 tx_buf->next_to_watch = NULL; 79 tx_buf->skb = NULL; 80 tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; 81 dma_unmap_len_set(tx_buf, len, 0); 82 } 83 84 /** 85 * idpf_tx_buf_rel_all - Free any empty Tx buffers 86 * @txq: queue to be cleaned 87 */ 88 static void idpf_tx_buf_rel_all(struct idpf_queue *txq) 89 { 90 u16 i; 91 92 /* Buffers already cleared, nothing to do */ 93 if (!txq->tx_buf) 94 return; 95 96 /* Free all the Tx buffer sk_buffs */ 97 for (i = 0; i < txq->desc_count; i++) 98 idpf_tx_buf_rel(txq, &txq->tx_buf[i]); 99 100 kfree(txq->tx_buf); 101 txq->tx_buf = NULL; 102 103 if (!txq->buf_stack.bufs) 104 return; 105 106 for (i = 0; i < txq->buf_stack.size; i++) 107 kfree(txq->buf_stack.bufs[i]); 108 109 kfree(txq->buf_stack.bufs); 110 txq->buf_stack.bufs = NULL; 111 } 112 113 /** 114 * idpf_tx_desc_rel - Free Tx resources per queue 115 * @txq: Tx descriptor ring for a specific queue 116 * @bufq: buffer q or completion q 117 * 118 * Free all transmit software resources 119 */ 120 static void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq) 121 { 122 if (bufq) 123 idpf_tx_buf_rel_all(txq); 124 125 if (!txq->desc_ring) 126 return; 127 128 dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma); 129 txq->desc_ring = NULL; 130 txq->next_to_alloc = 0; 131 txq->next_to_use = 0; 132 txq->next_to_clean = 0; 133 } 134 135 /** 136 * idpf_tx_desc_rel_all - Free Tx Resources for All Queues 137 * @vport: virtual port structure 138 * 139 * Free all transmit software resources 140 */ 141 static 
void idpf_tx_desc_rel_all(struct idpf_vport *vport) 142 { 143 int i, j; 144 145 if (!vport->txq_grps) 146 return; 147 148 for (i = 0; i < vport->num_txq_grp; i++) { 149 struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; 150 151 for (j = 0; j < txq_grp->num_txq; j++) 152 idpf_tx_desc_rel(txq_grp->txqs[j], true); 153 154 if (idpf_is_queue_model_split(vport->txq_model)) 155 idpf_tx_desc_rel(txq_grp->complq, false); 156 } 157 } 158 159 /** 160 * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources 161 * @tx_q: queue for which the buffers are allocated 162 * 163 * Returns 0 on success, negative on failure 164 */ 165 static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q) 166 { 167 int buf_size; 168 int i; 169 170 /* Allocate book keeping buffers only. Buffers to be supplied to HW 171 * are allocated by kernel network stack and received as part of skb 172 */ 173 buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count; 174 tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL); 175 if (!tx_q->tx_buf) 176 return -ENOMEM; 177 178 /* Initialize tx_bufs with invalid completion tags */ 179 for (i = 0; i < tx_q->desc_count; i++) 180 tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; 181 182 /* Initialize tx buf stack for out-of-order completions if 183 * flow scheduling offload is enabled 184 */ 185 tx_q->buf_stack.bufs = 186 kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *), 187 GFP_KERNEL); 188 if (!tx_q->buf_stack.bufs) 189 return -ENOMEM; 190 191 tx_q->buf_stack.size = tx_q->desc_count; 192 tx_q->buf_stack.top = tx_q->desc_count; 193 194 for (i = 0; i < tx_q->desc_count; i++) { 195 tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]), 196 GFP_KERNEL); 197 if (!tx_q->buf_stack.bufs[i]) 198 return -ENOMEM; 199 } 200 201 return 0; 202 } 203 204 /** 205 * idpf_tx_desc_alloc - Allocate the Tx descriptors 206 * @tx_q: the tx ring to set up 207 * @bufq: buffer or completion queue 208 * 209 * Returns 0 on success, negative on failure 210 */ 211 static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq) 212 { 213 struct device *dev = tx_q->dev; 214 u32 desc_sz; 215 int err; 216 217 if (bufq) { 218 err = idpf_tx_buf_alloc_all(tx_q); 219 if (err) 220 goto err_alloc; 221 222 desc_sz = sizeof(struct idpf_base_tx_desc); 223 } else { 224 desc_sz = sizeof(struct idpf_splitq_tx_compl_desc); 225 } 226 227 tx_q->size = tx_q->desc_count * desc_sz; 228 229 /* Allocate descriptors also round up to nearest 4K */ 230 tx_q->size = ALIGN(tx_q->size, 4096); 231 tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma, 232 GFP_KERNEL); 233 if (!tx_q->desc_ring) { 234 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", 235 tx_q->size); 236 err = -ENOMEM; 237 goto err_alloc; 238 } 239 240 tx_q->next_to_alloc = 0; 241 tx_q->next_to_use = 0; 242 tx_q->next_to_clean = 0; 243 set_bit(__IDPF_Q_GEN_CHK, tx_q->flags); 244 245 return 0; 246 247 err_alloc: 248 idpf_tx_desc_rel(tx_q, bufq); 249 250 return err; 251 } 252 253 /** 254 * idpf_tx_desc_alloc_all - allocate all queues Tx resources 255 * @vport: virtual port private structure 256 * 257 * Returns 0 on success, negative on failure 258 */ 259 static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) 260 { 261 struct device *dev = &vport->adapter->pdev->dev; 262 int err = 0; 263 int i, j; 264 265 /* Setup buffer queues. 
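	 * The splitq path below also derives the completion tag layout from
	 * the descriptor count. Illustrative example only, assuming a 16-bit
	 * completion tag: a 512-entry ring yields a 9-bit buffer id field
	 * (mask 0x1FF) and 7 generation bits, i.e. compl_tag_gen_max = 127.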
In single queue model buffer queues and 266 * completion queues will be same 267 */ 268 for (i = 0; i < vport->num_txq_grp; i++) { 269 for (j = 0; j < vport->txq_grps[i].num_txq; j++) { 270 struct idpf_queue *txq = vport->txq_grps[i].txqs[j]; 271 u8 gen_bits = 0; 272 u16 bufidx_mask; 273 274 err = idpf_tx_desc_alloc(txq, true); 275 if (err) { 276 dev_err(dev, "Allocation for Tx Queue %u failed\n", 277 i); 278 goto err_out; 279 } 280 281 if (!idpf_is_queue_model_split(vport->txq_model)) 282 continue; 283 284 txq->compl_tag_cur_gen = 0; 285 286 /* Determine the number of bits in the bufid 287 * mask and add one to get the start of the 288 * generation bits 289 */ 290 bufidx_mask = txq->desc_count - 1; 291 while (bufidx_mask >> 1) { 292 txq->compl_tag_gen_s++; 293 bufidx_mask = bufidx_mask >> 1; 294 } 295 txq->compl_tag_gen_s++; 296 297 gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH - 298 txq->compl_tag_gen_s; 299 txq->compl_tag_gen_max = GETMAXVAL(gen_bits); 300 301 /* Set bufid mask based on location of first 302 * gen bit; it cannot simply be the descriptor 303 * ring size-1 since we can have size values 304 * where not all of those bits are set. 305 */ 306 txq->compl_tag_bufid_m = 307 GETMAXVAL(txq->compl_tag_gen_s); 308 } 309 310 if (!idpf_is_queue_model_split(vport->txq_model)) 311 continue; 312 313 /* Setup completion queues */ 314 err = idpf_tx_desc_alloc(vport->txq_grps[i].complq, false); 315 if (err) { 316 dev_err(dev, "Allocation for Tx Completion Queue %u failed\n", 317 i); 318 goto err_out; 319 } 320 } 321 322 err_out: 323 if (err) 324 idpf_tx_desc_rel_all(vport); 325 326 return err; 327 } 328 329 /** 330 * idpf_rx_page_rel - Release an rx buffer page 331 * @rxq: the queue that owns the buffer 332 * @rx_buf: the buffer to free 333 */ 334 static void idpf_rx_page_rel(struct idpf_queue *rxq, struct idpf_rx_buf *rx_buf) 335 { 336 if (unlikely(!rx_buf->page)) 337 return; 338 339 page_pool_put_full_page(rxq->pp, rx_buf->page, false); 340 341 rx_buf->page = NULL; 342 rx_buf->page_offset = 0; 343 } 344 345 /** 346 * idpf_rx_hdr_buf_rel_all - Release header buffer memory 347 * @rxq: queue to use 348 */ 349 static void idpf_rx_hdr_buf_rel_all(struct idpf_queue *rxq) 350 { 351 struct idpf_adapter *adapter = rxq->vport->adapter; 352 353 dma_free_coherent(&adapter->pdev->dev, 354 rxq->desc_count * IDPF_HDR_BUF_SIZE, 355 rxq->rx_buf.hdr_buf_va, 356 rxq->rx_buf.hdr_buf_pa); 357 rxq->rx_buf.hdr_buf_va = NULL; 358 } 359 360 /** 361 * idpf_rx_buf_rel_all - Free all Rx buffer resources for a queue 362 * @rxq: queue to be cleaned 363 */ 364 static void idpf_rx_buf_rel_all(struct idpf_queue *rxq) 365 { 366 u16 i; 367 368 /* queue already cleared, nothing to do */ 369 if (!rxq->rx_buf.buf) 370 return; 371 372 /* Free all the bufs allocated and given to hw on Rx queue */ 373 for (i = 0; i < rxq->desc_count; i++) 374 idpf_rx_page_rel(rxq, &rxq->rx_buf.buf[i]); 375 376 if (rxq->rx_hsplit_en) 377 idpf_rx_hdr_buf_rel_all(rxq); 378 379 page_pool_destroy(rxq->pp); 380 rxq->pp = NULL; 381 382 kfree(rxq->rx_buf.buf); 383 rxq->rx_buf.buf = NULL; 384 } 385 386 /** 387 * idpf_rx_desc_rel - Free a specific Rx q resources 388 * @rxq: queue to clean the resources from 389 * @bufq: buffer q or completion q 390 * @q_model: single or split q model 391 * 392 * Free a specific rx queue resources 393 */ 394 static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) 395 { 396 if (!rxq) 397 return; 398 399 if (rxq->skb) { 400 dev_kfree_skb_any(rxq->skb); 401 rxq->skb = NULL; 402 } 403 404 if (bufq || 
!idpf_is_queue_model_split(q_model)) 405 idpf_rx_buf_rel_all(rxq); 406 407 rxq->next_to_alloc = 0; 408 rxq->next_to_clean = 0; 409 rxq->next_to_use = 0; 410 if (!rxq->desc_ring) 411 return; 412 413 dmam_free_coherent(rxq->dev, rxq->size, rxq->desc_ring, rxq->dma); 414 rxq->desc_ring = NULL; 415 } 416 417 /** 418 * idpf_rx_desc_rel_all - Free Rx Resources for All Queues 419 * @vport: virtual port structure 420 * 421 * Free all rx queues resources 422 */ 423 static void idpf_rx_desc_rel_all(struct idpf_vport *vport) 424 { 425 struct idpf_rxq_group *rx_qgrp; 426 u16 num_rxq; 427 int i, j; 428 429 if (!vport->rxq_grps) 430 return; 431 432 for (i = 0; i < vport->num_rxq_grp; i++) { 433 rx_qgrp = &vport->rxq_grps[i]; 434 435 if (!idpf_is_queue_model_split(vport->rxq_model)) { 436 for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) 437 idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], 438 false, vport->rxq_model); 439 continue; 440 } 441 442 num_rxq = rx_qgrp->splitq.num_rxq_sets; 443 for (j = 0; j < num_rxq; j++) 444 idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq, 445 false, vport->rxq_model); 446 447 if (!rx_qgrp->splitq.bufq_sets) 448 continue; 449 450 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { 451 struct idpf_bufq_set *bufq_set = 452 &rx_qgrp->splitq.bufq_sets[j]; 453 454 idpf_rx_desc_rel(&bufq_set->bufq, true, 455 vport->rxq_model); 456 } 457 } 458 } 459 460 /** 461 * idpf_rx_buf_hw_update - Store the new tail and head values 462 * @rxq: queue to bump 463 * @val: new head index 464 */ 465 void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val) 466 { 467 rxq->next_to_use = val; 468 469 if (unlikely(!rxq->tail)) 470 return; 471 472 /* writel has an implicit memory barrier */ 473 writel(val, rxq->tail); 474 } 475 476 /** 477 * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers 478 * @rxq: ring to use 479 * 480 * Returns 0 on success, negative on failure. 481 */ 482 static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq) 483 { 484 struct idpf_adapter *adapter = rxq->vport->adapter; 485 486 rxq->rx_buf.hdr_buf_va = 487 dma_alloc_coherent(&adapter->pdev->dev, 488 IDPF_HDR_BUF_SIZE * rxq->desc_count, 489 &rxq->rx_buf.hdr_buf_pa, 490 GFP_KERNEL); 491 if (!rxq->rx_buf.hdr_buf_va) 492 return -ENOMEM; 493 494 return 0; 495 } 496 497 /** 498 * idpf_rx_post_buf_refill - Post buffer id to refill queue 499 * @refillq: refill queue to post to 500 * @buf_id: buffer id to post 501 */ 502 static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id) 503 { 504 u16 nta = refillq->next_to_alloc; 505 506 /* store the buffer ID and the SW maintained GEN bit to the refillq */ 507 refillq->ring[nta] = 508 FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) | 509 FIELD_PREP(IDPF_RX_BI_GEN_M, 510 test_bit(__IDPF_Q_GEN_CHK, refillq->flags)); 511 512 if (unlikely(++nta == refillq->desc_count)) { 513 nta = 0; 514 change_bit(__IDPF_Q_GEN_CHK, refillq->flags); 515 } 516 refillq->next_to_alloc = nta; 517 } 518 519 /** 520 * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring 521 * @bufq: buffer queue to post to 522 * @buf_id: buffer id to post 523 * 524 * Returns false if buffer could not be allocated, true otherwise. 
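 *
 * When header split is enabled, hdr_addr is programmed from the queue's
 * contiguous header buffer area at buf_id * IDPF_HDR_BUF_SIZE, while
 * pkt_addr is filled from a page obtained through idpf_alloc_page().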
525 */ 526 static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id) 527 { 528 struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL; 529 u16 nta = bufq->next_to_alloc; 530 struct idpf_rx_buf *buf; 531 dma_addr_t addr; 532 533 splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta); 534 buf = &bufq->rx_buf.buf[buf_id]; 535 536 if (bufq->rx_hsplit_en) { 537 splitq_rx_desc->hdr_addr = 538 cpu_to_le64(bufq->rx_buf.hdr_buf_pa + 539 (u32)buf_id * IDPF_HDR_BUF_SIZE); 540 } 541 542 addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size); 543 if (unlikely(addr == DMA_MAPPING_ERROR)) 544 return false; 545 546 splitq_rx_desc->pkt_addr = cpu_to_le64(addr); 547 splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id); 548 549 nta++; 550 if (unlikely(nta == bufq->desc_count)) 551 nta = 0; 552 bufq->next_to_alloc = nta; 553 554 return true; 555 } 556 557 /** 558 * idpf_rx_post_init_bufs - Post initial buffers to bufq 559 * @bufq: buffer queue to post working set to 560 * @working_set: number of buffers to put in working set 561 * 562 * Returns true if @working_set bufs were posted successfully, false otherwise. 563 */ 564 static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set) 565 { 566 int i; 567 568 for (i = 0; i < working_set; i++) { 569 if (!idpf_rx_post_buf_desc(bufq, i)) 570 return false; 571 } 572 573 idpf_rx_buf_hw_update(bufq, 574 bufq->next_to_alloc & ~(bufq->rx_buf_stride - 1)); 575 576 return true; 577 } 578 579 /** 580 * idpf_rx_create_page_pool - Create a page pool 581 * @rxbufq: RX queue to create page pool for 582 * 583 * Returns &page_pool on success, casted -errno on failure 584 */ 585 static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq) 586 { 587 struct page_pool_params pp = { 588 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, 589 .order = 0, 590 .pool_size = rxbufq->desc_count, 591 .nid = NUMA_NO_NODE, 592 .dev = rxbufq->vport->netdev->dev.parent, 593 .max_len = PAGE_SIZE, 594 .dma_dir = DMA_FROM_DEVICE, 595 .offset = 0, 596 }; 597 598 return page_pool_create(&pp); 599 } 600 601 /** 602 * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources 603 * @rxbufq: queue for which the buffers are allocated; equivalent to 604 * rxq when operating in singleq mode 605 * 606 * Returns 0 on success, negative on failure 607 */ 608 static int idpf_rx_buf_alloc_all(struct idpf_queue *rxbufq) 609 { 610 int err = 0; 611 612 /* Allocate book keeping buffers */ 613 rxbufq->rx_buf.buf = kcalloc(rxbufq->desc_count, 614 sizeof(struct idpf_rx_buf), GFP_KERNEL); 615 if (!rxbufq->rx_buf.buf) { 616 err = -ENOMEM; 617 goto rx_buf_alloc_all_out; 618 } 619 620 if (rxbufq->rx_hsplit_en) { 621 err = idpf_rx_hdr_buf_alloc_all(rxbufq); 622 if (err) 623 goto rx_buf_alloc_all_out; 624 } 625 626 /* Allocate buffers to be given to HW. 
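	 * In splitq mode only the initial working set is posted to the
	 * buffer queue here; in singleq mode desc_count - 1 buffers are
	 * posted directly to the RX queue.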
*/ 627 if (idpf_is_queue_model_split(rxbufq->vport->rxq_model)) { 628 int working_set = IDPF_RX_BUFQ_WORKING_SET(rxbufq); 629 630 if (!idpf_rx_post_init_bufs(rxbufq, working_set)) 631 err = -ENOMEM; 632 } else { 633 if (idpf_rx_singleq_buf_hw_alloc_all(rxbufq, 634 rxbufq->desc_count - 1)) 635 err = -ENOMEM; 636 } 637 638 rx_buf_alloc_all_out: 639 if (err) 640 idpf_rx_buf_rel_all(rxbufq); 641 642 return err; 643 } 644 645 /** 646 * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW 647 * @rxbufq: RX queue to create page pool for 648 * 649 * Returns 0 on success, negative on failure 650 */ 651 static int idpf_rx_bufs_init(struct idpf_queue *rxbufq) 652 { 653 struct page_pool *pool; 654 655 pool = idpf_rx_create_page_pool(rxbufq); 656 if (IS_ERR(pool)) 657 return PTR_ERR(pool); 658 659 rxbufq->pp = pool; 660 661 return idpf_rx_buf_alloc_all(rxbufq); 662 } 663 664 /** 665 * idpf_rx_bufs_init_all - Initialize all RX bufs 666 * @vport: virtual port struct 667 * 668 * Returns 0 on success, negative on failure 669 */ 670 int idpf_rx_bufs_init_all(struct idpf_vport *vport) 671 { 672 struct idpf_rxq_group *rx_qgrp; 673 struct idpf_queue *q; 674 int i, j, err; 675 676 for (i = 0; i < vport->num_rxq_grp; i++) { 677 rx_qgrp = &vport->rxq_grps[i]; 678 679 /* Allocate bufs for the rxq itself in singleq */ 680 if (!idpf_is_queue_model_split(vport->rxq_model)) { 681 int num_rxq = rx_qgrp->singleq.num_rxq; 682 683 for (j = 0; j < num_rxq; j++) { 684 q = rx_qgrp->singleq.rxqs[j]; 685 err = idpf_rx_bufs_init(q); 686 if (err) 687 return err; 688 } 689 690 continue; 691 } 692 693 /* Otherwise, allocate bufs for the buffer queues */ 694 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { 695 q = &rx_qgrp->splitq.bufq_sets[j].bufq; 696 err = idpf_rx_bufs_init(q); 697 if (err) 698 return err; 699 } 700 } 701 702 return 0; 703 } 704 705 /** 706 * idpf_rx_desc_alloc - Allocate queue Rx resources 707 * @rxq: Rx queue for which the resources are setup 708 * @bufq: buffer or completion queue 709 * @q_model: single or split queue model 710 * 711 * Returns 0 on success, negative on failure 712 */ 713 static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model) 714 { 715 struct device *dev = rxq->dev; 716 717 if (bufq) 718 rxq->size = rxq->desc_count * 719 sizeof(struct virtchnl2_splitq_rx_buf_desc); 720 else 721 rxq->size = rxq->desc_count * 722 sizeof(union virtchnl2_rx_desc); 723 724 /* Allocate descriptors and also round up to nearest 4K */ 725 rxq->size = ALIGN(rxq->size, 4096); 726 rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size, 727 &rxq->dma, GFP_KERNEL); 728 if (!rxq->desc_ring) { 729 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", 730 rxq->size); 731 return -ENOMEM; 732 } 733 734 rxq->next_to_alloc = 0; 735 rxq->next_to_clean = 0; 736 rxq->next_to_use = 0; 737 set_bit(__IDPF_Q_GEN_CHK, rxq->flags); 738 739 return 0; 740 } 741 742 /** 743 * idpf_rx_desc_alloc_all - allocate all RX queues resources 744 * @vport: virtual port structure 745 * 746 * Returns 0 on success, negative on failure 747 */ 748 static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) 749 { 750 struct device *dev = &vport->adapter->pdev->dev; 751 struct idpf_rxq_group *rx_qgrp; 752 struct idpf_queue *q; 753 int i, j, err; 754 u16 num_rxq; 755 756 for (i = 0; i < vport->num_rxq_grp; i++) { 757 rx_qgrp = &vport->rxq_grps[i]; 758 if (idpf_is_queue_model_split(vport->rxq_model)) 759 num_rxq = rx_qgrp->splitq.num_rxq_sets; 760 else 761 num_rxq = rx_qgrp->singleq.num_rxq; 
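		/* RX queues live in rxq_sets in splitq mode and in the
		 * singleq rxqs array otherwise; descriptor memory for the
		 * dedicated buffer queues is only allocated further below
		 * for the splitq model.
		 */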
762 763 for (j = 0; j < num_rxq; j++) { 764 if (idpf_is_queue_model_split(vport->rxq_model)) 765 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; 766 else 767 q = rx_qgrp->singleq.rxqs[j]; 768 err = idpf_rx_desc_alloc(q, false, vport->rxq_model); 769 if (err) { 770 dev_err(dev, "Memory allocation for Rx Queue %u failed\n", 771 i); 772 goto err_out; 773 } 774 } 775 776 if (!idpf_is_queue_model_split(vport->rxq_model)) 777 continue; 778 779 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { 780 q = &rx_qgrp->splitq.bufq_sets[j].bufq; 781 err = idpf_rx_desc_alloc(q, true, vport->rxq_model); 782 if (err) { 783 dev_err(dev, "Memory allocation for Rx Buffer Queue %u failed\n", 784 i); 785 goto err_out; 786 } 787 } 788 } 789 790 return 0; 791 792 err_out: 793 idpf_rx_desc_rel_all(vport); 794 795 return err; 796 } 797 798 /** 799 * idpf_txq_group_rel - Release all resources for txq groups 800 * @vport: vport to release txq groups on 801 */ 802 static void idpf_txq_group_rel(struct idpf_vport *vport) 803 { 804 int i, j; 805 806 if (!vport->txq_grps) 807 return; 808 809 for (i = 0; i < vport->num_txq_grp; i++) { 810 struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; 811 812 for (j = 0; j < txq_grp->num_txq; j++) { 813 kfree(txq_grp->txqs[j]); 814 txq_grp->txqs[j] = NULL; 815 } 816 kfree(txq_grp->complq); 817 txq_grp->complq = NULL; 818 } 819 kfree(vport->txq_grps); 820 vport->txq_grps = NULL; 821 } 822 823 /** 824 * idpf_rxq_sw_queue_rel - Release software queue resources 825 * @rx_qgrp: rx queue group with software queues 826 */ 827 static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp) 828 { 829 int i, j; 830 831 for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) { 832 struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i]; 833 834 for (j = 0; j < bufq_set->num_refillqs; j++) { 835 kfree(bufq_set->refillqs[j].ring); 836 bufq_set->refillqs[j].ring = NULL; 837 } 838 kfree(bufq_set->refillqs); 839 bufq_set->refillqs = NULL; 840 } 841 } 842 843 /** 844 * idpf_rxq_group_rel - Release all resources for rxq groups 845 * @vport: vport to release rxq groups on 846 */ 847 static void idpf_rxq_group_rel(struct idpf_vport *vport) 848 { 849 int i; 850 851 if (!vport->rxq_grps) 852 return; 853 854 for (i = 0; i < vport->num_rxq_grp; i++) { 855 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 856 u16 num_rxq; 857 int j; 858 859 if (idpf_is_queue_model_split(vport->rxq_model)) { 860 num_rxq = rx_qgrp->splitq.num_rxq_sets; 861 for (j = 0; j < num_rxq; j++) { 862 kfree(rx_qgrp->splitq.rxq_sets[j]); 863 rx_qgrp->splitq.rxq_sets[j] = NULL; 864 } 865 866 idpf_rxq_sw_queue_rel(rx_qgrp); 867 kfree(rx_qgrp->splitq.bufq_sets); 868 rx_qgrp->splitq.bufq_sets = NULL; 869 } else { 870 num_rxq = rx_qgrp->singleq.num_rxq; 871 for (j = 0; j < num_rxq; j++) { 872 kfree(rx_qgrp->singleq.rxqs[j]); 873 rx_qgrp->singleq.rxqs[j] = NULL; 874 } 875 } 876 } 877 kfree(vport->rxq_grps); 878 vport->rxq_grps = NULL; 879 } 880 881 /** 882 * idpf_vport_queue_grp_rel_all - Release all queue groups 883 * @vport: vport to release queue groups for 884 */ 885 static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport) 886 { 887 idpf_txq_group_rel(vport); 888 idpf_rxq_group_rel(vport); 889 } 890 891 /** 892 * idpf_vport_queues_rel - Free memory for all queues 893 * @vport: virtual port 894 * 895 * Free the memory allocated for queues associated to a vport 896 */ 897 void idpf_vport_queues_rel(struct idpf_vport *vport) 898 { 899 idpf_tx_desc_rel_all(vport); 900 idpf_rx_desc_rel_all(vport); 901 
idpf_vport_queue_grp_rel_all(vport); 902 903 kfree(vport->txqs); 904 vport->txqs = NULL; 905 } 906 907 /** 908 * idpf_vport_init_fast_path_txqs - Initialize fast path txq array 909 * @vport: vport to init txqs on 910 * 911 * We get a queue index from skb->queue_mapping and we need a fast way to 912 * dereference the queue from queue groups. This allows us to quickly pull a 913 * txq based on a queue index. 914 * 915 * Returns 0 on success, negative on failure 916 */ 917 static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport) 918 { 919 int i, j, k = 0; 920 921 vport->txqs = kcalloc(vport->num_txq, sizeof(struct idpf_queue *), 922 GFP_KERNEL); 923 924 if (!vport->txqs) 925 return -ENOMEM; 926 927 for (i = 0; i < vport->num_txq_grp; i++) { 928 struct idpf_txq_group *tx_grp = &vport->txq_grps[i]; 929 930 for (j = 0; j < tx_grp->num_txq; j++, k++) { 931 vport->txqs[k] = tx_grp->txqs[j]; 932 vport->txqs[k]->idx = k; 933 } 934 } 935 936 return 0; 937 } 938 939 /** 940 * idpf_vport_init_num_qs - Initialize number of queues 941 * @vport: vport to initialize queues 942 * @vport_msg: data to be filled into vport 943 */ 944 void idpf_vport_init_num_qs(struct idpf_vport *vport, 945 struct virtchnl2_create_vport *vport_msg) 946 { 947 struct idpf_vport_user_config_data *config_data; 948 u16 idx = vport->idx; 949 950 config_data = &vport->adapter->vport_config[idx]->user_config; 951 vport->num_txq = le16_to_cpu(vport_msg->num_tx_q); 952 vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q); 953 /* number of txqs and rxqs in config data will be zero only in the 954 * driver load path and we don't update them thereafter 955 */ 956 if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) { 957 config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q); 958 config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q); 959 } 960 961 if (idpf_is_queue_model_split(vport->txq_model)) 962 vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq); 963 if (idpf_is_queue_model_split(vport->rxq_model)) 964 vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq); 965 966 /* Adjust number of buffer queues per Rx queue group.
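	 * Singleq has no dedicated buffer queues, so only a default 2K
	 * buffer size is recorded; splitq gets IDPF_MAX_BUFQS_PER_RXQ_GRP
	 * buffer queues sized as described below.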
*/ 967 if (!idpf_is_queue_model_split(vport->rxq_model)) { 968 vport->num_bufqs_per_qgrp = 0; 969 vport->bufq_size[0] = IDPF_RX_BUF_2048; 970 971 return; 972 } 973 974 vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP; 975 /* Bufq[0] default buffer size is 4K 976 * Bufq[1] default buffer size is 2K 977 */ 978 vport->bufq_size[0] = IDPF_RX_BUF_4096; 979 vport->bufq_size[1] = IDPF_RX_BUF_2048; 980 } 981 982 /** 983 * idpf_vport_calc_num_q_desc - Calculate number of queue groups 984 * @vport: vport to calculate q groups for 985 */ 986 void idpf_vport_calc_num_q_desc(struct idpf_vport *vport) 987 { 988 struct idpf_vport_user_config_data *config_data; 989 int num_bufqs = vport->num_bufqs_per_qgrp; 990 u32 num_req_txq_desc, num_req_rxq_desc; 991 u16 idx = vport->idx; 992 int i; 993 994 config_data = &vport->adapter->vport_config[idx]->user_config; 995 num_req_txq_desc = config_data->num_req_txq_desc; 996 num_req_rxq_desc = config_data->num_req_rxq_desc; 997 998 vport->complq_desc_count = 0; 999 if (num_req_txq_desc) { 1000 vport->txq_desc_count = num_req_txq_desc; 1001 if (idpf_is_queue_model_split(vport->txq_model)) { 1002 vport->complq_desc_count = num_req_txq_desc; 1003 if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC) 1004 vport->complq_desc_count = 1005 IDPF_MIN_TXQ_COMPLQ_DESC; 1006 } 1007 } else { 1008 vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT; 1009 if (idpf_is_queue_model_split(vport->txq_model)) 1010 vport->complq_desc_count = 1011 IDPF_DFLT_TX_COMPLQ_DESC_COUNT; 1012 } 1013 1014 if (num_req_rxq_desc) 1015 vport->rxq_desc_count = num_req_rxq_desc; 1016 else 1017 vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT; 1018 1019 for (i = 0; i < num_bufqs; i++) { 1020 if (!vport->bufq_desc_count[i]) 1021 vport->bufq_desc_count[i] = 1022 IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count, 1023 num_bufqs); 1024 } 1025 } 1026 1027 /** 1028 * idpf_vport_calc_total_qs - Calculate total number of queues 1029 * @adapter: private data struct 1030 * @vport_idx: vport idx to retrieve vport pointer 1031 * @vport_msg: message to fill with data 1032 * @max_q: vport max queue info 1033 * 1034 * Return 0 on success, error value on failure. 1035 */ 1036 int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx, 1037 struct virtchnl2_create_vport *vport_msg, 1038 struct idpf_vport_max_q *max_q) 1039 { 1040 int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0; 1041 int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0; 1042 u16 num_req_tx_qs = 0, num_req_rx_qs = 0; 1043 struct idpf_vport_config *vport_config; 1044 u16 num_txq_grps, num_rxq_grps; 1045 u32 num_qs; 1046 1047 vport_config = adapter->vport_config[vport_idx]; 1048 if (vport_config) { 1049 num_req_tx_qs = vport_config->user_config.num_req_tx_qs; 1050 num_req_rx_qs = vport_config->user_config.num_req_rx_qs; 1051 } else { 1052 int num_cpus; 1053 1054 /* Restrict num of queues to cpus online as a default 1055 * configuration to give best performance. User can always 1056 * override to a max number of queues via ethtool. 1057 */ 1058 num_cpus = num_online_cpus(); 1059 1060 dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus); 1061 dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus); 1062 dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus); 1063 dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus); 1064 } 1065 1066 if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) { 1067 num_txq_grps = num_req_tx_qs ? 
num_req_tx_qs : dflt_splitq_txq_grps; 1068 vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps * 1069 IDPF_COMPLQ_PER_GROUP); 1070 vport_msg->num_tx_q = cpu_to_le16(num_txq_grps * 1071 IDPF_DFLT_SPLITQ_TXQ_PER_GROUP); 1072 } else { 1073 num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS; 1074 num_qs = num_txq_grps * (num_req_tx_qs ? num_req_tx_qs : 1075 dflt_singleq_txqs); 1076 vport_msg->num_tx_q = cpu_to_le16(num_qs); 1077 vport_msg->num_tx_complq = 0; 1078 } 1079 if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) { 1080 num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps; 1081 vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps * 1082 IDPF_MAX_BUFQS_PER_RXQ_GRP); 1083 vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps * 1084 IDPF_DFLT_SPLITQ_RXQ_PER_GROUP); 1085 } else { 1086 num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS; 1087 num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs : 1088 dflt_singleq_rxqs); 1089 vport_msg->num_rx_q = cpu_to_le16(num_qs); 1090 vport_msg->num_rx_bufq = 0; 1091 } 1092 1093 return 0; 1094 } 1095 1096 /** 1097 * idpf_vport_calc_num_q_groups - Calculate number of queue groups 1098 * @vport: vport to calculate q groups for 1099 */ 1100 void idpf_vport_calc_num_q_groups(struct idpf_vport *vport) 1101 { 1102 if (idpf_is_queue_model_split(vport->txq_model)) 1103 vport->num_txq_grp = vport->num_txq; 1104 else 1105 vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS; 1106 1107 if (idpf_is_queue_model_split(vport->rxq_model)) 1108 vport->num_rxq_grp = vport->num_rxq; 1109 else 1110 vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS; 1111 } 1112 1113 /** 1114 * idpf_vport_calc_numq_per_grp - Calculate number of queues per group 1115 * @vport: vport to calculate queues for 1116 * @num_txq: return parameter for number of TX queues 1117 * @num_rxq: return parameter for number of RX queues 1118 */ 1119 static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport, 1120 u16 *num_txq, u16 *num_rxq) 1121 { 1122 if (idpf_is_queue_model_split(vport->txq_model)) 1123 *num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP; 1124 else 1125 *num_txq = vport->num_txq; 1126 1127 if (idpf_is_queue_model_split(vport->rxq_model)) 1128 *num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; 1129 else 1130 *num_rxq = vport->num_rxq; 1131 } 1132 1133 /** 1134 * idpf_rxq_set_descids - set the descids supported by this queue 1135 * @vport: virtual port data structure 1136 * @q: rx queue for which descids are set 1137 * 1138 */ 1139 static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q) 1140 { 1141 if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { 1142 q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M; 1143 } else { 1144 if (vport->base_rxd) 1145 q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M; 1146 else 1147 q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M; 1148 } 1149 } 1150 1151 /** 1152 * idpf_txq_group_alloc - Allocate all txq group resources 1153 * @vport: vport to allocate txq groups for 1154 * @num_txq: number of txqs to allocate for each group 1155 * 1156 * Returns 0 on success, negative on failure 1157 */ 1158 static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) 1159 { 1160 bool flow_sch_en; 1161 int err, i; 1162 1163 vport->txq_grps = kcalloc(vport->num_txq_grp, 1164 sizeof(*vport->txq_grps), GFP_KERNEL); 1165 if (!vport->txq_grps) 1166 return -ENOMEM; 1167 1168 flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, 1169 VIRTCHNL2_CAP_SPLITQ_QSCHED); 1170 1171 for (i = 0; i < vport->num_txq_grp; i++) { 1172 struct idpf_txq_group *tx_qgrp = 
&vport->txq_grps[i]; 1173 struct idpf_adapter *adapter = vport->adapter; 1174 int j; 1175 1176 tx_qgrp->vport = vport; 1177 tx_qgrp->num_txq = num_txq; 1178 1179 for (j = 0; j < tx_qgrp->num_txq; j++) { 1180 tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]), 1181 GFP_KERNEL); 1182 if (!tx_qgrp->txqs[j]) { 1183 err = -ENOMEM; 1184 goto err_alloc; 1185 } 1186 } 1187 1188 for (j = 0; j < tx_qgrp->num_txq; j++) { 1189 struct idpf_queue *q = tx_qgrp->txqs[j]; 1190 1191 q->dev = &adapter->pdev->dev; 1192 q->desc_count = vport->txq_desc_count; 1193 q->tx_max_bufs = idpf_get_max_tx_bufs(adapter); 1194 q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter); 1195 q->vport = vport; 1196 q->txq_grp = tx_qgrp; 1197 hash_init(q->sched_buf_hash); 1198 1199 if (flow_sch_en) 1200 set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags); 1201 } 1202 1203 if (!idpf_is_queue_model_split(vport->txq_model)) 1204 continue; 1205 1206 tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP, 1207 sizeof(*tx_qgrp->complq), 1208 GFP_KERNEL); 1209 if (!tx_qgrp->complq) { 1210 err = -ENOMEM; 1211 goto err_alloc; 1212 } 1213 1214 tx_qgrp->complq->dev = &adapter->pdev->dev; 1215 tx_qgrp->complq->desc_count = vport->complq_desc_count; 1216 tx_qgrp->complq->vport = vport; 1217 tx_qgrp->complq->txq_grp = tx_qgrp; 1218 1219 if (flow_sch_en) 1220 __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags); 1221 } 1222 1223 return 0; 1224 1225 err_alloc: 1226 idpf_txq_group_rel(vport); 1227 1228 return err; 1229 } 1230 1231 /** 1232 * idpf_rxq_group_alloc - Allocate all rxq group resources 1233 * @vport: vport to allocate rxq groups for 1234 * @num_rxq: number of rxqs to allocate for each group 1235 * 1236 * Returns 0 on success, negative on failure 1237 */ 1238 static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) 1239 { 1240 struct idpf_adapter *adapter = vport->adapter; 1241 struct idpf_queue *q; 1242 int i, k, err = 0; 1243 bool hs; 1244 1245 vport->rxq_grps = kcalloc(vport->num_rxq_grp, 1246 sizeof(struct idpf_rxq_group), GFP_KERNEL); 1247 if (!vport->rxq_grps) 1248 return -ENOMEM; 1249 1250 hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED; 1251 1252 for (i = 0; i < vport->num_rxq_grp; i++) { 1253 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1254 int j; 1255 1256 rx_qgrp->vport = vport; 1257 if (!idpf_is_queue_model_split(vport->rxq_model)) { 1258 rx_qgrp->singleq.num_rxq = num_rxq; 1259 for (j = 0; j < num_rxq; j++) { 1260 rx_qgrp->singleq.rxqs[j] = 1261 kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]), 1262 GFP_KERNEL); 1263 if (!rx_qgrp->singleq.rxqs[j]) { 1264 err = -ENOMEM; 1265 goto err_alloc; 1266 } 1267 } 1268 goto skip_splitq_rx_init; 1269 } 1270 rx_qgrp->splitq.num_rxq_sets = num_rxq; 1271 1272 for (j = 0; j < num_rxq; j++) { 1273 rx_qgrp->splitq.rxq_sets[j] = 1274 kzalloc(sizeof(struct idpf_rxq_set), 1275 GFP_KERNEL); 1276 if (!rx_qgrp->splitq.rxq_sets[j]) { 1277 err = -ENOMEM; 1278 goto err_alloc; 1279 } 1280 } 1281 1282 rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp, 1283 sizeof(struct idpf_bufq_set), 1284 GFP_KERNEL); 1285 if (!rx_qgrp->splitq.bufq_sets) { 1286 err = -ENOMEM; 1287 goto err_alloc; 1288 } 1289 1290 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { 1291 struct idpf_bufq_set *bufq_set = 1292 &rx_qgrp->splitq.bufq_sets[j]; 1293 int swq_size = sizeof(struct idpf_sw_queue); 1294 1295 q = &rx_qgrp->splitq.bufq_sets[j].bufq; 1296 q->dev = &adapter->pdev->dev; 1297 q->desc_count = vport->bufq_desc_count[j]; 1298 q->vport = vport; 1299 q->rxq_grp = rx_qgrp; 1300 q->idx = j; 
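			/* Buffer sizes come from idpf_vport_init_num_qs():
			 * typically 4K for bufq0 and 2K for bufq1.
			 */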
1301 q->rx_buf_size = vport->bufq_size[j]; 1302 q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; 1303 q->rx_buf_stride = IDPF_RX_BUF_STRIDE; 1304 1305 if (hs) { 1306 q->rx_hsplit_en = true; 1307 q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; 1308 } 1309 1310 bufq_set->num_refillqs = num_rxq; 1311 bufq_set->refillqs = kcalloc(num_rxq, swq_size, 1312 GFP_KERNEL); 1313 if (!bufq_set->refillqs) { 1314 err = -ENOMEM; 1315 goto err_alloc; 1316 } 1317 for (k = 0; k < bufq_set->num_refillqs; k++) { 1318 struct idpf_sw_queue *refillq = 1319 &bufq_set->refillqs[k]; 1320 1321 refillq->dev = &vport->adapter->pdev->dev; 1322 refillq->desc_count = 1323 vport->bufq_desc_count[j]; 1324 set_bit(__IDPF_Q_GEN_CHK, refillq->flags); 1325 set_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags); 1326 refillq->ring = kcalloc(refillq->desc_count, 1327 sizeof(u16), 1328 GFP_KERNEL); 1329 if (!refillq->ring) { 1330 err = -ENOMEM; 1331 goto err_alloc; 1332 } 1333 } 1334 } 1335 1336 skip_splitq_rx_init: 1337 for (j = 0; j < num_rxq; j++) { 1338 if (!idpf_is_queue_model_split(vport->rxq_model)) { 1339 q = rx_qgrp->singleq.rxqs[j]; 1340 goto setup_rxq; 1341 } 1342 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; 1343 rx_qgrp->splitq.rxq_sets[j]->refillq0 = 1344 &rx_qgrp->splitq.bufq_sets[0].refillqs[j]; 1345 if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) 1346 rx_qgrp->splitq.rxq_sets[j]->refillq1 = 1347 &rx_qgrp->splitq.bufq_sets[1].refillqs[j]; 1348 1349 if (hs) { 1350 q->rx_hsplit_en = true; 1351 q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; 1352 } 1353 1354 setup_rxq: 1355 q->dev = &adapter->pdev->dev; 1356 q->desc_count = vport->rxq_desc_count; 1357 q->vport = vport; 1358 q->rxq_grp = rx_qgrp; 1359 q->idx = (i * num_rxq) + j; 1360 /* In splitq mode, RXQ buffer size should be 1361 * set to that of the first buffer queue 1362 * associated with this RXQ 1363 */ 1364 q->rx_buf_size = vport->bufq_size[0]; 1365 q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; 1366 q->rx_max_pkt_size = vport->netdev->mtu + 1367 IDPF_PACKET_HDR_PAD; 1368 idpf_rxq_set_descids(vport, q); 1369 } 1370 } 1371 1372 err_alloc: 1373 if (err) 1374 idpf_rxq_group_rel(vport); 1375 1376 return err; 1377 } 1378 1379 /** 1380 * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources 1381 * @vport: vport with qgrps to allocate 1382 * 1383 * Returns 0 on success, negative on failure 1384 */ 1385 static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport) 1386 { 1387 u16 num_txq, num_rxq; 1388 int err; 1389 1390 idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq); 1391 1392 err = idpf_txq_group_alloc(vport, num_txq); 1393 if (err) 1394 goto err_out; 1395 1396 err = idpf_rxq_group_alloc(vport, num_rxq); 1397 if (err) 1398 goto err_out; 1399 1400 return 0; 1401 1402 err_out: 1403 idpf_vport_queue_grp_rel_all(vport); 1404 1405 return err; 1406 } 1407 1408 /** 1409 * idpf_vport_queues_alloc - Allocate memory for all queues 1410 * @vport: virtual port 1411 * 1412 * Allocate memory for queues associated with a vport. Returns 0 on success, 1413 * negative on failure. 
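 *
 * Allocation order: queue groups first, then TX and RX descriptor rings,
 * and finally the fast path txq lookup array; on any failure everything
 * allocated so far is released again via idpf_vport_queues_rel().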
1414 */ 1415 int idpf_vport_queues_alloc(struct idpf_vport *vport) 1416 { 1417 int err; 1418 1419 err = idpf_vport_queue_grp_alloc_all(vport); 1420 if (err) 1421 goto err_out; 1422 1423 err = idpf_tx_desc_alloc_all(vport); 1424 if (err) 1425 goto err_out; 1426 1427 err = idpf_rx_desc_alloc_all(vport); 1428 if (err) 1429 goto err_out; 1430 1431 err = idpf_vport_init_fast_path_txqs(vport); 1432 if (err) 1433 goto err_out; 1434 1435 return 0; 1436 1437 err_out: 1438 idpf_vport_queues_rel(vport); 1439 1440 return err; 1441 } 1442 1443 /** 1444 * idpf_tx_handle_sw_marker - Handle queue marker packet 1445 * @tx_q: tx queue to handle software marker 1446 */ 1447 static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q) 1448 { 1449 struct idpf_vport *vport = tx_q->vport; 1450 int i; 1451 1452 clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags); 1453 /* Hardware must write marker packets to all queues associated with 1454 * completion queues. So check if all queues received marker packets 1455 */ 1456 for (i = 0; i < vport->num_txq; i++) 1457 /* If we're still waiting on any other TXQ marker completions, 1458 * just return now since we cannot wake up the marker_wq yet. 1459 */ 1460 if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags)) 1461 return; 1462 1463 /* Drain complete */ 1464 set_bit(IDPF_VPORT_SW_MARKER, vport->flags); 1465 wake_up(&vport->sw_marker_wq); 1466 } 1467 1468 /** 1469 * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of 1470 * packet 1471 * @tx_q: tx queue to clean buffer from 1472 * @tx_buf: buffer to be cleaned 1473 * @cleaned: pointer to stats struct to track cleaned packets/bytes 1474 * @napi_budget: Used to determine if we are in netpoll 1475 */ 1476 static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q, 1477 struct idpf_tx_buf *tx_buf, 1478 struct idpf_cleaned_stats *cleaned, 1479 int napi_budget) 1480 { 1481 napi_consume_skb(tx_buf->skb, napi_budget); 1482 1483 if (dma_unmap_len(tx_buf, len)) { 1484 dma_unmap_single(tx_q->dev, 1485 dma_unmap_addr(tx_buf, dma), 1486 dma_unmap_len(tx_buf, len), 1487 DMA_TO_DEVICE); 1488 1489 dma_unmap_len_set(tx_buf, len, 0); 1490 } 1491 1492 /* clear tx_buf data */ 1493 tx_buf->skb = NULL; 1494 1495 cleaned->bytes += tx_buf->bytecount; 1496 cleaned->packets += tx_buf->gso_segs; 1497 } 1498 1499 /** 1500 * idpf_tx_clean_stashed_bufs - clean bufs that were stored for 1501 * out of order completions 1502 * @txq: queue to clean 1503 * @compl_tag: completion tag of packet to clean (from completion descriptor) 1504 * @cleaned: pointer to stats struct to track cleaned packets/bytes 1505 * @budget: Used to determine if we are in netpoll 1506 */ 1507 static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, 1508 struct idpf_cleaned_stats *cleaned, 1509 int budget) 1510 { 1511 struct idpf_tx_stash *stash; 1512 struct hlist_node *tmp_buf; 1513 1514 /* Buffer completion */ 1515 hash_for_each_possible_safe(txq->sched_buf_hash, stash, tmp_buf, 1516 hlist, compl_tag) { 1517 if (unlikely(stash->buf.compl_tag != (int)compl_tag)) 1518 continue; 1519 1520 if (stash->buf.skb) { 1521 idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned, 1522 budget); 1523 } else if (dma_unmap_len(&stash->buf, len)) { 1524 dma_unmap_page(txq->dev, 1525 dma_unmap_addr(&stash->buf, dma), 1526 dma_unmap_len(&stash->buf, len), 1527 DMA_TO_DEVICE); 1528 dma_unmap_len_set(&stash->buf, len, 0); 1529 } 1530 1531 /* Push shadow buf back onto stack */ 1532 idpf_buf_lifo_push(&txq->buf_stack, stash); 1533 1534 hash_del(&stash->hlist); 1535 } 
1536 } 1537 1538 /** 1539 * idpf_stash_flow_sch_buffers - store buffer parameters info to be freed at a 1540 * later time (only relevant for flow scheduling mode) 1541 * @txq: Tx queue to clean 1542 * @tx_buf: buffer to store 1543 */ 1544 static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq, 1545 struct idpf_tx_buf *tx_buf) 1546 { 1547 struct idpf_tx_stash *stash; 1548 1549 if (unlikely(!dma_unmap_addr(tx_buf, dma) && 1550 !dma_unmap_len(tx_buf, len))) 1551 return 0; 1552 1553 stash = idpf_buf_lifo_pop(&txq->buf_stack); 1554 if (unlikely(!stash)) { 1555 net_err_ratelimited("%s: No out-of-order TX buffers left!\n", 1556 txq->vport->netdev->name); 1557 1558 return -ENOMEM; 1559 } 1560 1561 /* Store buffer params in shadow buffer */ 1562 stash->buf.skb = tx_buf->skb; 1563 stash->buf.bytecount = tx_buf->bytecount; 1564 stash->buf.gso_segs = tx_buf->gso_segs; 1565 dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma)); 1566 dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len)); 1567 stash->buf.compl_tag = tx_buf->compl_tag; 1568 1569 /* Add buffer to buf_hash table to be freed later */ 1570 hash_add(txq->sched_buf_hash, &stash->hlist, stash->buf.compl_tag); 1571 1572 memset(tx_buf, 0, sizeof(struct idpf_tx_buf)); 1573 1574 /* Reinitialize buf_id portion of tag */ 1575 tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; 1576 1577 return 0; 1578 } 1579 1580 #define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \ 1581 do { \ 1582 (ntc)++; \ 1583 if (unlikely(!(ntc))) { \ 1584 ntc -= (txq)->desc_count; \ 1585 buf = (txq)->tx_buf; \ 1586 desc = IDPF_FLEX_TX_DESC(txq, 0); \ 1587 } else { \ 1588 (buf)++; \ 1589 (desc)++; \ 1590 } \ 1591 } while (0) 1592 1593 /** 1594 * idpf_tx_splitq_clean - Reclaim resources from buffer queue 1595 * @tx_q: Tx queue to clean 1596 * @end: queue index until which it should be cleaned 1597 * @napi_budget: Used to determine if we are in netpoll 1598 * @cleaned: pointer to stats struct to track cleaned packets/bytes 1599 * @descs_only: true if queue is using flow-based scheduling and should 1600 * not clean buffers at this time 1601 * 1602 * Cleans the queue descriptor ring. If the queue is using queue-based 1603 * scheduling, the buffers will be cleaned as well. If the queue is using 1604 * flow-based scheduling, only the descriptors are cleaned at this time. 1605 * Separate packet completion events will be reported on the completion queue, 1606 * and the buffers will be cleaned separately. The stats are not updated from 1607 * this function when using flow-based scheduling. 1608 */ 1609 static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end, 1610 int napi_budget, 1611 struct idpf_cleaned_stats *cleaned, 1612 bool descs_only) 1613 { 1614 union idpf_tx_flex_desc *next_pending_desc = NULL; 1615 union idpf_tx_flex_desc *tx_desc; 1616 s16 ntc = tx_q->next_to_clean; 1617 struct idpf_tx_buf *tx_buf; 1618 1619 tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc); 1620 next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end); 1621 tx_buf = &tx_q->tx_buf[ntc]; 1622 ntc -= tx_q->desc_count; 1623 1624 while (tx_desc != next_pending_desc) { 1625 union idpf_tx_flex_desc *eop_desc; 1626 1627 /* If this entry in the ring was used as a context descriptor, 1628 * it's corresponding entry in the buffer ring will have an 1629 * invalid completion tag since no buffer was used. We can 1630 * skip this descriptor since there is no buffer to clean. 
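		 * (Context descriptors are used, e.g., for TSO; their buffer
		 * ring entries keep IDPF_SPLITQ_TX_INVAL_COMPL_TAG.)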
1631 */ 1632 if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG)) 1633 goto fetch_next_txq_desc; 1634 1635 eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch; 1636 1637 /* clear next_to_watch to prevent false hangs */ 1638 tx_buf->next_to_watch = NULL; 1639 1640 if (descs_only) { 1641 if (idpf_stash_flow_sch_buffers(tx_q, tx_buf)) 1642 goto tx_splitq_clean_out; 1643 1644 while (tx_desc != eop_desc) { 1645 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, 1646 tx_desc, tx_buf); 1647 1648 if (dma_unmap_len(tx_buf, len)) { 1649 if (idpf_stash_flow_sch_buffers(tx_q, 1650 tx_buf)) 1651 goto tx_splitq_clean_out; 1652 } 1653 } 1654 } else { 1655 idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned, 1656 napi_budget); 1657 1658 /* unmap remaining buffers */ 1659 while (tx_desc != eop_desc) { 1660 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, 1661 tx_desc, tx_buf); 1662 1663 /* unmap any remaining paged data */ 1664 if (dma_unmap_len(tx_buf, len)) { 1665 dma_unmap_page(tx_q->dev, 1666 dma_unmap_addr(tx_buf, dma), 1667 dma_unmap_len(tx_buf, len), 1668 DMA_TO_DEVICE); 1669 dma_unmap_len_set(tx_buf, len, 0); 1670 } 1671 } 1672 } 1673 1674 fetch_next_txq_desc: 1675 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf); 1676 } 1677 1678 tx_splitq_clean_out: 1679 ntc += tx_q->desc_count; 1680 tx_q->next_to_clean = ntc; 1681 } 1682 1683 #define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \ 1684 do { \ 1685 (buf)++; \ 1686 (ntc)++; \ 1687 if (unlikely((ntc) == (txq)->desc_count)) { \ 1688 buf = (txq)->tx_buf; \ 1689 ntc = 0; \ 1690 } \ 1691 } while (0) 1692 1693 /** 1694 * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers 1695 * @txq: queue to clean 1696 * @compl_tag: completion tag of packet to clean (from completion descriptor) 1697 * @cleaned: pointer to stats struct to track cleaned packets/bytes 1698 * @budget: Used to determine if we are in netpoll 1699 * 1700 * Cleans all buffers associated with the input completion tag either from the 1701 * TX buffer ring or from the hash table if the buffers were previously 1702 * stashed. Returns the byte/segment count for the cleaned packet associated 1703 * this completion tag. 1704 */ 1705 static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag, 1706 struct idpf_cleaned_stats *cleaned, 1707 int budget) 1708 { 1709 u16 idx = compl_tag & txq->compl_tag_bufid_m; 1710 struct idpf_tx_buf *tx_buf = NULL; 1711 u16 ntc = txq->next_to_clean; 1712 u16 num_descs_cleaned = 0; 1713 u16 orig_idx = idx; 1714 1715 tx_buf = &txq->tx_buf[idx]; 1716 1717 while (tx_buf->compl_tag == (int)compl_tag) { 1718 if (tx_buf->skb) { 1719 idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget); 1720 } else if (dma_unmap_len(tx_buf, len)) { 1721 dma_unmap_page(txq->dev, 1722 dma_unmap_addr(tx_buf, dma), 1723 dma_unmap_len(tx_buf, len), 1724 DMA_TO_DEVICE); 1725 dma_unmap_len_set(tx_buf, len, 0); 1726 } 1727 1728 memset(tx_buf, 0, sizeof(struct idpf_tx_buf)); 1729 tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; 1730 1731 num_descs_cleaned++; 1732 idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf); 1733 } 1734 1735 /* If we didn't clean anything on the ring for this completion, there's 1736 * nothing more to do. 1737 */ 1738 if (unlikely(!num_descs_cleaned)) 1739 return false; 1740 1741 /* Otherwise, if we did clean a packet on the ring directly, it's safe 1742 * to assume that the descriptors starting from the original 1743 * next_to_clean up until the previously cleaned packet can be reused. 
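	 * For example (ignoring ring wrap): if next_to_clean was 4 and the
	 * completion tag mapped to ring index 10, with the packet's buffers
	 * occupying entries 10-13, then entries 4-9 still hold buffers for
	 * packets that will complete out of order, and next_to_clean will
	 * end up at 14.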
1744 * Therefore, we will go back in the ring and stash any buffers still 1745 * in the ring into the hash table to be cleaned later. 1746 */ 1747 tx_buf = &txq->tx_buf[ntc]; 1748 while (tx_buf != &txq->tx_buf[orig_idx]) { 1749 idpf_stash_flow_sch_buffers(txq, tx_buf); 1750 idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf); 1751 } 1752 1753 /* Finally, update next_to_clean to reflect the work that was just done 1754 * on the ring, if any. If the packet was only cleaned from the hash 1755 * table, the ring will not be impacted, therefore we should not touch 1756 * next_to_clean. The updated idx is used here 1757 */ 1758 txq->next_to_clean = idx; 1759 1760 return true; 1761 } 1762 1763 /** 1764 * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers 1765 * whether on the buffer ring or in the hash table 1766 * @txq: Tx ring to clean 1767 * @desc: pointer to completion queue descriptor to extract completion 1768 * information from 1769 * @cleaned: pointer to stats struct to track cleaned packets/bytes 1770 * @budget: Used to determine if we are in netpoll 1771 * 1772 * Returns bytes/packets cleaned 1773 */ 1774 static void idpf_tx_handle_rs_completion(struct idpf_queue *txq, 1775 struct idpf_splitq_tx_compl_desc *desc, 1776 struct idpf_cleaned_stats *cleaned, 1777 int budget) 1778 { 1779 u16 compl_tag; 1780 1781 if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) { 1782 u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head); 1783 1784 return idpf_tx_splitq_clean(txq, head, budget, cleaned, false); 1785 } 1786 1787 compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag); 1788 1789 /* If we didn't clean anything on the ring, this packet must be 1790 * in the hash table. Go clean it there. 1791 */ 1792 if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget)) 1793 idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget); 1794 } 1795 1796 /** 1797 * idpf_tx_clean_complq - Reclaim resources on completion queue 1798 * @complq: Tx ring to clean 1799 * @budget: Used to determine if we are in netpoll 1800 * @cleaned: returns number of packets cleaned 1801 * 1802 * Returns true if there's any budget left (e.g. 
the clean is finished) 1803 */ 1804 static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, 1805 int *cleaned) 1806 { 1807 struct idpf_splitq_tx_compl_desc *tx_desc; 1808 struct idpf_vport *vport = complq->vport; 1809 s16 ntc = complq->next_to_clean; 1810 struct idpf_netdev_priv *np; 1811 unsigned int complq_budget; 1812 bool complq_ok = true; 1813 int i; 1814 1815 complq_budget = vport->compln_clean_budget; 1816 tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc); 1817 ntc -= complq->desc_count; 1818 1819 do { 1820 struct idpf_cleaned_stats cleaned_stats = { }; 1821 struct idpf_queue *tx_q; 1822 int rel_tx_qid; 1823 u16 hw_head; 1824 u8 ctype; /* completion type */ 1825 u16 gen; 1826 1827 /* if the descriptor isn't done, no work yet to do */ 1828 gen = le16_get_bits(tx_desc->qid_comptype_gen, 1829 IDPF_TXD_COMPLQ_GEN_M); 1830 if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen) 1831 break; 1832 1833 /* Find necessary info of TX queue to clean buffers */ 1834 rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen, 1835 IDPF_TXD_COMPLQ_QID_M); 1836 if (rel_tx_qid >= complq->txq_grp->num_txq || 1837 !complq->txq_grp->txqs[rel_tx_qid]) { 1838 dev_err(&complq->vport->adapter->pdev->dev, 1839 "TxQ not found\n"); 1840 goto fetch_next_desc; 1841 } 1842 tx_q = complq->txq_grp->txqs[rel_tx_qid]; 1843 1844 /* Determine completion type */ 1845 ctype = le16_get_bits(tx_desc->qid_comptype_gen, 1846 IDPF_TXD_COMPLQ_COMPL_TYPE_M); 1847 switch (ctype) { 1848 case IDPF_TXD_COMPLT_RE: 1849 hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head); 1850 1851 idpf_tx_splitq_clean(tx_q, hw_head, budget, 1852 &cleaned_stats, true); 1853 break; 1854 case IDPF_TXD_COMPLT_RS: 1855 idpf_tx_handle_rs_completion(tx_q, tx_desc, 1856 &cleaned_stats, budget); 1857 break; 1858 case IDPF_TXD_COMPLT_SW_MARKER: 1859 idpf_tx_handle_sw_marker(tx_q); 1860 break; 1861 default: 1862 dev_err(&tx_q->vport->adapter->pdev->dev, 1863 "Unknown TX completion type: %d\n", 1864 ctype); 1865 goto fetch_next_desc; 1866 } 1867 1868 u64_stats_update_begin(&tx_q->stats_sync); 1869 u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets); 1870 u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes); 1871 tx_q->cleaned_pkts += cleaned_stats.packets; 1872 tx_q->cleaned_bytes += cleaned_stats.bytes; 1873 complq->num_completions++; 1874 u64_stats_update_end(&tx_q->stats_sync); 1875 1876 fetch_next_desc: 1877 tx_desc++; 1878 ntc++; 1879 if (unlikely(!ntc)) { 1880 ntc -= complq->desc_count; 1881 tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0); 1882 change_bit(__IDPF_Q_GEN_CHK, complq->flags); 1883 } 1884 1885 prefetch(tx_desc); 1886 1887 /* update budget accounting */ 1888 complq_budget--; 1889 } while (likely(complq_budget)); 1890 1891 /* Store the state of the complq to be used later in deciding if a 1892 * TXQ can be started again 1893 */ 1894 if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) > 1895 IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq))) 1896 complq_ok = false; 1897 1898 np = netdev_priv(complq->vport->netdev); 1899 for (i = 0; i < complq->txq_grp->num_txq; ++i) { 1900 struct idpf_queue *tx_q = complq->txq_grp->txqs[i]; 1901 struct netdev_queue *nq; 1902 bool dont_wake; 1903 1904 /* We didn't clean anything on this queue, move along */ 1905 if (!tx_q->cleaned_bytes) 1906 continue; 1907 1908 *cleaned += tx_q->cleaned_pkts; 1909 1910 /* Update BQL */ 1911 nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); 1912 1913 dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) || 1914 np->state != __IDPF_VPORT_UP || 1915 
!netif_carrier_ok(tx_q->vport->netdev); 1916 /* Check if the TXQ needs to and can be restarted */ 1917 __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes, 1918 IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, 1919 dont_wake); 1920 1921 /* Reset cleaned stats for the next time this queue is 1922 * cleaned 1923 */ 1924 tx_q->cleaned_bytes = 0; 1925 tx_q->cleaned_pkts = 0; 1926 } 1927 1928 ntc += complq->desc_count; 1929 complq->next_to_clean = ntc; 1930 1931 return !!complq_budget; 1932 } 1933 1934 /** 1935 * idpf_tx_splitq_build_ctb - populate command tag and size for queue 1936 * based scheduling descriptors 1937 * @desc: descriptor to populate 1938 * @params: pointer to tx params struct 1939 * @td_cmd: command to be filled in desc 1940 * @size: size of buffer 1941 */ 1942 void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc, 1943 struct idpf_tx_splitq_params *params, 1944 u16 td_cmd, u16 size) 1945 { 1946 desc->q.qw1.cmd_dtype = 1947 le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M); 1948 desc->q.qw1.cmd_dtype |= 1949 le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M); 1950 desc->q.qw1.buf_size = cpu_to_le16(size); 1951 desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag); 1952 } 1953 1954 /** 1955 * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow 1956 * scheduling descriptors 1957 * @desc: descriptor to populate 1958 * @params: pointer to tx params struct 1959 * @td_cmd: command to be filled in desc 1960 * @size: size of buffer 1961 */ 1962 void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, 1963 struct idpf_tx_splitq_params *params, 1964 u16 td_cmd, u16 size) 1965 { 1966 desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd; 1967 desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size); 1968 desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag); 1969 } 1970 1971 /** 1972 * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions 1973 * @tx_q: the queue to be checked 1974 * @size: number of descriptors we want to assure is available 1975 * 1976 * Returns 0 if stop is not needed 1977 */ 1978 int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size) 1979 { 1980 struct netdev_queue *nq; 1981 1982 if (likely(IDPF_DESC_UNUSED(tx_q) >= size)) 1983 return 0; 1984 1985 u64_stats_update_begin(&tx_q->stats_sync); 1986 u64_stats_inc(&tx_q->q_stats.tx.q_busy); 1987 u64_stats_update_end(&tx_q->stats_sync); 1988 1989 nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); 1990 1991 return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size); 1992 } 1993 1994 /** 1995 * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions 1996 * @tx_q: the queue to be checked 1997 * @descs_needed: number of descriptors required for this packet 1998 * 1999 * Returns 0 if stop is not needed 2000 */ 2001 static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q, 2002 unsigned int descs_needed) 2003 { 2004 if (idpf_tx_maybe_stop_common(tx_q, descs_needed)) 2005 goto splitq_stop; 2006 2007 /* If there are too many outstanding completions expected on the 2008 * completion queue, stop the TX queue to give the device some time to 2009 * catch up 2010 */ 2011 if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) > 2012 IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq))) 2013 goto splitq_stop; 2014 2015 /* Also check for available book keeping buffers; if we are low, stop 2016 * the queue to wait for more completions 2017 */ 2018 if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q))) 2019 goto 
splitq_stop; 2020 2021 return 0; 2022 2023 splitq_stop: 2024 u64_stats_update_begin(&tx_q->stats_sync); 2025 u64_stats_inc(&tx_q->q_stats.tx.q_busy); 2026 u64_stats_update_end(&tx_q->stats_sync); 2027 netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx); 2028 2029 return -EBUSY; 2030 } 2031 2032 /** 2033 * idpf_tx_buf_hw_update - Store the new tail value 2034 * @tx_q: queue to bump 2035 * @val: new tail index 2036 * @xmit_more: more skb's pending 2037 * 2038 * The naming here is special in that 'hw' signals that this function is about 2039 * to do a register write to update our queue status. We know this can only 2040 * mean tail here as HW should be owning head for TX. 2041 */ 2042 void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val, 2043 bool xmit_more) 2044 { 2045 struct netdev_queue *nq; 2046 2047 nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); 2048 tx_q->next_to_use = val; 2049 2050 idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED); 2051 2052 /* Force memory writes to complete before letting h/w 2053 * know there are new descriptors to fetch. (Only 2054 * applicable for weak-ordered memory model archs, 2055 * such as IA-64). 2056 */ 2057 wmb(); 2058 2059 /* notify HW of packet */ 2060 if (netif_xmit_stopped(nq) || !xmit_more) 2061 writel(val, tx_q->tail); 2062 } 2063 2064 /** 2065 * idpf_tx_desc_count_required - calculate number of Tx descriptors needed 2066 * @txq: queue to send buffer on 2067 * @skb: send buffer 2068 * 2069 * Returns number of data descriptors needed for this skb. 2070 */ 2071 unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, 2072 struct sk_buff *skb) 2073 { 2074 const struct skb_shared_info *shinfo; 2075 unsigned int count = 0, i; 2076 2077 count += !!skb_headlen(skb); 2078 2079 if (!skb_is_nonlinear(skb)) 2080 return count; 2081 2082 shinfo = skb_shinfo(skb); 2083 for (i = 0; i < shinfo->nr_frags; i++) { 2084 unsigned int size; 2085 2086 size = skb_frag_size(&shinfo->frags[i]); 2087 2088 /* We only need to use the idpf_size_to_txd_count check if the 2089 * fragment is going to span multiple descriptors, 2090 * i.e. size >= 16K. 
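* For example, assuming idpf_size_to_txd_count() rounds the size up by the
* 12K aligned chunk size used in idpf_tx_splitq_map() below, a 32K fragment
* counts as three descriptors here, matching the 13784/12K/6696 split shown
* in the mapping example further down.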
2091 */ 2092 if (size >= SZ_16K) 2093 count += idpf_size_to_txd_count(size); 2094 else 2095 count++; 2096 } 2097 2098 if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) { 2099 if (__skb_linearize(skb)) 2100 return 0; 2101 2102 count = idpf_size_to_txd_count(skb->len); 2103 u64_stats_update_begin(&txq->stats_sync); 2104 u64_stats_inc(&txq->q_stats.tx.linearize); 2105 u64_stats_update_end(&txq->stats_sync); 2106 } 2107 2108 return count; 2109 } 2110 2111 /** 2112 * idpf_tx_dma_map_error - handle TX DMA map errors 2113 * @txq: queue to send buffer on 2114 * @skb: send buffer 2115 * @first: original first buffer info buffer for packet 2116 * @idx: starting point on ring to unwind 2117 */ 2118 void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, 2119 struct idpf_tx_buf *first, u16 idx) 2120 { 2121 u64_stats_update_begin(&txq->stats_sync); 2122 u64_stats_inc(&txq->q_stats.tx.dma_map_errs); 2123 u64_stats_update_end(&txq->stats_sync); 2124 2125 /* clear dma mappings for failed tx_buf map */ 2126 for (;;) { 2127 struct idpf_tx_buf *tx_buf; 2128 2129 tx_buf = &txq->tx_buf[idx]; 2130 idpf_tx_buf_rel(txq, tx_buf); 2131 if (tx_buf == first) 2132 break; 2133 if (idx == 0) 2134 idx = txq->desc_count; 2135 idx--; 2136 } 2137 2138 if (skb_is_gso(skb)) { 2139 union idpf_tx_flex_desc *tx_desc; 2140 2141 /* If we failed a DMA mapping for a TSO packet, we will have 2142 * used one additional descriptor for a context 2143 * descriptor. Reset that here. 2144 */ 2145 tx_desc = IDPF_FLEX_TX_DESC(txq, idx); 2146 memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc)); 2147 if (idx == 0) 2148 idx = txq->desc_count; 2149 idx--; 2150 } 2151 2152 /* Update tail in case netdev_xmit_more was previously true */ 2153 idpf_tx_buf_hw_update(txq, idx, false); 2154 } 2155 2156 /** 2157 * idpf_tx_splitq_bump_ntu - adjust NTU and generation 2158 * @txq: the tx ring to wrap 2159 * @ntu: ring index to bump 2160 */ 2161 static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu) 2162 { 2163 ntu++; 2164 2165 if (ntu == txq->desc_count) { 2166 ntu = 0; 2167 txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq); 2168 } 2169 2170 return ntu; 2171 } 2172 2173 /** 2174 * idpf_tx_splitq_map - Build the Tx flex descriptor 2175 * @tx_q: queue to send buffer on 2176 * @params: pointer to splitq params struct 2177 * @first: first buffer info buffer to use 2178 * 2179 * This function loops over the skb data pointed to by *first 2180 * and gets a physical address for each memory location and programs 2181 * it and the length into the transmit flex descriptor. 
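*
* Every buffer programmed here is stamped with the same completion tag,
* built from the current tag generation and the starting ring index
* ((compl_tag_cur_gen << compl_tag_gen_s) | next_to_use), so the splitq
* completion cleaning path can later match all of the packet's buffers.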
2182 */ 2183 static void idpf_tx_splitq_map(struct idpf_queue *tx_q, 2184 struct idpf_tx_splitq_params *params, 2185 struct idpf_tx_buf *first) 2186 { 2187 union idpf_tx_flex_desc *tx_desc; 2188 unsigned int data_len, size; 2189 struct idpf_tx_buf *tx_buf; 2190 u16 i = tx_q->next_to_use; 2191 struct netdev_queue *nq; 2192 struct sk_buff *skb; 2193 skb_frag_t *frag; 2194 u16 td_cmd = 0; 2195 dma_addr_t dma; 2196 2197 skb = first->skb; 2198 2199 td_cmd = params->offload.td_cmd; 2200 2201 data_len = skb->data_len; 2202 size = skb_headlen(skb); 2203 2204 tx_desc = IDPF_FLEX_TX_DESC(tx_q, i); 2205 2206 dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); 2207 2208 tx_buf = first; 2209 2210 params->compl_tag = 2211 (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i; 2212 2213 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 2214 unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; 2215 2216 if (dma_mapping_error(tx_q->dev, dma)) 2217 return idpf_tx_dma_map_error(tx_q, skb, first, i); 2218 2219 tx_buf->compl_tag = params->compl_tag; 2220 2221 /* record length, and DMA address */ 2222 dma_unmap_len_set(tx_buf, len, size); 2223 dma_unmap_addr_set(tx_buf, dma, dma); 2224 2225 /* buf_addr is in same location for both desc types */ 2226 tx_desc->q.buf_addr = cpu_to_le64(dma); 2227 2228 /* The stack can send us fragments that are too large for a 2229 * single descriptor i.e. frag size > 16K-1. We will need to 2230 * split the fragment across multiple descriptors in this case. 2231 * To adhere to HW alignment restrictions, the fragment needs 2232 * to be split such that the first chunk ends on a 4K boundary 2233 * and all subsequent chunks start on a 4K boundary. We still 2234 * want to send as much data as possible though, so our 2235 * intermediate descriptor chunk size will be 12K. 2236 * 2237 * For example, consider a 32K fragment mapped to DMA addr 2600. 2238 * ------------------------------------------------------------ 2239 * | frag_size = 32K | 2240 * ------------------------------------------------------------ 2241 * |2600 |16384 |28672 2242 * 2243 * 3 descriptors will be used for this fragment. The HW expects 2244 * the descriptors to contain the following: 2245 * ------------------------------------------------------------ 2246 * | size = 13784 | size = 12K | size = 6696 | 2247 * | dma = 2600 | dma = 16384 | dma = 28672 | 2248 * ------------------------------------------------------------ 2249 * 2250 * We need to first adjust the max_data for the first chunk so 2251 * that it ends on a 4K boundary. By negating the value of the 2252 * DMA address and taking only the low order bits, we're 2253 * effectively calculating 2254 * 4K - (DMA addr lower order bits) = 2255 * bytes to next boundary. 2256 * 2257 * Add that to our base aligned max_data (12K) and we have 2258 * our first chunk size. In the example above, 2259 * 13784 = 12K + (4096-2600) 2260 * 2261 * After guaranteeing the first chunk ends on a 4K boundary, we 2262 * will give the intermediate descriptors 12K chunks and 2263 * whatever is left to the final descriptor. This ensures that 2264 * all descriptors used for the remaining chunks of the 2265 * fragment start on a 4K boundary and we use as few 2266 * descriptors as possible. 
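* In code terms, for the example above the adjustment below computes
* -2600 & (4K - 1) = 1496, so the first chunk is 12288 + 1496 = 13784
* bytes and every later chunk falls back to the 12K aligned size.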
2267 */ 2268 max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1); 2269 while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) { 2270 idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, 2271 max_data); 2272 2273 tx_desc++; 2274 i++; 2275 2276 if (i == tx_q->desc_count) { 2277 tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0); 2278 i = 0; 2279 tx_q->compl_tag_cur_gen = 2280 IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); 2281 } 2282 2283 /* Since this packet has a buffer that is going to span 2284 * multiple descriptors, it's going to leave holes in 2285 * to the TX buffer ring. To ensure these holes do not 2286 * cause issues in the cleaning routines, we will clear 2287 * them of any stale data and assign them the same 2288 * completion tag as the current packet. Then when the 2289 * packet is being cleaned, the cleaning routines will 2290 * simply pass over these holes and finish cleaning the 2291 * rest of the packet. 2292 */ 2293 memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf)); 2294 tx_q->tx_buf[i].compl_tag = params->compl_tag; 2295 2296 /* Adjust the DMA offset and the remaining size of the 2297 * fragment. On the first iteration of this loop, 2298 * max_data will be >= 12K and <= 16K-1. On any 2299 * subsequent iteration of this loop, max_data will 2300 * always be 12K. 2301 */ 2302 dma += max_data; 2303 size -= max_data; 2304 2305 /* Reset max_data since remaining chunks will be 12K 2306 * at most 2307 */ 2308 max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; 2309 2310 /* buf_addr is in same location for both desc types */ 2311 tx_desc->q.buf_addr = cpu_to_le64(dma); 2312 } 2313 2314 if (!data_len) 2315 break; 2316 2317 idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size); 2318 tx_desc++; 2319 i++; 2320 2321 if (i == tx_q->desc_count) { 2322 tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0); 2323 i = 0; 2324 tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); 2325 } 2326 2327 size = skb_frag_size(frag); 2328 data_len -= size; 2329 2330 dma = skb_frag_dma_map(tx_q->dev, frag, 0, size, 2331 DMA_TO_DEVICE); 2332 2333 tx_buf = &tx_q->tx_buf[i]; 2334 } 2335 2336 /* record SW timestamp if HW timestamp is not available */ 2337 skb_tx_timestamp(skb); 2338 2339 /* write last descriptor with RS and EOP bits */ 2340 td_cmd |= params->eop_cmd; 2341 idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size); 2342 i = idpf_tx_splitq_bump_ntu(tx_q, i); 2343 2344 /* set next_to_watch value indicating a packet is present */ 2345 first->next_to_watch = tx_desc; 2346 2347 tx_q->txq_grp->num_completions_pending++; 2348 2349 /* record bytecount for BQL */ 2350 nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); 2351 netdev_tx_sent_queue(nq, first->bytecount); 2352 2353 idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); 2354 } 2355 2356 /** 2357 * idpf_tso - computes mss and TSO length to prepare for TSO 2358 * @skb: pointer to skb 2359 * @off: pointer to struct that holds offload parameters 2360 * 2361 * Returns error (negative) if TSO was requested but cannot be applied to the 2362 * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise. 
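*
* A caller is expected to handle all three cases, e.g. as
* idpf_tx_splitq_frame() below does:
*
*	tso = idpf_tso(skb, &tx_params.offload);
*	if (tso < 0)
*		return idpf_tx_drop_skb(tx_q, skb);
*	count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
*
* and, when tso is non-zero, builds the flex TSO context descriptor.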
2363 */ 2364 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off) 2365 { 2366 const struct skb_shared_info *shinfo; 2367 union { 2368 struct iphdr *v4; 2369 struct ipv6hdr *v6; 2370 unsigned char *hdr; 2371 } ip; 2372 union { 2373 struct tcphdr *tcp; 2374 struct udphdr *udp; 2375 unsigned char *hdr; 2376 } l4; 2377 u32 paylen, l4_start; 2378 int err; 2379 2380 if (!skb_is_gso(skb)) 2381 return 0; 2382 2383 err = skb_cow_head(skb, 0); 2384 if (err < 0) 2385 return err; 2386 2387 shinfo = skb_shinfo(skb); 2388 2389 ip.hdr = skb_network_header(skb); 2390 l4.hdr = skb_transport_header(skb); 2391 2392 /* initialize outer IP header fields */ 2393 if (ip.v4->version == 4) { 2394 ip.v4->tot_len = 0; 2395 ip.v4->check = 0; 2396 } else if (ip.v6->version == 6) { 2397 ip.v6->payload_len = 0; 2398 } 2399 2400 l4_start = skb_transport_offset(skb); 2401 2402 /* remove payload length from checksum */ 2403 paylen = skb->len - l4_start; 2404 2405 switch (shinfo->gso_type & ~SKB_GSO_DODGY) { 2406 case SKB_GSO_TCPV4: 2407 case SKB_GSO_TCPV6: 2408 csum_replace_by_diff(&l4.tcp->check, 2409 (__force __wsum)htonl(paylen)); 2410 off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start; 2411 break; 2412 case SKB_GSO_UDP_L4: 2413 csum_replace_by_diff(&l4.udp->check, 2414 (__force __wsum)htonl(paylen)); 2415 /* compute length of segmentation header */ 2416 off->tso_hdr_len = sizeof(struct udphdr) + l4_start; 2417 l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr)); 2418 break; 2419 default: 2420 return -EINVAL; 2421 } 2422 2423 off->tso_len = skb->len - off->tso_hdr_len; 2424 off->mss = shinfo->gso_size; 2425 off->tso_segs = shinfo->gso_segs; 2426 2427 off->tx_flags |= IDPF_TX_FLAGS_TSO; 2428 2429 return 1; 2430 } 2431 2432 /** 2433 * __idpf_chk_linearize - Check skb is not using too many buffers 2434 * @skb: send buffer 2435 * @max_bufs: maximum number of buffers 2436 * 2437 * For TSO we need to count the TSO header and segment payload separately. As 2438 * such we need to check cases where we have max_bufs-1 fragments or more as we 2439 * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1 2440 * for the segment payload in the first descriptor, and another max_buf-1 for 2441 * the fragments. 2442 */ 2443 static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs) 2444 { 2445 const struct skb_shared_info *shinfo = skb_shinfo(skb); 2446 const skb_frag_t *frag, *stale; 2447 int nr_frags, sum; 2448 2449 /* no need to check if number of frags is less than max_bufs - 1 */ 2450 nr_frags = shinfo->nr_frags; 2451 if (nr_frags < (max_bufs - 1)) 2452 return false; 2453 2454 /* We need to walk through the list and validate that each group 2455 * of max_bufs-2 fragments totals at least gso_size. 2456 */ 2457 nr_frags -= max_bufs - 2; 2458 frag = &shinfo->frags[0]; 2459 2460 /* Initialize size to the negative value of gso_size minus 1. We use 2461 * this as the worst case scenario in which the frag ahead of us only 2462 * provides one byte which is why we are limited to max_bufs-2 2463 * descriptors for a single transmit as the header and previous 2464 * fragment are already consuming 2 descriptors. 
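* Put differently, after the unrolled additions below the running sum stays
* non-negative only if each window of max_bufs-2 consecutive fragments
* supplies at least gso_size - 1 bytes; once it goes negative, a single
* segment could need more than max_bufs descriptors, so the skb has to be
* linearized.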
2465 */ 2466 sum = 1 - shinfo->gso_size; 2467 2468 /* Add size of frags 0 through 4 to create our initial sum */ 2469 sum += skb_frag_size(frag++); 2470 sum += skb_frag_size(frag++); 2471 sum += skb_frag_size(frag++); 2472 sum += skb_frag_size(frag++); 2473 sum += skb_frag_size(frag++); 2474 2475 /* Walk through fragments adding latest fragment, testing it, and 2476 * then removing stale fragments from the sum. 2477 */ 2478 for (stale = &shinfo->frags[0];; stale++) { 2479 int stale_size = skb_frag_size(stale); 2480 2481 sum += skb_frag_size(frag++); 2482 2483 /* The stale fragment may present us with a smaller 2484 * descriptor than the actual fragment size. To account 2485 * for that we need to remove all the data on the front and 2486 * figure out what the remainder would be in the last 2487 * descriptor associated with the fragment. 2488 */ 2489 if (stale_size > IDPF_TX_MAX_DESC_DATA) { 2490 int align_pad = -(skb_frag_off(stale)) & 2491 (IDPF_TX_MAX_READ_REQ_SIZE - 1); 2492 2493 sum -= align_pad; 2494 stale_size -= align_pad; 2495 2496 do { 2497 sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED; 2498 stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED; 2499 } while (stale_size > IDPF_TX_MAX_DESC_DATA); 2500 } 2501 2502 /* if sum is negative we failed to make sufficient progress */ 2503 if (sum < 0) 2504 return true; 2505 2506 if (!nr_frags--) 2507 break; 2508 2509 sum -= stale_size; 2510 } 2511 2512 return false; 2513 } 2514 2515 /** 2516 * idpf_chk_linearize - Check if skb exceeds max descriptors per packet 2517 * @skb: send buffer 2518 * @max_bufs: maximum scatter gather buffers for single packet 2519 * @count: number of buffers this packet needs 2520 * 2521 * Make sure we don't exceed maximum scatter gather buffers for a single 2522 * packet. We have to do some special checking around the boundary (max_bufs-1) 2523 * if TSO is on since we need to count the TSO header and payload separately. 2524 * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO 2525 * header, 1 for segment payload, and then 7 for the fragments.
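*
* When this returns true, idpf_tx_desc_count_required() falls back to
* __skb_linearize() and recounts the descriptors from skb->len.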
2526 */ 2527 bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, 2528 unsigned int count) 2529 { 2530 if (likely(count < max_bufs)) 2531 return false; 2532 if (skb_is_gso(skb)) 2533 return __idpf_chk_linearize(skb, max_bufs); 2534 2535 return count > max_bufs; 2536 } 2537 2538 /** 2539 * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring 2540 * @txq: queue to put context descriptor on 2541 * 2542 * Since the TX buffer rings mimics the descriptor ring, update the tx buffer 2543 * ring entry to reflect that this index is a context descriptor 2544 */ 2545 static struct idpf_flex_tx_ctx_desc * 2546 idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq) 2547 { 2548 struct idpf_flex_tx_ctx_desc *desc; 2549 int i = txq->next_to_use; 2550 2551 memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf)); 2552 txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; 2553 2554 /* grab the next descriptor */ 2555 desc = IDPF_FLEX_TX_CTX_DESC(txq, i); 2556 txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i); 2557 2558 return desc; 2559 } 2560 2561 /** 2562 * idpf_tx_drop_skb - free the SKB and bump tail if necessary 2563 * @tx_q: queue to send buffer on 2564 * @skb: pointer to skb 2565 */ 2566 netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb) 2567 { 2568 u64_stats_update_begin(&tx_q->stats_sync); 2569 u64_stats_inc(&tx_q->q_stats.tx.skb_drops); 2570 u64_stats_update_end(&tx_q->stats_sync); 2571 2572 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); 2573 2574 dev_kfree_skb(skb); 2575 2576 return NETDEV_TX_OK; 2577 } 2578 2579 /** 2580 * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors 2581 * @skb: send buffer 2582 * @tx_q: queue to send buffer on 2583 * 2584 * Returns NETDEV_TX_OK if sent, else an error code 2585 */ 2586 static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, 2587 struct idpf_queue *tx_q) 2588 { 2589 struct idpf_tx_splitq_params tx_params = { }; 2590 struct idpf_tx_buf *first; 2591 unsigned int count; 2592 int tso; 2593 2594 count = idpf_tx_desc_count_required(tx_q, skb); 2595 if (unlikely(!count)) 2596 return idpf_tx_drop_skb(tx_q, skb); 2597 2598 tso = idpf_tso(skb, &tx_params.offload); 2599 if (unlikely(tso < 0)) 2600 return idpf_tx_drop_skb(tx_q, skb); 2601 2602 /* Check for splitq specific TX resources */ 2603 count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso); 2604 if (idpf_tx_maybe_stop_splitq(tx_q, count)) { 2605 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); 2606 2607 return NETDEV_TX_BUSY; 2608 } 2609 2610 if (tso) { 2611 /* If tso is needed, set up context desc */ 2612 struct idpf_flex_tx_ctx_desc *ctx_desc = 2613 idpf_tx_splitq_get_ctx_desc(tx_q); 2614 2615 ctx_desc->tso.qw1.cmd_dtype = 2616 cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX | 2617 IDPF_TX_FLEX_CTX_DESC_CMD_TSO); 2618 ctx_desc->tso.qw0.flex_tlen = 2619 cpu_to_le32(tx_params.offload.tso_len & 2620 IDPF_TXD_FLEX_CTX_TLEN_M); 2621 ctx_desc->tso.qw0.mss_rt = 2622 cpu_to_le16(tx_params.offload.mss & 2623 IDPF_TXD_FLEX_CTX_MSS_RT_M); 2624 ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len; 2625 2626 u64_stats_update_begin(&tx_q->stats_sync); 2627 u64_stats_inc(&tx_q->q_stats.tx.lso_pkts); 2628 u64_stats_update_end(&tx_q->stats_sync); 2629 } 2630 2631 /* record the location of the first descriptor for this packet */ 2632 first = &tx_q->tx_buf[tx_q->next_to_use]; 2633 first->skb = skb; 2634 2635 if (tso) { 2636 first->gso_segs = tx_params.offload.tso_segs; 2637 first->bytecount = skb->len + 2638 ((first->gso_segs - 1) * 
tx_params.offload.tso_hdr_len); 2639 } else { 2640 first->gso_segs = 1; 2641 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); 2642 } 2643 2644 if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) { 2645 tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE; 2646 tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP; 2647 /* Set the RE bit to catch any packets that may have not been 2648 * stashed during RS completion cleaning. MIN_GAP is set to 2649 * MIN_RING size to ensure it will be set at least once each 2650 * time around the ring. 2651 */ 2652 if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) { 2653 tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE; 2654 tx_q->txq_grp->num_completions_pending++; 2655 } 2656 2657 if (skb->ip_summed == CHECKSUM_PARTIAL) 2658 tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN; 2659 2660 } else { 2661 tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2; 2662 tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD; 2663 2664 if (skb->ip_summed == CHECKSUM_PARTIAL) 2665 tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN; 2666 } 2667 2668 idpf_tx_splitq_map(tx_q, &tx_params, first); 2669 2670 return NETDEV_TX_OK; 2671 } 2672 2673 /** 2674 * idpf_tx_splitq_start - Selects the right Tx queue to send buffer 2675 * @skb: send buffer 2676 * @netdev: network interface device structure 2677 * 2678 * Returns NETDEV_TX_OK if sent, else an error code 2679 */ 2680 netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, 2681 struct net_device *netdev) 2682 { 2683 struct idpf_vport *vport = idpf_netdev_to_vport(netdev); 2684 struct idpf_queue *tx_q; 2685 2686 if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) { 2687 dev_kfree_skb_any(skb); 2688 2689 return NETDEV_TX_OK; 2690 } 2691 2692 tx_q = vport->txqs[skb_get_queue_mapping(skb)]; 2693 2694 /* hardware can't handle really short frames, hardware padding works 2695 * beyond this point 2696 */ 2697 if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) { 2698 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); 2699 2700 return NETDEV_TX_OK; 2701 } 2702 2703 return idpf_tx_splitq_frame(skb, tx_q); 2704 } 2705 2706 /** 2707 * idpf_ptype_to_htype - get a hash type 2708 * @decoded: Decoded Rx packet type related fields 2709 * 2710 * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by 2711 * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of 2712 * Rx desc. 
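*
* For instance, a known ptype with a PAY2 payload layer and a decoded inner
* L4 protocol maps to PKT_HASH_TYPE_L4, while an unknown ptype falls back
* to PKT_HASH_TYPE_NONE.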
2713 */ 2714 enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded) 2715 { 2716 if (!decoded->known) 2717 return PKT_HASH_TYPE_NONE; 2718 if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 && 2719 decoded->inner_prot) 2720 return PKT_HASH_TYPE_L4; 2721 if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 && 2722 decoded->outer_ip) 2723 return PKT_HASH_TYPE_L3; 2724 if (decoded->outer_ip == IDPF_RX_PTYPE_OUTER_L2) 2725 return PKT_HASH_TYPE_L2; 2726 2727 return PKT_HASH_TYPE_NONE; 2728 } 2729 2730 /** 2731 * idpf_rx_hash - set the hash value in the skb 2732 * @rxq: Rx descriptor ring packet is being transacted on 2733 * @skb: pointer to current skb being populated 2734 * @rx_desc: Receive descriptor 2735 * @decoded: Decoded Rx packet type related fields 2736 */ 2737 static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb, 2738 struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, 2739 struct idpf_rx_ptype_decoded *decoded) 2740 { 2741 u32 hash; 2742 2743 if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXHASH))) 2744 return; 2745 2746 hash = le16_to_cpu(rx_desc->hash1) | 2747 (rx_desc->ff2_mirrid_hash2.hash2 << 16) | 2748 (rx_desc->hash3 << 24); 2749 2750 skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded)); 2751 } 2752 2753 /** 2754 * idpf_rx_csum - Indicate in skb if checksum is good 2755 * @rxq: Rx descriptor ring packet is being transacted on 2756 * @skb: pointer to current skb being populated 2757 * @csum_bits: checksum fields extracted from the descriptor 2758 * @decoded: Decoded Rx packet type related fields 2759 * 2760 * skb->protocol must be set before this function is called 2761 */ 2762 static void idpf_rx_csum(struct idpf_queue *rxq, struct sk_buff *skb, 2763 struct idpf_rx_csum_decoded *csum_bits, 2764 struct idpf_rx_ptype_decoded *decoded) 2765 { 2766 bool ipv4, ipv6; 2767 2768 /* check if Rx checksum is enabled */ 2769 if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXCSUM))) 2770 return; 2771 2772 /* check if HW has decoded the packet and checksum */ 2773 if (!(csum_bits->l3l4p)) 2774 return; 2775 2776 ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4); 2777 ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6); 2778 2779 if (ipv4 && (csum_bits->ipe || csum_bits->eipe)) 2780 goto checksum_fail; 2781 2782 if (ipv6 && csum_bits->ipv6exadd) 2783 return; 2784 2785 /* check for L4 errors and handle packets that were not able to be 2786 * checksummed 2787 */ 2788 if (csum_bits->l4e) 2789 goto checksum_fail; 2790 2791 /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */ 2792 switch (decoded->inner_prot) { 2793 case IDPF_RX_PTYPE_INNER_PROT_ICMP: 2794 case IDPF_RX_PTYPE_INNER_PROT_TCP: 2795 case IDPF_RX_PTYPE_INNER_PROT_UDP: 2796 if (!csum_bits->raw_csum_inv) { 2797 u16 csum = csum_bits->raw_csum; 2798 2799 skb->csum = csum_unfold((__force __sum16)~swab16(csum)); 2800 skb->ip_summed = CHECKSUM_COMPLETE; 2801 } else { 2802 skb->ip_summed = CHECKSUM_UNNECESSARY; 2803 } 2804 break; 2805 case IDPF_RX_PTYPE_INNER_PROT_SCTP: 2806 skb->ip_summed = CHECKSUM_UNNECESSARY; 2807 break; 2808 default: 2809 break; 2810 } 2811 2812 return; 2813 2814 checksum_fail: 2815 u64_stats_update_begin(&rxq->stats_sync); 2816 u64_stats_inc(&rxq->q_stats.rx.hw_csum_err); 2817 u64_stats_update_end(&rxq->stats_sync); 2818 } 2819 2820 /** 2821 * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor 2822 * @rx_desc: receive descriptor 2823 * @csum: structure to extract checksum fields 2824 
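* Pulls the IP/EIP/L4 checksum error bits, the L3L4 parsed flag, the IPv6
* extension header flag and the raw checksum out of the flex Rx descriptor
* so idpf_rx_csum() can act on them.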
* 2825 **/ 2826 static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, 2827 struct idpf_rx_csum_decoded *csum) 2828 { 2829 u8 qword0, qword1; 2830 2831 qword0 = rx_desc->status_err0_qw0; 2832 qword1 = rx_desc->status_err0_qw1; 2833 2834 csum->ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M, 2835 qword1); 2836 csum->eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M, 2837 qword1); 2838 csum->l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M, 2839 qword1); 2840 csum->l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M, 2841 qword1); 2842 csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M, 2843 qword0); 2844 csum->raw_csum_inv = 2845 le16_get_bits(rx_desc->ptype_err_fflags0, 2846 VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M); 2847 csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs); 2848 } 2849 2850 /** 2851 * idpf_rx_rsc - Set the RSC fields in the skb 2852 * @rxq : Rx descriptor ring packet is being transacted on 2853 * @skb : pointer to current skb being populated 2854 * @rx_desc: Receive descriptor 2855 * @decoded: Decoded Rx packet type related fields 2856 * 2857 * Return 0 on success and error code on failure 2858 * 2859 * Populate the skb fields with the total number of RSC segments, RSC payload 2860 * length and packet type. 2861 */ 2862 static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb, 2863 struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, 2864 struct idpf_rx_ptype_decoded *decoded) 2865 { 2866 u16 rsc_segments, rsc_seg_len; 2867 bool ipv4, ipv6; 2868 int len; 2869 2870 if (unlikely(!decoded->outer_ip)) 2871 return -EINVAL; 2872 2873 rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen); 2874 if (unlikely(!rsc_seg_len)) 2875 return -EINVAL; 2876 2877 ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4); 2878 ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6); 2879 2880 if (unlikely(!(ipv4 ^ ipv6))) 2881 return -EINVAL; 2882 2883 rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len); 2884 if (unlikely(rsc_segments == 1)) 2885 return 0; 2886 2887 NAPI_GRO_CB(skb)->count = rsc_segments; 2888 skb_shinfo(skb)->gso_size = rsc_seg_len; 2889 2890 skb_reset_network_header(skb); 2891 len = skb->len - skb_transport_offset(skb); 2892 2893 if (ipv4) { 2894 struct iphdr *ipv4h = ip_hdr(skb); 2895 2896 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 2897 2898 /* Reset and set transport header offset in skb */ 2899 skb_set_transport_header(skb, sizeof(struct iphdr)); 2900 2901 /* Compute the TCP pseudo header checksum*/ 2902 tcp_hdr(skb)->check = 2903 ~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0); 2904 } else { 2905 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 2906 2907 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 2908 skb_set_transport_header(skb, sizeof(struct ipv6hdr)); 2909 tcp_hdr(skb)->check = 2910 ~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0); 2911 } 2912 2913 tcp_gro_complete(skb); 2914 2915 u64_stats_update_begin(&rxq->stats_sync); 2916 u64_stats_inc(&rxq->q_stats.rx.rsc_pkts); 2917 u64_stats_update_end(&rxq->stats_sync); 2918 2919 return 0; 2920 } 2921 2922 /** 2923 * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor 2924 * @rxq: Rx descriptor ring packet is being transacted on 2925 * @skb: pointer to current skb being populated 2926 * @rx_desc: Receive descriptor 2927 * 2928 * This function checks the ring, descriptor, and packet information in 2929 * order to populate the hash, checksum, protocol, and 2930 * other fields 
within the skb. 2931 */ 2932 static int idpf_rx_process_skb_fields(struct idpf_queue *rxq, 2933 struct sk_buff *skb, 2934 struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) 2935 { 2936 struct idpf_rx_csum_decoded csum_bits = { }; 2937 struct idpf_rx_ptype_decoded decoded; 2938 u16 rx_ptype; 2939 2940 rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0, 2941 VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M); 2942 2943 decoded = rxq->vport->rx_ptype_lkup[rx_ptype]; 2944 /* If we don't know the ptype we can't do anything else with it. Just 2945 * pass it up the stack as-is. 2946 */ 2947 if (!decoded.known) 2948 return 0; 2949 2950 /* process RSS/hash */ 2951 idpf_rx_hash(rxq, skb, rx_desc, &decoded); 2952 2953 skb->protocol = eth_type_trans(skb, rxq->vport->netdev); 2954 2955 if (le16_get_bits(rx_desc->hdrlen_flags, 2956 VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M)) 2957 return idpf_rx_rsc(rxq, skb, rx_desc, &decoded); 2958 2959 idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits); 2960 idpf_rx_csum(rxq, skb, &csum_bits, &decoded); 2961 2962 return 0; 2963 } 2964 2965 /** 2966 * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag 2967 * @rx_buf: buffer containing page to add 2968 * @skb: sk_buff to place the data into 2969 * @size: packet length from rx_desc 2970 * 2971 * This function will add the data contained in rx_buf->page to the skb. 2972 * It will just attach the page as a frag to the skb. 2973 * The function will then update the page offset. 2974 */ 2975 void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, 2976 unsigned int size) 2977 { 2978 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, 2979 rx_buf->page_offset, size, rx_buf->truesize); 2980 2981 rx_buf->page = NULL; 2982 } 2983 2984 /** 2985 * idpf_rx_construct_skb - Allocate skb and populate it 2986 * @rxq: Rx descriptor queue 2987 * @rx_buf: Rx buffer to pull data from 2988 * @size: the length of the packet 2989 * 2990 * This function allocates an skb. It then populates it with the page 2991 * data from the current receive descriptor, taking care to set up the 2992 * skb correctly. 2993 */ 2994 struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, 2995 struct idpf_rx_buf *rx_buf, 2996 unsigned int size) 2997 { 2998 unsigned int headlen; 2999 struct sk_buff *skb; 3000 void *va; 3001 3002 va = page_address(rx_buf->page) + rx_buf->page_offset; 3003 3004 /* prefetch first cache line of first page */ 3005 net_prefetch(va); 3006 /* allocate a skb to store the frags */ 3007 skb = __napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE, 3008 GFP_ATOMIC); 3009 if (unlikely(!skb)) { 3010 idpf_rx_put_page(rx_buf); 3011 3012 return NULL; 3013 } 3014 3015 skb_record_rx_queue(skb, rxq->idx); 3016 skb_mark_for_recycle(skb); 3017 3018 /* Determine available headroom for copy */ 3019 headlen = size; 3020 if (headlen > IDPF_RX_HDR_SIZE) 3021 headlen = eth_get_headlen(skb->dev, va, IDPF_RX_HDR_SIZE); 3022 3023 /* align pull length to size of long to optimize memcpy performance */ 3024 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); 3025 3026 /* if we exhaust the linear part then add what is left as a frag */ 3027 size -= headlen; 3028 if (!size) { 3029 idpf_rx_put_page(rx_buf); 3030 3031 return skb; 3032 } 3033 3034 skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen, 3035 size, rx_buf->truesize); 3036 3037 /* Since we're giving the page to the stack, clear our reference to it. 3038 * We'll get a new one during buffer posting. 
3039 */ 3040 rx_buf->page = NULL; 3041 3042 return skb; 3043 } 3044 3045 /** 3046 * idpf_rx_hdr_construct_skb - Allocate skb and populate it from header buffer 3047 * @rxq: Rx descriptor queue 3048 * @va: Rx buffer to pull data from 3049 * @size: the length of the packet 3050 * 3051 * This function allocates an skb. It then populates it with the page data from 3052 * the current receive descriptor, taking care to set up the skb correctly. 3053 * This specifically uses a header buffer to start building the skb. 3054 */ 3055 static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq, 3056 const void *va, 3057 unsigned int size) 3058 { 3059 struct sk_buff *skb; 3060 3061 /* allocate a skb to store the frags */ 3062 skb = __napi_alloc_skb(&rxq->q_vector->napi, size, GFP_ATOMIC); 3063 if (unlikely(!skb)) 3064 return NULL; 3065 3066 skb_record_rx_queue(skb, rxq->idx); 3067 3068 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 3069 3070 /* More than likely, a payload fragment, which will use a page from 3071 * page_pool will be added to the SKB so mark it for recycle 3072 * preemptively. And if not, it's inconsequential. 3073 */ 3074 skb_mark_for_recycle(skb); 3075 3076 return skb; 3077 } 3078 3079 /** 3080 * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor 3081 * status and error fields 3082 * @stat_err_field: field from descriptor to test bits in 3083 * @stat_err_bits: value to mask 3084 * 3085 */ 3086 static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field, 3087 const u8 stat_err_bits) 3088 { 3089 return !!(stat_err_field & stat_err_bits); 3090 } 3091 3092 /** 3093 * idpf_rx_splitq_is_eop - process handling of EOP buffers 3094 * @rx_desc: Rx descriptor for current buffer 3095 * 3096 * If the buffer is an EOP buffer, this function exits returning true, 3097 * otherwise return false indicating that this is in fact a non-EOP buffer. 3098 */ 3099 static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) 3100 { 3101 /* if we are the last buffer then there is nothing else to do */ 3102 return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1, 3103 IDPF_RXD_EOF_SPLITQ)); 3104 } 3105 3106 /** 3107 * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue 3108 * @rxq: Rx descriptor queue to retrieve receive buffer queue 3109 * @budget: Total limit on number of packets to process 3110 * 3111 * This function provides a "bounce buffer" approach to Rx interrupt 3112 * processing. The advantage to this is that on systems that have 3113 * expensive overhead for IOMMU access this provides a means of avoiding 3114 * it by maintaining the mapping of the page to the system. 
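*
* Each iteration checks the descriptor GEN bit against the queue flag,
* builds the skb from the header split buffer when one is used, adds the
* payload buffer to it, and hands completed frames to napi_gro_receive()
* once the EOP descriptor is seen.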
3115 * 3116 * Returns amount of work completed 3117 */ 3118 static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) 3119 { 3120 int total_rx_bytes = 0, total_rx_pkts = 0; 3121 struct idpf_queue *rx_bufq = NULL; 3122 struct sk_buff *skb = rxq->skb; 3123 u16 ntc = rxq->next_to_clean; 3124 3125 /* Process Rx packets bounded by budget */ 3126 while (likely(total_rx_pkts < budget)) { 3127 struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc; 3128 struct idpf_sw_queue *refillq = NULL; 3129 struct idpf_rxq_set *rxq_set = NULL; 3130 struct idpf_rx_buf *rx_buf = NULL; 3131 union virtchnl2_rx_desc *desc; 3132 unsigned int pkt_len = 0; 3133 unsigned int hdr_len = 0; 3134 u16 gen_id, buf_id = 0; 3135 /* Header buffer overflow only valid for header split */ 3136 bool hbo = false; 3137 int bufq_id; 3138 u8 rxdid; 3139 3140 /* get the Rx desc from Rx queue based on 'next_to_clean' */ 3141 desc = IDPF_RX_DESC(rxq, ntc); 3142 rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc; 3143 3144 /* This memory barrier is needed to keep us from reading 3145 * any other fields out of the rx_desc 3146 */ 3147 dma_rmb(); 3148 3149 /* if the descriptor isn't done, no work yet to do */ 3150 gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, 3151 VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M); 3152 3153 if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id) 3154 break; 3155 3156 rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M, 3157 rx_desc->rxdid_ucast); 3158 if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) { 3159 IDPF_RX_BUMP_NTC(rxq, ntc); 3160 u64_stats_update_begin(&rxq->stats_sync); 3161 u64_stats_inc(&rxq->q_stats.rx.bad_descs); 3162 u64_stats_update_end(&rxq->stats_sync); 3163 continue; 3164 } 3165 3166 pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id, 3167 VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M); 3168 3169 hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M, 3170 rx_desc->status_err0_qw1); 3171 3172 if (unlikely(hbo)) { 3173 /* If a header buffer overflow occurs, i.e. the header is 3174 * too large to fit in the header split buffer, HW will 3175 * put the entire packet, including headers, in the 3176 * data/payload buffer.
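* The driver then skips reading hdr_len and builds the whole skb from the
* payload buffer via the bypass_hsplit path below.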
3177 */ 3178 u64_stats_update_begin(&rxq->stats_sync); 3179 u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf); 3180 u64_stats_update_end(&rxq->stats_sync); 3181 goto bypass_hsplit; 3182 } 3183 3184 hdr_len = le16_get_bits(rx_desc->hdrlen_flags, 3185 VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M); 3186 3187 bypass_hsplit: 3188 bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, 3189 VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M); 3190 3191 rxq_set = container_of(rxq, struct idpf_rxq_set, rxq); 3192 if (!bufq_id) 3193 refillq = rxq_set->refillq0; 3194 else 3195 refillq = rxq_set->refillq1; 3196 3197 /* retrieve buffer from the rxq */ 3198 rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq; 3199 3200 buf_id = le16_to_cpu(rx_desc->buf_id); 3201 3202 rx_buf = &rx_bufq->rx_buf.buf[buf_id]; 3203 3204 if (hdr_len) { 3205 const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va + 3206 (u32)buf_id * IDPF_HDR_BUF_SIZE; 3207 3208 skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len); 3209 u64_stats_update_begin(&rxq->stats_sync); 3210 u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts); 3211 u64_stats_update_end(&rxq->stats_sync); 3212 } 3213 3214 if (pkt_len) { 3215 idpf_rx_sync_for_cpu(rx_buf, pkt_len); 3216 if (skb) 3217 idpf_rx_add_frag(rx_buf, skb, pkt_len); 3218 else 3219 skb = idpf_rx_construct_skb(rxq, rx_buf, 3220 pkt_len); 3221 } else { 3222 idpf_rx_put_page(rx_buf); 3223 } 3224 3225 /* exit if we failed to retrieve a buffer */ 3226 if (!skb) 3227 break; 3228 3229 idpf_rx_post_buf_refill(refillq, buf_id); 3230 3231 IDPF_RX_BUMP_NTC(rxq, ntc); 3232 /* skip if it is non EOP desc */ 3233 if (!idpf_rx_splitq_is_eop(rx_desc)) 3234 continue; 3235 3236 /* pad skb if needed (to make valid ethernet frame) */ 3237 if (eth_skb_pad(skb)) { 3238 skb = NULL; 3239 continue; 3240 } 3241 3242 /* probably a little skewed due to removing CRC */ 3243 total_rx_bytes += skb->len; 3244 3245 /* protocol */ 3246 if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) { 3247 dev_kfree_skb_any(skb); 3248 skb = NULL; 3249 continue; 3250 } 3251 3252 /* send completed skb up the stack */ 3253 napi_gro_receive(&rxq->q_vector->napi, skb); 3254 skb = NULL; 3255 3256 /* update budget accounting */ 3257 total_rx_pkts++; 3258 } 3259 3260 rxq->next_to_clean = ntc; 3261 3262 rxq->skb = skb; 3263 u64_stats_update_begin(&rxq->stats_sync); 3264 u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts); 3265 u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes); 3266 u64_stats_update_end(&rxq->stats_sync); 3267 3268 /* guarantee a trip back through this routine if there was a failure */ 3269 return total_rx_pkts; 3270 } 3271 3272 /** 3273 * idpf_rx_update_bufq_desc - Update buffer queue descriptor 3274 * @bufq: Pointer to the buffer queue 3275 * @refill_desc: SW Refill queue descriptor containing buffer ID 3276 * @buf_desc: Buffer queue descriptor 3277 * 3278 * Return 0 on success and negative on failure. 
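*
* Looks up the buffer for the ID carried in the refill descriptor, maps a
* fresh page for it, and writes the packet address, buffer ID and, when
* header split is enabled, the header buffer address into the buffer queue
* descriptor.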
3279 */ 3280 static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc, 3281 struct virtchnl2_splitq_rx_buf_desc *buf_desc) 3282 { 3283 struct idpf_rx_buf *buf; 3284 dma_addr_t addr; 3285 u16 buf_id; 3286 3287 buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc); 3288 3289 buf = &bufq->rx_buf.buf[buf_id]; 3290 3291 addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size); 3292 if (unlikely(addr == DMA_MAPPING_ERROR)) 3293 return -ENOMEM; 3294 3295 buf_desc->pkt_addr = cpu_to_le64(addr); 3296 buf_desc->qword0.buf_id = cpu_to_le16(buf_id); 3297 3298 if (!bufq->rx_hsplit_en) 3299 return 0; 3300 3301 buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa + 3302 (u32)buf_id * IDPF_HDR_BUF_SIZE); 3303 3304 return 0; 3305 } 3306 3307 /** 3308 * idpf_rx_clean_refillq - Clean refill queue buffers 3309 * @bufq: buffer queue to post buffers back to 3310 * @refillq: refill queue to clean 3311 * 3312 * This function takes care of the buffer refill management 3313 */ 3314 static void idpf_rx_clean_refillq(struct idpf_queue *bufq, 3315 struct idpf_sw_queue *refillq) 3316 { 3317 struct virtchnl2_splitq_rx_buf_desc *buf_desc; 3318 u16 bufq_nta = bufq->next_to_alloc; 3319 u16 ntc = refillq->next_to_clean; 3320 int cleaned = 0; 3321 u16 gen; 3322 3323 buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta); 3324 3325 /* make sure we stop at ring wrap in the unlikely case ring is full */ 3326 while (likely(cleaned < refillq->desc_count)) { 3327 u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc); 3328 bool failure; 3329 3330 gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc); 3331 if (test_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags) != gen) 3332 break; 3333 3334 failure = idpf_rx_update_bufq_desc(bufq, refill_desc, 3335 buf_desc); 3336 if (failure) 3337 break; 3338 3339 if (unlikely(++ntc == refillq->desc_count)) { 3340 change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags); 3341 ntc = 0; 3342 } 3343 3344 if (unlikely(++bufq_nta == bufq->desc_count)) { 3345 buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0); 3346 bufq_nta = 0; 3347 } else { 3348 buf_desc++; 3349 } 3350 3351 cleaned++; 3352 } 3353 3354 if (!cleaned) 3355 return; 3356 3357 /* We want to limit how many transactions on the bus we trigger with 3358 * tail writes so we only do it in strides. It's also important we 3359 * align the write to a multiple of 8 as required by HW. 3360 */ 3361 if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) + 3362 bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE) 3363 idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta, 3364 IDPF_RX_BUF_POST_STRIDE)); 3365 3366 /* update next to alloc since we have filled the ring */ 3367 refillq->next_to_clean = ntc; 3368 bufq->next_to_alloc = bufq_nta; 3369 } 3370 3371 /** 3372 * idpf_rx_clean_refillq_all - Clean all refill queues 3373 * @bufq: buffer queue with refill queues 3374 * 3375 * Iterates through all refill queues assigned to the buffer queue assigned to 3376 * this vector. Returns true if clean is complete within budget, false 3377 * otherwise. 
3378 */ 3379 static void idpf_rx_clean_refillq_all(struct idpf_queue *bufq) 3380 { 3381 struct idpf_bufq_set *bufq_set; 3382 int i; 3383 3384 bufq_set = container_of(bufq, struct idpf_bufq_set, bufq); 3385 for (i = 0; i < bufq_set->num_refillqs; i++) 3386 idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]); 3387 } 3388 3389 /** 3390 * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler 3391 * @irq: interrupt number 3392 * @data: pointer to a q_vector 3393 * 3394 */ 3395 static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq, 3396 void *data) 3397 { 3398 struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data; 3399 3400 q_vector->total_events++; 3401 napi_schedule(&q_vector->napi); 3402 3403 return IRQ_HANDLED; 3404 } 3405 3406 /** 3407 * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport 3408 * @vport: virtual port structure 3409 * 3410 */ 3411 static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport) 3412 { 3413 u16 v_idx; 3414 3415 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) 3416 netif_napi_del(&vport->q_vectors[v_idx].napi); 3417 } 3418 3419 /** 3420 * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport 3421 * @vport: main vport structure 3422 */ 3423 static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport) 3424 { 3425 int v_idx; 3426 3427 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) 3428 napi_disable(&vport->q_vectors[v_idx].napi); 3429 } 3430 3431 /** 3432 * idpf_vport_intr_rel - Free memory allocated for interrupt vectors 3433 * @vport: virtual port 3434 * 3435 * Free the memory allocated for interrupt vectors associated to a vport 3436 */ 3437 void idpf_vport_intr_rel(struct idpf_vport *vport) 3438 { 3439 int i, j, v_idx; 3440 3441 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { 3442 struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; 3443 3444 kfree(q_vector->bufq); 3445 q_vector->bufq = NULL; 3446 kfree(q_vector->tx); 3447 q_vector->tx = NULL; 3448 kfree(q_vector->rx); 3449 q_vector->rx = NULL; 3450 } 3451 3452 /* Clean up the mapping of queues to vectors */ 3453 for (i = 0; i < vport->num_rxq_grp; i++) { 3454 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 3455 3456 if (idpf_is_queue_model_split(vport->rxq_model)) 3457 for (j = 0; j < rx_qgrp->splitq.num_rxq_sets; j++) 3458 rx_qgrp->splitq.rxq_sets[j]->rxq.q_vector = NULL; 3459 else 3460 for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) 3461 rx_qgrp->singleq.rxqs[j]->q_vector = NULL; 3462 } 3463 3464 if (idpf_is_queue_model_split(vport->txq_model)) 3465 for (i = 0; i < vport->num_txq_grp; i++) 3466 vport->txq_grps[i].complq->q_vector = NULL; 3467 else 3468 for (i = 0; i < vport->num_txq_grp; i++) 3469 for (j = 0; j < vport->txq_grps[i].num_txq; j++) 3470 vport->txq_grps[i].txqs[j]->q_vector = NULL; 3471 3472 kfree(vport->q_vectors); 3473 vport->q_vectors = NULL; 3474 } 3475 3476 /** 3477 * idpf_vport_intr_rel_irq - Free the IRQ association with the OS 3478 * @vport: main vport structure 3479 */ 3480 static void idpf_vport_intr_rel_irq(struct idpf_vport *vport) 3481 { 3482 struct idpf_adapter *adapter = vport->adapter; 3483 int vector; 3484 3485 for (vector = 0; vector < vport->num_q_vectors; vector++) { 3486 struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; 3487 int irq_num, vidx; 3488 3489 /* free only the irqs that were actually requested */ 3490 if (!q_vector) 3491 continue; 3492 3493 vidx = vport->q_vector_idxs[vector]; 3494 irq_num = adapter->msix_entries[vidx].vector; 3495 3496 /* 
clear the affinity_mask in the IRQ descriptor */ 3497 irq_set_affinity_hint(irq_num, NULL); 3498 free_irq(irq_num, q_vector); 3499 } 3500 } 3501 3502 /** 3503 * idpf_vport_intr_dis_irq_all - Disable all interrupt 3504 * @vport: main vport structure 3505 */ 3506 static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport) 3507 { 3508 struct idpf_q_vector *q_vector = vport->q_vectors; 3509 int q_idx; 3510 3511 for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) 3512 writel(0, q_vector[q_idx].intr_reg.dyn_ctl); 3513 } 3514 3515 /** 3516 * idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings 3517 * @q_vector: pointer to q_vector 3518 * @type: itr index 3519 * @itr: itr value 3520 */ 3521 static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector, 3522 const int type, u16 itr) 3523 { 3524 u32 itr_val; 3525 3526 itr &= IDPF_ITR_MASK; 3527 /* Don't clear PBA because that can cause lost interrupts that 3528 * came in while we were cleaning/polling 3529 */ 3530 itr_val = q_vector->intr_reg.dyn_ctl_intena_m | 3531 (type << q_vector->intr_reg.dyn_ctl_itridx_s) | 3532 (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1)); 3533 3534 return itr_val; 3535 } 3536 3537 /** 3538 * idpf_update_dim_sample - Update dim sample with packets and bytes 3539 * @q_vector: the vector associated with the interrupt 3540 * @dim_sample: dim sample to update 3541 * @dim: dim instance structure 3542 * @packets: total packets 3543 * @bytes: total bytes 3544 * 3545 * Update the dim sample with the packets and bytes which are passed to this 3546 * function. Set the dim state appropriately if the dim settings gets stale. 3547 */ 3548 static void idpf_update_dim_sample(struct idpf_q_vector *q_vector, 3549 struct dim_sample *dim_sample, 3550 struct dim *dim, u64 packets, u64 bytes) 3551 { 3552 dim_update_sample(q_vector->total_events, packets, bytes, dim_sample); 3553 dim_sample->comp_ctr = 0; 3554 3555 /* if dim settings get stale, like when not updated for 1 second or 3556 * longer, force it to start again. This addresses the frequent case 3557 * of an idle queue being switched to by the scheduler. 3558 */ 3559 if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= HZ) 3560 dim->state = DIM_START_MEASURE; 3561 } 3562 3563 /** 3564 * idpf_net_dim - Update net DIM algorithm 3565 * @q_vector: the vector associated with the interrupt 3566 * 3567 * Create a DIM sample and notify net_dim() so that it can possibly decide 3568 * a new ITR value based on incoming packets, bytes, and interrupts. 3569 * 3570 * This function is a no-op if the queue is not configured to dynamic ITR. 
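*
* Tx and Rx are sampled independently: the packet and byte counters of every
* queue served by this vector are summed under u64_stats protection and fed
* to net_dim() for the corresponding dim instance.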
3571 */ 3572 static void idpf_net_dim(struct idpf_q_vector *q_vector) 3573 { 3574 struct dim_sample dim_sample = { }; 3575 u64 packets, bytes; 3576 u32 i; 3577 3578 if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode)) 3579 goto check_rx_itr; 3580 3581 for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) { 3582 struct idpf_queue *txq = q_vector->tx[i]; 3583 unsigned int start; 3584 3585 do { 3586 start = u64_stats_fetch_begin(&txq->stats_sync); 3587 packets += u64_stats_read(&txq->q_stats.tx.packets); 3588 bytes += u64_stats_read(&txq->q_stats.tx.bytes); 3589 } while (u64_stats_fetch_retry(&txq->stats_sync, start)); 3590 } 3591 3592 idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim, 3593 packets, bytes); 3594 net_dim(&q_vector->tx_dim, dim_sample); 3595 3596 check_rx_itr: 3597 if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode)) 3598 return; 3599 3600 for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) { 3601 struct idpf_queue *rxq = q_vector->rx[i]; 3602 unsigned int start; 3603 3604 do { 3605 start = u64_stats_fetch_begin(&rxq->stats_sync); 3606 packets += u64_stats_read(&rxq->q_stats.rx.packets); 3607 bytes += u64_stats_read(&rxq->q_stats.rx.bytes); 3608 } while (u64_stats_fetch_retry(&rxq->stats_sync, start)); 3609 } 3610 3611 idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim, 3612 packets, bytes); 3613 net_dim(&q_vector->rx_dim, dim_sample); 3614 } 3615 3616 /** 3617 * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt 3618 * @q_vector: q_vector for which itr is being updated and interrupt enabled 3619 * 3620 * Update the net_dim() algorithm and re-enable the interrupt associated with 3621 * this vector. 3622 */ 3623 void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector) 3624 { 3625 u32 intval; 3626 3627 /* net_dim() updates ITR out-of-band using a work item */ 3628 idpf_net_dim(q_vector); 3629 3630 intval = idpf_vport_intr_buildreg_itr(q_vector, 3631 IDPF_NO_ITR_UPDATE_IDX, 0); 3632 3633 writel(intval, q_vector->intr_reg.dyn_ctl); 3634 } 3635 3636 /** 3637 * idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport 3638 * @vport: main vport structure 3639 * @basename: name for the vector 3640 */ 3641 static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename) 3642 { 3643 struct idpf_adapter *adapter = vport->adapter; 3644 int vector, err, irq_num, vidx; 3645 const char *vec_name; 3646 3647 for (vector = 0; vector < vport->num_q_vectors; vector++) { 3648 struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; 3649 3650 vidx = vport->q_vector_idxs[vector]; 3651 irq_num = adapter->msix_entries[vidx].vector; 3652 3653 if (q_vector->num_rxq && q_vector->num_txq) 3654 vec_name = "TxRx"; 3655 else if (q_vector->num_rxq) 3656 vec_name = "Rx"; 3657 else if (q_vector->num_txq) 3658 vec_name = "Tx"; 3659 else 3660 continue; 3661 3662 q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", 3663 basename, vec_name, vidx); 3664 3665 err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0, 3666 q_vector->name, q_vector); 3667 if (err) { 3668 netdev_err(vport->netdev, 3669 "Request_irq failed, error: %d\n", err); 3670 goto free_q_irqs; 3671 } 3672 /* assign the mask for this irq */ 3673 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); 3674 } 3675 3676 return 0; 3677 3678 free_q_irqs: 3679 while (--vector >= 0) { 3680 vidx = vport->q_vector_idxs[vector]; 3681 irq_num = adapter->msix_entries[vidx].vector; 3682 free_irq(irq_num, &vport->q_vectors[vector]); 3683 } 3684 3685 
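/* hand the original request_irq() error back to the caller after
 * unwinding the vectors that were already requested
 */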
return err; 3686 } 3687 3688 /** 3689 * idpf_vport_intr_write_itr - Write ITR value to the ITR register 3690 * @q_vector: q_vector structure 3691 * @itr: Interrupt throttling rate 3692 * @tx: Tx or Rx ITR 3693 */ 3694 void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx) 3695 { 3696 struct idpf_intr_reg *intr_reg; 3697 3698 if (tx && !q_vector->tx) 3699 return; 3700 else if (!tx && !q_vector->rx) 3701 return; 3702 3703 intr_reg = &q_vector->intr_reg; 3704 writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S, 3705 tx ? intr_reg->tx_itr : intr_reg->rx_itr); 3706 } 3707 3708 /** 3709 * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport 3710 * @vport: main vport structure 3711 */ 3712 static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport) 3713 { 3714 bool dynamic; 3715 int q_idx; 3716 u16 itr; 3717 3718 for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) { 3719 struct idpf_q_vector *qv = &vport->q_vectors[q_idx]; 3720 3721 /* Set the initial ITR values */ 3722 if (qv->num_txq) { 3723 dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode); 3724 itr = vport->tx_itr_profile[qv->tx_dim.profile_ix]; 3725 idpf_vport_intr_write_itr(qv, dynamic ? 3726 itr : qv->tx_itr_value, 3727 true); 3728 } 3729 3730 if (qv->num_rxq) { 3731 dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode); 3732 itr = vport->rx_itr_profile[qv->rx_dim.profile_ix]; 3733 idpf_vport_intr_write_itr(qv, dynamic ? 3734 itr : qv->rx_itr_value, 3735 false); 3736 } 3737 3738 if (qv->num_txq || qv->num_rxq) 3739 idpf_vport_intr_update_itr_ena_irq(qv); 3740 } 3741 } 3742 3743 /** 3744 * idpf_vport_intr_deinit - Release all vector associations for the vport 3745 * @vport: main vport structure 3746 */ 3747 void idpf_vport_intr_deinit(struct idpf_vport *vport) 3748 { 3749 idpf_vport_intr_napi_dis_all(vport); 3750 idpf_vport_intr_napi_del_all(vport); 3751 idpf_vport_intr_dis_irq_all(vport); 3752 idpf_vport_intr_rel_irq(vport); 3753 } 3754 3755 /** 3756 * idpf_tx_dim_work - Call back from the stack 3757 * @work: work queue structure 3758 */ 3759 static void idpf_tx_dim_work(struct work_struct *work) 3760 { 3761 struct idpf_q_vector *q_vector; 3762 struct idpf_vport *vport; 3763 struct dim *dim; 3764 u16 itr; 3765 3766 dim = container_of(work, struct dim, work); 3767 q_vector = container_of(dim, struct idpf_q_vector, tx_dim); 3768 vport = q_vector->vport; 3769 3770 if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile)) 3771 dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1; 3772 3773 /* look up the values in our local table */ 3774 itr = vport->tx_itr_profile[dim->profile_ix]; 3775 3776 idpf_vport_intr_write_itr(q_vector, itr, true); 3777 3778 dim->state = DIM_START_MEASURE; 3779 } 3780 3781 /** 3782 * idpf_rx_dim_work - Call back from the stack 3783 * @work: work queue structure 3784 */ 3785 static void idpf_rx_dim_work(struct work_struct *work) 3786 { 3787 struct idpf_q_vector *q_vector; 3788 struct idpf_vport *vport; 3789 struct dim *dim; 3790 u16 itr; 3791 3792 dim = container_of(work, struct dim, work); 3793 q_vector = container_of(dim, struct idpf_q_vector, rx_dim); 3794 vport = q_vector->vport; 3795 3796 if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile)) 3797 dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1; 3798 3799 /* look up the values in our local table */ 3800 itr = vport->rx_itr_profile[dim->profile_ix]; 3801 3802 idpf_vport_intr_write_itr(q_vector, itr, false); 3803 3804 dim->state = DIM_START_MEASURE; 3805 } 3806 3807 /** 3808 * idpf_init_dim - Set up dynamic interrupt 
moderation 3809 * @qv: q_vector structure 3810 */ 3811 static void idpf_init_dim(struct idpf_q_vector *qv) 3812 { 3813 INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work); 3814 qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 3815 qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX; 3816 3817 INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work); 3818 qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 3819 qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX; 3820 } 3821 3822 /** 3823 * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport 3824 * @vport: main vport structure 3825 */ 3826 static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport) 3827 { 3828 int q_idx; 3829 3830 for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) { 3831 struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx]; 3832 3833 idpf_init_dim(q_vector); 3834 napi_enable(&q_vector->napi); 3835 } 3836 } 3837 3838 /** 3839 * idpf_tx_splitq_clean_all- Clean completion queues 3840 * @q_vec: queue vector 3841 * @budget: Used to determine if we are in netpoll 3842 * @cleaned: returns number of packets cleaned 3843 * 3844 * Returns false if clean is not complete else returns true 3845 */ 3846 static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec, 3847 int budget, int *cleaned) 3848 { 3849 u16 num_txq = q_vec->num_txq; 3850 bool clean_complete = true; 3851 int i, budget_per_q; 3852 3853 if (unlikely(!num_txq)) 3854 return true; 3855 3856 budget_per_q = DIV_ROUND_UP(budget, num_txq); 3857 for (i = 0; i < num_txq; i++) 3858 clean_complete &= idpf_tx_clean_complq(q_vec->tx[i], 3859 budget_per_q, cleaned); 3860 3861 return clean_complete; 3862 } 3863 3864 /** 3865 * idpf_rx_splitq_clean_all- Clean completion queues 3866 * @q_vec: queue vector 3867 * @budget: Used to determine if we are in netpoll 3868 * @cleaned: returns number of packets cleaned 3869 * 3870 * Returns false if clean is not complete else returns true 3871 */ 3872 static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget, 3873 int *cleaned) 3874 { 3875 u16 num_rxq = q_vec->num_rxq; 3876 bool clean_complete = true; 3877 int pkts_cleaned = 0; 3878 int i, budget_per_q; 3879 3880 /* We attempt to distribute budget to each Rx queue fairly, but don't 3881 * allow the budget to go below 1 because that would exit polling early. 3882 */ 3883 budget_per_q = num_rxq ? 
/**
 * idpf_rx_splitq_clean_all - Clean all Rx queues
 * @q_vec: queue vector
 * @budget: Used to determine if we are in netpoll
 * @cleaned: returns number of packets cleaned
 *
 * Returns false if clean is not complete, true otherwise
 */
static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
				     int *cleaned)
{
	u16 num_rxq = q_vec->num_rxq;
	bool clean_complete = true;
	int pkts_cleaned = 0;
	int i, budget_per_q;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
	for (i = 0; i < num_rxq; i++) {
		struct idpf_queue *rxq = q_vec->rx[i];
		int pkts_cleaned_per_q;

		pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
		/* if we clean as many as budgeted, we must not be done */
		if (pkts_cleaned_per_q >= budget_per_q)
			clean_complete = false;
		pkts_cleaned += pkts_cleaned_per_q;
	}
	*cleaned = pkts_cleaned;

	for (i = 0; i < q_vec->num_bufq; i++)
		idpf_rx_clean_refillq_all(q_vec->bufq[i]);

	return clean_complete;
}

/**
 * idpf_vport_splitq_napi_poll - NAPI handler
 * @napi: NAPI structure from which to get the owning q_vector
 * @budget: budget provided by stack
 */
static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
{
	struct idpf_q_vector *q_vector =
				container_of(napi, struct idpf_q_vector, napi);
	bool clean_complete;
	int work_done = 0;

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(!budget)) {
		idpf_tx_splitq_clean_all(q_vector, budget, &work_done);

		return 0;
	}

	clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
	clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);

	/* If work is not completed, return budget so the stack keeps polling */
	if (!clean_complete)
		return budget;

	work_done = min_t(int, work_done, budget - 1);

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		idpf_vport_intr_update_itr_ena_irq(q_vector);

	/* Switch to poll mode in the tear-down path after sending disable
	 * queues virtchnl message, as the interrupts will be disabled after
	 * that
	 */
	if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE,
						   q_vector->tx[0]->flags)))
		return budget;
	else
		return work_done;
}
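/* Worked example of the NAPI contract above: if the Rx pass consumes its
 * whole budget (say 64 of 64 packets), clean_complete is false and the poll
 * returns 64, so the core keeps polling.  Once both passes finish under
 * budget, work_done is clamped to budget - 1 (at most 63 here) because a
 * poll that returns a value equal to its budget is treated as "not done";
 * the clamp guarantees the completion path reports strictly less than the
 * budget before napi_complete_done() re-arms the interrupt.
 */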
/**
 * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
 * @vport: virtual port
 *
 * Distribute the vport's Rx, buffer, and Tx queues across its interrupt
 * vectors in a round-robin fashion.
 */
static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
{
	u16 num_txq_grp = vport->num_txq_grp;
	int i, j, qv_idx, bufq_vidx = 0;
	struct idpf_rxq_group *rx_qgrp;
	struct idpf_txq_group *tx_qgrp;
	struct idpf_queue *q, *bufq;
	u16 q_index;

	for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
		u16 num_rxq;

		rx_qgrp = &vport->rxq_grps[i];
		if (idpf_is_queue_model_split(vport->rxq_model))
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		else
			num_rxq = rx_qgrp->singleq.num_rxq;

		for (j = 0; j < num_rxq; j++) {
			if (qv_idx >= vport->num_q_vectors)
				qv_idx = 0;

			if (idpf_is_queue_model_split(vport->rxq_model))
				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				q = rx_qgrp->singleq.rxqs[j];
			q->q_vector = &vport->q_vectors[qv_idx];
			q_index = q->q_vector->num_rxq;
			q->q_vector->rx[q_index] = q;
			q->q_vector->num_rxq++;
			qv_idx++;
		}

		if (idpf_is_queue_model_split(vport->rxq_model)) {
			for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
				bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
				bufq->q_vector = &vport->q_vectors[bufq_vidx];
				q_index = bufq->q_vector->num_bufq;
				bufq->q_vector->bufq[q_index] = bufq;
				bufq->q_vector->num_bufq++;
			}
			if (++bufq_vidx >= vport->num_q_vectors)
				bufq_vidx = 0;
		}
	}

	for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
		u16 num_txq;

		tx_qgrp = &vport->txq_grps[i];
		num_txq = tx_qgrp->num_txq;

		if (idpf_is_queue_model_split(vport->txq_model)) {
			if (qv_idx >= vport->num_q_vectors)
				qv_idx = 0;

			q = tx_qgrp->complq;
			q->q_vector = &vport->q_vectors[qv_idx];
			q_index = q->q_vector->num_txq;
			q->q_vector->tx[q_index] = q;
			q->q_vector->num_txq++;
			qv_idx++;
		} else {
			for (j = 0; j < num_txq; j++) {
				if (qv_idx >= vport->num_q_vectors)
					qv_idx = 0;

				q = tx_qgrp->txqs[j];
				q->q_vector = &vport->q_vectors[qv_idx];
				q_index = q->q_vector->num_txq;
				q->q_vector->tx[q_index] = q;
				q->q_vector->num_txq++;

				qv_idx++;
			}
		}
	}
}

/**
 * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
 * @vport: virtual port
 *
 * Initialize vector indexes with values returned over the mailbox
 */
static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_alloc_vectors *ac;
	u16 *vecids, total_vecs;
	int i;

	ac = adapter->req_vec_chunks;
	if (!ac) {
		for (i = 0; i < vport->num_q_vectors; i++)
			vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];

		return 0;
	}

	total_vecs = idpf_get_reserved_vecs(adapter);
	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
	if (!vecids)
		return -ENOMEM;

	idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);

	for (i = 0; i < vport->num_q_vectors; i++)
		vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];

	kfree(vecids);

	return 0;
}

/**
 * idpf_vport_intr_napi_add_all - Register NAPI handler for all q_vectors
 * @vport: virtual port structure
 */
static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
{
	int (*napi_poll)(struct napi_struct *napi, int budget);
	u16 v_idx;

	if (idpf_is_queue_model_split(vport->txq_model))
		napi_poll = idpf_vport_splitq_napi_poll;
	else
		napi_poll = idpf_vport_singleq_napi_poll;

	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];

		netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);

		/* only set affinity_mask if the CPU is online */
		if (cpu_online(v_idx))
			cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
	}
}
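/* Mapping example for idpf_vport_intr_map_vector_to_qs() above (illustrative
 * counts): with 4 q_vectors and 6 Rx queues, the round-robin gives
 * rxq0->vec0, rxq1->vec1, rxq2->vec2, rxq3->vec3, rxq4->vec0, rxq5->vec1;
 * qv_idx simply wraps to 0 whenever it reaches num_q_vectors.  Tx completion
 * queues (split model) or Tx queues (single queue model) are spread the
 * same way.
 */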
/**
 * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
 * @vport: virtual port
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
int idpf_vport_intr_alloc(struct idpf_vport *vport)
{
	u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
	struct idpf_q_vector *q_vector;
	int v_idx, err;

	vport->q_vectors = kcalloc(vport->num_q_vectors,
				   sizeof(struct idpf_q_vector), GFP_KERNEL);
	if (!vport->q_vectors)
		return -ENOMEM;

	txqs_per_vector = DIV_ROUND_UP(vport->num_txq, vport->num_q_vectors);
	rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq, vport->num_q_vectors);
	bufqs_per_vector = vport->num_bufqs_per_qgrp *
			   DIV_ROUND_UP(vport->num_rxq_grp,
					vport->num_q_vectors);

	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
		q_vector = &vport->q_vectors[v_idx];
		q_vector->vport = vport;

		q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
		q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
		q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;

		q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
		q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
		q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;

		q_vector->tx = kcalloc(txqs_per_vector,
				       sizeof(struct idpf_queue *),
				       GFP_KERNEL);
		if (!q_vector->tx) {
			err = -ENOMEM;
			goto error;
		}

		q_vector->rx = kcalloc(rxqs_per_vector,
				       sizeof(struct idpf_queue *),
				       GFP_KERNEL);
		if (!q_vector->rx) {
			err = -ENOMEM;
			goto error;
		}

		if (!idpf_is_queue_model_split(vport->rxq_model))
			continue;

		q_vector->bufq = kcalloc(bufqs_per_vector,
					 sizeof(struct idpf_queue *),
					 GFP_KERNEL);
		if (!q_vector->bufq) {
			err = -ENOMEM;
			goto error;
		}
	}

	return 0;

error:
	idpf_vport_intr_rel(vport);

	return err;
}

/**
 * idpf_vport_intr_init - Set up all vectors for the given vport
 * @vport: virtual port
 *
 * Returns 0 on success or negative on failure
 */
int idpf_vport_intr_init(struct idpf_vport *vport)
{
	char *int_name;
	int err;

	err = idpf_vport_intr_init_vec_idx(vport);
	if (err)
		return err;

	idpf_vport_intr_map_vector_to_qs(vport);
	idpf_vport_intr_napi_add_all(vport);
	idpf_vport_intr_napi_ena_all(vport);

	err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
	if (err)
		goto unroll_vectors_alloc;

	int_name = kasprintf(GFP_KERNEL, "%s-%s",
			     dev_driver_string(&vport->adapter->pdev->dev),
			     vport->netdev->name);

	err = idpf_vport_intr_req_irq(vport, int_name);
	if (err)
		goto unroll_vectors_alloc;

	idpf_vport_intr_ena_irq_all(vport);

	return 0;

unroll_vectors_alloc:
	idpf_vport_intr_napi_dis_all(vport);
	idpf_vport_intr_napi_del_all(vport);

	return err;
}

/**
 * idpf_config_rss - Send virtchnl messages to configure RSS
 * @vport: virtual port
 *
 * Return 0 on success, negative on failure
 */
int idpf_config_rss(struct idpf_vport *vport)
{
	int err;

	err = idpf_send_get_set_rss_key_msg(vport, false);
	if (err)
		return err;

	return idpf_send_get_set_rss_lut_msg(vport, false);
}
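/* Minimal usage sketch (illustrative; the real call sites live in the vport
 * open/stop paths elsewhere in the driver): interrupt resources are
 * allocated and initialized before RSS is programmed, and torn down in
 * reverse order.
 *
 *	err = idpf_vport_intr_alloc(vport);
 *	if (!err)
 *		err = idpf_vport_intr_init(vport);
 *	if (!err)
 *		err = idpf_init_rss(vport);
 *	...
 *	idpf_deinit_rss(vport);
 *	idpf_vport_intr_deinit(vport);
 *	idpf_vport_intr_rel(vport);
 */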
/**
 * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
 * @vport: virtual port structure
 */
static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	u16 num_active_rxq = vport->num_rxq;
	struct idpf_rss_data *rss_data;
	int i;

	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;

	for (i = 0; i < rss_data->rss_lut_size; i++) {
		rss_data->rss_lut[i] = i % num_active_rxq;
		rss_data->cached_lut[i] = rss_data->rss_lut[i];
	}
}

/**
 * idpf_init_rss - Allocate and initialize RSS resources
 * @vport: virtual port
 *
 * Return 0 on success, negative on failure
 */
int idpf_init_rss(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_rss_data *rss_data;
	u32 lut_size;

	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;

	lut_size = rss_data->rss_lut_size * sizeof(u32);
	rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
	if (!rss_data->rss_lut)
		return -ENOMEM;

	rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
	if (!rss_data->cached_lut) {
		kfree(rss_data->rss_lut);
		rss_data->rss_lut = NULL;

		return -ENOMEM;
	}

	/* Fill the default RSS lut values */
	idpf_fill_dflt_rss_lut(vport);

	return idpf_config_rss(vport);
}

/**
 * idpf_deinit_rss - Release RSS resources
 * @vport: virtual port
 */
void idpf_deinit_rss(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_rss_data *rss_data;

	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
	kfree(rss_data->cached_lut);
	rss_data->cached_lut = NULL;
	kfree(rss_data->rss_lut);
	rss_data->rss_lut = NULL;
}
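/* Default LUT example for idpf_fill_dflt_rss_lut() (illustrative sizes):
 * with rss_lut_size = 128 and num_active_rxq = 6, the table becomes the
 * repeating pattern 0, 1, 2, 3, 4, 5, 0, 1, ..., i.e. rss_lut[i] = i % 6,
 * spreading flows evenly across the active Rx queues until a custom
 * indirection table is configured.
 */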