1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (C) 2023 Intel Corporation */ 3 4 #include "idpf.h" 5 6 /** 7 * idpf_buf_lifo_push - push a buffer pointer onto stack 8 * @stack: pointer to stack struct 9 * @buf: pointer to buf to push 10 * 11 * Returns 0 on success, negative on failure 12 **/ 13 static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack, 14 struct idpf_tx_stash *buf) 15 { 16 if (unlikely(stack->top == stack->size)) 17 return -ENOSPC; 18 19 stack->bufs[stack->top++] = buf; 20 21 return 0; 22 } 23 24 /** 25 * idpf_buf_lifo_pop - pop a buffer pointer from stack 26 * @stack: pointer to stack struct 27 **/ 28 static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack) 29 { 30 if (unlikely(!stack->top)) 31 return NULL; 32 33 return stack->bufs[--stack->top]; 34 } 35 36 /** 37 * idpf_tx_timeout - Respond to a Tx Hang 38 * @netdev: network interface device structure 39 * @txqueue: TX queue 40 */ 41 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue) 42 { 43 struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev); 44 45 adapter->tx_timeout_count++; 46 47 netdev_err(netdev, "Detected Tx timeout: Count %d, Queue %d\n", 48 adapter->tx_timeout_count, txqueue); 49 if (!idpf_is_reset_in_prog(adapter)) { 50 set_bit(IDPF_HR_FUNC_RESET, adapter->flags); 51 queue_delayed_work(adapter->vc_event_wq, 52 &adapter->vc_event_task, 53 msecs_to_jiffies(10)); 54 } 55 } 56 57 /** 58 * idpf_tx_buf_rel - Release a Tx buffer 59 * @tx_q: the queue that owns the buffer 60 * @tx_buf: the buffer to free 61 */ 62 static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf) 63 { 64 if (tx_buf->skb) { 65 if (dma_unmap_len(tx_buf, len)) 66 dma_unmap_single(tx_q->dev, 67 dma_unmap_addr(tx_buf, dma), 68 dma_unmap_len(tx_buf, len), 69 DMA_TO_DEVICE); 70 dev_kfree_skb_any(tx_buf->skb); 71 } else if (dma_unmap_len(tx_buf, len)) { 72 dma_unmap_page(tx_q->dev, 73 dma_unmap_addr(tx_buf, dma), 74 dma_unmap_len(tx_buf, len), 75 DMA_TO_DEVICE); 76 } 77 78 tx_buf->next_to_watch = NULL; 79 tx_buf->skb = NULL; 80 tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; 81 dma_unmap_len_set(tx_buf, len, 0); 82 } 83 84 /** 85 * idpf_tx_buf_rel_all - Free any empty Tx buffers 86 * @txq: queue to be cleaned 87 */ 88 static void idpf_tx_buf_rel_all(struct idpf_queue *txq) 89 { 90 u16 i; 91 92 /* Buffers already cleared, nothing to do */ 93 if (!txq->tx_buf) 94 return; 95 96 /* Free all the Tx buffer sk_buffs */ 97 for (i = 0; i < txq->desc_count; i++) 98 idpf_tx_buf_rel(txq, &txq->tx_buf[i]); 99 100 kfree(txq->tx_buf); 101 txq->tx_buf = NULL; 102 103 if (!txq->buf_stack.bufs) 104 return; 105 106 for (i = 0; i < txq->buf_stack.size; i++) 107 kfree(txq->buf_stack.bufs[i]); 108 109 kfree(txq->buf_stack.bufs); 110 txq->buf_stack.bufs = NULL; 111 } 112 113 /** 114 * idpf_tx_desc_rel - Free Tx resources per queue 115 * @txq: Tx descriptor ring for a specific queue 116 * @bufq: buffer q or completion q 117 * 118 * Free all transmit software resources 119 */ 120 static void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq) 121 { 122 if (bufq) 123 idpf_tx_buf_rel_all(txq); 124 125 if (!txq->desc_ring) 126 return; 127 128 dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma); 129 txq->desc_ring = NULL; 130 txq->next_to_alloc = 0; 131 txq->next_to_use = 0; 132 txq->next_to_clean = 0; 133 } 134 135 /** 136 * idpf_tx_desc_rel_all - Free Tx Resources for All Queues 137 * @vport: virtual port structure 138 * 139 * Free all transmit software resources 140 */ 141 static 
void idpf_tx_desc_rel_all(struct idpf_vport *vport) 142 { 143 int i, j; 144 145 if (!vport->txq_grps) 146 return; 147 148 for (i = 0; i < vport->num_txq_grp; i++) { 149 struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; 150 151 for (j = 0; j < txq_grp->num_txq; j++) 152 idpf_tx_desc_rel(txq_grp->txqs[j], true); 153 154 if (idpf_is_queue_model_split(vport->txq_model)) 155 idpf_tx_desc_rel(txq_grp->complq, false); 156 } 157 } 158 159 /** 160 * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources 161 * @tx_q: queue for which the buffers are allocated 162 * 163 * Returns 0 on success, negative on failure 164 */ 165 static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q) 166 { 167 int buf_size; 168 int i; 169 170 /* Allocate book keeping buffers only. Buffers to be supplied to HW 171 * are allocated by kernel network stack and received as part of skb 172 */ 173 buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count; 174 tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL); 175 if (!tx_q->tx_buf) 176 return -ENOMEM; 177 178 /* Initialize tx_bufs with invalid completion tags */ 179 for (i = 0; i < tx_q->desc_count; i++) 180 tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; 181 182 /* Initialize tx buf stack for out-of-order completions if 183 * flow scheduling offload is enabled 184 */ 185 tx_q->buf_stack.bufs = 186 kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *), 187 GFP_KERNEL); 188 if (!tx_q->buf_stack.bufs) 189 return -ENOMEM; 190 191 tx_q->buf_stack.size = tx_q->desc_count; 192 tx_q->buf_stack.top = tx_q->desc_count; 193 194 for (i = 0; i < tx_q->desc_count; i++) { 195 tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]), 196 GFP_KERNEL); 197 if (!tx_q->buf_stack.bufs[i]) 198 return -ENOMEM; 199 } 200 201 return 0; 202 } 203 204 /** 205 * idpf_tx_desc_alloc - Allocate the Tx descriptors 206 * @tx_q: the tx ring to set up 207 * @bufq: buffer or completion queue 208 * 209 * Returns 0 on success, negative on failure 210 */ 211 static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq) 212 { 213 struct device *dev = tx_q->dev; 214 u32 desc_sz; 215 int err; 216 217 if (bufq) { 218 err = idpf_tx_buf_alloc_all(tx_q); 219 if (err) 220 goto err_alloc; 221 222 desc_sz = sizeof(struct idpf_base_tx_desc); 223 } else { 224 desc_sz = sizeof(struct idpf_splitq_tx_compl_desc); 225 } 226 227 tx_q->size = tx_q->desc_count * desc_sz; 228 229 /* Allocate descriptors also round up to nearest 4K */ 230 tx_q->size = ALIGN(tx_q->size, 4096); 231 tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma, 232 GFP_KERNEL); 233 if (!tx_q->desc_ring) { 234 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", 235 tx_q->size); 236 err = -ENOMEM; 237 goto err_alloc; 238 } 239 240 tx_q->next_to_alloc = 0; 241 tx_q->next_to_use = 0; 242 tx_q->next_to_clean = 0; 243 set_bit(__IDPF_Q_GEN_CHK, tx_q->flags); 244 245 return 0; 246 247 err_alloc: 248 idpf_tx_desc_rel(tx_q, bufq); 249 250 return err; 251 } 252 253 /** 254 * idpf_tx_desc_alloc_all - allocate all queues Tx resources 255 * @vport: virtual port private structure 256 * 257 * Returns 0 on success, negative on failure 258 */ 259 static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) 260 { 261 struct device *dev = &vport->adapter->pdev->dev; 262 int err = 0; 263 int i, j; 264 265 /* Setup buffer queues. 
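	 * (For TX, a queue passed to idpf_tx_desc_alloc() with bufq == true also
	 * gets its tx_buf bookkeeping array and stash stack via
	 * idpf_tx_buf_alloc_all(); completion queues only get a descriptor ring.)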
In single queue model buffer queues and 266 * completion queues will be same 267 */ 268 for (i = 0; i < vport->num_txq_grp; i++) { 269 for (j = 0; j < vport->txq_grps[i].num_txq; j++) { 270 struct idpf_queue *txq = vport->txq_grps[i].txqs[j]; 271 u8 gen_bits = 0; 272 u16 bufidx_mask; 273 274 err = idpf_tx_desc_alloc(txq, true); 275 if (err) { 276 dev_err(dev, "Allocation for Tx Queue %u failed\n", 277 i); 278 goto err_out; 279 } 280 281 if (!idpf_is_queue_model_split(vport->txq_model)) 282 continue; 283 284 txq->compl_tag_cur_gen = 0; 285 286 /* Determine the number of bits in the bufid 287 * mask and add one to get the start of the 288 * generation bits 289 */ 290 bufidx_mask = txq->desc_count - 1; 291 while (bufidx_mask >> 1) { 292 txq->compl_tag_gen_s++; 293 bufidx_mask = bufidx_mask >> 1; 294 } 295 txq->compl_tag_gen_s++; 296 297 gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH - 298 txq->compl_tag_gen_s; 299 txq->compl_tag_gen_max = GETMAXVAL(gen_bits); 300 301 /* Set bufid mask based on location of first 302 * gen bit; it cannot simply be the descriptor 303 * ring size-1 since we can have size values 304 * where not all of those bits are set. 305 */ 306 txq->compl_tag_bufid_m = 307 GETMAXVAL(txq->compl_tag_gen_s); 308 } 309 310 if (!idpf_is_queue_model_split(vport->txq_model)) 311 continue; 312 313 /* Setup completion queues */ 314 err = idpf_tx_desc_alloc(vport->txq_grps[i].complq, false); 315 if (err) { 316 dev_err(dev, "Allocation for Tx Completion Queue %u failed\n", 317 i); 318 goto err_out; 319 } 320 } 321 322 err_out: 323 if (err) 324 idpf_tx_desc_rel_all(vport); 325 326 return err; 327 } 328 329 /** 330 * idpf_rx_page_rel - Release an rx buffer page 331 * @rxq: the queue that owns the buffer 332 * @rx_buf: the buffer to free 333 */ 334 static void idpf_rx_page_rel(struct idpf_queue *rxq, struct idpf_rx_buf *rx_buf) 335 { 336 if (unlikely(!rx_buf->page)) 337 return; 338 339 page_pool_put_full_page(rxq->pp, rx_buf->page, false); 340 341 rx_buf->page = NULL; 342 rx_buf->page_offset = 0; 343 } 344 345 /** 346 * idpf_rx_hdr_buf_rel_all - Release header buffer memory 347 * @rxq: queue to use 348 */ 349 static void idpf_rx_hdr_buf_rel_all(struct idpf_queue *rxq) 350 { 351 struct idpf_adapter *adapter = rxq->vport->adapter; 352 353 dma_free_coherent(&adapter->pdev->dev, 354 rxq->desc_count * IDPF_HDR_BUF_SIZE, 355 rxq->rx_buf.hdr_buf_va, 356 rxq->rx_buf.hdr_buf_pa); 357 rxq->rx_buf.hdr_buf_va = NULL; 358 } 359 360 /** 361 * idpf_rx_buf_rel_all - Free all Rx buffer resources for a queue 362 * @rxq: queue to be cleaned 363 */ 364 static void idpf_rx_buf_rel_all(struct idpf_queue *rxq) 365 { 366 u16 i; 367 368 /* queue already cleared, nothing to do */ 369 if (!rxq->rx_buf.buf) 370 return; 371 372 /* Free all the bufs allocated and given to hw on Rx queue */ 373 for (i = 0; i < rxq->desc_count; i++) 374 idpf_rx_page_rel(rxq, &rxq->rx_buf.buf[i]); 375 376 if (rxq->rx_hsplit_en) 377 idpf_rx_hdr_buf_rel_all(rxq); 378 379 page_pool_destroy(rxq->pp); 380 rxq->pp = NULL; 381 382 kfree(rxq->rx_buf.buf); 383 rxq->rx_buf.buf = NULL; 384 } 385 386 /** 387 * idpf_rx_desc_rel - Free a specific Rx q resources 388 * @rxq: queue to clean the resources from 389 * @bufq: buffer q or completion q 390 * @q_model: single or split q model 391 * 392 * Free a specific rx queue resources 393 */ 394 static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) 395 { 396 if (!rxq) 397 return; 398 399 if (!bufq && idpf_is_queue_model_split(q_model) && rxq->skb) { 400 dev_kfree_skb_any(rxq->skb); 401 
rxq->skb = NULL; 402 } 403 404 if (bufq || !idpf_is_queue_model_split(q_model)) 405 idpf_rx_buf_rel_all(rxq); 406 407 rxq->next_to_alloc = 0; 408 rxq->next_to_clean = 0; 409 rxq->next_to_use = 0; 410 if (!rxq->desc_ring) 411 return; 412 413 dmam_free_coherent(rxq->dev, rxq->size, rxq->desc_ring, rxq->dma); 414 rxq->desc_ring = NULL; 415 } 416 417 /** 418 * idpf_rx_desc_rel_all - Free Rx Resources for All Queues 419 * @vport: virtual port structure 420 * 421 * Free all rx queues resources 422 */ 423 static void idpf_rx_desc_rel_all(struct idpf_vport *vport) 424 { 425 struct idpf_rxq_group *rx_qgrp; 426 u16 num_rxq; 427 int i, j; 428 429 if (!vport->rxq_grps) 430 return; 431 432 for (i = 0; i < vport->num_rxq_grp; i++) { 433 rx_qgrp = &vport->rxq_grps[i]; 434 435 if (!idpf_is_queue_model_split(vport->rxq_model)) { 436 for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) 437 idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], 438 false, vport->rxq_model); 439 continue; 440 } 441 442 num_rxq = rx_qgrp->splitq.num_rxq_sets; 443 for (j = 0; j < num_rxq; j++) 444 idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq, 445 false, vport->rxq_model); 446 447 if (!rx_qgrp->splitq.bufq_sets) 448 continue; 449 450 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { 451 struct idpf_bufq_set *bufq_set = 452 &rx_qgrp->splitq.bufq_sets[j]; 453 454 idpf_rx_desc_rel(&bufq_set->bufq, true, 455 vport->rxq_model); 456 } 457 } 458 } 459 460 /** 461 * idpf_rx_buf_hw_update - Store the new tail and head values 462 * @rxq: queue to bump 463 * @val: new head index 464 */ 465 void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val) 466 { 467 rxq->next_to_use = val; 468 469 if (unlikely(!rxq->tail)) 470 return; 471 472 /* writel has an implicit memory barrier */ 473 writel(val, rxq->tail); 474 } 475 476 /** 477 * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers 478 * @rxq: ring to use 479 * 480 * Returns 0 on success, negative on failure. 481 */ 482 static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq) 483 { 484 struct idpf_adapter *adapter = rxq->vport->adapter; 485 486 rxq->rx_buf.hdr_buf_va = 487 dma_alloc_coherent(&adapter->pdev->dev, 488 IDPF_HDR_BUF_SIZE * rxq->desc_count, 489 &rxq->rx_buf.hdr_buf_pa, 490 GFP_KERNEL); 491 if (!rxq->rx_buf.hdr_buf_va) 492 return -ENOMEM; 493 494 return 0; 495 } 496 497 /** 498 * idpf_rx_post_buf_refill - Post buffer id to refill queue 499 * @refillq: refill queue to post to 500 * @buf_id: buffer id to post 501 */ 502 static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id) 503 { 504 u16 nta = refillq->next_to_alloc; 505 506 /* store the buffer ID and the SW maintained GEN bit to the refillq */ 507 refillq->ring[nta] = 508 ((buf_id << IDPF_RX_BI_BUFID_S) & IDPF_RX_BI_BUFID_M) | 509 (!!(test_bit(__IDPF_Q_GEN_CHK, refillq->flags)) << 510 IDPF_RX_BI_GEN_S); 511 512 if (unlikely(++nta == refillq->desc_count)) { 513 nta = 0; 514 change_bit(__IDPF_Q_GEN_CHK, refillq->flags); 515 } 516 refillq->next_to_alloc = nta; 517 } 518 519 /** 520 * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring 521 * @bufq: buffer queue to post to 522 * @buf_id: buffer id to post 523 * 524 * Returns false if buffer could not be allocated, true otherwise. 
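 *
 * Note that the descriptor slot is taken from bufq->next_to_alloc, which this
 * function advances internally, while the buffer state lives in
 * rx_buf.buf[@buf_id]; callers such as idpf_rx_post_init_bufs() below are
 * expected to supply buf_id themselves.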
525 */ 526 static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id) 527 { 528 struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL; 529 u16 nta = bufq->next_to_alloc; 530 struct idpf_rx_buf *buf; 531 dma_addr_t addr; 532 533 splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta); 534 buf = &bufq->rx_buf.buf[buf_id]; 535 536 if (bufq->rx_hsplit_en) { 537 splitq_rx_desc->hdr_addr = 538 cpu_to_le64(bufq->rx_buf.hdr_buf_pa + 539 (u32)buf_id * IDPF_HDR_BUF_SIZE); 540 } 541 542 addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size); 543 if (unlikely(addr == DMA_MAPPING_ERROR)) 544 return false; 545 546 splitq_rx_desc->pkt_addr = cpu_to_le64(addr); 547 splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id); 548 549 nta++; 550 if (unlikely(nta == bufq->desc_count)) 551 nta = 0; 552 bufq->next_to_alloc = nta; 553 554 return true; 555 } 556 557 /** 558 * idpf_rx_post_init_bufs - Post initial buffers to bufq 559 * @bufq: buffer queue to post working set to 560 * @working_set: number of buffers to put in working set 561 * 562 * Returns true if @working_set bufs were posted successfully, false otherwise. 563 */ 564 static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set) 565 { 566 int i; 567 568 for (i = 0; i < working_set; i++) { 569 if (!idpf_rx_post_buf_desc(bufq, i)) 570 return false; 571 } 572 573 idpf_rx_buf_hw_update(bufq, 574 bufq->next_to_alloc & ~(bufq->rx_buf_stride - 1)); 575 576 return true; 577 } 578 579 /** 580 * idpf_rx_create_page_pool - Create a page pool 581 * @rxbufq: RX queue to create page pool for 582 * 583 * Returns &page_pool on success, casted -errno on failure 584 */ 585 static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq) 586 { 587 struct page_pool_params pp = { 588 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, 589 .order = 0, 590 .pool_size = rxbufq->desc_count, 591 .nid = NUMA_NO_NODE, 592 .dev = rxbufq->vport->netdev->dev.parent, 593 .max_len = PAGE_SIZE, 594 .dma_dir = DMA_FROM_DEVICE, 595 .offset = 0, 596 }; 597 598 return page_pool_create(&pp); 599 } 600 601 /** 602 * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources 603 * @rxbufq: queue for which the buffers are allocated; equivalent to 604 * rxq when operating in singleq mode 605 * 606 * Returns 0 on success, negative on failure 607 */ 608 static int idpf_rx_buf_alloc_all(struct idpf_queue *rxbufq) 609 { 610 int err = 0; 611 612 /* Allocate book keeping buffers */ 613 rxbufq->rx_buf.buf = kcalloc(rxbufq->desc_count, 614 sizeof(struct idpf_rx_buf), GFP_KERNEL); 615 if (!rxbufq->rx_buf.buf) { 616 err = -ENOMEM; 617 goto rx_buf_alloc_all_out; 618 } 619 620 if (rxbufq->rx_hsplit_en) { 621 err = idpf_rx_hdr_buf_alloc_all(rxbufq); 622 if (err) 623 goto rx_buf_alloc_all_out; 624 } 625 626 /* Allocate buffers to be given to HW. 
*/ 627 if (idpf_is_queue_model_split(rxbufq->vport->rxq_model)) { 628 int working_set = IDPF_RX_BUFQ_WORKING_SET(rxbufq); 629 630 if (!idpf_rx_post_init_bufs(rxbufq, working_set)) 631 err = -ENOMEM; 632 } else { 633 if (idpf_rx_singleq_buf_hw_alloc_all(rxbufq, 634 rxbufq->desc_count - 1)) 635 err = -ENOMEM; 636 } 637 638 rx_buf_alloc_all_out: 639 if (err) 640 idpf_rx_buf_rel_all(rxbufq); 641 642 return err; 643 } 644 645 /** 646 * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW 647 * @rxbufq: RX queue to create page pool for 648 * 649 * Returns 0 on success, negative on failure 650 */ 651 static int idpf_rx_bufs_init(struct idpf_queue *rxbufq) 652 { 653 struct page_pool *pool; 654 655 pool = idpf_rx_create_page_pool(rxbufq); 656 if (IS_ERR(pool)) 657 return PTR_ERR(pool); 658 659 rxbufq->pp = pool; 660 661 return idpf_rx_buf_alloc_all(rxbufq); 662 } 663 664 /** 665 * idpf_rx_bufs_init_all - Initialize all RX bufs 666 * @vport: virtual port struct 667 * 668 * Returns 0 on success, negative on failure 669 */ 670 int idpf_rx_bufs_init_all(struct idpf_vport *vport) 671 { 672 struct idpf_rxq_group *rx_qgrp; 673 struct idpf_queue *q; 674 int i, j, err; 675 676 for (i = 0; i < vport->num_rxq_grp; i++) { 677 rx_qgrp = &vport->rxq_grps[i]; 678 679 /* Allocate bufs for the rxq itself in singleq */ 680 if (!idpf_is_queue_model_split(vport->rxq_model)) { 681 int num_rxq = rx_qgrp->singleq.num_rxq; 682 683 for (j = 0; j < num_rxq; j++) { 684 q = rx_qgrp->singleq.rxqs[j]; 685 err = idpf_rx_bufs_init(q); 686 if (err) 687 return err; 688 } 689 690 continue; 691 } 692 693 /* Otherwise, allocate bufs for the buffer queues */ 694 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { 695 q = &rx_qgrp->splitq.bufq_sets[j].bufq; 696 err = idpf_rx_bufs_init(q); 697 if (err) 698 return err; 699 } 700 } 701 702 return 0; 703 } 704 705 /** 706 * idpf_rx_desc_alloc - Allocate queue Rx resources 707 * @rxq: Rx queue for which the resources are setup 708 * @bufq: buffer or completion queue 709 * @q_model: single or split queue model 710 * 711 * Returns 0 on success, negative on failure 712 */ 713 static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model) 714 { 715 struct device *dev = rxq->dev; 716 717 if (bufq) 718 rxq->size = rxq->desc_count * 719 sizeof(struct virtchnl2_splitq_rx_buf_desc); 720 else 721 rxq->size = rxq->desc_count * 722 sizeof(union virtchnl2_rx_desc); 723 724 /* Allocate descriptors and also round up to nearest 4K */ 725 rxq->size = ALIGN(rxq->size, 4096); 726 rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size, 727 &rxq->dma, GFP_KERNEL); 728 if (!rxq->desc_ring) { 729 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", 730 rxq->size); 731 return -ENOMEM; 732 } 733 734 rxq->next_to_alloc = 0; 735 rxq->next_to_clean = 0; 736 rxq->next_to_use = 0; 737 set_bit(__IDPF_Q_GEN_CHK, rxq->flags); 738 739 return 0; 740 } 741 742 /** 743 * idpf_rx_desc_alloc_all - allocate all RX queues resources 744 * @vport: virtual port structure 745 * 746 * Returns 0 on success, negative on failure 747 */ 748 static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) 749 { 750 struct device *dev = &vport->adapter->pdev->dev; 751 struct idpf_rxq_group *rx_qgrp; 752 struct idpf_queue *q; 753 int i, j, err; 754 u16 num_rxq; 755 756 for (i = 0; i < vport->num_rxq_grp; i++) { 757 rx_qgrp = &vport->rxq_grps[i]; 758 if (idpf_is_queue_model_split(vport->rxq_model)) 759 num_rxq = rx_qgrp->splitq.num_rxq_sets; 760 else 761 num_rxq = rx_qgrp->singleq.num_rxq; 
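		/* Descriptor memory for the RX queues themselves is allocated
		 * first; in splitq mode the per-group buffer queues get their
		 * rings in the loop further below.
		 */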
762 763 for (j = 0; j < num_rxq; j++) { 764 if (idpf_is_queue_model_split(vport->rxq_model)) 765 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; 766 else 767 q = rx_qgrp->singleq.rxqs[j]; 768 err = idpf_rx_desc_alloc(q, false, vport->rxq_model); 769 if (err) { 770 dev_err(dev, "Memory allocation for Rx Queue %u failed\n", 771 i); 772 goto err_out; 773 } 774 } 775 776 if (!idpf_is_queue_model_split(vport->rxq_model)) 777 continue; 778 779 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { 780 q = &rx_qgrp->splitq.bufq_sets[j].bufq; 781 err = idpf_rx_desc_alloc(q, true, vport->rxq_model); 782 if (err) { 783 dev_err(dev, "Memory allocation for Rx Buffer Queue %u failed\n", 784 i); 785 goto err_out; 786 } 787 } 788 } 789 790 return 0; 791 792 err_out: 793 idpf_rx_desc_rel_all(vport); 794 795 return err; 796 } 797 798 /** 799 * idpf_txq_group_rel - Release all resources for txq groups 800 * @vport: vport to release txq groups on 801 */ 802 static void idpf_txq_group_rel(struct idpf_vport *vport) 803 { 804 int i, j; 805 806 if (!vport->txq_grps) 807 return; 808 809 for (i = 0; i < vport->num_txq_grp; i++) { 810 struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; 811 812 for (j = 0; j < txq_grp->num_txq; j++) { 813 kfree(txq_grp->txqs[j]); 814 txq_grp->txqs[j] = NULL; 815 } 816 kfree(txq_grp->complq); 817 txq_grp->complq = NULL; 818 } 819 kfree(vport->txq_grps); 820 vport->txq_grps = NULL; 821 } 822 823 /** 824 * idpf_rxq_sw_queue_rel - Release software queue resources 825 * @rx_qgrp: rx queue group with software queues 826 */ 827 static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp) 828 { 829 int i, j; 830 831 for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) { 832 struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i]; 833 834 for (j = 0; j < bufq_set->num_refillqs; j++) { 835 kfree(bufq_set->refillqs[j].ring); 836 bufq_set->refillqs[j].ring = NULL; 837 } 838 kfree(bufq_set->refillqs); 839 bufq_set->refillqs = NULL; 840 } 841 } 842 843 /** 844 * idpf_rxq_group_rel - Release all resources for rxq groups 845 * @vport: vport to release rxq groups on 846 */ 847 static void idpf_rxq_group_rel(struct idpf_vport *vport) 848 { 849 int i; 850 851 if (!vport->rxq_grps) 852 return; 853 854 for (i = 0; i < vport->num_rxq_grp; i++) { 855 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 856 u16 num_rxq; 857 int j; 858 859 if (idpf_is_queue_model_split(vport->rxq_model)) { 860 num_rxq = rx_qgrp->splitq.num_rxq_sets; 861 for (j = 0; j < num_rxq; j++) { 862 kfree(rx_qgrp->splitq.rxq_sets[j]); 863 rx_qgrp->splitq.rxq_sets[j] = NULL; 864 } 865 866 idpf_rxq_sw_queue_rel(rx_qgrp); 867 kfree(rx_qgrp->splitq.bufq_sets); 868 rx_qgrp->splitq.bufq_sets = NULL; 869 } else { 870 num_rxq = rx_qgrp->singleq.num_rxq; 871 for (j = 0; j < num_rxq; j++) { 872 kfree(rx_qgrp->singleq.rxqs[j]); 873 rx_qgrp->singleq.rxqs[j] = NULL; 874 } 875 } 876 } 877 kfree(vport->rxq_grps); 878 vport->rxq_grps = NULL; 879 } 880 881 /** 882 * idpf_vport_queue_grp_rel_all - Release all queue groups 883 * @vport: vport to release queue groups for 884 */ 885 static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport) 886 { 887 idpf_txq_group_rel(vport); 888 idpf_rxq_group_rel(vport); 889 } 890 891 /** 892 * idpf_vport_queues_rel - Free memory for all queues 893 * @vport: virtual port 894 * 895 * Free the memory allocated for queues associated to a vport 896 */ 897 void idpf_vport_queues_rel(struct idpf_vport *vport) 898 { 899 idpf_tx_desc_rel_all(vport); 900 idpf_rx_desc_rel_all(vport); 901 
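	/* Descriptor rings and buffers must be released before the queue
	 * group bookkeeping that tracks them is freed below.
	 */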
	idpf_vport_queue_grp_rel_all(vport);

	kfree(vport->txqs);
	vport->txqs = NULL;
}

/**
 * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
 * @vport: vport to init txqs on
 *
 * We get a queue index from skb->queue_mapping and we need a fast way to
 * dereference the queue from queue groups. This allows us to quickly pull a
 * txq based on a queue index.
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
{
	int i, j, k = 0;

	vport->txqs = kcalloc(vport->num_txq, sizeof(struct idpf_queue *),
			      GFP_KERNEL);

	if (!vport->txqs)
		return -ENOMEM;

	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *tx_grp = &vport->txq_grps[i];

		for (j = 0; j < tx_grp->num_txq; j++, k++) {
			vport->txqs[k] = tx_grp->txqs[j];
			vport->txqs[k]->idx = k;
		}
	}

	return 0;
}

/**
 * idpf_vport_init_num_qs - Initialize number of queues
 * @vport: vport to initialize queues on
 * @vport_msg: data to be filled into vport
 */
void idpf_vport_init_num_qs(struct idpf_vport *vport,
			    struct virtchnl2_create_vport *vport_msg)
{
	struct idpf_vport_user_config_data *config_data;
	u16 idx = vport->idx;

	config_data = &vport->adapter->vport_config[idx]->user_config;
	vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
	vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
	/* number of txqs and rxqs in config data will be zero only in the
	 * driver load path; we don't update them after that
	 */
	if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
		config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
		config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
	}

	if (idpf_is_queue_model_split(vport->txq_model))
		vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
	if (idpf_is_queue_model_split(vport->rxq_model))
		vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);

	/* Adjust number of buffer queues per Rx queue group.
*/ 967 if (!idpf_is_queue_model_split(vport->rxq_model)) { 968 vport->num_bufqs_per_qgrp = 0; 969 vport->bufq_size[0] = IDPF_RX_BUF_2048; 970 971 return; 972 } 973 974 vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP; 975 /* Bufq[0] default buffer size is 4K 976 * Bufq[1] default buffer size is 2K 977 */ 978 vport->bufq_size[0] = IDPF_RX_BUF_4096; 979 vport->bufq_size[1] = IDPF_RX_BUF_2048; 980 } 981 982 /** 983 * idpf_vport_calc_num_q_desc - Calculate number of queue groups 984 * @vport: vport to calculate q groups for 985 */ 986 void idpf_vport_calc_num_q_desc(struct idpf_vport *vport) 987 { 988 struct idpf_vport_user_config_data *config_data; 989 int num_bufqs = vport->num_bufqs_per_qgrp; 990 u32 num_req_txq_desc, num_req_rxq_desc; 991 u16 idx = vport->idx; 992 int i; 993 994 config_data = &vport->adapter->vport_config[idx]->user_config; 995 num_req_txq_desc = config_data->num_req_txq_desc; 996 num_req_rxq_desc = config_data->num_req_rxq_desc; 997 998 vport->complq_desc_count = 0; 999 if (num_req_txq_desc) { 1000 vport->txq_desc_count = num_req_txq_desc; 1001 if (idpf_is_queue_model_split(vport->txq_model)) { 1002 vport->complq_desc_count = num_req_txq_desc; 1003 if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC) 1004 vport->complq_desc_count = 1005 IDPF_MIN_TXQ_COMPLQ_DESC; 1006 } 1007 } else { 1008 vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT; 1009 if (idpf_is_queue_model_split(vport->txq_model)) 1010 vport->complq_desc_count = 1011 IDPF_DFLT_TX_COMPLQ_DESC_COUNT; 1012 } 1013 1014 if (num_req_rxq_desc) 1015 vport->rxq_desc_count = num_req_rxq_desc; 1016 else 1017 vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT; 1018 1019 for (i = 0; i < num_bufqs; i++) { 1020 if (!vport->bufq_desc_count[i]) 1021 vport->bufq_desc_count[i] = 1022 IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count, 1023 num_bufqs); 1024 } 1025 } 1026 1027 /** 1028 * idpf_vport_calc_total_qs - Calculate total number of queues 1029 * @adapter: private data struct 1030 * @vport_idx: vport idx to retrieve vport pointer 1031 * @vport_msg: message to fill with data 1032 * @max_q: vport max queue info 1033 * 1034 * Return 0 on success, error value on failure. 1035 */ 1036 int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx, 1037 struct virtchnl2_create_vport *vport_msg, 1038 struct idpf_vport_max_q *max_q) 1039 { 1040 int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0; 1041 int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0; 1042 u16 num_req_tx_qs = 0, num_req_rx_qs = 0; 1043 struct idpf_vport_config *vport_config; 1044 u16 num_txq_grps, num_rxq_grps; 1045 u32 num_qs; 1046 1047 vport_config = adapter->vport_config[vport_idx]; 1048 if (vport_config) { 1049 num_req_tx_qs = vport_config->user_config.num_req_tx_qs; 1050 num_req_rx_qs = vport_config->user_config.num_req_rx_qs; 1051 } else { 1052 int num_cpus; 1053 1054 /* Restrict num of queues to cpus online as a default 1055 * configuration to give best performance. User can always 1056 * override to a max number of queues via ethtool. 1057 */ 1058 num_cpus = num_online_cpus(); 1059 1060 dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus); 1061 dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus); 1062 dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus); 1063 dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus); 1064 } 1065 1066 if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) { 1067 num_txq_grps = num_req_tx_qs ? 
num_req_tx_qs : dflt_splitq_txq_grps; 1068 vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps * 1069 IDPF_COMPLQ_PER_GROUP); 1070 vport_msg->num_tx_q = cpu_to_le16(num_txq_grps * 1071 IDPF_DFLT_SPLITQ_TXQ_PER_GROUP); 1072 } else { 1073 num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS; 1074 num_qs = num_txq_grps * (num_req_tx_qs ? num_req_tx_qs : 1075 dflt_singleq_txqs); 1076 vport_msg->num_tx_q = cpu_to_le16(num_qs); 1077 vport_msg->num_tx_complq = 0; 1078 } 1079 if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) { 1080 num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps; 1081 vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps * 1082 IDPF_MAX_BUFQS_PER_RXQ_GRP); 1083 vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps * 1084 IDPF_DFLT_SPLITQ_RXQ_PER_GROUP); 1085 } else { 1086 num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS; 1087 num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs : 1088 dflt_singleq_rxqs); 1089 vport_msg->num_rx_q = cpu_to_le16(num_qs); 1090 vport_msg->num_rx_bufq = 0; 1091 } 1092 1093 return 0; 1094 } 1095 1096 /** 1097 * idpf_vport_calc_num_q_groups - Calculate number of queue groups 1098 * @vport: vport to calculate q groups for 1099 */ 1100 void idpf_vport_calc_num_q_groups(struct idpf_vport *vport) 1101 { 1102 if (idpf_is_queue_model_split(vport->txq_model)) 1103 vport->num_txq_grp = vport->num_txq; 1104 else 1105 vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS; 1106 1107 if (idpf_is_queue_model_split(vport->rxq_model)) 1108 vport->num_rxq_grp = vport->num_rxq; 1109 else 1110 vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS; 1111 } 1112 1113 /** 1114 * idpf_vport_calc_numq_per_grp - Calculate number of queues per group 1115 * @vport: vport to calculate queues for 1116 * @num_txq: return parameter for number of TX queues 1117 * @num_rxq: return parameter for number of RX queues 1118 */ 1119 static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport, 1120 u16 *num_txq, u16 *num_rxq) 1121 { 1122 if (idpf_is_queue_model_split(vport->txq_model)) 1123 *num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP; 1124 else 1125 *num_txq = vport->num_txq; 1126 1127 if (idpf_is_queue_model_split(vport->rxq_model)) 1128 *num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; 1129 else 1130 *num_rxq = vport->num_rxq; 1131 } 1132 1133 /** 1134 * idpf_rxq_set_descids - set the descids supported by this queue 1135 * @vport: virtual port data structure 1136 * @q: rx queue for which descids are set 1137 * 1138 */ 1139 static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q) 1140 { 1141 if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { 1142 q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M; 1143 } else { 1144 if (vport->base_rxd) 1145 q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M; 1146 else 1147 q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M; 1148 } 1149 } 1150 1151 /** 1152 * idpf_txq_group_alloc - Allocate all txq group resources 1153 * @vport: vport to allocate txq groups for 1154 * @num_txq: number of txqs to allocate for each group 1155 * 1156 * Returns 0 on success, negative on failure 1157 */ 1158 static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) 1159 { 1160 bool flow_sch_en; 1161 int err, i; 1162 1163 vport->txq_grps = kcalloc(vport->num_txq_grp, 1164 sizeof(*vport->txq_grps), GFP_KERNEL); 1165 if (!vport->txq_grps) 1166 return -ENOMEM; 1167 1168 flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, 1169 VIRTCHNL2_CAP_SPLITQ_QSCHED); 1170 1171 for (i = 0; i < vport->num_txq_grp; i++) { 1172 struct idpf_txq_group *tx_qgrp = 
&vport->txq_grps[i]; 1173 struct idpf_adapter *adapter = vport->adapter; 1174 int j; 1175 1176 tx_qgrp->vport = vport; 1177 tx_qgrp->num_txq = num_txq; 1178 1179 for (j = 0; j < tx_qgrp->num_txq; j++) { 1180 tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]), 1181 GFP_KERNEL); 1182 if (!tx_qgrp->txqs[j]) { 1183 err = -ENOMEM; 1184 goto err_alloc; 1185 } 1186 } 1187 1188 for (j = 0; j < tx_qgrp->num_txq; j++) { 1189 struct idpf_queue *q = tx_qgrp->txqs[j]; 1190 1191 q->dev = &adapter->pdev->dev; 1192 q->desc_count = vport->txq_desc_count; 1193 q->tx_max_bufs = idpf_get_max_tx_bufs(adapter); 1194 q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter); 1195 q->vport = vport; 1196 q->txq_grp = tx_qgrp; 1197 hash_init(q->sched_buf_hash); 1198 1199 if (flow_sch_en) 1200 set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags); 1201 } 1202 1203 if (!idpf_is_queue_model_split(vport->txq_model)) 1204 continue; 1205 1206 tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP, 1207 sizeof(*tx_qgrp->complq), 1208 GFP_KERNEL); 1209 if (!tx_qgrp->complq) { 1210 err = -ENOMEM; 1211 goto err_alloc; 1212 } 1213 1214 tx_qgrp->complq->dev = &adapter->pdev->dev; 1215 tx_qgrp->complq->desc_count = vport->complq_desc_count; 1216 tx_qgrp->complq->vport = vport; 1217 tx_qgrp->complq->txq_grp = tx_qgrp; 1218 1219 if (flow_sch_en) 1220 __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags); 1221 } 1222 1223 return 0; 1224 1225 err_alloc: 1226 idpf_txq_group_rel(vport); 1227 1228 return err; 1229 } 1230 1231 /** 1232 * idpf_rxq_group_alloc - Allocate all rxq group resources 1233 * @vport: vport to allocate rxq groups for 1234 * @num_rxq: number of rxqs to allocate for each group 1235 * 1236 * Returns 0 on success, negative on failure 1237 */ 1238 static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) 1239 { 1240 struct idpf_adapter *adapter = vport->adapter; 1241 struct idpf_queue *q; 1242 int i, k, err = 0; 1243 1244 vport->rxq_grps = kcalloc(vport->num_rxq_grp, 1245 sizeof(struct idpf_rxq_group), GFP_KERNEL); 1246 if (!vport->rxq_grps) 1247 return -ENOMEM; 1248 1249 for (i = 0; i < vport->num_rxq_grp; i++) { 1250 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1251 int j; 1252 1253 rx_qgrp->vport = vport; 1254 if (!idpf_is_queue_model_split(vport->rxq_model)) { 1255 rx_qgrp->singleq.num_rxq = num_rxq; 1256 for (j = 0; j < num_rxq; j++) { 1257 rx_qgrp->singleq.rxqs[j] = 1258 kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]), 1259 GFP_KERNEL); 1260 if (!rx_qgrp->singleq.rxqs[j]) { 1261 err = -ENOMEM; 1262 goto err_alloc; 1263 } 1264 } 1265 goto skip_splitq_rx_init; 1266 } 1267 rx_qgrp->splitq.num_rxq_sets = num_rxq; 1268 1269 for (j = 0; j < num_rxq; j++) { 1270 rx_qgrp->splitq.rxq_sets[j] = 1271 kzalloc(sizeof(struct idpf_rxq_set), 1272 GFP_KERNEL); 1273 if (!rx_qgrp->splitq.rxq_sets[j]) { 1274 err = -ENOMEM; 1275 goto err_alloc; 1276 } 1277 } 1278 1279 rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp, 1280 sizeof(struct idpf_bufq_set), 1281 GFP_KERNEL); 1282 if (!rx_qgrp->splitq.bufq_sets) { 1283 err = -ENOMEM; 1284 goto err_alloc; 1285 } 1286 1287 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { 1288 struct idpf_bufq_set *bufq_set = 1289 &rx_qgrp->splitq.bufq_sets[j]; 1290 int swq_size = sizeof(struct idpf_sw_queue); 1291 1292 q = &rx_qgrp->splitq.bufq_sets[j].bufq; 1293 q->dev = &adapter->pdev->dev; 1294 q->desc_count = vport->bufq_desc_count[j]; 1295 q->vport = vport; 1296 q->rxq_grp = rx_qgrp; 1297 q->idx = j; 1298 q->rx_buf_size = vport->bufq_size[j]; 1299 q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; 
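			/* Buffer queue tail updates are aligned to
			 * rx_buf_stride entries; see the next_to_alloc
			 * alignment in idpf_rx_post_init_bufs() above.
			 */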
1300 q->rx_buf_stride = IDPF_RX_BUF_STRIDE; 1301 if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS, 1302 IDPF_CAP_HSPLIT) && 1303 idpf_is_queue_model_split(vport->rxq_model)) { 1304 q->rx_hsplit_en = true; 1305 q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; 1306 } 1307 1308 bufq_set->num_refillqs = num_rxq; 1309 bufq_set->refillqs = kcalloc(num_rxq, swq_size, 1310 GFP_KERNEL); 1311 if (!bufq_set->refillqs) { 1312 err = -ENOMEM; 1313 goto err_alloc; 1314 } 1315 for (k = 0; k < bufq_set->num_refillqs; k++) { 1316 struct idpf_sw_queue *refillq = 1317 &bufq_set->refillqs[k]; 1318 1319 refillq->dev = &vport->adapter->pdev->dev; 1320 refillq->desc_count = 1321 vport->bufq_desc_count[j]; 1322 set_bit(__IDPF_Q_GEN_CHK, refillq->flags); 1323 set_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags); 1324 refillq->ring = kcalloc(refillq->desc_count, 1325 sizeof(u16), 1326 GFP_KERNEL); 1327 if (!refillq->ring) { 1328 err = -ENOMEM; 1329 goto err_alloc; 1330 } 1331 } 1332 } 1333 1334 skip_splitq_rx_init: 1335 for (j = 0; j < num_rxq; j++) { 1336 if (!idpf_is_queue_model_split(vport->rxq_model)) { 1337 q = rx_qgrp->singleq.rxqs[j]; 1338 goto setup_rxq; 1339 } 1340 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; 1341 rx_qgrp->splitq.rxq_sets[j]->refillq0 = 1342 &rx_qgrp->splitq.bufq_sets[0].refillqs[j]; 1343 if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) 1344 rx_qgrp->splitq.rxq_sets[j]->refillq1 = 1345 &rx_qgrp->splitq.bufq_sets[1].refillqs[j]; 1346 1347 if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS, 1348 IDPF_CAP_HSPLIT) && 1349 idpf_is_queue_model_split(vport->rxq_model)) { 1350 q->rx_hsplit_en = true; 1351 q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; 1352 } 1353 1354 setup_rxq: 1355 q->dev = &adapter->pdev->dev; 1356 q->desc_count = vport->rxq_desc_count; 1357 q->vport = vport; 1358 q->rxq_grp = rx_qgrp; 1359 q->idx = (i * num_rxq) + j; 1360 /* In splitq mode, RXQ buffer size should be 1361 * set to that of the first buffer queue 1362 * associated with this RXQ 1363 */ 1364 q->rx_buf_size = vport->bufq_size[0]; 1365 q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; 1366 q->rx_max_pkt_size = vport->netdev->mtu + 1367 IDPF_PACKET_HDR_PAD; 1368 idpf_rxq_set_descids(vport, q); 1369 } 1370 } 1371 1372 err_alloc: 1373 if (err) 1374 idpf_rxq_group_rel(vport); 1375 1376 return err; 1377 } 1378 1379 /** 1380 * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources 1381 * @vport: vport with qgrps to allocate 1382 * 1383 * Returns 0 on success, negative on failure 1384 */ 1385 static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport) 1386 { 1387 u16 num_txq, num_rxq; 1388 int err; 1389 1390 idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq); 1391 1392 err = idpf_txq_group_alloc(vport, num_txq); 1393 if (err) 1394 goto err_out; 1395 1396 err = idpf_rxq_group_alloc(vport, num_rxq); 1397 if (err) 1398 goto err_out; 1399 1400 return 0; 1401 1402 err_out: 1403 idpf_vport_queue_grp_rel_all(vport); 1404 1405 return err; 1406 } 1407 1408 /** 1409 * idpf_vport_queues_alloc - Allocate memory for all queues 1410 * @vport: virtual port 1411 * 1412 * Allocate memory for queues associated with a vport. Returns 0 on success, 1413 * negative on failure. 
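 *
 * Queue group bookkeeping is allocated first, then the TX and RX descriptor
 * rings, then the fast-path txq lookup array; any failure unwinds whatever
 * was already allocated via idpf_vport_queues_rel().
 *
 * A rough calling sketch (error handling trimmed; the real call sites live
 * elsewhere in the driver):
 *
 *	err = idpf_vport_queues_alloc(vport);
 *	if (err)
 *		return err;
 *	...
 *	idpf_vport_queues_rel(vport);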
1414 */ 1415 int idpf_vport_queues_alloc(struct idpf_vport *vport) 1416 { 1417 int err; 1418 1419 err = idpf_vport_queue_grp_alloc_all(vport); 1420 if (err) 1421 goto err_out; 1422 1423 err = idpf_tx_desc_alloc_all(vport); 1424 if (err) 1425 goto err_out; 1426 1427 err = idpf_rx_desc_alloc_all(vport); 1428 if (err) 1429 goto err_out; 1430 1431 err = idpf_vport_init_fast_path_txqs(vport); 1432 if (err) 1433 goto err_out; 1434 1435 return 0; 1436 1437 err_out: 1438 idpf_vport_queues_rel(vport); 1439 1440 return err; 1441 } 1442 1443 /** 1444 * idpf_tx_handle_sw_marker - Handle queue marker packet 1445 * @tx_q: tx queue to handle software marker 1446 */ 1447 static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q) 1448 { 1449 struct idpf_vport *vport = tx_q->vport; 1450 int i; 1451 1452 clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags); 1453 /* Hardware must write marker packets to all queues associated with 1454 * completion queues. So check if all queues received marker packets 1455 */ 1456 for (i = 0; i < vport->num_txq; i++) 1457 /* If we're still waiting on any other TXQ marker completions, 1458 * just return now since we cannot wake up the marker_wq yet. 1459 */ 1460 if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags)) 1461 return; 1462 1463 /* Drain complete */ 1464 set_bit(IDPF_VPORT_SW_MARKER, vport->flags); 1465 wake_up(&vport->sw_marker_wq); 1466 } 1467 1468 /** 1469 * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of 1470 * packet 1471 * @tx_q: tx queue to clean buffer from 1472 * @tx_buf: buffer to be cleaned 1473 * @cleaned: pointer to stats struct to track cleaned packets/bytes 1474 * @napi_budget: Used to determine if we are in netpoll 1475 */ 1476 static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q, 1477 struct idpf_tx_buf *tx_buf, 1478 struct idpf_cleaned_stats *cleaned, 1479 int napi_budget) 1480 { 1481 napi_consume_skb(tx_buf->skb, napi_budget); 1482 1483 if (dma_unmap_len(tx_buf, len)) { 1484 dma_unmap_single(tx_q->dev, 1485 dma_unmap_addr(tx_buf, dma), 1486 dma_unmap_len(tx_buf, len), 1487 DMA_TO_DEVICE); 1488 1489 dma_unmap_len_set(tx_buf, len, 0); 1490 } 1491 1492 /* clear tx_buf data */ 1493 tx_buf->skb = NULL; 1494 1495 cleaned->bytes += tx_buf->bytecount; 1496 cleaned->packets += tx_buf->gso_segs; 1497 } 1498 1499 /** 1500 * idpf_tx_clean_stashed_bufs - clean bufs that were stored for 1501 * out of order completions 1502 * @txq: queue to clean 1503 * @compl_tag: completion tag of packet to clean (from completion descriptor) 1504 * @cleaned: pointer to stats struct to track cleaned packets/bytes 1505 * @budget: Used to determine if we are in netpoll 1506 */ 1507 static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, 1508 struct idpf_cleaned_stats *cleaned, 1509 int budget) 1510 { 1511 struct idpf_tx_stash *stash; 1512 struct hlist_node *tmp_buf; 1513 1514 /* Buffer completion */ 1515 hash_for_each_possible_safe(txq->sched_buf_hash, stash, tmp_buf, 1516 hlist, compl_tag) { 1517 if (unlikely(stash->buf.compl_tag != (int)compl_tag)) 1518 continue; 1519 1520 if (stash->buf.skb) { 1521 idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned, 1522 budget); 1523 } else if (dma_unmap_len(&stash->buf, len)) { 1524 dma_unmap_page(txq->dev, 1525 dma_unmap_addr(&stash->buf, dma), 1526 dma_unmap_len(&stash->buf, len), 1527 DMA_TO_DEVICE); 1528 dma_unmap_len_set(&stash->buf, len, 0); 1529 } 1530 1531 /* Push shadow buf back onto stack */ 1532 idpf_buf_lifo_push(&txq->buf_stack, stash); 1533 1534 hash_del(&stash->hlist); 1535 } 
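	/* Stashes whose completion tag does not match are left in the hash
	 * table and will be cleaned when their own completion arrives.
	 */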
1536 } 1537 1538 /** 1539 * idpf_stash_flow_sch_buffers - store buffer parameters info to be freed at a 1540 * later time (only relevant for flow scheduling mode) 1541 * @txq: Tx queue to clean 1542 * @tx_buf: buffer to store 1543 */ 1544 static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq, 1545 struct idpf_tx_buf *tx_buf) 1546 { 1547 struct idpf_tx_stash *stash; 1548 1549 if (unlikely(!dma_unmap_addr(tx_buf, dma) && 1550 !dma_unmap_len(tx_buf, len))) 1551 return 0; 1552 1553 stash = idpf_buf_lifo_pop(&txq->buf_stack); 1554 if (unlikely(!stash)) { 1555 net_err_ratelimited("%s: No out-of-order TX buffers left!\n", 1556 txq->vport->netdev->name); 1557 1558 return -ENOMEM; 1559 } 1560 1561 /* Store buffer params in shadow buffer */ 1562 stash->buf.skb = tx_buf->skb; 1563 stash->buf.bytecount = tx_buf->bytecount; 1564 stash->buf.gso_segs = tx_buf->gso_segs; 1565 dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma)); 1566 dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len)); 1567 stash->buf.compl_tag = tx_buf->compl_tag; 1568 1569 /* Add buffer to buf_hash table to be freed later */ 1570 hash_add(txq->sched_buf_hash, &stash->hlist, stash->buf.compl_tag); 1571 1572 memset(tx_buf, 0, sizeof(struct idpf_tx_buf)); 1573 1574 /* Reinitialize buf_id portion of tag */ 1575 tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; 1576 1577 return 0; 1578 } 1579 1580 #define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \ 1581 do { \ 1582 (ntc)++; \ 1583 if (unlikely(!(ntc))) { \ 1584 ntc -= (txq)->desc_count; \ 1585 buf = (txq)->tx_buf; \ 1586 desc = IDPF_FLEX_TX_DESC(txq, 0); \ 1587 } else { \ 1588 (buf)++; \ 1589 (desc)++; \ 1590 } \ 1591 } while (0) 1592 1593 /** 1594 * idpf_tx_splitq_clean - Reclaim resources from buffer queue 1595 * @tx_q: Tx queue to clean 1596 * @end: queue index until which it should be cleaned 1597 * @napi_budget: Used to determine if we are in netpoll 1598 * @cleaned: pointer to stats struct to track cleaned packets/bytes 1599 * @descs_only: true if queue is using flow-based scheduling and should 1600 * not clean buffers at this time 1601 * 1602 * Cleans the queue descriptor ring. If the queue is using queue-based 1603 * scheduling, the buffers will be cleaned as well. If the queue is using 1604 * flow-based scheduling, only the descriptors are cleaned at this time. 1605 * Separate packet completion events will be reported on the completion queue, 1606 * and the buffers will be cleaned separately. The stats are not updated from 1607 * this function when using flow-based scheduling. 1608 */ 1609 static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end, 1610 int napi_budget, 1611 struct idpf_cleaned_stats *cleaned, 1612 bool descs_only) 1613 { 1614 union idpf_tx_flex_desc *next_pending_desc = NULL; 1615 union idpf_tx_flex_desc *tx_desc; 1616 s16 ntc = tx_q->next_to_clean; 1617 struct idpf_tx_buf *tx_buf; 1618 1619 tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc); 1620 next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end); 1621 tx_buf = &tx_q->tx_buf[ntc]; 1622 ntc -= tx_q->desc_count; 1623 1624 while (tx_desc != next_pending_desc) { 1625 union idpf_tx_flex_desc *eop_desc; 1626 1627 /* If this entry in the ring was used as a context descriptor, 1628 * it's corresponding entry in the buffer ring will have an 1629 * invalid completion tag since no buffer was used. We can 1630 * skip this descriptor since there is no buffer to clean. 
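		 * (A TSO packet, for instance, consumes one context descriptor
		 * in addition to its data descriptors; that slot's tx_buf
		 * entry keeps IDPF_SPLITQ_TX_INVAL_COMPL_TAG and is skipped
		 * here.)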
1631 */ 1632 if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG)) 1633 goto fetch_next_txq_desc; 1634 1635 eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch; 1636 1637 /* clear next_to_watch to prevent false hangs */ 1638 tx_buf->next_to_watch = NULL; 1639 1640 if (descs_only) { 1641 if (idpf_stash_flow_sch_buffers(tx_q, tx_buf)) 1642 goto tx_splitq_clean_out; 1643 1644 while (tx_desc != eop_desc) { 1645 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, 1646 tx_desc, tx_buf); 1647 1648 if (dma_unmap_len(tx_buf, len)) { 1649 if (idpf_stash_flow_sch_buffers(tx_q, 1650 tx_buf)) 1651 goto tx_splitq_clean_out; 1652 } 1653 } 1654 } else { 1655 idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned, 1656 napi_budget); 1657 1658 /* unmap remaining buffers */ 1659 while (tx_desc != eop_desc) { 1660 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, 1661 tx_desc, tx_buf); 1662 1663 /* unmap any remaining paged data */ 1664 if (dma_unmap_len(tx_buf, len)) { 1665 dma_unmap_page(tx_q->dev, 1666 dma_unmap_addr(tx_buf, dma), 1667 dma_unmap_len(tx_buf, len), 1668 DMA_TO_DEVICE); 1669 dma_unmap_len_set(tx_buf, len, 0); 1670 } 1671 } 1672 } 1673 1674 fetch_next_txq_desc: 1675 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf); 1676 } 1677 1678 tx_splitq_clean_out: 1679 ntc += tx_q->desc_count; 1680 tx_q->next_to_clean = ntc; 1681 } 1682 1683 #define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \ 1684 do { \ 1685 (buf)++; \ 1686 (ntc)++; \ 1687 if (unlikely((ntc) == (txq)->desc_count)) { \ 1688 buf = (txq)->tx_buf; \ 1689 ntc = 0; \ 1690 } \ 1691 } while (0) 1692 1693 /** 1694 * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers 1695 * @txq: queue to clean 1696 * @compl_tag: completion tag of packet to clean (from completion descriptor) 1697 * @cleaned: pointer to stats struct to track cleaned packets/bytes 1698 * @budget: Used to determine if we are in netpoll 1699 * 1700 * Cleans all buffers associated with the input completion tag either from the 1701 * TX buffer ring or from the hash table if the buffers were previously 1702 * stashed. Returns the byte/segment count for the cleaned packet associated 1703 * this completion tag. 1704 */ 1705 static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag, 1706 struct idpf_cleaned_stats *cleaned, 1707 int budget) 1708 { 1709 u16 idx = compl_tag & txq->compl_tag_bufid_m; 1710 struct idpf_tx_buf *tx_buf = NULL; 1711 u16 ntc = txq->next_to_clean; 1712 u16 num_descs_cleaned = 0; 1713 u16 orig_idx = idx; 1714 1715 tx_buf = &txq->tx_buf[idx]; 1716 1717 while (tx_buf->compl_tag == (int)compl_tag) { 1718 if (tx_buf->skb) { 1719 idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget); 1720 } else if (dma_unmap_len(tx_buf, len)) { 1721 dma_unmap_page(txq->dev, 1722 dma_unmap_addr(tx_buf, dma), 1723 dma_unmap_len(tx_buf, len), 1724 DMA_TO_DEVICE); 1725 dma_unmap_len_set(tx_buf, len, 0); 1726 } 1727 1728 memset(tx_buf, 0, sizeof(struct idpf_tx_buf)); 1729 tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; 1730 1731 num_descs_cleaned++; 1732 idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf); 1733 } 1734 1735 /* If we didn't clean anything on the ring for this completion, there's 1736 * nothing more to do. 1737 */ 1738 if (unlikely(!num_descs_cleaned)) 1739 return false; 1740 1741 /* Otherwise, if we did clean a packet on the ring directly, it's safe 1742 * to assume that the descriptors starting from the original 1743 * next_to_clean up until the previously cleaned packet can be reused. 
	 * Therefore, we will go back in the ring and stash any buffers still
	 * in the ring into the hash table to be cleaned later.
	 */
	tx_buf = &txq->tx_buf[ntc];
	while (tx_buf != &txq->tx_buf[orig_idx]) {
		idpf_stash_flow_sch_buffers(txq, tx_buf);
		idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
	}

	/* Finally, update next_to_clean to reflect the work that was just done
	 * on the ring, if any. If the packet was only cleaned from the hash
	 * table, the ring will not be impacted, therefore we should not touch
	 * next_to_clean. The updated idx is used here.
	 */
	txq->next_to_clean = idx;

	return true;
}

/**
 * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers
 * whether on the buffer ring or in the hash table
 * @txq: Tx ring to clean
 * @desc: pointer to completion queue descriptor to extract completion
 * information from
 * @cleaned: pointer to stats struct to track cleaned packets/bytes
 * @budget: Used to determine if we are in netpoll
 *
 * Returns the bytes/packets cleaned via @cleaned
 */
static void idpf_tx_handle_rs_completion(struct idpf_queue *txq,
					 struct idpf_splitq_tx_compl_desc *desc,
					 struct idpf_cleaned_stats *cleaned,
					 int budget)
{
	u16 compl_tag;

	if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) {
		u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);

		return idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
	}

	compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);

	/* If we didn't clean anything on the ring, this packet must be
	 * in the hash table. Go clean it there.
	 */
	if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
		idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
}

/**
 * idpf_tx_clean_complq - Reclaim resources on completion queue
 * @complq: Tx ring to clean
 * @budget: Used to determine if we are in netpoll
 * @cleaned: returns number of packets cleaned
 *
 * Returns true if there's any budget left (i.e.
the clean is finished) 1803 */ 1804 static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, 1805 int *cleaned) 1806 { 1807 struct idpf_splitq_tx_compl_desc *tx_desc; 1808 struct idpf_vport *vport = complq->vport; 1809 s16 ntc = complq->next_to_clean; 1810 struct idpf_netdev_priv *np; 1811 unsigned int complq_budget; 1812 bool complq_ok = true; 1813 int i; 1814 1815 complq_budget = vport->compln_clean_budget; 1816 tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc); 1817 ntc -= complq->desc_count; 1818 1819 do { 1820 struct idpf_cleaned_stats cleaned_stats = { }; 1821 struct idpf_queue *tx_q; 1822 int rel_tx_qid; 1823 u16 hw_head; 1824 u8 ctype; /* completion type */ 1825 u16 gen; 1826 1827 /* if the descriptor isn't done, no work yet to do */ 1828 gen = (le16_to_cpu(tx_desc->qid_comptype_gen) & 1829 IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S; 1830 if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen) 1831 break; 1832 1833 /* Find necessary info of TX queue to clean buffers */ 1834 rel_tx_qid = (le16_to_cpu(tx_desc->qid_comptype_gen) & 1835 IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S; 1836 if (rel_tx_qid >= complq->txq_grp->num_txq || 1837 !complq->txq_grp->txqs[rel_tx_qid]) { 1838 dev_err(&complq->vport->adapter->pdev->dev, 1839 "TxQ not found\n"); 1840 goto fetch_next_desc; 1841 } 1842 tx_q = complq->txq_grp->txqs[rel_tx_qid]; 1843 1844 /* Determine completion type */ 1845 ctype = (le16_to_cpu(tx_desc->qid_comptype_gen) & 1846 IDPF_TXD_COMPLQ_COMPL_TYPE_M) >> 1847 IDPF_TXD_COMPLQ_COMPL_TYPE_S; 1848 switch (ctype) { 1849 case IDPF_TXD_COMPLT_RE: 1850 hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head); 1851 1852 idpf_tx_splitq_clean(tx_q, hw_head, budget, 1853 &cleaned_stats, true); 1854 break; 1855 case IDPF_TXD_COMPLT_RS: 1856 idpf_tx_handle_rs_completion(tx_q, tx_desc, 1857 &cleaned_stats, budget); 1858 break; 1859 case IDPF_TXD_COMPLT_SW_MARKER: 1860 idpf_tx_handle_sw_marker(tx_q); 1861 break; 1862 default: 1863 dev_err(&tx_q->vport->adapter->pdev->dev, 1864 "Unknown TX completion type: %d\n", 1865 ctype); 1866 goto fetch_next_desc; 1867 } 1868 1869 u64_stats_update_begin(&tx_q->stats_sync); 1870 u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets); 1871 u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes); 1872 tx_q->cleaned_pkts += cleaned_stats.packets; 1873 tx_q->cleaned_bytes += cleaned_stats.bytes; 1874 complq->num_completions++; 1875 u64_stats_update_end(&tx_q->stats_sync); 1876 1877 fetch_next_desc: 1878 tx_desc++; 1879 ntc++; 1880 if (unlikely(!ntc)) { 1881 ntc -= complq->desc_count; 1882 tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0); 1883 change_bit(__IDPF_Q_GEN_CHK, complq->flags); 1884 } 1885 1886 prefetch(tx_desc); 1887 1888 /* update budget accounting */ 1889 complq_budget--; 1890 } while (likely(complq_budget)); 1891 1892 /* Store the state of the complq to be used later in deciding if a 1893 * TXQ can be started again 1894 */ 1895 if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) > 1896 IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq))) 1897 complq_ok = false; 1898 1899 np = netdev_priv(complq->vport->netdev); 1900 for (i = 0; i < complq->txq_grp->num_txq; ++i) { 1901 struct idpf_queue *tx_q = complq->txq_grp->txqs[i]; 1902 struct netdev_queue *nq; 1903 bool dont_wake; 1904 1905 /* We didn't clean anything on this queue, move along */ 1906 if (!tx_q->cleaned_bytes) 1907 continue; 1908 1909 *cleaned += tx_q->cleaned_pkts; 1910 1911 /* Update BQL */ 1912 nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); 1913 1914 dont_wake = 
!complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) || 1915 np->state != __IDPF_VPORT_UP || 1916 !netif_carrier_ok(tx_q->vport->netdev); 1917 /* Check if the TXQ needs to and can be restarted */ 1918 __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes, 1919 IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, 1920 dont_wake); 1921 1922 /* Reset cleaned stats for the next time this queue is 1923 * cleaned 1924 */ 1925 tx_q->cleaned_bytes = 0; 1926 tx_q->cleaned_pkts = 0; 1927 } 1928 1929 ntc += complq->desc_count; 1930 complq->next_to_clean = ntc; 1931 1932 return !!complq_budget; 1933 } 1934 1935 /** 1936 * idpf_tx_splitq_build_ctb - populate command tag and size for queue 1937 * based scheduling descriptors 1938 * @desc: descriptor to populate 1939 * @params: pointer to tx params struct 1940 * @td_cmd: command to be filled in desc 1941 * @size: size of buffer 1942 */ 1943 void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc, 1944 struct idpf_tx_splitq_params *params, 1945 u16 td_cmd, u16 size) 1946 { 1947 desc->q.qw1.cmd_dtype = 1948 cpu_to_le16(params->dtype & IDPF_FLEX_TXD_QW1_DTYPE_M); 1949 desc->q.qw1.cmd_dtype |= 1950 cpu_to_le16((td_cmd << IDPF_FLEX_TXD_QW1_CMD_S) & 1951 IDPF_FLEX_TXD_QW1_CMD_M); 1952 desc->q.qw1.buf_size = cpu_to_le16((u16)size); 1953 desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag); 1954 } 1955 1956 /** 1957 * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow 1958 * scheduling descriptors 1959 * @desc: descriptor to populate 1960 * @params: pointer to tx params struct 1961 * @td_cmd: command to be filled in desc 1962 * @size: size of buffer 1963 */ 1964 void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, 1965 struct idpf_tx_splitq_params *params, 1966 u16 td_cmd, u16 size) 1967 { 1968 desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd; 1969 desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size); 1970 desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag); 1971 } 1972 1973 /** 1974 * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions 1975 * @tx_q: the queue to be checked 1976 * @size: number of descriptors we want to assure is available 1977 * 1978 * Returns 0 if stop is not needed 1979 */ 1980 int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size) 1981 { 1982 struct netdev_queue *nq; 1983 1984 if (likely(IDPF_DESC_UNUSED(tx_q) >= size)) 1985 return 0; 1986 1987 u64_stats_update_begin(&tx_q->stats_sync); 1988 u64_stats_inc(&tx_q->q_stats.tx.q_busy); 1989 u64_stats_update_end(&tx_q->stats_sync); 1990 1991 nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); 1992 1993 return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size); 1994 } 1995 1996 /** 1997 * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions 1998 * @tx_q: the queue to be checked 1999 * @descs_needed: number of descriptors required for this packet 2000 * 2001 * Returns 0 if stop is not needed 2002 */ 2003 static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q, 2004 unsigned int descs_needed) 2005 { 2006 if (idpf_tx_maybe_stop_common(tx_q, descs_needed)) 2007 goto splitq_stop; 2008 2009 /* If there are too many outstanding completions expected on the 2010 * completion queue, stop the TX queue to give the device some time to 2011 * catch up 2012 */ 2013 if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) > 2014 IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq))) 2015 goto splitq_stop; 2016 2017 /* Also check for available book keeping buffers; if we are low, stop 2018 
* the queue to wait for more completions 2019 */ 2020 if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q))) 2021 goto splitq_stop; 2022 2023 return 0; 2024 2025 splitq_stop: 2026 u64_stats_update_begin(&tx_q->stats_sync); 2027 u64_stats_inc(&tx_q->q_stats.tx.q_busy); 2028 u64_stats_update_end(&tx_q->stats_sync); 2029 netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx); 2030 2031 return -EBUSY; 2032 } 2033 2034 /** 2035 * idpf_tx_buf_hw_update - Store the new tail value 2036 * @tx_q: queue to bump 2037 * @val: new tail index 2038 * @xmit_more: more skb's pending 2039 * 2040 * The naming here is special in that 'hw' signals that this function is about 2041 * to do a register write to update our queue status. We know this can only 2042 * mean tail here as HW should be owning head for TX. 2043 */ 2044 void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val, 2045 bool xmit_more) 2046 { 2047 struct netdev_queue *nq; 2048 2049 nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); 2050 tx_q->next_to_use = val; 2051 2052 idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED); 2053 2054 /* Force memory writes to complete before letting h/w 2055 * know there are new descriptors to fetch. (Only 2056 * applicable for weak-ordered memory model archs, 2057 * such as IA-64). 2058 */ 2059 wmb(); 2060 2061 /* notify HW of packet */ 2062 if (netif_xmit_stopped(nq) || !xmit_more) 2063 writel(val, tx_q->tail); 2064 } 2065 2066 /** 2067 * idpf_tx_desc_count_required - calculate number of Tx descriptors needed 2068 * @txq: queue to send buffer on 2069 * @skb: send buffer 2070 * 2071 * Returns number of data descriptors needed for this skb. 2072 */ 2073 unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, 2074 struct sk_buff *skb) 2075 { 2076 const struct skb_shared_info *shinfo; 2077 unsigned int count = 0, i; 2078 2079 count += !!skb_headlen(skb); 2080 2081 if (!skb_is_nonlinear(skb)) 2082 return count; 2083 2084 shinfo = skb_shinfo(skb); 2085 for (i = 0; i < shinfo->nr_frags; i++) { 2086 unsigned int size; 2087 2088 size = skb_frag_size(&shinfo->frags[i]); 2089 2090 /* We only need to use the idpf_size_to_txd_count check if the 2091 * fragment is going to span multiple descriptors, 2092 * i.e. size >= 16K. 
2093 */ 2094 if (size >= SZ_16K) 2095 count += idpf_size_to_txd_count(size); 2096 else 2097 count++; 2098 } 2099 2100 if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) { 2101 if (__skb_linearize(skb)) 2102 return 0; 2103 2104 count = idpf_size_to_txd_count(skb->len); 2105 u64_stats_update_begin(&txq->stats_sync); 2106 u64_stats_inc(&txq->q_stats.tx.linearize); 2107 u64_stats_update_end(&txq->stats_sync); 2108 } 2109 2110 return count; 2111 } 2112 2113 /** 2114 * idpf_tx_dma_map_error - handle TX DMA map errors 2115 * @txq: queue to send buffer on 2116 * @skb: send buffer 2117 * @first: original first buffer info buffer for packet 2118 * @idx: starting point on ring to unwind 2119 */ 2120 void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, 2121 struct idpf_tx_buf *first, u16 idx) 2122 { 2123 u64_stats_update_begin(&txq->stats_sync); 2124 u64_stats_inc(&txq->q_stats.tx.dma_map_errs); 2125 u64_stats_update_end(&txq->stats_sync); 2126 2127 /* clear dma mappings for failed tx_buf map */ 2128 for (;;) { 2129 struct idpf_tx_buf *tx_buf; 2130 2131 tx_buf = &txq->tx_buf[idx]; 2132 idpf_tx_buf_rel(txq, tx_buf); 2133 if (tx_buf == first) 2134 break; 2135 if (idx == 0) 2136 idx = txq->desc_count; 2137 idx--; 2138 } 2139 2140 if (skb_is_gso(skb)) { 2141 union idpf_tx_flex_desc *tx_desc; 2142 2143 /* If we failed a DMA mapping for a TSO packet, we will have 2144 * used one additional descriptor for a context 2145 * descriptor. Reset that here. 2146 */ 2147 tx_desc = IDPF_FLEX_TX_DESC(txq, idx); 2148 memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc)); 2149 if (idx == 0) 2150 idx = txq->desc_count; 2151 idx--; 2152 } 2153 2154 /* Update tail in case netdev_xmit_more was previously true */ 2155 idpf_tx_buf_hw_update(txq, idx, false); 2156 } 2157 2158 /** 2159 * idpf_tx_splitq_bump_ntu - adjust NTU and generation 2160 * @txq: the tx ring to wrap 2161 * @ntu: ring index to bump 2162 */ 2163 static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu) 2164 { 2165 ntu++; 2166 2167 if (ntu == txq->desc_count) { 2168 ntu = 0; 2169 txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq); 2170 } 2171 2172 return ntu; 2173 } 2174 2175 /** 2176 * idpf_tx_splitq_map - Build the Tx flex descriptor 2177 * @tx_q: queue to send buffer on 2178 * @params: pointer to splitq params struct 2179 * @first: first buffer info buffer to use 2180 * 2181 * This function loops over the skb data pointed to by *first 2182 * and gets a physical address for each memory location and programs 2183 * it and the length into the transmit flex descriptor. 
2184 */ 2185 static void idpf_tx_splitq_map(struct idpf_queue *tx_q, 2186 struct idpf_tx_splitq_params *params, 2187 struct idpf_tx_buf *first) 2188 { 2189 union idpf_tx_flex_desc *tx_desc; 2190 unsigned int data_len, size; 2191 struct idpf_tx_buf *tx_buf; 2192 u16 i = tx_q->next_to_use; 2193 struct netdev_queue *nq; 2194 struct sk_buff *skb; 2195 skb_frag_t *frag; 2196 u16 td_cmd = 0; 2197 dma_addr_t dma; 2198 2199 skb = first->skb; 2200 2201 td_cmd = params->offload.td_cmd; 2202 2203 data_len = skb->data_len; 2204 size = skb_headlen(skb); 2205 2206 tx_desc = IDPF_FLEX_TX_DESC(tx_q, i); 2207 2208 dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); 2209 2210 tx_buf = first; 2211 2212 params->compl_tag = 2213 (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i; 2214 2215 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 2216 unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; 2217 2218 if (dma_mapping_error(tx_q->dev, dma)) 2219 return idpf_tx_dma_map_error(tx_q, skb, first, i); 2220 2221 tx_buf->compl_tag = params->compl_tag; 2222 2223 /* record length, and DMA address */ 2224 dma_unmap_len_set(tx_buf, len, size); 2225 dma_unmap_addr_set(tx_buf, dma, dma); 2226 2227 /* buf_addr is in same location for both desc types */ 2228 tx_desc->q.buf_addr = cpu_to_le64(dma); 2229 2230 /* The stack can send us fragments that are too large for a 2231 * single descriptor i.e. frag size > 16K-1. We will need to 2232 * split the fragment across multiple descriptors in this case. 2233 * To adhere to HW alignment restrictions, the fragment needs 2234 * to be split such that the first chunk ends on a 4K boundary 2235 * and all subsequent chunks start on a 4K boundary. We still 2236 * want to send as much data as possible though, so our 2237 * intermediate descriptor chunk size will be 12K. 2238 * 2239 * For example, consider a 32K fragment mapped to DMA addr 2600. 2240 * ------------------------------------------------------------ 2241 * | frag_size = 32K | 2242 * ------------------------------------------------------------ 2243 * |2600 |16384 |28672 2244 * 2245 * 3 descriptors will be used for this fragment. The HW expects 2246 * the descriptors to contain the following: 2247 * ------------------------------------------------------------ 2248 * | size = 13784 | size = 12K | size = 6696 | 2249 * | dma = 2600 | dma = 16384 | dma = 28672 | 2250 * ------------------------------------------------------------ 2251 * 2252 * We need to first adjust the max_data for the first chunk so 2253 * that it ends on a 4K boundary. By negating the value of the 2254 * DMA address and taking only the low order bits, we're 2255 * effectively calculating 2256 * 4K - (DMA addr lower order bits) = 2257 * bytes to next boundary. 2258 * 2259 * Add that to our base aligned max_data (12K) and we have 2260 * our first chunk size. In the example above, 2261 * 13784 = 12K + (4096-2600) 2262 * 2263 * After guaranteeing the first chunk ends on a 4K boundary, we 2264 * will give the intermediate descriptors 12K chunks and 2265 * whatever is left to the final descriptor. This ensures that 2266 * all descriptors used for the remaining chunks of the 2267 * fragment start on a 4K boundary and we use as few 2268 * descriptors as possible. 
2269 */ 2270 max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1); 2271 while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) { 2272 idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, 2273 max_data); 2274 2275 tx_desc++; 2276 i++; 2277 2278 if (i == tx_q->desc_count) { 2279 tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0); 2280 i = 0; 2281 tx_q->compl_tag_cur_gen = 2282 IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); 2283 } 2284 2285 /* Since this packet has a buffer that is going to span 2286 * multiple descriptors, it's going to leave holes in 2287 * to the TX buffer ring. To ensure these holes do not 2288 * cause issues in the cleaning routines, we will clear 2289 * them of any stale data and assign them the same 2290 * completion tag as the current packet. Then when the 2291 * packet is being cleaned, the cleaning routines will 2292 * simply pass over these holes and finish cleaning the 2293 * rest of the packet. 2294 */ 2295 memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf)); 2296 tx_q->tx_buf[i].compl_tag = params->compl_tag; 2297 2298 /* Adjust the DMA offset and the remaining size of the 2299 * fragment. On the first iteration of this loop, 2300 * max_data will be >= 12K and <= 16K-1. On any 2301 * subsequent iteration of this loop, max_data will 2302 * always be 12K. 2303 */ 2304 dma += max_data; 2305 size -= max_data; 2306 2307 /* Reset max_data since remaining chunks will be 12K 2308 * at most 2309 */ 2310 max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; 2311 2312 /* buf_addr is in same location for both desc types */ 2313 tx_desc->q.buf_addr = cpu_to_le64(dma); 2314 } 2315 2316 if (!data_len) 2317 break; 2318 2319 idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size); 2320 tx_desc++; 2321 i++; 2322 2323 if (i == tx_q->desc_count) { 2324 tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0); 2325 i = 0; 2326 tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); 2327 } 2328 2329 size = skb_frag_size(frag); 2330 data_len -= size; 2331 2332 dma = skb_frag_dma_map(tx_q->dev, frag, 0, size, 2333 DMA_TO_DEVICE); 2334 2335 tx_buf = &tx_q->tx_buf[i]; 2336 } 2337 2338 /* record SW timestamp if HW timestamp is not available */ 2339 skb_tx_timestamp(skb); 2340 2341 /* write last descriptor with RS and EOP bits */ 2342 td_cmd |= params->eop_cmd; 2343 idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size); 2344 i = idpf_tx_splitq_bump_ntu(tx_q, i); 2345 2346 /* set next_to_watch value indicating a packet is present */ 2347 first->next_to_watch = tx_desc; 2348 2349 tx_q->txq_grp->num_completions_pending++; 2350 2351 /* record bytecount for BQL */ 2352 nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); 2353 netdev_tx_sent_queue(nq, first->bytecount); 2354 2355 idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); 2356 } 2357 2358 /** 2359 * idpf_tso - computes mss and TSO length to prepare for TSO 2360 * @skb: pointer to skb 2361 * @off: pointer to struct that holds offload parameters 2362 * 2363 * Returns error (negative) if TSO was requested but cannot be applied to the 2364 * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise. 
2365 */ 2366 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off) 2367 { 2368 const struct skb_shared_info *shinfo = skb_shinfo(skb); 2369 union { 2370 struct iphdr *v4; 2371 struct ipv6hdr *v6; 2372 unsigned char *hdr; 2373 } ip; 2374 union { 2375 struct tcphdr *tcp; 2376 struct udphdr *udp; 2377 unsigned char *hdr; 2378 } l4; 2379 u32 paylen, l4_start; 2380 int err; 2381 2382 if (!shinfo->gso_size) 2383 return 0; 2384 2385 err = skb_cow_head(skb, 0); 2386 if (err < 0) 2387 return err; 2388 2389 ip.hdr = skb_network_header(skb); 2390 l4.hdr = skb_transport_header(skb); 2391 2392 /* initialize outer IP header fields */ 2393 if (ip.v4->version == 4) { 2394 ip.v4->tot_len = 0; 2395 ip.v4->check = 0; 2396 } else if (ip.v6->version == 6) { 2397 ip.v6->payload_len = 0; 2398 } 2399 2400 l4_start = skb_transport_offset(skb); 2401 2402 /* remove payload length from checksum */ 2403 paylen = skb->len - l4_start; 2404 2405 switch (shinfo->gso_type & ~SKB_GSO_DODGY) { 2406 case SKB_GSO_TCPV4: 2407 case SKB_GSO_TCPV6: 2408 csum_replace_by_diff(&l4.tcp->check, 2409 (__force __wsum)htonl(paylen)); 2410 off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start; 2411 break; 2412 case SKB_GSO_UDP_L4: 2413 csum_replace_by_diff(&l4.udp->check, 2414 (__force __wsum)htonl(paylen)); 2415 /* compute length of segmentation header */ 2416 off->tso_hdr_len = sizeof(struct udphdr) + l4_start; 2417 l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr)); 2418 break; 2419 default: 2420 return -EINVAL; 2421 } 2422 2423 off->tso_len = skb->len - off->tso_hdr_len; 2424 off->mss = shinfo->gso_size; 2425 off->tso_segs = shinfo->gso_segs; 2426 2427 off->tx_flags |= IDPF_TX_FLAGS_TSO; 2428 2429 return 1; 2430 } 2431 2432 /** 2433 * __idpf_chk_linearize - Check skb is not using too many buffers 2434 * @skb: send buffer 2435 * @max_bufs: maximum number of buffers 2436 * 2437 * For TSO we need to count the TSO header and segment payload separately. As 2438 * such we need to check cases where we have max_bufs-1 fragments or more as we 2439 * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1 2440 * for the segment payload in the first descriptor, and another max_buf-1 for 2441 * the fragments. 2442 */ 2443 static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs) 2444 { 2445 const struct skb_shared_info *shinfo = skb_shinfo(skb); 2446 const skb_frag_t *frag, *stale; 2447 int nr_frags, sum; 2448 2449 /* no need to check if number of frags is less than max_bufs - 1 */ 2450 nr_frags = shinfo->nr_frags; 2451 if (nr_frags < (max_bufs - 1)) 2452 return false; 2453 2454 /* We need to walk through the list and validate that each group 2455 * of max_bufs-2 fragments totals at least gso_size. 2456 */ 2457 nr_frags -= max_bufs - 2; 2458 frag = &shinfo->frags[0]; 2459 2460 /* Initialize size to the negative value of gso_size minus 1. We use 2461 * this as the worst case scenario in which the frag ahead of us only 2462 * provides one byte which is why we are limited to max_bufs-2 2463 * descriptors for a single transmit as the header and previous 2464 * fragment are already consuming 2 descriptors. 
2465 */ 2466 sum = 1 - shinfo->gso_size; 2467 2468 /* Add size of frags 0 through 4 to create our initial sum */ 2469 sum += skb_frag_size(frag++); 2470 sum += skb_frag_size(frag++); 2471 sum += skb_frag_size(frag++); 2472 sum += skb_frag_size(frag++); 2473 sum += skb_frag_size(frag++); 2474 2475 /* Walk through fragments adding latest fragment, testing it, and 2476 * then removing stale fragments from the sum. 2477 */ 2478 for (stale = &shinfo->frags[0];; stale++) { 2479 int stale_size = skb_frag_size(stale); 2480 2481 sum += skb_frag_size(frag++); 2482 2483 /* The stale fragment may present us with a smaller 2484 * descriptor than the actual fragment size. To account 2485 * for that we need to remove all the data on the front and 2486 * figure out what the remainder would be in the last 2487 * descriptor associated with the fragment. 2488 */ 2489 if (stale_size > IDPF_TX_MAX_DESC_DATA) { 2490 int align_pad = -(skb_frag_off(stale)) & 2491 (IDPF_TX_MAX_READ_REQ_SIZE - 1); 2492 2493 sum -= align_pad; 2494 stale_size -= align_pad; 2495 2496 do { 2497 sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED; 2498 stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED; 2499 } while (stale_size > IDPF_TX_MAX_DESC_DATA); 2500 } 2501 2502 /* if sum is negative we failed to make sufficient progress */ 2503 if (sum < 0) 2504 return true; 2505 2506 if (!nr_frags--) 2507 break; 2508 2509 sum -= stale_size; 2510 } 2511 2512 return false; 2513 } 2514 2515 /** 2516 * idpf_chk_linearize - Check if skb exceeds max descriptors per packet 2517 * @skb: send buffer 2518 * @max_bufs: maximum scatter gather buffers for single packet 2519 * @count: number of buffers this packet needs 2520 * 2521 * Make sure we don't exceed maximum scatter gather buffers for a single 2522 * packet. We have to do some special checking around the boundary (max_bufs-1) 2523 * if TSO is on since we need count the TSO header and payload separately. 2524 * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO 2525 * header, 1 for segment payload, and then 7 for the fragments. 
2526 */ 2527 bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, 2528 unsigned int count) 2529 { 2530 if (likely(count < max_bufs)) 2531 return false; 2532 if (skb_is_gso(skb)) 2533 return __idpf_chk_linearize(skb, max_bufs); 2534 2535 return count > max_bufs; 2536 } 2537 2538 /** 2539 * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring 2540 * @txq: queue to put context descriptor on 2541 * 2542 * Since the TX buffer rings mimics the descriptor ring, update the tx buffer 2543 * ring entry to reflect that this index is a context descriptor 2544 */ 2545 static struct idpf_flex_tx_ctx_desc * 2546 idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq) 2547 { 2548 struct idpf_flex_tx_ctx_desc *desc; 2549 int i = txq->next_to_use; 2550 2551 memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf)); 2552 txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; 2553 2554 /* grab the next descriptor */ 2555 desc = IDPF_FLEX_TX_CTX_DESC(txq, i); 2556 txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i); 2557 2558 return desc; 2559 } 2560 2561 /** 2562 * idpf_tx_drop_skb - free the SKB and bump tail if necessary 2563 * @tx_q: queue to send buffer on 2564 * @skb: pointer to skb 2565 */ 2566 netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb) 2567 { 2568 u64_stats_update_begin(&tx_q->stats_sync); 2569 u64_stats_inc(&tx_q->q_stats.tx.skb_drops); 2570 u64_stats_update_end(&tx_q->stats_sync); 2571 2572 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); 2573 2574 dev_kfree_skb(skb); 2575 2576 return NETDEV_TX_OK; 2577 } 2578 2579 /** 2580 * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors 2581 * @skb: send buffer 2582 * @tx_q: queue to send buffer on 2583 * 2584 * Returns NETDEV_TX_OK if sent, else an error code 2585 */ 2586 static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, 2587 struct idpf_queue *tx_q) 2588 { 2589 struct idpf_tx_splitq_params tx_params = { }; 2590 struct idpf_tx_buf *first; 2591 unsigned int count; 2592 int tso; 2593 2594 count = idpf_tx_desc_count_required(tx_q, skb); 2595 if (unlikely(!count)) 2596 return idpf_tx_drop_skb(tx_q, skb); 2597 2598 tso = idpf_tso(skb, &tx_params.offload); 2599 if (unlikely(tso < 0)) 2600 return idpf_tx_drop_skb(tx_q, skb); 2601 2602 /* Check for splitq specific TX resources */ 2603 count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso); 2604 if (idpf_tx_maybe_stop_splitq(tx_q, count)) { 2605 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); 2606 2607 return NETDEV_TX_BUSY; 2608 } 2609 2610 if (tso) { 2611 /* If tso is needed, set up context desc */ 2612 struct idpf_flex_tx_ctx_desc *ctx_desc = 2613 idpf_tx_splitq_get_ctx_desc(tx_q); 2614 2615 ctx_desc->tso.qw1.cmd_dtype = 2616 cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX | 2617 IDPF_TX_FLEX_CTX_DESC_CMD_TSO); 2618 ctx_desc->tso.qw0.flex_tlen = 2619 cpu_to_le32(tx_params.offload.tso_len & 2620 IDPF_TXD_FLEX_CTX_TLEN_M); 2621 ctx_desc->tso.qw0.mss_rt = 2622 cpu_to_le16(tx_params.offload.mss & 2623 IDPF_TXD_FLEX_CTX_MSS_RT_M); 2624 ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len; 2625 2626 u64_stats_update_begin(&tx_q->stats_sync); 2627 u64_stats_inc(&tx_q->q_stats.tx.lso_pkts); 2628 u64_stats_update_end(&tx_q->stats_sync); 2629 } 2630 2631 /* record the location of the first descriptor for this packet */ 2632 first = &tx_q->tx_buf[tx_q->next_to_use]; 2633 first->skb = skb; 2634 2635 if (tso) { 2636 first->gso_segs = tx_params.offload.tso_segs; 2637 first->bytecount = skb->len + 2638 ((first->gso_segs - 1) * 
tx_params.offload.tso_hdr_len); 2639 } else { 2640 first->gso_segs = 1; 2641 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); 2642 } 2643 2644 if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) { 2645 tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE; 2646 tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP; 2647 /* Set the RE bit to catch any packets that may have not been 2648 * stashed during RS completion cleaning. MIN_GAP is set to 2649 * MIN_RING size to ensure it will be set at least once each 2650 * time around the ring. 2651 */ 2652 if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) { 2653 tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE; 2654 tx_q->txq_grp->num_completions_pending++; 2655 } 2656 2657 if (skb->ip_summed == CHECKSUM_PARTIAL) 2658 tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN; 2659 2660 } else { 2661 tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2; 2662 tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD; 2663 2664 if (skb->ip_summed == CHECKSUM_PARTIAL) 2665 tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN; 2666 } 2667 2668 idpf_tx_splitq_map(tx_q, &tx_params, first); 2669 2670 return NETDEV_TX_OK; 2671 } 2672 2673 /** 2674 * idpf_tx_splitq_start - Selects the right Tx queue to send buffer 2675 * @skb: send buffer 2676 * @netdev: network interface device structure 2677 * 2678 * Returns NETDEV_TX_OK if sent, else an error code 2679 */ 2680 netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, 2681 struct net_device *netdev) 2682 { 2683 struct idpf_vport *vport = idpf_netdev_to_vport(netdev); 2684 struct idpf_queue *tx_q; 2685 2686 if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) { 2687 dev_kfree_skb_any(skb); 2688 2689 return NETDEV_TX_OK; 2690 } 2691 2692 tx_q = vport->txqs[skb_get_queue_mapping(skb)]; 2693 2694 /* hardware can't handle really short frames, hardware padding works 2695 * beyond this point 2696 */ 2697 if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) { 2698 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); 2699 2700 return NETDEV_TX_OK; 2701 } 2702 2703 return idpf_tx_splitq_frame(skb, tx_q); 2704 } 2705 2706 /** 2707 * idpf_ptype_to_htype - get a hash type 2708 * @decoded: Decoded Rx packet type related fields 2709 * 2710 * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by 2711 * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of 2712 * Rx desc. 
2713 */ 2714 enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded) 2715 { 2716 if (!decoded->known) 2717 return PKT_HASH_TYPE_NONE; 2718 if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 && 2719 decoded->inner_prot) 2720 return PKT_HASH_TYPE_L4; 2721 if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 && 2722 decoded->outer_ip) 2723 return PKT_HASH_TYPE_L3; 2724 if (decoded->outer_ip == IDPF_RX_PTYPE_OUTER_L2) 2725 return PKT_HASH_TYPE_L2; 2726 2727 return PKT_HASH_TYPE_NONE; 2728 } 2729 2730 /** 2731 * idpf_rx_hash - set the hash value in the skb 2732 * @rxq: Rx descriptor ring packet is being transacted on 2733 * @skb: pointer to current skb being populated 2734 * @rx_desc: Receive descriptor 2735 * @decoded: Decoded Rx packet type related fields 2736 */ 2737 static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb, 2738 struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, 2739 struct idpf_rx_ptype_decoded *decoded) 2740 { 2741 u32 hash; 2742 2743 if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXHASH))) 2744 return; 2745 2746 hash = le16_to_cpu(rx_desc->hash1) | 2747 (rx_desc->ff2_mirrid_hash2.hash2 << 16) | 2748 (rx_desc->hash3 << 24); 2749 2750 skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded)); 2751 } 2752 2753 /** 2754 * idpf_rx_csum - Indicate in skb if checksum is good 2755 * @rxq: Rx descriptor ring packet is being transacted on 2756 * @skb: pointer to current skb being populated 2757 * @csum_bits: checksum fields extracted from the descriptor 2758 * @decoded: Decoded Rx packet type related fields 2759 * 2760 * skb->protocol must be set before this function is called 2761 */ 2762 static void idpf_rx_csum(struct idpf_queue *rxq, struct sk_buff *skb, 2763 struct idpf_rx_csum_decoded *csum_bits, 2764 struct idpf_rx_ptype_decoded *decoded) 2765 { 2766 bool ipv4, ipv6; 2767 2768 /* check if Rx checksum is enabled */ 2769 if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXCSUM))) 2770 return; 2771 2772 /* check if HW has decoded the packet and checksum */ 2773 if (!(csum_bits->l3l4p)) 2774 return; 2775 2776 ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4); 2777 ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6); 2778 2779 if (ipv4 && (csum_bits->ipe || csum_bits->eipe)) 2780 goto checksum_fail; 2781 2782 if (ipv6 && csum_bits->ipv6exadd) 2783 return; 2784 2785 /* check for L4 errors and handle packets that were not able to be 2786 * checksummed 2787 */ 2788 if (csum_bits->l4e) 2789 goto checksum_fail; 2790 2791 /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */ 2792 switch (decoded->inner_prot) { 2793 case IDPF_RX_PTYPE_INNER_PROT_ICMP: 2794 case IDPF_RX_PTYPE_INNER_PROT_TCP: 2795 case IDPF_RX_PTYPE_INNER_PROT_UDP: 2796 if (!csum_bits->raw_csum_inv) { 2797 u16 csum = csum_bits->raw_csum; 2798 2799 skb->csum = csum_unfold((__force __sum16)~swab16(csum)); 2800 skb->ip_summed = CHECKSUM_COMPLETE; 2801 } else { 2802 skb->ip_summed = CHECKSUM_UNNECESSARY; 2803 } 2804 break; 2805 case IDPF_RX_PTYPE_INNER_PROT_SCTP: 2806 skb->ip_summed = CHECKSUM_UNNECESSARY; 2807 break; 2808 default: 2809 break; 2810 } 2811 2812 return; 2813 2814 checksum_fail: 2815 u64_stats_update_begin(&rxq->stats_sync); 2816 u64_stats_inc(&rxq->q_stats.rx.hw_csum_err); 2817 u64_stats_update_end(&rxq->stats_sync); 2818 } 2819 2820 /** 2821 * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor 2822 * @rx_desc: receive descriptor 2823 * @csum: structure to extract checksum fields 2824 
* 2825 **/ 2826 static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, 2827 struct idpf_rx_csum_decoded *csum) 2828 { 2829 u8 qword0, qword1; 2830 2831 qword0 = rx_desc->status_err0_qw0; 2832 qword1 = rx_desc->status_err0_qw1; 2833 2834 csum->ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M, 2835 qword1); 2836 csum->eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M, 2837 qword1); 2838 csum->l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M, 2839 qword1); 2840 csum->l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M, 2841 qword1); 2842 csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M, 2843 qword0); 2844 csum->raw_csum_inv = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M, 2845 le16_to_cpu(rx_desc->ptype_err_fflags0)); 2846 csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs); 2847 } 2848 2849 /** 2850 * idpf_rx_rsc - Set the RSC fields in the skb 2851 * @rxq : Rx descriptor ring packet is being transacted on 2852 * @skb : pointer to current skb being populated 2853 * @rx_desc: Receive descriptor 2854 * @decoded: Decoded Rx packet type related fields 2855 * 2856 * Return 0 on success and error code on failure 2857 * 2858 * Populate the skb fields with the total number of RSC segments, RSC payload 2859 * length and packet type. 2860 */ 2861 static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb, 2862 struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, 2863 struct idpf_rx_ptype_decoded *decoded) 2864 { 2865 u16 rsc_segments, rsc_seg_len; 2866 bool ipv4, ipv6; 2867 int len; 2868 2869 if (unlikely(!decoded->outer_ip)) 2870 return -EINVAL; 2871 2872 rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen); 2873 if (unlikely(!rsc_seg_len)) 2874 return -EINVAL; 2875 2876 ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4); 2877 ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6); 2878 2879 if (unlikely(!(ipv4 ^ ipv6))) 2880 return -EINVAL; 2881 2882 rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len); 2883 if (unlikely(rsc_segments == 1)) 2884 return 0; 2885 2886 NAPI_GRO_CB(skb)->count = rsc_segments; 2887 skb_shinfo(skb)->gso_size = rsc_seg_len; 2888 2889 skb_reset_network_header(skb); 2890 len = skb->len - skb_transport_offset(skb); 2891 2892 if (ipv4) { 2893 struct iphdr *ipv4h = ip_hdr(skb); 2894 2895 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 2896 2897 /* Reset and set transport header offset in skb */ 2898 skb_set_transport_header(skb, sizeof(struct iphdr)); 2899 2900 /* Compute the TCP pseudo header checksum*/ 2901 tcp_hdr(skb)->check = 2902 ~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0); 2903 } else { 2904 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 2905 2906 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 2907 skb_set_transport_header(skb, sizeof(struct ipv6hdr)); 2908 tcp_hdr(skb)->check = 2909 ~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0); 2910 } 2911 2912 tcp_gro_complete(skb); 2913 2914 u64_stats_update_begin(&rxq->stats_sync); 2915 u64_stats_inc(&rxq->q_stats.rx.rsc_pkts); 2916 u64_stats_update_end(&rxq->stats_sync); 2917 2918 return 0; 2919 } 2920 2921 /** 2922 * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor 2923 * @rxq: Rx descriptor ring packet is being transacted on 2924 * @skb: pointer to current skb being populated 2925 * @rx_desc: Receive descriptor 2926 * 2927 * This function checks the ring, descriptor, and packet information in 2928 * order to populate the hash, checksum, protocol, and 2929 * other 
fields within the skb. 2930 */ 2931 static int idpf_rx_process_skb_fields(struct idpf_queue *rxq, 2932 struct sk_buff *skb, 2933 struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) 2934 { 2935 struct idpf_rx_csum_decoded csum_bits = { }; 2936 struct idpf_rx_ptype_decoded decoded; 2937 u16 rx_ptype; 2938 2939 rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M, 2940 le16_to_cpu(rx_desc->ptype_err_fflags0)); 2941 2942 decoded = rxq->vport->rx_ptype_lkup[rx_ptype]; 2943 /* If we don't know the ptype we can't do anything else with it. Just 2944 * pass it up the stack as-is. 2945 */ 2946 if (!decoded.known) 2947 return 0; 2948 2949 /* process RSS/hash */ 2950 idpf_rx_hash(rxq, skb, rx_desc, &decoded); 2951 2952 skb->protocol = eth_type_trans(skb, rxq->vport->netdev); 2953 2954 if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M, 2955 le16_to_cpu(rx_desc->hdrlen_flags))) 2956 return idpf_rx_rsc(rxq, skb, rx_desc, &decoded); 2957 2958 idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits); 2959 idpf_rx_csum(rxq, skb, &csum_bits, &decoded); 2960 2961 return 0; 2962 } 2963 2964 /** 2965 * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag 2966 * @rx_buf: buffer containing page to add 2967 * @skb: sk_buff to place the data into 2968 * @size: packet length from rx_desc 2969 * 2970 * This function will add the data contained in rx_buf->page to the skb. 2971 * It will just attach the page as a frag to the skb. 2972 * The function will then update the page offset. 2973 */ 2974 void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, 2975 unsigned int size) 2976 { 2977 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, 2978 rx_buf->page_offset, size, rx_buf->truesize); 2979 2980 rx_buf->page = NULL; 2981 } 2982 2983 /** 2984 * idpf_rx_construct_skb - Allocate skb and populate it 2985 * @rxq: Rx descriptor queue 2986 * @rx_buf: Rx buffer to pull data from 2987 * @size: the length of the packet 2988 * 2989 * This function allocates an skb. It then populates it with the page 2990 * data from the current receive descriptor, taking care to set up the 2991 * skb correctly. 2992 */ 2993 struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, 2994 struct idpf_rx_buf *rx_buf, 2995 unsigned int size) 2996 { 2997 unsigned int headlen; 2998 struct sk_buff *skb; 2999 void *va; 3000 3001 va = page_address(rx_buf->page) + rx_buf->page_offset; 3002 3003 /* prefetch first cache line of first page */ 3004 net_prefetch(va); 3005 /* allocate a skb to store the frags */ 3006 skb = __napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE, 3007 GFP_ATOMIC); 3008 if (unlikely(!skb)) { 3009 idpf_rx_put_page(rx_buf); 3010 3011 return NULL; 3012 } 3013 3014 skb_record_rx_queue(skb, rxq->idx); 3015 skb_mark_for_recycle(skb); 3016 3017 /* Determine available headroom for copy */ 3018 headlen = size; 3019 if (headlen > IDPF_RX_HDR_SIZE) 3020 headlen = eth_get_headlen(skb->dev, va, IDPF_RX_HDR_SIZE); 3021 3022 /* align pull length to size of long to optimize memcpy performance */ 3023 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); 3024 3025 /* if we exhaust the linear part then add what is left as a frag */ 3026 size -= headlen; 3027 if (!size) { 3028 idpf_rx_put_page(rx_buf); 3029 3030 return skb; 3031 } 3032 3033 skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen, 3034 size, rx_buf->truesize); 3035 3036 /* Since we're giving the page to the stack, clear our reference to it. 3037 * We'll get a new one during buffer posting. 
3038 */ 3039 rx_buf->page = NULL; 3040 3041 return skb; 3042 } 3043 3044 /** 3045 * idpf_rx_hdr_construct_skb - Allocate skb and populate it from header buffer 3046 * @rxq: Rx descriptor queue 3047 * @va: Rx buffer to pull data from 3048 * @size: the length of the packet 3049 * 3050 * This function allocates an skb. It then populates it with the page data from 3051 * the current receive descriptor, taking care to set up the skb correctly. 3052 * This specifically uses a header buffer to start building the skb. 3053 */ 3054 static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq, 3055 const void *va, 3056 unsigned int size) 3057 { 3058 struct sk_buff *skb; 3059 3060 /* allocate a skb to store the frags */ 3061 skb = __napi_alloc_skb(&rxq->q_vector->napi, size, GFP_ATOMIC); 3062 if (unlikely(!skb)) 3063 return NULL; 3064 3065 skb_record_rx_queue(skb, rxq->idx); 3066 3067 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 3068 3069 /* More than likely, a payload fragment, which will use a page from 3070 * page_pool will be added to the SKB so mark it for recycle 3071 * preemptively. And if not, it's inconsequential. 3072 */ 3073 skb_mark_for_recycle(skb); 3074 3075 return skb; 3076 } 3077 3078 /** 3079 * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor 3080 * status and error fields 3081 * @stat_err_field: field from descriptor to test bits in 3082 * @stat_err_bits: value to mask 3083 * 3084 */ 3085 static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field, 3086 const u8 stat_err_bits) 3087 { 3088 return !!(stat_err_field & stat_err_bits); 3089 } 3090 3091 /** 3092 * idpf_rx_splitq_is_eop - process handling of EOP buffers 3093 * @rx_desc: Rx descriptor for current buffer 3094 * 3095 * If the buffer is an EOP buffer, this function exits returning true, 3096 * otherwise return false indicating that this is in fact a non-EOP buffer. 3097 */ 3098 static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) 3099 { 3100 /* if we are the last buffer then there is nothing else to do */ 3101 return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1, 3102 IDPF_RXD_EOF_SPLITQ)); 3103 } 3104 3105 /** 3106 * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue 3107 * @rxq: Rx descriptor queue to retrieve receive buffer queue 3108 * @budget: Total limit on number of packets to process 3109 * 3110 * This function provides a "bounce buffer" approach to Rx interrupt 3111 * processing. The advantage to this is that on systems that have 3112 * expensive overhead for IOMMU access this provides a means of avoiding 3113 * it by maintaining the mapping of the page to the system. 
 *
 * Returns amount of work completed
 */
static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
{
	int total_rx_bytes = 0, total_rx_pkts = 0;
	struct idpf_queue *rx_bufq = NULL;
	struct sk_buff *skb = rxq->skb;
	u16 ntc = rxq->next_to_clean;

	/* Process Rx packets bounded by budget */
	while (likely(total_rx_pkts < budget)) {
		struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
		struct idpf_sw_queue *refillq = NULL;
		struct idpf_rxq_set *rxq_set = NULL;
		struct idpf_rx_buf *rx_buf = NULL;
		union virtchnl2_rx_desc *desc;
		unsigned int pkt_len = 0;
		unsigned int hdr_len = 0;
		u16 gen_id, buf_id = 0;
		/* Header buffer overflow only valid for header split */
		bool hbo = false;
		int bufq_id;
		u8 rxdid;

		/* get the Rx desc from Rx queue based on 'next_to_clean' */
		desc = IDPF_RX_DESC(rxq, ntc);
		rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc
		 */
		dma_rmb();

		/* if the descriptor isn't done, no work yet to do */
		gen_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
		gen_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M, gen_id);

		if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id)
			break;

		rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
				  rx_desc->rxdid_ucast);
		if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
			IDPF_RX_BUMP_NTC(rxq, ntc);
			u64_stats_update_begin(&rxq->stats_sync);
			u64_stats_inc(&rxq->q_stats.rx.bad_descs);
			u64_stats_update_end(&rxq->stats_sync);
			continue;
		}

		pkt_len = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
		pkt_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M,
				    pkt_len);

		hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M,
				rx_desc->status_err0_qw1);

		if (unlikely(hbo)) {
			/* If a header buffer overflow occurs, i.e. the header
			 * is too large to fit in the header split buffer, HW
			 * will put the entire packet, including headers, in
			 * the data/payload buffer.
			 */
			u64_stats_update_begin(&rxq->stats_sync);
			u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf);
			u64_stats_update_end(&rxq->stats_sync);
			goto bypass_hsplit;
		}

		hdr_len = le16_to_cpu(rx_desc->hdrlen_flags);
		hdr_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M,
				    hdr_len);

bypass_hsplit:
		bufq_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
		bufq_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M,
				    bufq_id);

		rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
		if (!bufq_id)
			refillq = rxq_set->refillq0;
		else
			refillq = rxq_set->refillq1;

		/* retrieve buffer from the rxq */
		rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq;

		buf_id = le16_to_cpu(rx_desc->buf_id);

		rx_buf = &rx_bufq->rx_buf.buf[buf_id];

		if (hdr_len) {
			const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va +
						(u32)buf_id * IDPF_HDR_BUF_SIZE;

			skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len);
			u64_stats_update_begin(&rxq->stats_sync);
			u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts);
			u64_stats_update_end(&rxq->stats_sync);
		}

		if (pkt_len) {
			idpf_rx_sync_for_cpu(rx_buf, pkt_len);
			if (skb)
				idpf_rx_add_frag(rx_buf, skb, pkt_len);
			else
				skb = idpf_rx_construct_skb(rxq, rx_buf,
							    pkt_len);
		} else {
			idpf_rx_put_page(rx_buf);
		}

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		idpf_rx_post_buf_refill(refillq, buf_id);

		IDPF_RX_BUMP_NTC(rxq, ntc);
		/* skip if it is non EOP desc */
		if (!idpf_rx_splitq_is_eop(rx_desc))
			continue;

		/* pad skb if needed (to make valid ethernet frame) */
		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* protocol */
		if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) {
			dev_kfree_skb_any(skb);
			skb = NULL;
			continue;
		}

		/* send completed skb up the stack */
		napi_gro_receive(&rxq->q_vector->napi, skb);
		skb = NULL;

		/* update budget accounting */
		total_rx_pkts++;
	}

	rxq->next_to_clean = ntc;

	rxq->skb = skb;
	u64_stats_update_begin(&rxq->stats_sync);
	u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts);
	u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes);
	u64_stats_update_end(&rxq->stats_sync);

	/* guarantee a trip back through this routine if there was a failure */
	return total_rx_pkts;
}
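
/* Minimal sketch of the generation-bit ownership scheme behind the "done"
 * check above (illustrative only; this helper is hypothetical and not used by
 * the driver). SW keeps an expected GEN value per ring and flips it each time
 * the clean index wraps, so a descriptor only counts as written back once the
 * GEN field written into it by HW matches the current expectation.
 * idpf_rx_clean_refillq() below does the same thing explicitly for the SW
 * refill ring via change_bit().
 */
static inline void idpf_example_bump_ntc(u16 *ntc, u16 desc_count,
					 bool *expected_gen)
{
	if (unlikely(++(*ntc) == desc_count)) {
		*ntc = 0;
		/* the next lap around the ring expects the opposite GEN */
		*expected_gen = !*expected_gen;
	}
}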
3177 */ 3178 u64_stats_update_begin(&rxq->stats_sync); 3179 u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf); 3180 u64_stats_update_end(&rxq->stats_sync); 3181 goto bypass_hsplit; 3182 } 3183 3184 hdr_len = le16_to_cpu(rx_desc->hdrlen_flags); 3185 hdr_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M, 3186 hdr_len); 3187 3188 bypass_hsplit: 3189 bufq_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id); 3190 bufq_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M, 3191 bufq_id); 3192 3193 rxq_set = container_of(rxq, struct idpf_rxq_set, rxq); 3194 if (!bufq_id) 3195 refillq = rxq_set->refillq0; 3196 else 3197 refillq = rxq_set->refillq1; 3198 3199 /* retrieve buffer from the rxq */ 3200 rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq; 3201 3202 buf_id = le16_to_cpu(rx_desc->buf_id); 3203 3204 rx_buf = &rx_bufq->rx_buf.buf[buf_id]; 3205 3206 if (hdr_len) { 3207 const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va + 3208 (u32)buf_id * IDPF_HDR_BUF_SIZE; 3209 3210 skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len); 3211 u64_stats_update_begin(&rxq->stats_sync); 3212 u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts); 3213 u64_stats_update_end(&rxq->stats_sync); 3214 } 3215 3216 if (pkt_len) { 3217 idpf_rx_sync_for_cpu(rx_buf, pkt_len); 3218 if (skb) 3219 idpf_rx_add_frag(rx_buf, skb, pkt_len); 3220 else 3221 skb = idpf_rx_construct_skb(rxq, rx_buf, 3222 pkt_len); 3223 } else { 3224 idpf_rx_put_page(rx_buf); 3225 } 3226 3227 /* exit if we failed to retrieve a buffer */ 3228 if (!skb) 3229 break; 3230 3231 idpf_rx_post_buf_refill(refillq, buf_id); 3232 3233 IDPF_RX_BUMP_NTC(rxq, ntc); 3234 /* skip if it is non EOP desc */ 3235 if (!idpf_rx_splitq_is_eop(rx_desc)) 3236 continue; 3237 3238 /* pad skb if needed (to make valid ethernet frame) */ 3239 if (eth_skb_pad(skb)) { 3240 skb = NULL; 3241 continue; 3242 } 3243 3244 /* probably a little skewed due to removing CRC */ 3245 total_rx_bytes += skb->len; 3246 3247 /* protocol */ 3248 if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) { 3249 dev_kfree_skb_any(skb); 3250 skb = NULL; 3251 continue; 3252 } 3253 3254 /* send completed skb up the stack */ 3255 napi_gro_receive(&rxq->q_vector->napi, skb); 3256 skb = NULL; 3257 3258 /* update budget accounting */ 3259 total_rx_pkts++; 3260 } 3261 3262 rxq->next_to_clean = ntc; 3263 3264 rxq->skb = skb; 3265 u64_stats_update_begin(&rxq->stats_sync); 3266 u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts); 3267 u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes); 3268 u64_stats_update_end(&rxq->stats_sync); 3269 3270 /* guarantee a trip back through this routine if there was a failure */ 3271 return total_rx_pkts; 3272 } 3273 3274 /** 3275 * idpf_rx_update_bufq_desc - Update buffer queue descriptor 3276 * @bufq: Pointer to the buffer queue 3277 * @refill_desc: SW Refill queue descriptor containing buffer ID 3278 * @buf_desc: Buffer queue descriptor 3279 * 3280 * Return 0 on success and negative on failure. 
3281 */ 3282 static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc, 3283 struct virtchnl2_splitq_rx_buf_desc *buf_desc) 3284 { 3285 struct idpf_rx_buf *buf; 3286 dma_addr_t addr; 3287 u16 buf_id; 3288 3289 buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc); 3290 3291 buf = &bufq->rx_buf.buf[buf_id]; 3292 3293 addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size); 3294 if (unlikely(addr == DMA_MAPPING_ERROR)) 3295 return -ENOMEM; 3296 3297 buf_desc->pkt_addr = cpu_to_le64(addr); 3298 buf_desc->qword0.buf_id = cpu_to_le16(buf_id); 3299 3300 if (!bufq->rx_hsplit_en) 3301 return 0; 3302 3303 buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa + 3304 (u32)buf_id * IDPF_HDR_BUF_SIZE); 3305 3306 return 0; 3307 } 3308 3309 /** 3310 * idpf_rx_clean_refillq - Clean refill queue buffers 3311 * @bufq: buffer queue to post buffers back to 3312 * @refillq: refill queue to clean 3313 * 3314 * This function takes care of the buffer refill management 3315 */ 3316 static void idpf_rx_clean_refillq(struct idpf_queue *bufq, 3317 struct idpf_sw_queue *refillq) 3318 { 3319 struct virtchnl2_splitq_rx_buf_desc *buf_desc; 3320 u16 bufq_nta = bufq->next_to_alloc; 3321 u16 ntc = refillq->next_to_clean; 3322 int cleaned = 0; 3323 u16 gen; 3324 3325 buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta); 3326 3327 /* make sure we stop at ring wrap in the unlikely case ring is full */ 3328 while (likely(cleaned < refillq->desc_count)) { 3329 u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc); 3330 bool failure; 3331 3332 gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc); 3333 if (test_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags) != gen) 3334 break; 3335 3336 failure = idpf_rx_update_bufq_desc(bufq, refill_desc, 3337 buf_desc); 3338 if (failure) 3339 break; 3340 3341 if (unlikely(++ntc == refillq->desc_count)) { 3342 change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags); 3343 ntc = 0; 3344 } 3345 3346 if (unlikely(++bufq_nta == bufq->desc_count)) { 3347 buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0); 3348 bufq_nta = 0; 3349 } else { 3350 buf_desc++; 3351 } 3352 3353 cleaned++; 3354 } 3355 3356 if (!cleaned) 3357 return; 3358 3359 /* We want to limit how many transactions on the bus we trigger with 3360 * tail writes so we only do it in strides. It's also important we 3361 * align the write to a multiple of 8 as required by HW. 3362 */ 3363 if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) + 3364 bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE) 3365 idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta, 3366 IDPF_RX_BUF_POST_STRIDE)); 3367 3368 /* update next to alloc since we have filled the ring */ 3369 refillq->next_to_clean = ntc; 3370 bufq->next_to_alloc = bufq_nta; 3371 } 3372 3373 /** 3374 * idpf_rx_clean_refillq_all - Clean all refill queues 3375 * @bufq: buffer queue with refill queues 3376 * 3377 * Iterates through all refill queues assigned to the buffer queue assigned to 3378 * this vector. Returns true if clean is complete within budget, false 3379 * otherwise. 
3380 */ 3381 static void idpf_rx_clean_refillq_all(struct idpf_queue *bufq) 3382 { 3383 struct idpf_bufq_set *bufq_set; 3384 int i; 3385 3386 bufq_set = container_of(bufq, struct idpf_bufq_set, bufq); 3387 for (i = 0; i < bufq_set->num_refillqs; i++) 3388 idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]); 3389 } 3390 3391 /** 3392 * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler 3393 * @irq: interrupt number 3394 * @data: pointer to a q_vector 3395 * 3396 */ 3397 static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq, 3398 void *data) 3399 { 3400 struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data; 3401 3402 q_vector->total_events++; 3403 napi_schedule(&q_vector->napi); 3404 3405 return IRQ_HANDLED; 3406 } 3407 3408 /** 3409 * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport 3410 * @vport: virtual port structure 3411 * 3412 */ 3413 static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport) 3414 { 3415 u16 v_idx; 3416 3417 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) 3418 netif_napi_del(&vport->q_vectors[v_idx].napi); 3419 } 3420 3421 /** 3422 * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport 3423 * @vport: main vport structure 3424 */ 3425 static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport) 3426 { 3427 int v_idx; 3428 3429 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) 3430 napi_disable(&vport->q_vectors[v_idx].napi); 3431 } 3432 3433 /** 3434 * idpf_vport_intr_rel - Free memory allocated for interrupt vectors 3435 * @vport: virtual port 3436 * 3437 * Free the memory allocated for interrupt vectors associated to a vport 3438 */ 3439 void idpf_vport_intr_rel(struct idpf_vport *vport) 3440 { 3441 int i, j, v_idx; 3442 3443 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { 3444 struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; 3445 3446 kfree(q_vector->bufq); 3447 q_vector->bufq = NULL; 3448 kfree(q_vector->tx); 3449 q_vector->tx = NULL; 3450 kfree(q_vector->rx); 3451 q_vector->rx = NULL; 3452 } 3453 3454 /* Clean up the mapping of queues to vectors */ 3455 for (i = 0; i < vport->num_rxq_grp; i++) { 3456 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 3457 3458 if (idpf_is_queue_model_split(vport->rxq_model)) 3459 for (j = 0; j < rx_qgrp->splitq.num_rxq_sets; j++) 3460 rx_qgrp->splitq.rxq_sets[j]->rxq.q_vector = NULL; 3461 else 3462 for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) 3463 rx_qgrp->singleq.rxqs[j]->q_vector = NULL; 3464 } 3465 3466 if (idpf_is_queue_model_split(vport->txq_model)) 3467 for (i = 0; i < vport->num_txq_grp; i++) 3468 vport->txq_grps[i].complq->q_vector = NULL; 3469 else 3470 for (i = 0; i < vport->num_txq_grp; i++) 3471 for (j = 0; j < vport->txq_grps[i].num_txq; j++) 3472 vport->txq_grps[i].txqs[j]->q_vector = NULL; 3473 3474 kfree(vport->q_vectors); 3475 vport->q_vectors = NULL; 3476 } 3477 3478 /** 3479 * idpf_vport_intr_rel_irq - Free the IRQ association with the OS 3480 * @vport: main vport structure 3481 */ 3482 static void idpf_vport_intr_rel_irq(struct idpf_vport *vport) 3483 { 3484 struct idpf_adapter *adapter = vport->adapter; 3485 int vector; 3486 3487 for (vector = 0; vector < vport->num_q_vectors; vector++) { 3488 struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; 3489 int irq_num, vidx; 3490 3491 /* free only the irqs that were actually requested */ 3492 if (!q_vector) 3493 continue; 3494 3495 vidx = vport->q_vector_idxs[vector]; 3496 irq_num = adapter->msix_entries[vidx].vector; 3497 3498 /* 
clear the affinity_mask in the IRQ descriptor */ 3499 irq_set_affinity_hint(irq_num, NULL); 3500 free_irq(irq_num, q_vector); 3501 } 3502 } 3503 3504 /** 3505 * idpf_vport_intr_dis_irq_all - Disable all interrupt 3506 * @vport: main vport structure 3507 */ 3508 static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport) 3509 { 3510 struct idpf_q_vector *q_vector = vport->q_vectors; 3511 int q_idx; 3512 3513 for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) 3514 writel(0, q_vector[q_idx].intr_reg.dyn_ctl); 3515 } 3516 3517 /** 3518 * idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings 3519 * @q_vector: pointer to q_vector 3520 * @type: itr index 3521 * @itr: itr value 3522 */ 3523 static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector, 3524 const int type, u16 itr) 3525 { 3526 u32 itr_val; 3527 3528 itr &= IDPF_ITR_MASK; 3529 /* Don't clear PBA because that can cause lost interrupts that 3530 * came in while we were cleaning/polling 3531 */ 3532 itr_val = q_vector->intr_reg.dyn_ctl_intena_m | 3533 (type << q_vector->intr_reg.dyn_ctl_itridx_s) | 3534 (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1)); 3535 3536 return itr_val; 3537 } 3538 3539 /** 3540 * idpf_update_dim_sample - Update dim sample with packets and bytes 3541 * @q_vector: the vector associated with the interrupt 3542 * @dim_sample: dim sample to update 3543 * @dim: dim instance structure 3544 * @packets: total packets 3545 * @bytes: total bytes 3546 * 3547 * Update the dim sample with the packets and bytes which are passed to this 3548 * function. Set the dim state appropriately if the dim settings gets stale. 3549 */ 3550 static void idpf_update_dim_sample(struct idpf_q_vector *q_vector, 3551 struct dim_sample *dim_sample, 3552 struct dim *dim, u64 packets, u64 bytes) 3553 { 3554 dim_update_sample(q_vector->total_events, packets, bytes, dim_sample); 3555 dim_sample->comp_ctr = 0; 3556 3557 /* if dim settings get stale, like when not updated for 1 second or 3558 * longer, force it to start again. This addresses the frequent case 3559 * of an idle queue being switched to by the scheduler. 3560 */ 3561 if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= HZ) 3562 dim->state = DIM_START_MEASURE; 3563 } 3564 3565 /** 3566 * idpf_net_dim - Update net DIM algorithm 3567 * @q_vector: the vector associated with the interrupt 3568 * 3569 * Create a DIM sample and notify net_dim() so that it can possibly decide 3570 * a new ITR value based on incoming packets, bytes, and interrupts. 3571 * 3572 * This function is a no-op if the queue is not configured to dynamic ITR. 
3573 */ 3574 static void idpf_net_dim(struct idpf_q_vector *q_vector) 3575 { 3576 struct dim_sample dim_sample = { }; 3577 u64 packets, bytes; 3578 u32 i; 3579 3580 if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode)) 3581 goto check_rx_itr; 3582 3583 for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) { 3584 struct idpf_queue *txq = q_vector->tx[i]; 3585 unsigned int start; 3586 3587 do { 3588 start = u64_stats_fetch_begin(&txq->stats_sync); 3589 packets += u64_stats_read(&txq->q_stats.tx.packets); 3590 bytes += u64_stats_read(&txq->q_stats.tx.bytes); 3591 } while (u64_stats_fetch_retry(&txq->stats_sync, start)); 3592 } 3593 3594 idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim, 3595 packets, bytes); 3596 net_dim(&q_vector->tx_dim, dim_sample); 3597 3598 check_rx_itr: 3599 if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode)) 3600 return; 3601 3602 for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) { 3603 struct idpf_queue *rxq = q_vector->rx[i]; 3604 unsigned int start; 3605 3606 do { 3607 start = u64_stats_fetch_begin(&rxq->stats_sync); 3608 packets += u64_stats_read(&rxq->q_stats.rx.packets); 3609 bytes += u64_stats_read(&rxq->q_stats.rx.bytes); 3610 } while (u64_stats_fetch_retry(&rxq->stats_sync, start)); 3611 } 3612 3613 idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim, 3614 packets, bytes); 3615 net_dim(&q_vector->rx_dim, dim_sample); 3616 } 3617 3618 /** 3619 * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt 3620 * @q_vector: q_vector for which itr is being updated and interrupt enabled 3621 * 3622 * Update the net_dim() algorithm and re-enable the interrupt associated with 3623 * this vector. 3624 */ 3625 void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector) 3626 { 3627 u32 intval; 3628 3629 /* net_dim() updates ITR out-of-band using a work item */ 3630 idpf_net_dim(q_vector); 3631 3632 intval = idpf_vport_intr_buildreg_itr(q_vector, 3633 IDPF_NO_ITR_UPDATE_IDX, 0); 3634 3635 writel(intval, q_vector->intr_reg.dyn_ctl); 3636 } 3637 3638 /** 3639 * idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport 3640 * @vport: main vport structure 3641 * @basename: name for the vector 3642 */ 3643 static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename) 3644 { 3645 struct idpf_adapter *adapter = vport->adapter; 3646 int vector, err, irq_num, vidx; 3647 const char *vec_name; 3648 3649 for (vector = 0; vector < vport->num_q_vectors; vector++) { 3650 struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; 3651 3652 vidx = vport->q_vector_idxs[vector]; 3653 irq_num = adapter->msix_entries[vidx].vector; 3654 3655 if (q_vector->num_rxq && q_vector->num_txq) 3656 vec_name = "TxRx"; 3657 else if (q_vector->num_rxq) 3658 vec_name = "Rx"; 3659 else if (q_vector->num_txq) 3660 vec_name = "Tx"; 3661 else 3662 continue; 3663 3664 q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", 3665 basename, vec_name, vidx); 3666 3667 err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0, 3668 q_vector->name, q_vector); 3669 if (err) { 3670 netdev_err(vport->netdev, 3671 "Request_irq failed, error: %d\n", err); 3672 goto free_q_irqs; 3673 } 3674 /* assign the mask for this irq */ 3675 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); 3676 } 3677 3678 return 0; 3679 3680 free_q_irqs: 3681 while (--vector >= 0) { 3682 vidx = vport->q_vector_idxs[vector]; 3683 irq_num = adapter->msix_entries[vidx].vector; 3684 free_irq(irq_num, &vport->q_vectors[vector]); 3685 } 3686 3687 
return err; 3688 } 3689 3690 /** 3691 * idpf_vport_intr_write_itr - Write ITR value to the ITR register 3692 * @q_vector: q_vector structure 3693 * @itr: Interrupt throttling rate 3694 * @tx: Tx or Rx ITR 3695 */ 3696 void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx) 3697 { 3698 struct idpf_intr_reg *intr_reg; 3699 3700 if (tx && !q_vector->tx) 3701 return; 3702 else if (!tx && !q_vector->rx) 3703 return; 3704 3705 intr_reg = &q_vector->intr_reg; 3706 writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S, 3707 tx ? intr_reg->tx_itr : intr_reg->rx_itr); 3708 } 3709 3710 /** 3711 * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport 3712 * @vport: main vport structure 3713 */ 3714 static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport) 3715 { 3716 bool dynamic; 3717 int q_idx; 3718 u16 itr; 3719 3720 for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) { 3721 struct idpf_q_vector *qv = &vport->q_vectors[q_idx]; 3722 3723 /* Set the initial ITR values */ 3724 if (qv->num_txq) { 3725 dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode); 3726 itr = vport->tx_itr_profile[qv->tx_dim.profile_ix]; 3727 idpf_vport_intr_write_itr(qv, dynamic ? 3728 itr : qv->tx_itr_value, 3729 true); 3730 } 3731 3732 if (qv->num_rxq) { 3733 dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode); 3734 itr = vport->rx_itr_profile[qv->rx_dim.profile_ix]; 3735 idpf_vport_intr_write_itr(qv, dynamic ? 3736 itr : qv->rx_itr_value, 3737 false); 3738 } 3739 3740 if (qv->num_txq || qv->num_rxq) 3741 idpf_vport_intr_update_itr_ena_irq(qv); 3742 } 3743 } 3744 3745 /** 3746 * idpf_vport_intr_deinit - Release all vector associations for the vport 3747 * @vport: main vport structure 3748 */ 3749 void idpf_vport_intr_deinit(struct idpf_vport *vport) 3750 { 3751 idpf_vport_intr_napi_dis_all(vport); 3752 idpf_vport_intr_napi_del_all(vport); 3753 idpf_vport_intr_dis_irq_all(vport); 3754 idpf_vport_intr_rel_irq(vport); 3755 } 3756 3757 /** 3758 * idpf_tx_dim_work - Call back from the stack 3759 * @work: work queue structure 3760 */ 3761 static void idpf_tx_dim_work(struct work_struct *work) 3762 { 3763 struct idpf_q_vector *q_vector; 3764 struct idpf_vport *vport; 3765 struct dim *dim; 3766 u16 itr; 3767 3768 dim = container_of(work, struct dim, work); 3769 q_vector = container_of(dim, struct idpf_q_vector, tx_dim); 3770 vport = q_vector->vport; 3771 3772 if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile)) 3773 dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1; 3774 3775 /* look up the values in our local table */ 3776 itr = vport->tx_itr_profile[dim->profile_ix]; 3777 3778 idpf_vport_intr_write_itr(q_vector, itr, true); 3779 3780 dim->state = DIM_START_MEASURE; 3781 } 3782 3783 /** 3784 * idpf_rx_dim_work - Call back from the stack 3785 * @work: work queue structure 3786 */ 3787 static void idpf_rx_dim_work(struct work_struct *work) 3788 { 3789 struct idpf_q_vector *q_vector; 3790 struct idpf_vport *vport; 3791 struct dim *dim; 3792 u16 itr; 3793 3794 dim = container_of(work, struct dim, work); 3795 q_vector = container_of(dim, struct idpf_q_vector, rx_dim); 3796 vport = q_vector->vport; 3797 3798 if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile)) 3799 dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1; 3800 3801 /* look up the values in our local table */ 3802 itr = vport->rx_itr_profile[dim->profile_ix]; 3803 3804 idpf_vport_intr_write_itr(q_vector, itr, false); 3805 3806 dim->state = DIM_START_MEASURE; 3807 } 3808 3809 /** 3810 * idpf_init_dim - Set up dynamic interrupt 

/**
 * idpf_init_dim - Set up dynamic interrupt moderation
 * @qv: q_vector structure
 */
static void idpf_init_dim(struct idpf_q_vector *qv)
{
	INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work);
	qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;

	INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work);
	qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
}

/**
 * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
 * @vport: main vport structure
 */
static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
{
	int q_idx;

	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
		struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx];

		idpf_init_dim(q_vector);
		napi_enable(&q_vector->napi);
	}
}

/**
 * idpf_tx_splitq_clean_all - Clean completion queues
 * @q_vec: queue vector
 * @budget: Used to determine if we are in netpoll
 * @cleaned: returns number of packets cleaned
 *
 * Returns false if clean is not complete else returns true
 */
static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
				     int budget, int *cleaned)
{
	u16 num_txq = q_vec->num_txq;
	bool clean_complete = true;
	int i, budget_per_q;

	if (unlikely(!num_txq))
		return true;

	budget_per_q = DIV_ROUND_UP(budget, num_txq);
	for (i = 0; i < num_txq; i++)
		clean_complete &= idpf_tx_clean_complq(q_vec->tx[i],
						       budget_per_q, cleaned);

	return clean_complete;
}
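
/*
 * Illustrative sketch (not part of the driver): the Tx budget is split with
 * DIV_ROUND_UP so every completion queue gets a non-zero share, e.g. with
 * the default NAPI budget:
 *
 *	budget = 64, num_txq = 3
 *	budget_per_q = DIV_ROUND_UP(64, 3) = 22
 *
 * and clean_complete stays true only if every queue finishes within its
 * share.
 */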

/**
 * idpf_rx_splitq_clean_all - Clean completion queues
 * @q_vec: queue vector
 * @budget: Used to determine if we are in netpoll
 * @cleaned: returns number of packets cleaned
 *
 * Returns false if clean is not complete else returns true
 */
static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
				     int *cleaned)
{
	u16 num_rxq = q_vec->num_rxq;
	bool clean_complete = true;
	int pkts_cleaned = 0;
	int i, budget_per_q;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
	for (i = 0; i < num_rxq; i++) {
		struct idpf_queue *rxq = q_vec->rx[i];
		int pkts_cleaned_per_q;

		pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
		/* if we clean as many as budgeted, we must not be done */
		if (pkts_cleaned_per_q >= budget_per_q)
			clean_complete = false;
		pkts_cleaned += pkts_cleaned_per_q;
	}
	*cleaned = pkts_cleaned;

	for (i = 0; i < q_vec->num_bufq; i++)
		idpf_rx_clean_refillq_all(q_vec->bufq[i]);

	return clean_complete;
}

/**
 * idpf_vport_splitq_napi_poll - NAPI handler
 * @napi: struct from which you get q_vector
 * @budget: budget provided by stack
 */
static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
{
	struct idpf_q_vector *q_vector =
				container_of(napi, struct idpf_q_vector, napi);
	bool clean_complete;
	int work_done = 0;

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(!budget)) {
		idpf_tx_splitq_clean_all(q_vector, budget, &work_done);

		return 0;
	}

	clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
	clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);

	/* If work not completed, return budget and polling will return */
	if (!clean_complete)
		return budget;

	work_done = min_t(int, work_done, budget - 1);

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		idpf_vport_intr_update_itr_ena_irq(q_vector);

	/* Switch to poll mode in the tear-down path after sending disable
	 * queues virtchnl message, as the interrupts will be disabled after
	 * that
	 */
	if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE,
						   q_vector->tx[0]->flags)))
		return budget;
	else
		return work_done;
}
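
/*
 * Illustrative sketch (not part of the driver): the Rx split above uses
 * max(budget / num_rxq, 1) rather than DIV_ROUND_UP, so the per-queue shares
 * never sum to more than the overall budget, e.g.:
 *
 *	budget = 64, num_rxq = 3 -> budget_per_q = 21
 *
 * If any queue consumes its full share, the poll routine reports the whole
 * budget back to the stack and stays in polling mode; otherwise work_done is
 * capped at budget - 1 so napi_complete_done() is allowed to re-arm the
 * interrupt.
 */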

/**
 * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
 * @vport: virtual port
 *
 * Mapping for vectors to queues
 */
static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
{
	u16 num_txq_grp = vport->num_txq_grp;
	int i, j, qv_idx, bufq_vidx = 0;
	struct idpf_rxq_group *rx_qgrp;
	struct idpf_txq_group *tx_qgrp;
	struct idpf_queue *q, *bufq;
	u16 q_index;

	for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
		u16 num_rxq;

		rx_qgrp = &vport->rxq_grps[i];
		if (idpf_is_queue_model_split(vport->rxq_model))
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		else
			num_rxq = rx_qgrp->singleq.num_rxq;

		for (j = 0; j < num_rxq; j++) {
			if (qv_idx >= vport->num_q_vectors)
				qv_idx = 0;

			if (idpf_is_queue_model_split(vport->rxq_model))
				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				q = rx_qgrp->singleq.rxqs[j];
			q->q_vector = &vport->q_vectors[qv_idx];
			q_index = q->q_vector->num_rxq;
			q->q_vector->rx[q_index] = q;
			q->q_vector->num_rxq++;
			qv_idx++;
		}

		if (idpf_is_queue_model_split(vport->rxq_model)) {
			for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
				bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
				bufq->q_vector = &vport->q_vectors[bufq_vidx];
				q_index = bufq->q_vector->num_bufq;
				bufq->q_vector->bufq[q_index] = bufq;
				bufq->q_vector->num_bufq++;
			}
			if (++bufq_vidx >= vport->num_q_vectors)
				bufq_vidx = 0;
		}
	}

	for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
		u16 num_txq;

		tx_qgrp = &vport->txq_grps[i];
		num_txq = tx_qgrp->num_txq;

		if (idpf_is_queue_model_split(vport->txq_model)) {
			if (qv_idx >= vport->num_q_vectors)
				qv_idx = 0;

			q = tx_qgrp->complq;
			q->q_vector = &vport->q_vectors[qv_idx];
			q_index = q->q_vector->num_txq;
			q->q_vector->tx[q_index] = q;
			q->q_vector->num_txq++;
			qv_idx++;
		} else {
			for (j = 0; j < num_txq; j++) {
				if (qv_idx >= vport->num_q_vectors)
					qv_idx = 0;

				q = tx_qgrp->txqs[j];
				q->q_vector = &vport->q_vectors[qv_idx];
				q_index = q->q_vector->num_txq;
				q->q_vector->tx[q_index] = q;
				q->q_vector->num_txq++;

				qv_idx++;
			}
		}
	}
}

/**
 * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
 * @vport: virtual port
 *
 * Initialize vector indexes with values returned over mailbox
 */
static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_alloc_vectors *ac;
	u16 *vecids, total_vecs;
	int i;

	ac = adapter->req_vec_chunks;
	if (!ac) {
		for (i = 0; i < vport->num_q_vectors; i++)
			vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];

		return 0;
	}

	total_vecs = idpf_get_reserved_vecs(adapter);
	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
	if (!vecids)
		return -ENOMEM;

	idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);

	for (i = 0; i < vport->num_q_vectors; i++)
		vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];

	kfree(vecids);

	return 0;
}

/**
 * idpf_vport_intr_napi_add_all - Register napi handler for all qvectors
 * @vport: virtual port structure
 */
static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
{
	int (*napi_poll)(struct napi_struct *napi, int budget);
	u16 v_idx;

	if (idpf_is_queue_model_split(vport->txq_model))
		napi_poll = idpf_vport_splitq_napi_poll;
	else
		napi_poll = idpf_vport_singleq_napi_poll;

	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];

		netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);

		/* only set affinity_mask if the CPU is online */
		if (cpu_online(v_idx))
			cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
	}
}
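
/*
 * Illustrative sketch (not part of the driver): the assignment done in
 * idpf_vport_intr_map_vector_to_qs() above is a plain round-robin of queues
 * onto q_vectors, wrapping qv_idx when it reaches num_q_vectors. For
 * example, with 4 q_vectors and 6 Rx queues:
 *
 *	rxq 0 -> vector 0, rxq 1 -> vector 1, rxq 2 -> vector 2,
 *	rxq 3 -> vector 3, rxq 4 -> vector 0, rxq 5 -> vector 1
 *
 * Buffer queues are spread the same way, but one whole group at a time via
 * bufq_vidx.
 */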

/**
 * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
 * @vport: virtual port
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
int idpf_vport_intr_alloc(struct idpf_vport *vport)
{
	u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
	struct idpf_q_vector *q_vector;
	int v_idx, err;

	vport->q_vectors = kcalloc(vport->num_q_vectors,
				   sizeof(struct idpf_q_vector), GFP_KERNEL);
	if (!vport->q_vectors)
		return -ENOMEM;

	txqs_per_vector = DIV_ROUND_UP(vport->num_txq, vport->num_q_vectors);
	rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq, vport->num_q_vectors);
	bufqs_per_vector = vport->num_bufqs_per_qgrp *
			   DIV_ROUND_UP(vport->num_rxq_grp,
					vport->num_q_vectors);

	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
		q_vector = &vport->q_vectors[v_idx];
		q_vector->vport = vport;

		q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
		q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
		q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;

		q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
		q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
		q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;

		q_vector->tx = kcalloc(txqs_per_vector,
				       sizeof(struct idpf_queue *),
				       GFP_KERNEL);
		if (!q_vector->tx) {
			err = -ENOMEM;
			goto error;
		}

		q_vector->rx = kcalloc(rxqs_per_vector,
				       sizeof(struct idpf_queue *),
				       GFP_KERNEL);
		if (!q_vector->rx) {
			err = -ENOMEM;
			goto error;
		}

		if (!idpf_is_queue_model_split(vport->rxq_model))
			continue;

		q_vector->bufq = kcalloc(bufqs_per_vector,
					 sizeof(struct idpf_queue *),
					 GFP_KERNEL);
		if (!q_vector->bufq) {
			err = -ENOMEM;
			goto error;
		}
	}

	return 0;

error:
	idpf_vport_intr_rel(vport);

	return err;
}

/**
 * idpf_vport_intr_init - Setup all vectors for the given vport
 * @vport: virtual port
 *
 * Returns 0 on success or negative on failure
 */
int idpf_vport_intr_init(struct idpf_vport *vport)
{
	char *int_name;
	int err;

	err = idpf_vport_intr_init_vec_idx(vport);
	if (err)
		return err;

	idpf_vport_intr_map_vector_to_qs(vport);
	idpf_vport_intr_napi_add_all(vport);
	idpf_vport_intr_napi_ena_all(vport);

	err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
	if (err)
		goto unroll_vectors_alloc;

	int_name = kasprintf(GFP_KERNEL, "%s-%s",
			     dev_driver_string(&vport->adapter->pdev->dev),
			     vport->netdev->name);

	err = idpf_vport_intr_req_irq(vport, int_name);
	if (err)
		goto unroll_vectors_alloc;

	idpf_vport_intr_ena_irq_all(vport);

	return 0;

unroll_vectors_alloc:
	idpf_vport_intr_napi_dis_all(vport);
	idpf_vport_intr_napi_del_all(vport);

	return err;
}

/**
 * idpf_config_rss - Send virtchnl messages to configure RSS
 * @vport: virtual port
 *
 * Return 0 on success, negative on failure
 */
int idpf_config_rss(struct idpf_vport *vport)
{
	int err;

	err = idpf_send_get_set_rss_key_msg(vport, false);
	if (err)
		return err;

	return idpf_send_get_set_rss_lut_msg(vport, false);
}
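
/*
 * Illustrative sketch (not part of the driver): the per-vector queue arrays
 * allocated in idpf_vport_intr_alloc() above are sized for the worst case of
 * the round-robin spread, e.g. with hypothetical counts:
 *
 *	num_txq = 10, num_rxq = 10, num_q_vectors = 4
 *	txqs_per_vector = DIV_ROUND_UP(10, 4) = 3
 *	rxqs_per_vector = DIV_ROUND_UP(10, 4) = 3
 *
 * so no vector can overflow its tx[] or rx[] array even when the queue
 * counts do not divide evenly.
 */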

/**
 * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
 * @vport: virtual port structure
 */
static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	u16 num_active_rxq = vport->num_rxq;
	struct idpf_rss_data *rss_data;
	int i;

	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;

	for (i = 0; i < rss_data->rss_lut_size; i++) {
		rss_data->rss_lut[i] = i % num_active_rxq;
		rss_data->cached_lut[i] = rss_data->rss_lut[i];
	}
}

/**
 * idpf_init_rss - Allocate and initialize RSS resources
 * @vport: virtual port
 *
 * Return 0 on success, negative on failure
 */
int idpf_init_rss(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_rss_data *rss_data;
	u32 lut_size;

	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;

	lut_size = rss_data->rss_lut_size * sizeof(u32);
	rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
	if (!rss_data->rss_lut)
		return -ENOMEM;

	rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
	if (!rss_data->cached_lut) {
		kfree(rss_data->rss_lut);
		rss_data->rss_lut = NULL;

		return -ENOMEM;
	}

	/* Fill the default RSS lut values */
	idpf_fill_dflt_rss_lut(vport);

	return idpf_config_rss(vport);
}

/**
 * idpf_deinit_rss - Release RSS resources
 * @vport: virtual port
 */
void idpf_deinit_rss(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_rss_data *rss_data;

	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
	kfree(rss_data->cached_lut);
	rss_data->cached_lut = NULL;
	kfree(rss_data->rss_lut);
	rss_data->rss_lut = NULL;
}
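
/*
 * Illustrative sketch (not part of the driver): idpf_fill_dflt_rss_lut()
 * above spreads the indirection table evenly across the active Rx queues
 * with a simple modulo, e.g. with a hypothetical 16-entry LUT and 4 active
 * Rx queues:
 *
 *	rss_lut[] = { 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 }
 *
 * cached_lut mirrors rss_lut so the user-visible configuration can be
 * restored after a reset.
 */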