// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_type.h"
#include "ice_vsi_vlan_ops.h"

/**
 * ice_vsi_type_str - maps VSI type enum to string equivalents
 * @vsi_type: VSI type enum
 */
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
{
	switch (vsi_type) {
	case ICE_VSI_PF:
		return "ICE_VSI_PF";
	case ICE_VSI_VF:
		return "ICE_VSI_VF";
	case ICE_VSI_SF:
		return "ICE_VSI_SF";
	case ICE_VSI_CTRL:
		return "ICE_VSI_CTRL";
	case ICE_VSI_CHNL:
		return "ICE_VSI_CHNL";
	case ICE_VSI_LB:
		return "ICE_VSI_LB";
	default:
		return "unknown";
	}
}

/**
 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 *
 * First enable/disable all of the Rx rings, flush any remaining writes, and
 * then verify that they have all been enabled/disabled successfully. This will
 * let all of the register writes complete when enabling/disabling the Rx rings
 * before waiting for the change in hardware to complete.
 */
static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
	int ret = 0;
	u16 i;

	ice_for_each_rxq(vsi, i)
		ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);

	ice_flush(&vsi->back->hw);

	ice_for_each_rxq(vsi, i) {
		ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
		if (ret)
			break;
	}

	return ret;
}

/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (vsi->type == ICE_VSI_CHNL)
		return 0;

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
				     sizeof(*vsi->tx_rings), GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;

	vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
				     sizeof(*vsi->rx_rings), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rings;

	/* txq_map needs to have enough space to track both Tx (stack) rings
	 * and XDP rings; at this point vsi->num_xdp_txq might not be set,
	 * so use num_possible_cpus() as we want to always provide XDP ring
	 * per CPU, regardless of queue count settings from user that might
	 * have come from ethtool's set_channels() callback;
	 */
	vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
				    sizeof(*vsi->txq_map), GFP_KERNEL);

	if (!vsi->txq_map)
		goto err_txq_map;
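
	/* Illustrative sizing example (values assumed, not from the source):
	 * with alloc_txq = 8 stack queues on a 16-CPU system, txq_map gets
	 * 8 + 16 = 24 slots, leaving room for one XDP Tx ring per possible
	 * CPU regardless of later ethtool -L changes.
	 */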

	vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
				    sizeof(*vsi->rxq_map), GFP_KERNEL);
	if (!vsi->rxq_map)
		goto err_rxq_map;

	/* There is no need to allocate q_vectors for a loopback VSI. */
	if (vsi->type == ICE_VSI_LB)
		return 0;

	/* allocate memory for q_vector pointers */
	vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
				      sizeof(*vsi->q_vectors), GFP_KERNEL);
	if (!vsi->q_vectors)
		goto err_vectors;

	return 0;

err_vectors:
	devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
	devm_kfree(dev, vsi->txq_map);
err_txq_map:
	devm_kfree(dev, vsi->rx_rings);
err_rings:
	devm_kfree(dev, vsi->tx_rings);
	return -ENOMEM;
}

/**
 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
	switch (vsi->type) {
	case ICE_VSI_PF:
	case ICE_VSI_SF:
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
		/* a user could change the values of num_[tr]x_desc using
		 * ethtool -G so we should keep those values instead of
		 * overwriting them with the defaults.
		 */
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
		break;
	default:
		dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
			vsi->type);
		break;
	}
}

static u16 ice_get_rxq_count(struct ice_pf *pf)
{
	return min(ice_get_avail_rxq_count(pf),
		   netif_get_num_default_rss_queues());
}

static u16 ice_get_txq_count(struct ice_pf *pf)
{
	return min(ice_get_avail_txq_count(pf),
		   netif_get_num_default_rss_queues());
}

/**
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
{
	enum ice_vsi_type vsi_type = vsi->type;
	struct ice_pf *pf = vsi->back;
	struct ice_vf *vf = vsi->vf;

	if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
		return;

	switch (vsi_type) {
	case ICE_VSI_PF:
		if (vsi->req_txq) {
			vsi->alloc_txq = vsi->req_txq;
			vsi->num_txq = vsi->req_txq;
		} else {
			vsi->alloc_txq = ice_get_txq_count(pf);
		}

		pf->num_lan_tx = vsi->alloc_txq;

		/* only 1 Rx queue unless RSS is enabled */
		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			vsi->alloc_rxq = 1;
		} else {
			if (vsi->req_rxq) {
				vsi->alloc_rxq = vsi->req_rxq;
				vsi->num_rxq = vsi->req_rxq;
			} else {
				vsi->alloc_rxq = ice_get_rxq_count(pf);
			}
		}

		pf->num_lan_rx = vsi->alloc_rxq;

		vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq);
		break;
	case ICE_VSI_SF:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		vsi->num_q_vectors = 1;
		vsi->irq_dyn_alloc = true;
		break;
	case ICE_VSI_VF:
		if (vf->num_req_qs)
			vf->num_vf_qs = vf->num_req_qs;
		vsi->alloc_txq = vf->num_vf_qs;
		vsi->alloc_rxq = vf->num_vf_qs;
		/* pf->vfs.num_msix_per includes (VF miscellaneous vector +
		 * data queue interrupts). Since vsi->num_q_vectors is the
		 * number of queue vectors, subtract 1 (ICE_NONQ_VECS_VF)
		 * from the original vector count
		 */
		vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF;
		break;
	case ICE_VSI_CTRL:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		vsi->num_q_vectors = 1;
		break;
	case ICE_VSI_CHNL:
		vsi->alloc_txq = 0;
		vsi->alloc_rxq = 0;
		break;
	case ICE_VSI_LB:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		break;
	default:
		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
		break;
	}

	ice_vsi_set_num_desc(vsi);
}

/**
 * ice_get_free_slot - get the next free (NULL) location index in array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use this
 * function on any array of pointers.
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}

/**
 * ice_vsi_delete_from_hw - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx *ctxt;
	int status;

	ice_fltr_remove_all(vsi);
	ctxt = kzalloc_obj(*ctxt);
	if (!ctxt)
		return;

	if (vsi->type == ICE_VSI_VF)
		ctxt->vf_num = vsi->vf->vf_id;
	ctxt->vsi_num = vsi->vsi_num;

	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));

	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
	if (status)
		dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
			vsi->vsi_num, status);

	kfree(ctxt);
}

/**
 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
 * @vsi: pointer to VSI being cleared
 */
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	/* free the ring and vector containers */
	devm_kfree(dev, vsi->q_vectors);
	vsi->q_vectors = NULL;
	devm_kfree(dev, vsi->tx_rings);
	vsi->tx_rings = NULL;
	devm_kfree(dev, vsi->rx_rings);
	vsi->rx_rings = NULL;
	devm_kfree(dev, vsi->txq_map);
	vsi->txq_map = NULL;
	devm_kfree(dev, vsi->rxq_map);
	vsi->rxq_map = NULL;
}

/**
 * ice_vsi_free_stats - Free the ring statistics structures
 * @vsi: VSI pointer
 */
static void ice_vsi_free_stats(struct ice_vsi *vsi)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf = vsi->back;
	int i;

	if (vsi->type == ICE_VSI_CHNL)
		return;
	if (!pf->vsi_stats)
		return;

	vsi_stat = pf->vsi_stats[vsi->idx];
	if (!vsi_stat)
		return;

	ice_for_each_alloc_txq(vsi, i) {
		if (vsi_stat->tx_ring_stats[i]) {
			kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
			WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
		}
	}

	ice_for_each_alloc_rxq(vsi, i) {
		if (vsi_stat->rx_ring_stats[i]) {
			kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
			WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
		}
	}
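
	/* Note: the per-ring stats above go through kfree_rcu() because
	 * readers (e.g. stats collection) may still hold RCU-protected
	 * references to them; the container arrays below have no such
	 * readers and can be freed immediately.
	 */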
	kfree(vsi_stat->tx_ring_stats);
	kfree(vsi_stat->rx_ring_stats);
	kfree(vsi_stat);
	pf->vsi_stats[vsi->idx] = NULL;
}

/**
 * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
 * @vsi: VSI which is having stats allocated
 */
static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
{
	struct ice_ring_stats **tx_ring_stats;
	struct ice_ring_stats **rx_ring_stats;
	struct ice_vsi_stats *vsi_stats;
	struct ice_pf *pf = vsi->back;
	u16 i;

	vsi_stats = pf->vsi_stats[vsi->idx];
	tx_ring_stats = vsi_stats->tx_ring_stats;
	rx_ring_stats = vsi_stats->rx_ring_stats;

	/* Allocate Tx ring stats */
	ice_for_each_alloc_txq(vsi, i) {
		struct ice_ring_stats *ring_stats;
		struct ice_tx_ring *ring;

		ring = vsi->tx_rings[i];
		ring_stats = tx_ring_stats[i];

		if (!ring_stats) {
			ring_stats = kzalloc_obj(*ring_stats);
			if (!ring_stats)
				goto err_out;

			u64_stats_init(&ring_stats->syncp);

			WRITE_ONCE(tx_ring_stats[i], ring_stats);
		}

		ring->ring_stats = ring_stats;
	}

	/* Allocate Rx ring stats */
	ice_for_each_alloc_rxq(vsi, i) {
		struct ice_ring_stats *ring_stats;
		struct ice_rx_ring *ring;

		ring = vsi->rx_rings[i];
		ring_stats = rx_ring_stats[i];

		if (!ring_stats) {
			ring_stats = kzalloc_obj(*ring_stats);
			if (!ring_stats)
				goto err_out;

			u64_stats_init(&ring_stats->syncp);

			WRITE_ONCE(rx_ring_stats[i], ring_stats);
		}

		ring->ring_stats = ring_stats;
	}

	return 0;

err_out:
	ice_vsi_free_stats(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_free - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
 */
void ice_vsi_free(struct ice_vsi *vsi)
{
	struct ice_pf *pf = NULL;
	struct device *dev;

	if (!vsi || !vsi->back)
		return;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
		return;
	}

	mutex_lock(&pf->sw_mutex);
	/* updates the PF for this cleared VSI */

	pf->vsi[vsi->idx] = NULL;
	pf->next_vsi = vsi->idx;

	ice_vsi_free_stats(vsi);
	ice_vsi_free_arrays(vsi);
	mutex_destroy(&vsi->xdp_state_lock);
	mutex_unlock(&pf->sw_mutex);
	devm_kfree(dev, vsi);
}

void ice_vsi_delete(struct ice_vsi *vsi)
{
	ice_vsi_delete_from_hw(vsi);
	ice_vsi_free(vsi);
}

/**
 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.tx_ring)
		return IRQ_HANDLED;

	ice_clean_ctrl_rx_irq(q_vector->rx.rx_ring);
	ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);

	return IRQ_HANDLED;
}

/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
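
	/* If no ring is attached to this vector yet (e.g. during
	 * reconfiguration) there is nothing to poll; just report the
	 * interrupt as handled.
	 */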
	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
		return IRQ_HANDLED;

	q_vector->total_events++;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
 * @vsi: VSI pointer
 */
static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf = vsi->back;

	if (vsi->type == ICE_VSI_CHNL)
		return 0;
	if (!pf->vsi_stats)
		return -ENOENT;

	if (pf->vsi_stats[vsi->idx])
		/* realloc will happen in rebuild path */
		return 0;

	vsi_stat = kzalloc_obj(*vsi_stat);
	if (!vsi_stat)
		return -ENOMEM;

	vsi_stat->tx_ring_stats =
		kzalloc_objs(*vsi_stat->tx_ring_stats, vsi->alloc_txq);
	if (!vsi_stat->tx_ring_stats)
		goto err_alloc_tx;

	vsi_stat->rx_ring_stats =
		kzalloc_objs(*vsi_stat->rx_ring_stats, vsi->alloc_rxq);
	if (!vsi_stat->rx_ring_stats)
		goto err_alloc_rx;

	pf->vsi_stats[vsi->idx] = vsi_stat;

	return 0;

err_alloc_rx:
	kfree(vsi_stat->rx_ring_stats);
err_alloc_tx:
	kfree(vsi_stat->tx_ring_stats);
	kfree(vsi_stat);
	pf->vsi_stats[vsi->idx] = NULL;
	return -ENOMEM;
}

/**
 * ice_vsi_alloc_def - set default values for already allocated VSI
 * @vsi: ptr to VSI
 * @ch: ptr to channel
 */
static int
ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
{
	if (vsi->type != ICE_VSI_CHNL) {
		ice_vsi_set_num_qs(vsi);
		if (ice_vsi_alloc_arrays(vsi))
			return -ENOMEM;
	}

	vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev);

	switch (vsi->type) {
	case ICE_VSI_PF:
	case ICE_VSI_SF:
		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	case ICE_VSI_CTRL:
		/* Setup ctrl VSI MSIX irq handler */
		vsi->irq_handler = ice_msix_clean_ctrl_vsi;
		break;
	case ICE_VSI_CHNL:
		if (!ch)
			return -EINVAL;

		vsi->num_rxq = ch->num_rxq;
		vsi->num_txq = ch->num_txq;
		vsi->next_base_q = ch->base_q;
		break;
	case ICE_VSI_VF:
	case ICE_VSI_LB:
		break;
	default:
		ice_vsi_free_arrays(vsi);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 *
 * Reserves a VSI index from the PF and allocates an empty VSI structure
 * without a type. The VSI structure must later be initialized by calling
 * ice_vsi_cfg().
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi = NULL;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->sw_mutex);

	/* If we have already allocated our maximum number of VSIs,
	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	 * is available to be populated
	 */
	if (pf->next_vsi == ICE_NO_VSI) {
		dev_dbg(dev, "out of VSI slots!\n");
		goto unlock_pf;
	}

	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
	if (!vsi)
		goto unlock_pf;

	vsi->back = pf;
	set_bit(ICE_VSI_DOWN, vsi->state);

	/* fill slot and make note of the index */
	vsi->idx = pf->next_vsi;
	pf->vsi[pf->next_vsi] = vsi;

	/* prepare pf->next_vsi for next use */
	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
					 pf->next_vsi);

	mutex_init(&vsi->xdp_state_lock);

unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;
}

/**
 * ice_alloc_fd_res - Allocate FD resource for a VSI
 * @vsi: pointer to the ice_vsi
 *
 * This allocates the FD resources
 *
 * Returns 0 on success, -EPERM when FD resources are unavailable or not
 * applicable to this VSI type, or -EINVAL on an invalid channel configuration
 */
static int ice_alloc_fd_res(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u32 g_val, b_val;

	/* Flow Director filters are only allocated/assigned to the PF VSI or
	 * CHNL VSI which passes the traffic. The CTRL VSI is only used to
	 * add/delete filters so resources are not allocated to it
	 */
	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EPERM;

	if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
	      vsi->type == ICE_VSI_CHNL))
		return -EPERM;

	/* FD filters from guaranteed pool per VSI */
	g_val = pf->hw.func_caps.fd_fltr_guar;
	if (!g_val)
		return -EPERM;

	/* FD filters from best effort pool */
	b_val = pf->hw.func_caps.fd_fltr_best_effort;
	if (!b_val)
		return -EPERM;

	/* PF main VSI gets only 64 FD resources from guaranteed pool
	 * when ADQ is configured.
	 */
#define ICE_PF_VSI_GFLTR	64

	/* determine FD filter resources per VSI from shared(best effort) and
	 * dedicated pool
	 */
	if (vsi->type == ICE_VSI_PF) {
		vsi->num_gfltr = g_val;
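
		/* Worked example (numbers assumed for illustration): with
		 * g_val = 1024 guaranteed filters and an ADQ config of
		 * all_numtc = 4, the main VSI is capped at 64 below, and
		 * each of the 3 channel VSIs later gets
		 * (1024 - 64) / 3 = 320 guaranteed filters.
		 */
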
		/* if MQPRIO is configured, main VSI doesn't get all FD
		 * resources from guaranteed pool. PF VSI gets 64 FD resources
		 */
		if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
			if (g_val < ICE_PF_VSI_GFLTR)
				return -EPERM;
			/* allow bare minimum entries for PF VSI */
			vsi->num_gfltr = ICE_PF_VSI_GFLTR;
		}

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	} else if (vsi->type == ICE_VSI_VF) {
		vsi->num_gfltr = 0;

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	} else {
		struct ice_vsi *main_vsi;
		int numtc;

		main_vsi = ice_get_main_vsi(pf);
		if (!main_vsi)
			return -EPERM;

		if (!main_vsi->all_numtc)
			return -EINVAL;

		/* figure out ADQ numtc */
		numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;

		/* only one TC but still asking resources for channels,
		 * invalid config
		 */
		if (numtc < ICE_CHNL_START_TC)
			return -EPERM;

		g_val -= ICE_PF_VSI_GFLTR;
		/* channel VSIs get an equal share from the guaranteed pool */
		vsi->num_gfltr = g_val / numtc;

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	}

	return 0;
}

/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	struct ice_qs_cfg rx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_rxqs,
		.pf_map_size = pf->max_pf_rxqs,
		.q_count = vsi->alloc_rxq,
		.scatter_count = ICE_MAX_SCATTER_RXQS,
		.vsi_map = vsi->rxq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	int ret;

	if (vsi->type == ICE_VSI_CHNL)
		return 0;

	ret = __ice_vsi_get_qs(&tx_qs_cfg);
	if (ret)
		return ret;
	vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;

	ret = __ice_vsi_get_qs(&rx_qs_cfg);
	if (ret)
		return ret;
	vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;

	return 0;
}

/**
 * ice_vsi_put_qs - Release queues from VSI to PF
 * @vsi: the VSI that is going to release queues
 */
static void ice_vsi_put_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	mutex_lock(&pf->avail_q_mutex);

	ice_for_each_alloc_txq(vsi, i) {
		clear_bit(vsi->txq_map[i], pf->avail_txqs);
		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
	}

	ice_for_each_alloc_rxq(vsi, i) {
		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
	}

	mutex_unlock(&pf->avail_q_mutex);
}

/**
 * ice_is_safe_mode - check if driver is in safe mode
 * @pf: pointer to the PF struct
 *
 * returns true if driver is in safe mode, false otherwise
 */
bool ice_is_safe_mode(struct ice_pf *pf)
{
	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}

/**
 * ice_is_rdma_ena - check if RDMA is currently supported
 * @pf: pointer to the PF struct
 *
 * returns true if RDMA is currently supported, false otherwise
 */
bool ice_is_rdma_ena(struct ice_pf *pf)
{
	union devlink_param_value value;
	int err;

	err = devl_param_driverinit_value_get(priv_to_devlink(pf),
					      DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
					      &value);
	return err ? test_bit(ICE_FLAG_RDMA_ENA, pf->flags) : value.vbool;
}

/**
 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
 * @vsi: the VSI being cleaned up
 *
 * This function deletes RSS input set for all flows that were configured
 * for this VSI
 */
static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int status;

	if (ice_is_safe_mode(pf))
		return;

	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
			vsi->vsi_num, status);
}

/**
 * ice_rss_clean - Delete RSS related VSI structures and configuration
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	devm_kfree(dev, vsi->rss_hkey_user);
	devm_kfree(dev, vsi->rss_lut_user);

	ice_vsi_clean_rss_flow_fld(vsi);
	/* remove RSS replay list */
	if (!ice_is_safe_mode(pf))
		ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;
	u16 max_rss_size;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	max_rss_size = BIT(cap->rss_table_entry_width);
	switch (vsi->type) {
	case ICE_VSI_CHNL:
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = (u16)cap->rss_table_size;
		if (vsi->type == ICE_VSI_CHNL)
			vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
		else
			vsi->rss_size = min_t(u16,
					      netif_get_num_default_rss_queues(),
					      max_rss_size);
		vsi->rss_lut_type = ICE_LUT_PF;
		break;
	case ICE_VSI_SF:
		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
		vsi->rss_size = min_t(u16, netif_get_num_default_rss_queues(),
				      max_rss_size);
		vsi->rss_lut_type = ICE_LUT_VSI;
		break;
	case ICE_VSI_VF:
		/* VF VSI will get a small RSS table.
		 * For VSI_LUT, LUT size should be set to 64 bytes.
		 */
		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
		vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
		vsi->rss_lut_type = ICE_LUT_VSI;
		break;
	case ICE_VSI_LB:
		break;
	default:
		dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		break;
	}
}
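
/* Sizing note (illustrative): the PF VSI shares the function-wide LUT of
 * cap->rss_table_size entries, whereas VF and subfunction VSIs get the fixed
 * 64-entry per-VSI LUT (ICE_LUT_VSI_SIZE) described above, so a VF's flows
 * can spread over at most ICE_MAX_RSS_QS_PER_VF queues.
 */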

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @hw: HW structure used to determine the VLAN mode of the device
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSIs should be allocated from the shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* allow all untagged/tagged packets by default on Tx */
	ctxt->info.inner_vlan_flags = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M,
						 ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL);
	/* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
	 * results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
	 *
	 * DVM - leave inner VLAN in packet by default
	 */
	if (ice_is_dvm_ena(hw)) {
		ctxt->info.inner_vlan_flags |=
			FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
				   ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
		ctxt->info.outer_vlan_flags =
			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
				   ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL);
		ctxt->info.outer_vlan_flags |=
			FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M,
				   ICE_AQ_VSI_OUTER_TAG_VLAN_8100);
		ctxt->info.outer_vlan_flags |=
			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
				   ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
	}
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No outer tag support; outer_tag_flags remains zero */
}

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
	u16 num_txq_per_tc, num_rxq_per_tc;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	u8 netdev_tc = 0;
	int i;

	if (!vsi->tc_cfg.numtc) {
		/* at least TC0 should be enabled by default */
		vsi->tc_cfg.numtc = 1;
		vsi->tc_cfg.ena_tc = 1;
	}

	num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
	if (!num_rxq_per_tc)
		num_rxq_per_tc = 1;
	num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
	if (!num_txq_per_tc)
		num_txq_per_tc = 1;

	/* find the (rounded up) power-of-2 of qcount */
	pow = (u16)order_base_2(num_rxq_per_tc);
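
	/* Worked example (assumed values): with num_rxq_per_tc = 4,
	 * order_base_2(4) = 2, so each enabled TC advertises 2^2 = 4 queues;
	 * a TC starting at queue 8 encodes offset 8 in the 10-bit offset
	 * field and pow = 2 in the 4-bit queue-count field described below.
	 */
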
	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are for the queue offset for TC0, and the next 4
	 * bits are for the number of queues allocated to TC0. The number of
	 * queues is a power of 2.
	 *
	 * If a TC is not enabled, its queue offset is set to 0 and one queue
	 * is allocated; this way, traffic for the given TC will be sent to
	 * the default queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */
	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
		vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

		qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
		qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
		offset += num_rxq_per_tc;
		tx_count += num_txq_per_tc;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* If offset is non-zero, it was accumulated from the enabled TCs and
	 * is the total Rx queue count; otherwise fall back to num_rxq_per_tc,
	 * which is always non-zero because it is derived from the VSI's
	 * allocated Rx queues (at least 1).
	 */
	if (offset)
		rx_count = offset;
	else
		rx_count = num_rxq_per_tc;

	if (rx_count > vsi->alloc_rxq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u) than were allocated (%u)!\n",
			rx_count, vsi->alloc_rxq);
		return -EINVAL;
	}

	if (tx_count > vsi->alloc_txq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u) than were allocated (%u)!\n",
			tx_count, vsi->alloc_txq);
		return -EINVAL;
	}

	vsi->num_txq = tx_count;
	vsi->num_rxq = rx_count;

	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
		dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
		/* since there is a chance that num_rxq could have been changed
		 * in the above for loop, make num_txq equal to num_rxq.
		 */
		vsi->num_txq = vsi->num_rxq;
	}

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
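	/* e.g. if this VSI was assigned PF Rx queues 16..23 (assumed values),
	 * q_mapping[0] = 16 (first PF queue) and q_mapping[1] = 8 (count).
	 */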
1102 */ 1103 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); 1104 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); 1105 1106 return 0; 1107 } 1108 1109 /** 1110 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI 1111 * @ctxt: the VSI context being set 1112 * @vsi: the VSI being configured 1113 */ 1114 static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) 1115 { 1116 u8 dflt_q_group, dflt_q_prio; 1117 u16 dflt_q, report_q, val; 1118 1119 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL && 1120 vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL) 1121 return; 1122 1123 val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID; 1124 ctxt->info.valid_sections |= cpu_to_le16(val); 1125 dflt_q = 0; 1126 dflt_q_group = 0; 1127 report_q = 0; 1128 dflt_q_prio = 0; 1129 1130 /* enable flow director filtering/programming */ 1131 val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE; 1132 ctxt->info.fd_options = cpu_to_le16(val); 1133 /* max of allocated flow director filters */ 1134 ctxt->info.max_fd_fltr_dedicated = 1135 cpu_to_le16(vsi->num_gfltr); 1136 /* max of shared flow director filters any VSI may program */ 1137 ctxt->info.max_fd_fltr_shared = 1138 cpu_to_le16(vsi->num_bfltr); 1139 /* default queue index within the VSI of the default FD */ 1140 val = FIELD_PREP(ICE_AQ_VSI_FD_DEF_Q_M, dflt_q); 1141 /* target queue or queue group to the FD filter */ 1142 val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_GRP_M, dflt_q_group); 1143 ctxt->info.fd_def_q = cpu_to_le16(val); 1144 /* queue index on which FD filter completion is reported */ 1145 val = FIELD_PREP(ICE_AQ_VSI_FD_REPORT_Q_M, report_q); 1146 /* priority of the default qindex action */ 1147 val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_PRIORITY_M, dflt_q_prio); 1148 ctxt->info.fd_report_opt = cpu_to_le16(val); 1149 } 1150 1151 /** 1152 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI 1153 * @ctxt: the VSI context being set 1154 * @vsi: the VSI being configured 1155 */ 1156 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) 1157 { 1158 u8 lut_type, hash_type; 1159 struct device *dev; 1160 struct ice_pf *pf; 1161 1162 pf = vsi->back; 1163 dev = ice_pf_to_dev(pf); 1164 1165 switch (vsi->type) { 1166 case ICE_VSI_CHNL: 1167 case ICE_VSI_PF: 1168 /* PF VSI will inherit RSS instance of PF */ 1169 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; 1170 break; 1171 case ICE_VSI_VF: 1172 case ICE_VSI_SF: 1173 /* VF VSI will gets a small RSS table which is a VSI LUT type */ 1174 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; 1175 break; 1176 default: 1177 dev_dbg(dev, "Unsupported VSI type %s\n", 1178 ice_vsi_type_str(vsi->type)); 1179 return; 1180 } 1181 1182 hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; 1183 vsi->rss_hfunc = hash_type; 1184 1185 ctxt->info.q_opt_rss = 1186 FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) | 1187 FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type); 1188 } 1189 1190 static void 1191 ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) 1192 { 1193 u16 qcount, qmap; 1194 u8 offset = 0; 1195 int pow; 1196 1197 qcount = vsi->num_rxq; 1198 1199 pow = order_base_2(qcount); 1200 qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset); 1201 qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow); 1202 1203 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); 1204 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); 1205 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q); 1206 ctxt->info.q_mapping[1] = cpu_to_le16(qcount); 1207 } 1208 1209 /** 1210 * 

/**
 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
 * @vsi: VSI to check whether or not VLAN pruning is enabled.
 *
 * returns true if Rx VLAN pruning is enabled and false otherwise.
 */
static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
{
	return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 * @vsi_flags: VSI configuration flags
 *
 * Set ICE_VSI_FLAG_INIT to initialize a new VSI context, clear it to
 * reconfigure an existing context.
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi_ctx *ctxt;
	struct device *dev;
	int ret = 0;

	dev = ice_pf_to_dev(pf);
	ctxt = kzalloc_obj(*ctxt);
	if (!ctxt)
		return -ENOMEM;

	switch (vsi->type) {
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
	case ICE_VSI_PF:
		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
		break;
	case ICE_VSI_SF:
	case ICE_VSI_CHNL:
		ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
		break;
	case ICE_VSI_VF:
		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
		/* VF number here is the absolute VF number (0-255) */
		ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
		break;
	default:
		ret = -ENODEV;
		goto out;
	}

	/* Handle VLAN pruning for channel VSI if main VSI has VLAN
	 * prune enabled
	 */
	if (vsi->type == ICE_VSI_CHNL) {
		struct ice_vsi *main_vsi;

		main_vsi = ice_get_main_vsi(pf);
		if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi))
			ctxt->info.sw_flags2 |=
				ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
		else
			ctxt->info.sw_flags2 &=
				~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

	ice_set_dflt_vsi_ctx(hw, ctxt);
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
		ice_set_fd_vsi_ctx(ctxt, vsi);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
	    vsi->type != ICE_VSI_CTRL) {
		ice_set_rss_vsi_ctx(ctxt, vsi);
		/* if updating the VSI context, set valid_sections to indicate
		 * which section of the VSI context is being updated
		 */
		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
			ctxt->info.valid_sections |=
				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
	}

	ctxt->info.sw_id = vsi->port_info->sw_id;
	if (vsi->type == ICE_VSI_CHNL) {
		ice_chnl_vsi_setup_q_map(vsi, ctxt);
	} else {
		ret = ice_vsi_setup_q_map(vsi, ctxt);
		if (ret)
			goto out;

		/* when updating an existing VSI, valid_sections must indicate
		 * which sections of the VSI context are being modified
		 */
		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
			ctxt->info.valid_sections |=
				cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	}

	/* Allow control frames out of main VSI */
	if (vsi->type == ICE_VSI_PF) {
		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	}
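
	/* ICE_VSI_FLAG_INIT picks between creating a brand new VSI in FW
	 * (add) and rewriting the context of one that already exists
	 * (update); see the kernel-doc above.
	 */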
	if (vsi_flags & ICE_VSI_FLAG_INIT) {
		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Add VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	} else {
		ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Update VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt->info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt->vsi_num;

out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	/* Avoid stale references by clearing map from vector to ring */
	if (vsi->q_vectors) {
		ice_for_each_q_vector(vsi, i) {
			struct ice_q_vector *q_vector = vsi->q_vectors[i];

			if (q_vector) {
				q_vector->tx.tx_ring = NULL;
				q_vector->rx.rx_ring = NULL;
			}
		}
	}

	if (vsi->tx_rings) {
		ice_for_each_alloc_txq(vsi, i) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				WRITE_ONCE(vsi->tx_rings[i], NULL);
			}
		}
	}
	if (vsi->rx_rings) {
		ice_for_each_alloc_rxq(vsi, i) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				WRITE_ONCE(vsi->rx_rings[i], NULL);
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u16 i;

	dev = ice_pf_to_dev(pf);
	/* Allocate Tx rings */
	ice_for_each_alloc_txq(vsi, i) {
		struct ice_tx_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc_obj(*ring);

		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->vsi = vsi;
		ring->tx_tstamps = &pf->ptp.port.tx;
		ring->dev = dev;
		ring->count = vsi->num_tx_desc;
		ring->txq_teid = ICE_INVAL_TEID;
		if (dvm_ena)
			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
		else
			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
		WRITE_ONCE(vsi->tx_rings[i], ring);
	}

	/* Allocate Rx rings */
	ice_for_each_alloc_rxq(vsi, i) {
		struct ice_rx_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc_obj(*ring);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->count = vsi->num_rx_desc;
		ring->cached_phctime = pf->ptp.cached_phc_time;

		if (ice_is_feature_supported(pf, ICE_F_GCS))
			ring->flags |= ICE_RX_FLAGS_RING_GCS;

		WRITE_ONCE(vsi->rx_rings[i], ring);
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_manage_rss_lut - disable/enable RSS
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 *
 * In the event of disable request for RSS, this function will zero out RSS
 * LUT, while in the event of enable request for RSS, it will reconfigure RSS
 * LUT.
 */
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
	u8 *lut;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return;

	if (ena) {
		if (vsi->rss_lut_user)
			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
		else
			ice_fill_rss_lut(lut, vsi->rss_table_size,
					 vsi->rss_size);
	}

	ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	kfree(lut);
}

/**
 * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
 * @vsi: VSI to be configured
 * @disable: set to true to have FCS / CRC in the frame data
 */
void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
{
	int i;

	ice_for_each_rxq(vsi, i)
		if (disable)
			vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
		else
			vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
}

/**
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
 * @vsi: VSI to be configured
 */
int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u8 *lut, *key;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
	    (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
	} else {
		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);

		/* If orig_rss_size is valid and less than the currently
		 * determined main VSI rss_size, update the main VSI rss_size
		 * to be orig_rss_size so that when tc-qdisc is deleted, the
		 * main VSI RSS table gets programmed back to whatever it was
		 * before the setup-tc for the ADQ config.
		 */
		if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
		    vsi->orig_rss_size <= vsi->num_rxq) {
			vsi->rss_size = vsi->orig_rss_size;
			/* now orig_rss_size is used, reset it to zero */
			vsi->orig_rss_size = 0;
		}
	}

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);

	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	if (err) {
		dev_err(dev, "set_rss_lut failed, error %d\n", err);
		goto ice_vsi_cfg_rss_exit;
	}

	key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto ice_vsi_cfg_rss_exit;
	}

	if (vsi->rss_hkey_user)
		memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
	else
		netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);

	err = ice_set_rss_key(vsi, key);
	if (err)
		dev_err(dev, "set_rss_key failed, error %d\n", err);

	kfree(key);
ice_vsi_cfg_rss_exit:
	kfree(lut);
	return err;
}
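
/* A default LUT from ice_fill_rss_lut() spreads queues round-robin; e.g.
 * (sizes assumed for illustration) a 512-entry table with rss_size = 16
 * repeats the pattern 0, 1, ..., 15 thirty-two times, so hashed flows land
 * evenly across the 16 queues.
 */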

/**
 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called during the VF VSI setup. Upon successful
 * completion of package download, this function will configure default RSS
 * input sets for VF VSI.
 */
static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi->vsi_num);
		return;
	}

	status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HASHCFG);
	if (status)
		dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
			vsi->vsi_num, status);
}

static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
	/* configure RSS for IPv4 with input set IP src/dst */
	{ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for IPv6 with input set IPv6 src/dst */
	{ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
	{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_HASH_TCP_IPV4, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
	{ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_HASH_UDP_IPV4, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for sctp4 with input set IP src/dst - only support
	 * RSS on SCTPv4 on outer headers (non-tunneled)
	 */
	{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpc4 with input set IPv4 src/dst */
	{ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_FLOW_HASH_IPV4, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpc4t with input set IPv4 src/dst */
	{ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_FLOW_HASH_GTP_C_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu4 with input set IPv4 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_FLOW_HASH_GTP_U_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu4e with input set IPv4 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_FLOW_HASH_GTP_U_IPV4_EH, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu4u with input set IPv4 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_FLOW_HASH_GTP_U_IPV4_UP, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu4d with input set IPv4 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_FLOW_HASH_GTP_U_IPV4_DWN, ICE_RSS_OUTER_HEADERS, false},

	/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
	{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
	{ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_HASH_UDP_IPV6, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for sctp6 with input set IPv6 src/dst - only support
	 * RSS on SCTPv6 on outer headers (non-tunneled)
	 */
	{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */
	{ICE_FLOW_SEG_HDR_ESP,
	 ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpc6 with input set IPv6 src/dst */
	{ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_FLOW_HASH_IPV6, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpc6t with input set IPv6 src/dst */
	{ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_FLOW_HASH_GTP_C_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu6 with input set IPv6 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_FLOW_HASH_GTP_U_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu6e with input set IPv6 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_FLOW_HASH_GTP_U_IPV6_EH, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu6u with input set IPv6 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_FLOW_HASH_GTP_U_IPV6_UP, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu6d with input set IPv6 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_FLOW_HASH_GTP_U_IPV6_DWN, ICE_RSS_OUTER_HEADERS, false},
};

/**
 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called after successful download package call
 * during initialization of PF. Since the downloaded package will erase the
 * RSS section, this function will configure RSS input sets for different
 * flow types. The last profile added has the highest priority, therefore 2
 * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles
 * (i.e. IPv4 src/dst TCP src/dst port).
 */
static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
{
	u16 vsi_num = vsi->vsi_num;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int status;
	u32 i;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi_num);
		return;
	}
	for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) {
		const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i];

		status = ice_add_rss_cfg(hw, vsi, cfg);
		if (status)
			dev_dbg(dev, "ice_add_rss_cfg failed, addl_hdrs = %x, hash_flds = %llx, hdr_type = %d, symm = %d\n",
				cfg->addl_hdrs, cfg->hash_flds,
				cfg->hdr_type, cfg->symm);
	}
}

/**
 * ice_pf_state_is_nominal - checks the PF for nominal state
 * @pf: pointer to PF to check
 *
 * Check the PF's state for a collection of bits that would indicate
 * the PF is in a state that would inhibit normal operation for
 * driver functionality.
 *
 * Returns true if PF is in a nominal state, false otherwise
 */
bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };

	if (!pf)
		return false;

	bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
	if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
		return false;

	return true;
}

#define ICE_FW_MODE_REC_M BIT(1)
/**
 * ice_is_recovery_mode - check if the device firmware is in recovery mode
 * @hw: pointer to HW struct
 */
bool ice_is_recovery_mode(struct ice_hw *hw)
{
	return rd32(hw, GL_MNG_FWSM) & ICE_FW_MODE_REC_M;
}

/**
 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 */
void ice_update_eth_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *prev_es, *cur_es;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_pf *pf = vsi->back;
	u16 vsi_num = vsi->vsi_num;	/* HW absolute index of a VSI */

	prev_es = &vsi->eth_stats_prev;
	cur_es = &vsi->eth_stats;

	if (ice_is_reset_in_progress(pf->state))
		vsi->stat_offsets_loaded = false;

	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_bytes, &cur_es->rx_bytes);

	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_unicast, &cur_es->rx_unicast);

	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_multicast, &cur_es->rx_multicast);

	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);

	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_discards, &cur_es->rx_discards);

	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_bytes, &cur_es->tx_bytes);

	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_unicast, &cur_es->tx_unicast);

	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_multicast, &cur_es->tx_multicast);

	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);

	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_errors, &cur_es->tx_errors);

	vsi->stat_offsets_loaded = true;
}

/**
 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
 * @hw: HW pointer
 * @pf_q: index of the Rx queue in the PF's queue space
 * @rxdid: flexible descriptor RXDID
 * @prio: priority for the RXDID for this queue
 * @ena_ts: true to enable timestamp and false to disable timestamp
 */
void ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
			     bool ena_ts)
{
	u32 regval = rd32(hw, QRXFLXP_CNTXT(pf_q));

	/* clear any previous values */
	regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
		    QRXFLXP_CNTXT_RXDID_PRIO_M |
		    QRXFLXP_CNTXT_TS_M);

	regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_IDX_M, rxdid);
	regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_PRIO_M, prio);

	if (ena_ts)
		/* Enable TimeSync on this queue */
		regval |= QRXFLXP_CNTXT_TS_M;

	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}

/**
 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
 * @intrl: interrupt rate limit in usecs
 * @gran: interrupt rate limit granularity in usecs
 *
 * This function converts a decimal interrupt rate limit in usecs to the format
 * expected by firmware.
 */
static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
	u32 val = intrl / gran;

	if (val)
		return val | GLINT_RATE_INTRL_ENA_M;
	return 0;
}

/**
 * ice_write_intrl - write throttle rate limit to interrupt specific register
 * @q_vector: pointer to interrupt specific structure
 * @intrl: throttle rate limit in microseconds to write
 */
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
{
	struct ice_hw *hw = &q_vector->vsi->back->hw;

	wr32(hw, GLINT_RATE(q_vector->reg_idx),
	     ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
}

static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
{
	switch (rc->type) {
	case ICE_RX_CONTAINER:
		if (rc->rx_ring)
			return rc->rx_ring->q_vector;
		break;
	case ICE_TX_CONTAINER:
		if (rc->tx_ring)
			return rc->tx_ring->q_vector;
		break;
	default:
		break;
	}

	return NULL;
}

/**
 * __ice_write_itr - write throttle rate to register
 * @q_vector: pointer to interrupt data structure
 * @rc: pointer to ring container
 * @itr: throttle rate in microseconds to write
 */
static void __ice_write_itr(struct ice_q_vector *q_vector,
			    struct ice_ring_container *rc, u16 itr)
{
	struct ice_hw *hw = &q_vector->vsi->back->hw;

	wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
	     ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S);
}

/**
 * ice_write_itr - write throttle rate to queue specific register
 * @rc: pointer to ring container
 * @itr: throttle rate in microseconds to write
 */
void ice_write_itr(struct ice_ring_container *rc, u16 itr)
{
	struct ice_q_vector *q_vector;

	q_vector = ice_pull_qvec_from_rc(rc);
	if (!q_vector)
		return;

	__ice_write_itr(q_vector, rc, itr);
}

/**
 * ice_set_q_vector_intrl - set up interrupt rate limiting
 * @q_vector: the vector to be configured
 *
 * Interrupt rate limiting is local to the vector, not per-queue, so we must
 * detect if either ring container has dynamic moderation enabled to decide
 * what to set the interrupt rate limit to via INTRL settings. In the case that
 * dynamic moderation is disabled on both, write the value with the cached
 * setting to make sure INTRL register matches the user visible value.
 */
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector)
{
	if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {
		/* in the case of dynamic enabled, cap each vector to no more
		 * than 250,000 ints/sec via a 4 usec rate limit, which allows
		 * low latency while staying well under 500,000 interrupts per
		 * second and reduces CPU load a bit at the lowest latency
		 * setting.
		 */
		ice_write_intrl(q_vector, 4);
	} else {
		ice_write_intrl(q_vector, q_vector->intrl);
	}
}
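
/* Rate-limit math for reference (granularity assumed to be 4 usec above
 * 25G): ice_intrl_usec_to_reg(4, 4) yields 1 | GLINT_RATE_INTRL_ENA_M,
 * i.e. at most one interrupt per 4 usec window, or 250,000 per second.
 */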
1916 */ 1917 void ice_vsi_cfg_msix(struct ice_vsi *vsi) 1918 { 1919 struct ice_pf *pf = vsi->back; 1920 struct ice_hw *hw = &pf->hw; 1921 u16 txq = 0, rxq = 0; 1922 int i, q; 1923 1924 ice_for_each_q_vector(vsi, i) { 1925 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 1926 u16 reg_idx = q_vector->reg_idx; 1927 1928 ice_cfg_itr(hw, q_vector); 1929 1930 /* Both Transmit Queue Interrupt Cause Control register 1931 * and Receive Queue Interrupt Cause control register 1932 * expects MSIX_INDX field to be the vector index 1933 * within the function space and not the absolute 1934 * vector index across PF or across device. 1935 * For SR-IOV VF VSIs queue vector index always starts 1936 * with 1 since first vector index(0) is used for OICR 1937 * in VF space. Since VMDq and other PF VSIs are within 1938 * the PF function space, use the vector index that is 1939 * tracked for this PF. 1940 */ 1941 for (q = 0; q < q_vector->num_ring_tx; q++) { 1942 ice_cfg_txq_interrupt(vsi, txq, reg_idx, 1943 q_vector->tx.itr_idx); 1944 txq++; 1945 } 1946 1947 for (q = 0; q < q_vector->num_ring_rx; q++) { 1948 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx, 1949 q_vector->rx.itr_idx); 1950 rxq++; 1951 } 1952 } 1953 } 1954 1955 /** 1956 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings 1957 * @vsi: the VSI whose rings are to be enabled 1958 * 1959 * Returns 0 on success and a negative value on error 1960 */ 1961 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi) 1962 { 1963 return ice_vsi_ctrl_all_rx_rings(vsi, true); 1964 } 1965 1966 /** 1967 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings 1968 * @vsi: the VSI whose rings are to be disabled 1969 * 1970 * Returns 0 on success and a negative value on error 1971 */ 1972 int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi) 1973 { 1974 return ice_vsi_ctrl_all_rx_rings(vsi, false); 1975 } 1976 1977 /** 1978 * ice_vsi_stop_tx_rings - Disable Tx rings 1979 * @vsi: the VSI being configured 1980 * @rst_src: reset source 1981 * @rel_vmvf_num: Relative ID of VF/VM 1982 * @rings: Tx ring array to be stopped 1983 * @count: number of Tx ring array elements 1984 */ 1985 static int 1986 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 1987 u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count) 1988 { 1989 u16 q_idx; 1990 1991 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) 1992 return -EINVAL; 1993 1994 for (q_idx = 0; q_idx < count; q_idx++) { 1995 struct ice_txq_meta txq_meta = { }; 1996 int status; 1997 1998 if (!rings || !rings[q_idx]) 1999 return -EINVAL; 2000 2001 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); 2002 status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num, 2003 rings[q_idx], &txq_meta); 2004 2005 if (status) 2006 return status; 2007 } 2008 2009 return 0; 2010 } 2011 2012 /** 2013 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings 2014 * @vsi: the VSI being configured 2015 * @rst_src: reset source 2016 * @rel_vmvf_num: Relative ID of VF/VM 2017 */ 2018 int 2019 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2020 u16 rel_vmvf_num) 2021 { 2022 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq); 2023 } 2024 2025 /** 2026 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings 2027 * @vsi: the VSI being configured 2028 */ 2029 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi) 2030 { 2031 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq); 2032 } 2033 2034 /** 2035 * ice_vsi_is_rx_queue_active 2036 * @vsi: the VSI 
being configured
2037 *
2038 * Return true if at least one queue is active.
2039 */
2040 bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
2041 {
2042 	struct ice_pf *pf = vsi->back;
2043 	struct ice_hw *hw = &pf->hw;
2044 	int i;
2045
2046 	ice_for_each_rxq(vsi, i) {
2047 		u32 rx_reg;
2048 		int pf_q;
2049
2050 		pf_q = vsi->rxq_map[i];
2051 		rx_reg = rd32(hw, QRX_CTRL(pf_q));
2052 		if (rx_reg & QRX_CTRL_QENA_STAT_M)
2053 			return true;
2054 	}
2055
2056 	return false;
2057 }
2058
2059 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
2060 {
2061 	if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
2062 		vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
2063 		vsi->tc_cfg.numtc = 1;
2064 		return;
2065 	}
2066
2067 	/* set VSI TC information based on DCB config */
2068 	ice_vsi_set_dcb_tc_cfg(vsi);
2069 }
2070
2071 /**
2072 * ice_vsi_cfg_sw_lldp - Configure switch rules for LLDP packet handling
2073 * @vsi: the VSI being configured
2074 * @tx: bool to determine Tx or Rx rule
2075 * @create: bool to determine whether to create or remove the rule
2076 *
2077 * Adding an ethtype Tx rule to the uplink VSI results in it being applied
2078 * to the whole port, so LLDP transmission for VFs will be blocked too.
2079 */
2080 void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
2081 {
2082 	int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
2083 			enum ice_sw_fwd_act_type act);
2084 	struct ice_pf *pf = vsi->back;
2085 	struct device *dev;
2086 	int status;
2087
2088 	dev = ice_pf_to_dev(pf);
2089 	eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
2090
2091 	if (tx) {
2092 		status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
2093 				  ICE_DROP_PACKET);
2094 	} else {
2095 		if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) {
2096 			status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
2097 					  ICE_FWD_TO_VSI);
2098 			if (!status || !create)
2099 				goto report;
2100
2101 			dev_info(dev,
2102 				 "Failed to add generic LLDP Rx filter on VSI %i error: %d, falling back to specialized AQ control\n",
2103 				 vsi->vsi_num, status);
2104 		}
2105
2106 		status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create);
2107 		if (!status)
2108 			set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags);
2109
2110 	}
2111
2112 report:
2113 	if (status)
2114 		dev_warn(dev, "Failed to %s %s LLDP rule on VSI %i error: %d\n",
2115 			 create ? "add" : "remove", tx ? "Tx" : "Rx",
2116 			 vsi->vsi_num, status);
2117 }
2118
2119 /**
2120 * ice_cfg_sw_rx_lldp - Enable/disable software handling of LLDP
2121 * @pf: the PF being configured
2122 * @enable: enable or disable
2123 *
2124 * Configure switch rules to enable/disable LLDP handling by software
2125 * across the PF.
2126 */
2127 void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable)
2128 {
2129 	struct ice_vsi *vsi;
2130 	struct ice_vf *vf;
2131 	unsigned int bkt;
2132
2133 	vsi = ice_get_main_vsi(pf);
2134 	ice_vsi_cfg_sw_lldp(vsi, false, enable);
2135
2136 	if (!test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
2137 		return;
2138
2139 	ice_for_each_vf(pf, bkt, vf) {
2140 		vsi = ice_get_vf_vsi(vf);
2141
2142 		if (WARN_ON(!vsi))
2143 			continue;
2144
2145 		if (ice_vf_is_lldp_ena(vf))
2146 			ice_vsi_cfg_sw_lldp(vsi, false, enable);
2147 	}
2148 }
2149
2150 /**
2151 * ice_set_agg_vsi - sets up a scheduler aggregator node and moves the VSI into it
2152 * @vsi: pointer to the VSI
2153 *
2154 * This function will allocate a new scheduler aggregator node if needed and
2155 * will move the specified VSI into it.
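 *
 * Worked example (assumed counts, for illustration only): with
 * ICE_MAX_VSIS_IN_AGG_NODE == 64, the first 64 VF VSIs land in the VF
 * aggregator node at ICE_VF_AGG_NODE_ID_START; the 65th no longer fits, so
 * the selection loop below claims the next unclaimed agg_id,
 * ICE_VF_AGG_NODE_ID_START + 1, and creates that node on first use.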
2156 */ 2157 static void ice_set_agg_vsi(struct ice_vsi *vsi) 2158 { 2159 struct device *dev = ice_pf_to_dev(vsi->back); 2160 struct ice_agg_node *agg_node_iter = NULL; 2161 u32 agg_id = ICE_INVALID_AGG_NODE_ID; 2162 struct ice_agg_node *agg_node = NULL; 2163 int node_offset, max_agg_nodes = 0; 2164 struct ice_port_info *port_info; 2165 struct ice_pf *pf = vsi->back; 2166 u32 agg_node_id_start = 0; 2167 int status; 2168 2169 /* create (as needed) scheduler aggregator node and move VSI into 2170 * corresponding aggregator node 2171 * - PF aggregator node to contains VSIs of type _PF and _CTRL 2172 * - VF aggregator nodes will contain VF VSI 2173 */ 2174 port_info = pf->hw.port_info; 2175 if (!port_info) 2176 return; 2177 2178 switch (vsi->type) { 2179 case ICE_VSI_CTRL: 2180 case ICE_VSI_CHNL: 2181 case ICE_VSI_LB: 2182 case ICE_VSI_PF: 2183 case ICE_VSI_SF: 2184 max_agg_nodes = ICE_MAX_PF_AGG_NODES; 2185 agg_node_id_start = ICE_PF_AGG_NODE_ID_START; 2186 agg_node_iter = &pf->pf_agg_node[0]; 2187 break; 2188 case ICE_VSI_VF: 2189 /* user can create 'n' VFs on a given PF, but since max children 2190 * per aggregator node can be only 64. Following code handles 2191 * aggregator(s) for VF VSIs, either selects a agg_node which 2192 * was already created provided num_vsis < 64, otherwise 2193 * select next available node, which will be created 2194 */ 2195 max_agg_nodes = ICE_MAX_VF_AGG_NODES; 2196 agg_node_id_start = ICE_VF_AGG_NODE_ID_START; 2197 agg_node_iter = &pf->vf_agg_node[0]; 2198 break; 2199 default: 2200 /* other VSI type, handle later if needed */ 2201 dev_dbg(dev, "unexpected VSI type %s\n", 2202 ice_vsi_type_str(vsi->type)); 2203 return; 2204 } 2205 2206 /* find the appropriate aggregator node */ 2207 for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) { 2208 /* see if we can find space in previously created 2209 * node if num_vsis < 64, otherwise skip 2210 */ 2211 if (agg_node_iter->num_vsis && 2212 agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { 2213 agg_node_iter++; 2214 continue; 2215 } 2216 2217 if (agg_node_iter->valid && 2218 agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) { 2219 agg_id = agg_node_iter->agg_id; 2220 agg_node = agg_node_iter; 2221 break; 2222 } 2223 2224 /* find unclaimed agg_id */ 2225 if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) { 2226 agg_id = node_offset + agg_node_id_start; 2227 agg_node = agg_node_iter; 2228 break; 2229 } 2230 /* move to next agg_node */ 2231 agg_node_iter++; 2232 } 2233 2234 if (!agg_node) 2235 return; 2236 2237 /* if selected aggregator node was not created, create it */ 2238 if (!agg_node->valid) { 2239 status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG, 2240 (u8)vsi->tc_cfg.ena_tc); 2241 if (status) { 2242 dev_err(dev, "unable to create aggregator node with agg_id %u\n", 2243 agg_id); 2244 return; 2245 } 2246 /* aggregator node is created, store the needed info */ 2247 agg_node->valid = true; 2248 agg_node->agg_id = agg_id; 2249 } 2250 2251 /* move VSI to corresponding aggregator node */ 2252 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx, 2253 (u8)vsi->tc_cfg.ena_tc); 2254 if (status) { 2255 dev_err(dev, "unable to move VSI idx %u into aggregator %u node", 2256 vsi->idx, agg_id); 2257 return; 2258 } 2259 2260 /* keep active children count for aggregator node */ 2261 agg_node->num_vsis++; 2262 2263 /* cache the 'agg_id' in VSI, so that after reset - VSI will be moved 2264 * to aggregator node 2265 */ 2266 vsi->agg_node = agg_node; 2267 dev_dbg(dev, "successfully moved VSI idx %u 
tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n", 2268 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id, 2269 vsi->agg_node->num_vsis); 2270 } 2271 2272 static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi) 2273 { 2274 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2275 struct device *dev = ice_pf_to_dev(pf); 2276 int ret, i; 2277 2278 /* configure VSI nodes based on number of queues and TC's */ 2279 ice_for_each_traffic_class(i) { 2280 if (!(vsi->tc_cfg.ena_tc & BIT(i))) 2281 continue; 2282 2283 if (vsi->type == ICE_VSI_CHNL) { 2284 if (!vsi->alloc_txq && vsi->num_txq) 2285 max_txqs[i] = vsi->num_txq; 2286 else 2287 max_txqs[i] = pf->num_lan_tx; 2288 } else { 2289 max_txqs[i] = vsi->alloc_txq; 2290 } 2291 2292 if (vsi->type == ICE_VSI_PF) 2293 max_txqs[i] += vsi->num_xdp_txq; 2294 } 2295 2296 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); 2297 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2298 max_txqs); 2299 if (ret) { 2300 dev_err(dev, "VSI %d failed lan queue config, error %d\n", 2301 vsi->vsi_num, ret); 2302 return ret; 2303 } 2304 2305 return 0; 2306 } 2307 2308 /** 2309 * ice_vsi_cfg_def - configure default VSI based on the type 2310 * @vsi: pointer to VSI 2311 */ 2312 static int ice_vsi_cfg_def(struct ice_vsi *vsi) 2313 { 2314 struct device *dev = ice_pf_to_dev(vsi->back); 2315 struct ice_pf *pf = vsi->back; 2316 int ret; 2317 2318 vsi->vsw = pf->first_sw; 2319 2320 ret = ice_vsi_alloc_def(vsi, vsi->ch); 2321 if (ret) 2322 return ret; 2323 2324 /* allocate memory for Tx/Rx ring stat pointers */ 2325 ret = ice_vsi_alloc_stat_arrays(vsi); 2326 if (ret) 2327 goto unroll_vsi_alloc; 2328 2329 ice_alloc_fd_res(vsi); 2330 2331 ret = ice_vsi_get_qs(vsi); 2332 if (ret) { 2333 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", 2334 vsi->idx); 2335 goto unroll_vsi_alloc_stat; 2336 } 2337 2338 /* set RSS capabilities */ 2339 ice_vsi_set_rss_params(vsi); 2340 2341 /* set TC configuration */ 2342 ice_vsi_set_tc_cfg(vsi); 2343 2344 /* create the VSI */ 2345 ret = ice_vsi_init(vsi, vsi->flags); 2346 if (ret) 2347 goto unroll_get_qs; 2348 2349 ice_vsi_init_vlan_ops(vsi); 2350 2351 switch (vsi->type) { 2352 case ICE_VSI_CTRL: 2353 case ICE_VSI_SF: 2354 case ICE_VSI_PF: 2355 ret = ice_vsi_alloc_q_vectors(vsi); 2356 if (ret) 2357 goto unroll_vsi_init; 2358 2359 ret = ice_vsi_alloc_rings(vsi); 2360 if (ret) 2361 goto unroll_vector_base; 2362 2363 ret = ice_vsi_alloc_ring_stats(vsi); 2364 if (ret) 2365 goto unroll_vector_base; 2366 2367 if (ice_is_xdp_ena_vsi(vsi)) { 2368 ret = ice_vsi_determine_xdp_res(vsi); 2369 if (ret) 2370 goto unroll_vector_base; 2371 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog, 2372 ICE_XDP_CFG_PART); 2373 if (ret) 2374 goto unroll_vector_base; 2375 } 2376 2377 ice_vsi_map_rings_to_vectors(vsi); 2378 2379 vsi->stat_offsets_loaded = false; 2380 2381 /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ 2382 if (vsi->type != ICE_VSI_CTRL) 2383 /* Do not exit if configuring RSS had an issue, at 2384 * least receive traffic on first queue. 
Hence no 2385 * need to capture return value 2386 */ 2387 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2388 ice_vsi_cfg_rss_lut_key(vsi); 2389 ice_vsi_set_rss_flow_fld(vsi); 2390 } 2391 ice_init_arfs(vsi); 2392 break; 2393 case ICE_VSI_CHNL: 2394 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2395 ice_vsi_cfg_rss_lut_key(vsi); 2396 ice_vsi_set_rss_flow_fld(vsi); 2397 } 2398 break; 2399 case ICE_VSI_VF: 2400 /* VF driver will take care of creating netdev for this type and 2401 * map queues to vectors through Virtchnl, PF driver only 2402 * creates a VSI and corresponding structures for bookkeeping 2403 * purpose 2404 */ 2405 ret = ice_vsi_alloc_q_vectors(vsi); 2406 if (ret) 2407 goto unroll_vsi_init; 2408 2409 ret = ice_vsi_alloc_rings(vsi); 2410 if (ret) 2411 goto unroll_alloc_q_vector; 2412 2413 ret = ice_vsi_alloc_ring_stats(vsi); 2414 if (ret) 2415 goto unroll_vector_base; 2416 2417 vsi->stat_offsets_loaded = false; 2418 2419 /* Do not exit if configuring RSS had an issue, at least 2420 * receive traffic on first queue. Hence no need to capture 2421 * return value 2422 */ 2423 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2424 ice_vsi_cfg_rss_lut_key(vsi); 2425 ice_vsi_set_vf_rss_flow_fld(vsi); 2426 } 2427 break; 2428 case ICE_VSI_LB: 2429 ret = ice_vsi_alloc_rings(vsi); 2430 if (ret) 2431 goto unroll_vsi_init; 2432 2433 ret = ice_vsi_alloc_ring_stats(vsi); 2434 if (ret) 2435 goto unroll_vector_base; 2436 2437 break; 2438 default: 2439 /* clean up the resources and exit */ 2440 ret = -EINVAL; 2441 goto unroll_vsi_init; 2442 } 2443 2444 return 0; 2445 2446 unroll_vector_base: 2447 /* reclaim SW interrupts back to the common pool */ 2448 unroll_alloc_q_vector: 2449 ice_vsi_free_q_vectors(vsi); 2450 unroll_vsi_init: 2451 ice_vsi_delete_from_hw(vsi); 2452 unroll_get_qs: 2453 ice_vsi_put_qs(vsi); 2454 unroll_vsi_alloc_stat: 2455 ice_vsi_free_stats(vsi); 2456 unroll_vsi_alloc: 2457 ice_vsi_free_arrays(vsi); 2458 return ret; 2459 } 2460 2461 /** 2462 * ice_vsi_cfg - configure a previously allocated VSI 2463 * @vsi: pointer to VSI 2464 */ 2465 int ice_vsi_cfg(struct ice_vsi *vsi) 2466 { 2467 struct ice_pf *pf = vsi->back; 2468 int ret; 2469 2470 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) 2471 return -EINVAL; 2472 2473 ret = ice_vsi_cfg_def(vsi); 2474 if (ret) 2475 return ret; 2476 2477 ret = ice_vsi_cfg_tc_lan(vsi->back, vsi); 2478 if (ret) 2479 ice_vsi_decfg(vsi); 2480 2481 if (vsi->type == ICE_VSI_CTRL) { 2482 if (vsi->vf) { 2483 WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI); 2484 vsi->vf->ctrl_vsi_idx = vsi->idx; 2485 } else { 2486 WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI); 2487 pf->ctrl_vsi_idx = vsi->idx; 2488 } 2489 } 2490 2491 return ret; 2492 } 2493 2494 /** 2495 * ice_vsi_decfg - remove all VSI configuration 2496 * @vsi: pointer to VSI 2497 */ 2498 void ice_vsi_decfg(struct ice_vsi *vsi) 2499 { 2500 struct ice_pf *pf = vsi->back; 2501 int err; 2502 2503 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); 2504 err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); 2505 if (err) 2506 dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", 2507 vsi->vsi_num, err); 2508 2509 if (vsi->xdp_rings) 2510 /* return value check can be skipped here, it always returns 2511 * 0 if reset is in progress 2512 */ 2513 ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART); 2514 2515 ice_vsi_clear_rings(vsi); 2516 ice_vsi_free_q_vectors(vsi); 2517 ice_vsi_put_qs(vsi); 2518 ice_vsi_free_arrays(vsi); 2519 2520 /* SR-IOV determines needed MSIX resources all at once instead of per 2521 * VSI 
since when VFs are spawned we know how many VFs there are and how 2522 * many interrupts each VF needs. SR-IOV MSIX resources are also 2523 * cleared in the same manner. 2524 */ 2525 2526 if (vsi->type == ICE_VSI_VF && 2527 vsi->agg_node && vsi->agg_node->valid) 2528 vsi->agg_node->num_vsis--; 2529 } 2530 2531 /** 2532 * ice_vsi_setup - Set up a VSI by a given type 2533 * @pf: board private structure 2534 * @params: parameters to use when creating the VSI 2535 * 2536 * This allocates the sw VSI structure and its queue resources. 2537 * 2538 * Returns pointer to the successfully allocated and configured VSI sw struct on 2539 * success, NULL on failure. 2540 */ 2541 struct ice_vsi * 2542 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) 2543 { 2544 struct device *dev = ice_pf_to_dev(pf); 2545 struct ice_vsi *vsi; 2546 int ret; 2547 2548 /* ice_vsi_setup can only initialize a new VSI, and we must have 2549 * a port_info structure for it. 2550 */ 2551 if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) || 2552 WARN_ON(!params->port_info)) 2553 return NULL; 2554 2555 vsi = ice_vsi_alloc(pf); 2556 if (!vsi) { 2557 dev_err(dev, "could not allocate VSI\n"); 2558 return NULL; 2559 } 2560 2561 vsi->params = *params; 2562 ret = ice_vsi_cfg(vsi); 2563 if (ret) 2564 goto err_vsi_cfg; 2565 2566 /* Add switch rule to drop all Tx Flow Control Frames, of look up 2567 * type ETHERTYPE from VSIs, and restrict malicious VF from sending 2568 * out PAUSE or PFC frames. If enabled, FW can still send FC frames. 2569 * The rule is added once for PF VSI in order to create appropriate 2570 * recipe, since VSI/VSI list is ignored with drop action... 2571 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to 2572 * be dropped so that VFs cannot send LLDP packets to reconfig DCB 2573 * settings in the HW. 
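 *
 * For readability, a sketch of exactly what the branch below adds when it
 * runs (the same two calls, shown together):
 *
 *	ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, ICE_DROP_PACKET);
 *	ice_vsi_cfg_sw_lldp(vsi, true, true);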
2574 */ 2575 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { 2576 ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, 2577 ICE_DROP_PACKET); 2578 ice_vsi_cfg_sw_lldp(vsi, true, true); 2579 } 2580 2581 if (!vsi->agg_node) 2582 ice_set_agg_vsi(vsi); 2583 2584 return vsi; 2585 2586 err_vsi_cfg: 2587 ice_vsi_free(vsi); 2588 2589 return NULL; 2590 } 2591 2592 /** 2593 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW 2594 * @vsi: the VSI being cleaned up 2595 */ 2596 static void ice_vsi_release_msix(struct ice_vsi *vsi) 2597 { 2598 struct ice_pf *pf = vsi->back; 2599 struct ice_hw *hw = &pf->hw; 2600 u32 txq = 0; 2601 u32 rxq = 0; 2602 int i, q; 2603 2604 ice_for_each_q_vector(vsi, i) { 2605 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2606 2607 ice_write_intrl(q_vector, 0); 2608 for (q = 0; q < q_vector->num_ring_tx; q++) { 2609 ice_write_itr(&q_vector->tx, 0); 2610 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); 2611 if (vsi->xdp_rings) { 2612 u32 xdp_txq = txq + vsi->num_xdp_txq; 2613 2614 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); 2615 } 2616 txq++; 2617 } 2618 2619 for (q = 0; q < q_vector->num_ring_rx; q++) { 2620 ice_write_itr(&q_vector->rx, 0); 2621 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); 2622 rxq++; 2623 } 2624 } 2625 2626 ice_flush(hw); 2627 } 2628 2629 /** 2630 * ice_vsi_free_irq - Free the IRQ association with the OS 2631 * @vsi: the VSI being configured 2632 */ 2633 void ice_vsi_free_irq(struct ice_vsi *vsi) 2634 { 2635 struct ice_pf *pf = vsi->back; 2636 int i; 2637 2638 if (!vsi->q_vectors || !vsi->irqs_ready) 2639 return; 2640 2641 ice_vsi_release_msix(vsi); 2642 if (vsi->type == ICE_VSI_VF) 2643 return; 2644 2645 vsi->irqs_ready = false; 2646 2647 ice_for_each_q_vector(vsi, i) { 2648 int irq_num; 2649 2650 irq_num = vsi->q_vectors[i]->irq.virq; 2651 2652 /* free only the irqs that were actually requested */ 2653 if (!vsi->q_vectors[i] || 2654 !(vsi->q_vectors[i]->num_ring_tx || 2655 vsi->q_vectors[i]->num_ring_rx)) 2656 continue; 2657 2658 synchronize_irq(irq_num); 2659 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); 2660 } 2661 } 2662 2663 /** 2664 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues 2665 * @vsi: the VSI having resources freed 2666 */ 2667 void ice_vsi_free_tx_rings(struct ice_vsi *vsi) 2668 { 2669 int i; 2670 2671 if (!vsi->tx_rings) 2672 return; 2673 2674 ice_for_each_txq(vsi, i) 2675 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 2676 ice_free_tx_ring(vsi->tx_rings[i]); 2677 } 2678 2679 /** 2680 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues 2681 * @vsi: the VSI having resources freed 2682 */ 2683 void ice_vsi_free_rx_rings(struct ice_vsi *vsi) 2684 { 2685 int i; 2686 2687 if (!vsi->rx_rings) 2688 return; 2689 2690 ice_for_each_rxq(vsi, i) 2691 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) 2692 ice_free_rx_ring(vsi->rx_rings[i]); 2693 } 2694 2695 /** 2696 * ice_vsi_close - Shut down a VSI 2697 * @vsi: the VSI being shut down 2698 */ 2699 void ice_vsi_close(struct ice_vsi *vsi) 2700 { 2701 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) 2702 ice_down(vsi); 2703 2704 ice_vsi_clear_napi_queues(vsi); 2705 ice_vsi_free_irq(vsi); 2706 ice_vsi_free_tx_rings(vsi); 2707 ice_vsi_free_rx_rings(vsi); 2708 } 2709 2710 /** 2711 * ice_ena_vsi - resume a VSI 2712 * @vsi: the VSI being resume 2713 * @locked: is the rtnl_lock already held 2714 */ 2715 int ice_ena_vsi(struct ice_vsi *vsi, bool locked) 2716 { 2717 int err = 0; 2718 2719 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state)) 2720 return 0; 2721 
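	/* Reaching this point means ICE_VSI_NEEDS_RESTART was set, i.e. the
	 * VSI was paused earlier by ice_dis_vsi(). A typical (hypothetical)
	 * caller pairs the two around a reconfiguration:
	 *
	 *	ice_dis_vsi(vsi, false);
	 *	(reconfigure the VSI)
	 *	err = ice_ena_vsi(vsi, false);
	 */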
2722 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 2723 2724 if (vsi->netdev && (vsi->type == ICE_VSI_PF || 2725 vsi->type == ICE_VSI_SF)) { 2726 if (netif_running(vsi->netdev)) { 2727 if (!locked) 2728 rtnl_lock(); 2729 2730 err = ice_open_internal(vsi->netdev); 2731 2732 if (!locked) 2733 rtnl_unlock(); 2734 } 2735 } else if (vsi->type == ICE_VSI_CTRL) { 2736 err = ice_vsi_open_ctrl(vsi); 2737 } 2738 2739 return err; 2740 } 2741 2742 /** 2743 * ice_dis_vsi - pause a VSI 2744 * @vsi: the VSI being paused 2745 * @locked: is the rtnl_lock already held 2746 */ 2747 void ice_dis_vsi(struct ice_vsi *vsi, bool locked) 2748 { 2749 bool already_down = test_bit(ICE_VSI_DOWN, vsi->state); 2750 2751 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 2752 2753 if (vsi->netdev && (vsi->type == ICE_VSI_PF || 2754 vsi->type == ICE_VSI_SF)) { 2755 if (netif_running(vsi->netdev)) { 2756 if (!locked) 2757 rtnl_lock(); 2758 already_down = test_bit(ICE_VSI_DOWN, vsi->state); 2759 if (!already_down) 2760 ice_vsi_close(vsi); 2761 2762 if (!locked) 2763 rtnl_unlock(); 2764 } else if (!already_down) { 2765 ice_vsi_close(vsi); 2766 } 2767 } else if (vsi->type == ICE_VSI_CTRL && !already_down) { 2768 ice_vsi_close(vsi); 2769 } 2770 } 2771 2772 /** 2773 * ice_vsi_set_napi_queues - associate netdev queues with napi 2774 * @vsi: VSI pointer 2775 * 2776 * Associate queue[s] with napi for all vectors. 2777 */ 2778 void ice_vsi_set_napi_queues(struct ice_vsi *vsi) 2779 { 2780 struct net_device *netdev = vsi->netdev; 2781 int q_idx, v_idx; 2782 2783 if (!netdev) 2784 return; 2785 2786 ASSERT_RTNL(); 2787 ice_for_each_rxq(vsi, q_idx) 2788 if (vsi->rx_rings[q_idx] && vsi->rx_rings[q_idx]->q_vector) 2789 netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, 2790 &vsi->rx_rings[q_idx]->q_vector->napi); 2791 2792 ice_for_each_txq(vsi, q_idx) 2793 if (vsi->tx_rings[q_idx] && vsi->tx_rings[q_idx]->q_vector) 2794 netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, 2795 &vsi->tx_rings[q_idx]->q_vector->napi); 2796 /* Also set the interrupt number for the NAPI */ 2797 ice_for_each_q_vector(vsi, v_idx) { 2798 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2799 2800 netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); 2801 } 2802 } 2803 2804 /** 2805 * ice_vsi_clear_napi_queues - dissociate netdev queues from napi 2806 * @vsi: VSI pointer 2807 * 2808 * Clear the association between all VSI queues queue[s] and napi. 2809 */ 2810 void ice_vsi_clear_napi_queues(struct ice_vsi *vsi) 2811 { 2812 struct net_device *netdev = vsi->netdev; 2813 int q_idx, v_idx; 2814 2815 if (!netdev) 2816 return; 2817 2818 ASSERT_RTNL(); 2819 /* Clear the NAPI's interrupt number */ 2820 ice_for_each_q_vector(vsi, v_idx) { 2821 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2822 2823 netif_napi_set_irq(&q_vector->napi, -1); 2824 } 2825 2826 ice_for_each_txq(vsi, q_idx) 2827 netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL); 2828 2829 ice_for_each_rxq(vsi, q_idx) 2830 netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, NULL); 2831 } 2832 2833 /** 2834 * ice_napi_add - register NAPI handler for the VSI 2835 * @vsi: VSI for which NAPI handler is to be registered 2836 * 2837 * This function is only called in the driver's load path. Registering the NAPI 2838 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, 2839 * reset/rebuild, etc.) 
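 *
 * Load-path ordering, as a hedged sketch (names from this file; the exact
 * call site is an assumption): the netdev and q_vectors must already exist,
 * and queue-to-NAPI association happens separately, under RTNL:
 *
 *	ice_napi_add(vsi);
 *	ice_vsi_set_napi_queues(vsi);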
2840 */ 2841 void ice_napi_add(struct ice_vsi *vsi) 2842 { 2843 int v_idx; 2844 2845 if (!vsi->netdev) 2846 return; 2847 2848 ice_for_each_q_vector(vsi, v_idx) 2849 netif_napi_add_config(vsi->netdev, 2850 &vsi->q_vectors[v_idx]->napi, 2851 ice_napi_poll, 2852 v_idx); 2853 } 2854 2855 /** 2856 * ice_vsi_release - Delete a VSI and free its resources 2857 * @vsi: the VSI being removed 2858 * 2859 * Returns 0 on success or < 0 on error 2860 */ 2861 int ice_vsi_release(struct ice_vsi *vsi) 2862 { 2863 struct ice_pf *pf; 2864 2865 if (!vsi->back) 2866 return -ENODEV; 2867 pf = vsi->back; 2868 2869 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2870 ice_rss_clean(vsi); 2871 2872 ice_vsi_close(vsi); 2873 2874 /* The Rx rule will only exist to remove if the LLDP FW 2875 * engine is currently stopped 2876 */ 2877 if (!ice_is_safe_mode(pf) && 2878 !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) && 2879 (vsi->type == ICE_VSI_PF || (vsi->type == ICE_VSI_VF && 2880 ice_vf_is_lldp_ena(vsi->vf)))) 2881 ice_vsi_cfg_sw_lldp(vsi, false, false); 2882 2883 ice_vsi_decfg(vsi); 2884 2885 /* retain SW VSI data structure since it is needed to unregister and 2886 * free VSI netdev when PF is not in reset recovery pending state,\ 2887 * for ex: during rmmod. 2888 */ 2889 if (!ice_is_reset_in_progress(pf->state)) 2890 ice_vsi_delete(vsi); 2891 2892 return 0; 2893 } 2894 2895 /** 2896 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors 2897 * @vsi: VSI connected with q_vectors 2898 * @coalesce: array of struct with stored coalesce 2899 * 2900 * Returns array size. 2901 */ 2902 static int 2903 ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi, 2904 struct ice_coalesce_stored *coalesce) 2905 { 2906 int i; 2907 2908 ice_for_each_q_vector(vsi, i) { 2909 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2910 2911 coalesce[i].itr_tx = q_vector->tx.itr_settings; 2912 coalesce[i].itr_rx = q_vector->rx.itr_settings; 2913 coalesce[i].intrl = q_vector->intrl; 2914 2915 if (i < vsi->num_txq) 2916 coalesce[i].tx_valid = true; 2917 if (i < vsi->num_rxq) 2918 coalesce[i].rx_valid = true; 2919 } 2920 2921 return vsi->num_q_vectors; 2922 } 2923 2924 /** 2925 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays 2926 * @vsi: VSI connected with q_vectors 2927 * @coalesce: pointer to array of struct with stored coalesce 2928 * @size: size of coalesce array 2929 * 2930 * Before this function, ice_vsi_rebuild_get_coalesce should be called to save 2931 * ITR params in arrays. If size is 0 or coalesce wasn't stored set coalesce 2932 * to default value. 2933 */ 2934 static void 2935 ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, 2936 struct ice_coalesce_stored *coalesce, int size) 2937 { 2938 struct ice_ring_container *rc; 2939 int i; 2940 2941 if ((size && !coalesce) || !vsi) 2942 return; 2943 2944 /* There are a couple of cases that have to be handled here: 2945 * 1. The case where the number of queue vectors stays the same, but 2946 * the number of Tx or Rx rings changes (the first for loop) 2947 * 2. 
The case where the number of queue vectors increased (the 2948 * second for loop) 2949 */ 2950 for (i = 0; i < size && i < vsi->num_q_vectors; i++) { 2951 /* There are 2 cases to handle here and they are the same for 2952 * both Tx and Rx: 2953 * if the entry was valid previously (coalesce[i].[tr]x_valid 2954 * and the loop variable is less than the number of rings 2955 * allocated, then write the previous values 2956 * 2957 * if the entry was not valid previously, but the number of 2958 * rings is less than are allocated (this means the number of 2959 * rings increased from previously), then write out the 2960 * values in the first element 2961 * 2962 * Also, always write the ITR, even if in ITR_IS_DYNAMIC 2963 * as there is no harm because the dynamic algorithm 2964 * will just overwrite. 2965 */ 2966 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) { 2967 rc = &vsi->q_vectors[i]->rx; 2968 rc->itr_settings = coalesce[i].itr_rx; 2969 ice_write_itr(rc, rc->itr_setting); 2970 } else if (i < vsi->alloc_rxq) { 2971 rc = &vsi->q_vectors[i]->rx; 2972 rc->itr_settings = coalesce[0].itr_rx; 2973 ice_write_itr(rc, rc->itr_setting); 2974 } 2975 2976 if (i < vsi->alloc_txq && coalesce[i].tx_valid) { 2977 rc = &vsi->q_vectors[i]->tx; 2978 rc->itr_settings = coalesce[i].itr_tx; 2979 ice_write_itr(rc, rc->itr_setting); 2980 } else if (i < vsi->alloc_txq) { 2981 rc = &vsi->q_vectors[i]->tx; 2982 rc->itr_settings = coalesce[0].itr_tx; 2983 ice_write_itr(rc, rc->itr_setting); 2984 } 2985 2986 vsi->q_vectors[i]->intrl = coalesce[i].intrl; 2987 ice_set_q_vector_intrl(vsi->q_vectors[i]); 2988 } 2989 2990 /* the number of queue vectors increased so write whatever is in 2991 * the first element 2992 */ 2993 for (; i < vsi->num_q_vectors; i++) { 2994 /* transmit */ 2995 rc = &vsi->q_vectors[i]->tx; 2996 rc->itr_settings = coalesce[0].itr_tx; 2997 ice_write_itr(rc, rc->itr_setting); 2998 2999 /* receive */ 3000 rc = &vsi->q_vectors[i]->rx; 3001 rc->itr_settings = coalesce[0].itr_rx; 3002 ice_write_itr(rc, rc->itr_setting); 3003 3004 vsi->q_vectors[i]->intrl = coalesce[0].intrl; 3005 ice_set_q_vector_intrl(vsi->q_vectors[i]); 3006 } 3007 } 3008 3009 /** 3010 * ice_vsi_realloc_stat_arrays - Frees unused stat structures or alloc new ones 3011 * @vsi: VSI pointer 3012 */ 3013 static int 3014 ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi) 3015 { 3016 u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq; 3017 u16 req_rxq = vsi->req_rxq ? 
vsi->req_rxq : vsi->alloc_rxq; 3018 struct ice_ring_stats **tx_ring_stats; 3019 struct ice_ring_stats **rx_ring_stats; 3020 struct ice_vsi_stats *vsi_stat; 3021 struct ice_pf *pf = vsi->back; 3022 u16 prev_txq = vsi->alloc_txq; 3023 u16 prev_rxq = vsi->alloc_rxq; 3024 int i; 3025 3026 vsi_stat = pf->vsi_stats[vsi->idx]; 3027 3028 if (req_txq < prev_txq) { 3029 for (i = req_txq; i < prev_txq; i++) { 3030 if (vsi_stat->tx_ring_stats[i]) { 3031 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); 3032 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); 3033 } 3034 } 3035 } 3036 3037 tx_ring_stats = vsi_stat->tx_ring_stats; 3038 vsi_stat->tx_ring_stats = 3039 krealloc_array(vsi_stat->tx_ring_stats, req_txq, 3040 sizeof(*vsi_stat->tx_ring_stats), 3041 GFP_KERNEL | __GFP_ZERO); 3042 if (!vsi_stat->tx_ring_stats) { 3043 vsi_stat->tx_ring_stats = tx_ring_stats; 3044 return -ENOMEM; 3045 } 3046 3047 if (req_rxq < prev_rxq) { 3048 for (i = req_rxq; i < prev_rxq; i++) { 3049 if (vsi_stat->rx_ring_stats[i]) { 3050 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); 3051 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); 3052 } 3053 } 3054 } 3055 3056 rx_ring_stats = vsi_stat->rx_ring_stats; 3057 vsi_stat->rx_ring_stats = 3058 krealloc_array(vsi_stat->rx_ring_stats, req_rxq, 3059 sizeof(*vsi_stat->rx_ring_stats), 3060 GFP_KERNEL | __GFP_ZERO); 3061 if (!vsi_stat->rx_ring_stats) { 3062 vsi_stat->rx_ring_stats = rx_ring_stats; 3063 return -ENOMEM; 3064 } 3065 3066 return 0; 3067 } 3068 3069 /** 3070 * ice_vsi_rebuild - Rebuild VSI after reset 3071 * @vsi: VSI to be rebuild 3072 * @vsi_flags: flags used for VSI rebuild flow 3073 * 3074 * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or 3075 * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware. 3076 * 3077 * Returns 0 on success and negative value on failure 3078 */ 3079 int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) 3080 { 3081 struct ice_coalesce_stored *coalesce; 3082 int prev_num_q_vectors; 3083 struct ice_pf *pf; 3084 int ret; 3085 3086 if (!vsi) 3087 return -EINVAL; 3088 3089 vsi->flags = vsi_flags; 3090 pf = vsi->back; 3091 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) 3092 return -EINVAL; 3093 3094 mutex_lock(&vsi->xdp_state_lock); 3095 3096 ret = ice_vsi_realloc_stat_arrays(vsi); 3097 if (ret) 3098 goto unlock; 3099 3100 ice_vsi_decfg(vsi); 3101 ret = ice_vsi_cfg_def(vsi); 3102 if (ret) 3103 goto unlock; 3104 3105 coalesce = kzalloc_objs(struct ice_coalesce_stored, vsi->num_q_vectors); 3106 if (!coalesce) { 3107 ret = -ENOMEM; 3108 goto decfg; 3109 } 3110 3111 prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); 3112 3113 ret = ice_vsi_cfg_tc_lan(pf, vsi); 3114 if (ret) { 3115 if (vsi_flags & ICE_VSI_FLAG_INIT) { 3116 ret = -EIO; 3117 goto free_coalesce; 3118 } 3119 3120 ret = ice_schedule_reset(pf, ICE_RESET_PFR); 3121 goto free_coalesce; 3122 } 3123 3124 ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors); 3125 clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state); 3126 3127 free_coalesce: 3128 kfree(coalesce); 3129 decfg: 3130 if (ret) 3131 ice_vsi_decfg(vsi); 3132 unlock: 3133 mutex_unlock(&vsi->xdp_state_lock); 3134 return ret; 3135 } 3136 3137 /** 3138 * ice_is_reset_in_progress - check for a reset in progress 3139 * @state: PF state field 3140 */ 3141 bool ice_is_reset_in_progress(unsigned long *state) 3142 { 3143 return test_bit(ICE_RESET_OICR_RECV, state) || 3144 test_bit(ICE_PFR_REQ, state) || 3145 test_bit(ICE_CORER_REQ, state) || 3146 test_bit(ICE_GLOBR_REQ, state); 3147 } 3148 3149 /** 3150 * 
ice_wait_for_reset - Wait for driver to finish reset and rebuild 3151 * @pf: pointer to the PF structure 3152 * @timeout: length of time to wait, in jiffies 3153 * 3154 * Wait (sleep) for a short time until the driver finishes cleaning up from 3155 * a device reset. The caller must be able to sleep. Use this to delay 3156 * operations that could fail while the driver is cleaning up after a device 3157 * reset. 3158 * 3159 * Returns 0 on success, -EBUSY if the reset is not finished within the 3160 * timeout, and -ERESTARTSYS if the thread was interrupted. 3161 */ 3162 int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout) 3163 { 3164 long ret; 3165 3166 ret = wait_event_interruptible_timeout(pf->reset_wait_queue, 3167 !ice_is_reset_in_progress(pf->state), 3168 timeout); 3169 if (ret < 0) 3170 return ret; 3171 else if (!ret) 3172 return -EBUSY; 3173 else 3174 return 0; 3175 } 3176 3177 /** 3178 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map 3179 * @vsi: VSI being configured 3180 * @ctx: the context buffer returned from AQ VSI update command 3181 */ 3182 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) 3183 { 3184 vsi->info.mapping_flags = ctx->info.mapping_flags; 3185 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, 3186 sizeof(vsi->info.q_mapping)); 3187 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, 3188 sizeof(vsi->info.tc_mapping)); 3189 } 3190 3191 /** 3192 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration 3193 * @vsi: the VSI being configured 3194 * @ena_tc: TC map to be enabled 3195 */ 3196 void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) 3197 { 3198 struct net_device *netdev = vsi->netdev; 3199 struct ice_pf *pf = vsi->back; 3200 int numtc = vsi->tc_cfg.numtc; 3201 struct ice_dcbx_cfg *dcbcfg; 3202 u8 netdev_tc; 3203 int i; 3204 3205 if (!netdev) 3206 return; 3207 3208 /* CHNL VSI doesn't have its own netdev, hence, no netdev_tc */ 3209 if (vsi->type == ICE_VSI_CHNL) 3210 return; 3211 3212 if (!ena_tc) { 3213 netdev_reset_tc(netdev); 3214 return; 3215 } 3216 3217 if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf)) 3218 numtc = vsi->all_numtc; 3219 3220 if (netdev_set_num_tc(netdev, numtc)) 3221 return; 3222 3223 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; 3224 3225 ice_for_each_traffic_class(i) 3226 if (vsi->tc_cfg.ena_tc & BIT(i)) 3227 netdev_set_tc_queue(netdev, 3228 vsi->tc_cfg.tc_info[i].netdev_tc, 3229 vsi->tc_cfg.tc_info[i].qcount_tx, 3230 vsi->tc_cfg.tc_info[i].qoffset); 3231 /* setup TC queue map for CHNL TCs */ 3232 ice_for_each_chnl_tc(i) { 3233 if (!(vsi->all_enatc & BIT(i))) 3234 break; 3235 if (!vsi->mqprio_qopt.qopt.count[i]) 3236 break; 3237 netdev_set_tc_queue(netdev, i, 3238 vsi->mqprio_qopt.qopt.count[i], 3239 vsi->mqprio_qopt.qopt.offset[i]); 3240 } 3241 3242 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3243 return; 3244 3245 for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { 3246 u8 ets_tc = dcbcfg->etscfg.prio_table[i]; 3247 3248 /* Get the mapped netdev TC# for the UP */ 3249 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; 3250 netdev_set_prio_tc_map(netdev, i, netdev_tc); 3251 } 3252 } 3253 3254 /** 3255 * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config 3256 * @vsi: the VSI being configured, 3257 * @ctxt: VSI context structure 3258 * @ena_tc: number of traffic classes to enable 3259 * 3260 * Prepares VSI tc_config to have queue configurations based on MQPRIO options. 
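 *
 * Worked example (assumed mqprio values, for illustration only): with
 * qopt.offset[0] == 0 and qopt.count[0] == 4, order_base_2(4) == 2, so the
 * TC0 qmap below encodes "offset 0, 2^2 = 4 contiguous queues"; a second TC
 * at offset 4 with count 4 then starts exactly where TC0 ends.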
3261 */ 3262 static int 3263 ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, 3264 u8 ena_tc) 3265 { 3266 u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap; 3267 u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0]; 3268 int tc0_qcount = vsi->mqprio_qopt.qopt.count[0]; 3269 u16 new_txq, new_rxq; 3270 u8 netdev_tc = 0; 3271 int i; 3272 3273 vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1; 3274 3275 pow = order_base_2(tc0_qcount); 3276 qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, tc0_offset); 3277 qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow); 3278 3279 ice_for_each_traffic_class(i) { 3280 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { 3281 /* TC is not enabled */ 3282 vsi->tc_cfg.tc_info[i].qoffset = 0; 3283 vsi->tc_cfg.tc_info[i].qcount_rx = 1; 3284 vsi->tc_cfg.tc_info[i].qcount_tx = 1; 3285 vsi->tc_cfg.tc_info[i].netdev_tc = 0; 3286 ctxt->info.tc_mapping[i] = 0; 3287 continue; 3288 } 3289 3290 offset = vsi->mqprio_qopt.qopt.offset[i]; 3291 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; 3292 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; 3293 vsi->tc_cfg.tc_info[i].qoffset = offset; 3294 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; 3295 vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx; 3296 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; 3297 } 3298 3299 if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) { 3300 ice_for_each_chnl_tc(i) { 3301 if (!(vsi->all_enatc & BIT(i))) 3302 continue; 3303 offset = vsi->mqprio_qopt.qopt.offset[i]; 3304 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; 3305 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; 3306 } 3307 } 3308 3309 new_txq = offset + qcount_tx; 3310 if (new_txq > vsi->alloc_txq) { 3311 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", 3312 new_txq, vsi->alloc_txq); 3313 return -EINVAL; 3314 } 3315 3316 new_rxq = offset + qcount_rx; 3317 if (new_rxq > vsi->alloc_rxq) { 3318 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", 3319 new_rxq, vsi->alloc_rxq); 3320 return -EINVAL; 3321 } 3322 3323 /* Set actual Tx/Rx queue pairs */ 3324 vsi->num_txq = new_txq; 3325 vsi->num_rxq = new_rxq; 3326 3327 /* Setup queue TC[0].qmap for given VSI context */ 3328 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); 3329 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); 3330 ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount); 3331 3332 /* Find queue count available for channel VSIs and starting offset 3333 * for channel VSIs 3334 */ 3335 if (tc0_qcount && tc0_qcount < vsi->num_rxq) { 3336 vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount; 3337 vsi->next_base_q = tc0_qcount; 3338 } 3339 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq); 3340 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq); 3341 dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n", 3342 vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc); 3343 3344 return 0; 3345 } 3346 3347 /** 3348 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map 3349 * @vsi: VSI to be configured 3350 * @ena_tc: TC bitmap 3351 * 3352 * VSI queues expected to be quiesced before calling this function 3353 */ 3354 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) 3355 { 3356 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 3357 struct ice_pf *pf = vsi->back; 3358 struct ice_tc_cfg old_tc_cfg; 3359 struct ice_vsi_ctx *ctx; 3360 struct device *dev; 3361 int i, ret = 0; 3362 u8 num_tc = 0; 3363 3364 dev = ice_pf_to_dev(pf); 3365 if (vsi->tc_cfg.ena_tc == 
ena_tc && 3366 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) 3367 return 0; 3368 3369 ice_for_each_traffic_class(i) { 3370 /* build bitmap of enabled TCs */ 3371 if (ena_tc & BIT(i)) 3372 num_tc++; 3373 /* populate max_txqs per TC */ 3374 max_txqs[i] = vsi->alloc_txq; 3375 /* Update max_txqs if it is CHNL VSI, because alloc_t[r]xq are 3376 * zero for CHNL VSI, hence use num_txq instead as max_txqs 3377 */ 3378 if (vsi->type == ICE_VSI_CHNL && 3379 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3380 max_txqs[i] = vsi->num_txq; 3381 } 3382 3383 memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg)); 3384 vsi->tc_cfg.ena_tc = ena_tc; 3385 vsi->tc_cfg.numtc = num_tc; 3386 3387 ctx = kzalloc_obj(*ctx); 3388 if (!ctx) 3389 return -ENOMEM; 3390 3391 ctx->vf_num = 0; 3392 ctx->info = vsi->info; 3393 3394 if (vsi->type == ICE_VSI_PF && 3395 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3396 ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); 3397 else 3398 ret = ice_vsi_setup_q_map(vsi, ctx); 3399 3400 if (ret) { 3401 memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg)); 3402 goto out; 3403 } 3404 3405 /* must to indicate which section of VSI context are being modified */ 3406 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); 3407 ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); 3408 if (ret) { 3409 dev_info(dev, "Failed VSI Update\n"); 3410 goto out; 3411 } 3412 3413 if (vsi->type == ICE_VSI_PF && 3414 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3415 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); 3416 else 3417 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 3418 vsi->tc_cfg.ena_tc, max_txqs); 3419 3420 if (ret) { 3421 dev_err(dev, "VSI %d failed TC config, error %d\n", 3422 vsi->vsi_num, ret); 3423 goto out; 3424 } 3425 ice_vsi_update_q_map(vsi, ctx); 3426 vsi->info.valid_sections = 0; 3427 3428 ice_vsi_cfg_netdev_tc(vsi, ena_tc); 3429 out: 3430 kfree(ctx); 3431 return ret; 3432 } 3433 3434 /** 3435 * ice_update_tx_ring_stats - Update Tx ring specific counters 3436 * @tx_ring: ring to update 3437 * @pkts: number of processed packets 3438 * @bytes: number of processed bytes 3439 */ 3440 void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes) 3441 { 3442 u64_stats_update_begin(&tx_ring->ring_stats->syncp); 3443 u64_stats_add(&tx_ring->ring_stats->pkts, pkts); 3444 u64_stats_add(&tx_ring->ring_stats->bytes, bytes); 3445 u64_stats_update_end(&tx_ring->ring_stats->syncp); 3446 } 3447 3448 /** 3449 * ice_update_rx_ring_stats - Update Rx ring specific counters 3450 * @rx_ring: ring to update 3451 * @pkts: number of processed packets 3452 * @bytes: number of processed bytes 3453 */ 3454 void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes) 3455 { 3456 u64_stats_update_begin(&rx_ring->ring_stats->syncp); 3457 u64_stats_add(&rx_ring->ring_stats->pkts, pkts); 3458 u64_stats_add(&rx_ring->ring_stats->bytes, bytes); 3459 u64_stats_update_end(&rx_ring->ring_stats->syncp); 3460 } 3461 3462 /** 3463 * ice_fetch_tx_ring_stats - Fetch Tx ring packet and byte counters 3464 * @ring: ring to update 3465 * @pkts: number of processed packets 3466 * @bytes: number of processed bytes 3467 */ 3468 void ice_fetch_tx_ring_stats(const struct ice_tx_ring *ring, 3469 u64 *pkts, u64 *bytes) 3470 { 3471 unsigned int start; 3472 3473 do { 3474 start = u64_stats_fetch_begin(&ring->ring_stats->syncp); 3475 *pkts = u64_stats_read(&ring->ring_stats->pkts); 3476 *bytes = u64_stats_read(&ring->ring_stats->bytes); 3477 } while 
(u64_stats_fetch_retry(&ring->ring_stats->syncp, start));
3478 }
3479
3480 /**
3481 * ice_fetch_rx_ring_stats - Fetch Rx ring packet and byte counters
3482 * @ring: ring to read
3483 * @pkts: number of processed packets
3484 * @bytes: number of processed bytes
3485 */
3486 void ice_fetch_rx_ring_stats(const struct ice_rx_ring *ring,
3487 			     u64 *pkts, u64 *bytes)
3488 {
3489 	unsigned int start;
3490
3491 	do {
3492 		start = u64_stats_fetch_begin(&ring->ring_stats->syncp);
3493 		*pkts = u64_stats_read(&ring->ring_stats->pkts);
3494 		*bytes = u64_stats_read(&ring->ring_stats->bytes);
3495 	} while (u64_stats_fetch_retry(&ring->ring_stats->syncp, start));
3496 }
3497
3498 /**
3499 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
3500 * @pi: port info of the switch with default VSI
3501 *
3502 * Return true if there is a single VSI in the default forwarding VSI list
3503 */
3504 bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
3505 {
3506 	bool exists = false;
3507
3508 	ice_check_if_dflt_vsi(pi, 0, &exists);
3509 	return exists;
3510 }
3511
3512 /**
3513 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
3514 * @vsi: VSI to compare against default forwarding VSI
3515 *
3516 * If the VSI passed in is the default forwarding VSI then return true, else
3517 * return false
3518 */
3519 bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
3520 {
3521 	return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
3522 }
3523
3524 /**
3525 * ice_set_dflt_vsi - set the default forwarding VSI
3526 * @vsi: VSI getting set as the default forwarding VSI on the switch
3527 *
3528 * If the VSI passed in is already the default VSI and it's enabled, just
3529 * return success.
3530 *
3531 * Otherwise try to set the VSI passed in as the switch's default VSI and
3532 * return the result.
3533 */
3534 int ice_set_dflt_vsi(struct ice_vsi *vsi)
3535 {
3536 	struct device *dev;
3537 	int status;
3538
3539 	if (!vsi)
3540 		return -EINVAL;
3541
3542 	dev = ice_pf_to_dev(vsi->back);
3543
3544 	if (ice_lag_is_switchdev_running(vsi->back)) {
3545 		dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n",
3546 			vsi->vsi_num);
3547 		return 0;
3548 	}
3549
3550 	/* the VSI passed in is already the default VSI */
3551 	if (ice_is_vsi_dflt_vsi(vsi)) {
3552 		dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
3553 			vsi->vsi_num);
3554 		return 0;
3555 	}
3556
3557 	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
3558 	if (status) {
3559 		dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
3560 			vsi->vsi_num, status);
3561 		return status;
3562 	}
3563
3564 	return 0;
3565 }
3566
3567 /**
3568 * ice_clear_dflt_vsi - clear the default forwarding VSI
3569 * @vsi: VSI to remove from filter list
3570 *
3571 * If the switch has no default VSI or it's not enabled then return error.
3572 *
3573 * Otherwise try to clear the default VSI and return the result.
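 *
 * Hypothetical usage, mirroring ice_set_dflt_vsi() above:
 *
 *	if (ice_is_vsi_dflt_vsi(vsi))
 *		err = ice_clear_dflt_vsi(vsi);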
3574 */ 3575 int ice_clear_dflt_vsi(struct ice_vsi *vsi) 3576 { 3577 struct device *dev; 3578 int status; 3579 3580 if (!vsi) 3581 return -EINVAL; 3582 3583 dev = ice_pf_to_dev(vsi->back); 3584 3585 /* there is no default VSI configured */ 3586 if (!ice_is_dflt_vsi_in_use(vsi->port_info)) 3587 return -ENODEV; 3588 3589 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false, 3590 ICE_FLTR_RX); 3591 if (status) { 3592 dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n", 3593 vsi->vsi_num, status); 3594 return -EIO; 3595 } 3596 3597 return 0; 3598 } 3599 3600 /** 3601 * ice_get_link_speed_mbps - get link speed in Mbps 3602 * @vsi: the VSI whose link speed is being queried 3603 * 3604 * Return current VSI link speed and 0 if the speed is unknown. 3605 */ 3606 int ice_get_link_speed_mbps(struct ice_vsi *vsi) 3607 { 3608 unsigned int link_speed; 3609 3610 link_speed = vsi->port_info->phy.link_info.link_speed; 3611 3612 return (int)ice_get_link_speed(fls(link_speed) - 1); 3613 } 3614 3615 /** 3616 * ice_get_link_speed_kbps - get link speed in Kbps 3617 * @vsi: the VSI whose link speed is being queried 3618 * 3619 * Return current VSI link speed and 0 if the speed is unknown. 3620 */ 3621 int ice_get_link_speed_kbps(struct ice_vsi *vsi) 3622 { 3623 int speed_mbps; 3624 3625 speed_mbps = ice_get_link_speed_mbps(vsi); 3626 3627 return speed_mbps * 1000; 3628 } 3629 3630 /** 3631 * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate 3632 * @vsi: VSI to be configured 3633 * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit 3634 * 3635 * If the min_tx_rate is specified as 0 that means to clear the minimum BW limit 3636 * profile, otherwise a non-zero value will force a minimum BW limit for the VSI 3637 * on TC 0. 
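 *
 * For illustration (the rate values are arbitrary assumptions):
 *
 *	err = ice_set_min_bw_limit(vsi, 100000);	(100 Mbps floor)
 *	err = ice_set_min_bw_limit(vsi, 0);	(clear the floor)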
3638 */ 3639 int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) 3640 { 3641 struct ice_pf *pf = vsi->back; 3642 struct device *dev; 3643 int status; 3644 int speed; 3645 3646 dev = ice_pf_to_dev(pf); 3647 if (!vsi->port_info) { 3648 dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n", 3649 vsi->idx, vsi->type); 3650 return -EINVAL; 3651 } 3652 3653 speed = ice_get_link_speed_kbps(vsi); 3654 if (min_tx_rate > (u64)speed) { 3655 dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n", 3656 min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, 3657 speed); 3658 return -EINVAL; 3659 } 3660 3661 /* Configure min BW for VSI limit */ 3662 if (min_tx_rate) { 3663 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, 3664 ICE_MIN_BW, min_tx_rate); 3665 if (status) { 3666 dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n", 3667 min_tx_rate, ice_vsi_type_str(vsi->type), 3668 vsi->idx); 3669 return status; 3670 } 3671 3672 dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n", 3673 min_tx_rate, ice_vsi_type_str(vsi->type)); 3674 } else { 3675 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, 3676 vsi->idx, 0, 3677 ICE_MIN_BW); 3678 if (status) { 3679 dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n", 3680 ice_vsi_type_str(vsi->type), vsi->idx); 3681 return status; 3682 } 3683 3684 dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n", 3685 ice_vsi_type_str(vsi->type), vsi->idx); 3686 } 3687 3688 return 0; 3689 } 3690 3691 /** 3692 * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate 3693 * @vsi: VSI to be configured 3694 * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit 3695 * 3696 * If the max_tx_rate is specified as 0 that means to clear the maximum BW limit 3697 * profile, otherwise a non-zero value will force a maximum BW limit for the VSI 3698 * on TC 0. 
3699 */ 3700 int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) 3701 { 3702 struct ice_pf *pf = vsi->back; 3703 struct device *dev; 3704 int status; 3705 int speed; 3706 3707 dev = ice_pf_to_dev(pf); 3708 if (!vsi->port_info) { 3709 dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n", 3710 vsi->idx, vsi->type); 3711 return -EINVAL; 3712 } 3713 3714 speed = ice_get_link_speed_kbps(vsi); 3715 if (max_tx_rate > (u64)speed) { 3716 dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n", 3717 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, 3718 speed); 3719 return -EINVAL; 3720 } 3721 3722 /* Configure max BW for VSI limit */ 3723 if (max_tx_rate) { 3724 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, 3725 ICE_MAX_BW, max_tx_rate); 3726 if (status) { 3727 dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n", 3728 max_tx_rate, ice_vsi_type_str(vsi->type), 3729 vsi->idx); 3730 return status; 3731 } 3732 3733 dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n", 3734 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); 3735 } else { 3736 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, 3737 vsi->idx, 0, 3738 ICE_MAX_BW); 3739 if (status) { 3740 dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n", 3741 ice_vsi_type_str(vsi->type), vsi->idx); 3742 return status; 3743 } 3744 3745 dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n", 3746 ice_vsi_type_str(vsi->type), vsi->idx); 3747 } 3748 3749 return 0; 3750 } 3751 3752 /** 3753 * ice_set_link - turn on/off physical link 3754 * @vsi: VSI to modify physical link on 3755 * @ena: turn on/off physical link 3756 */ 3757 int ice_set_link(struct ice_vsi *vsi, bool ena) 3758 { 3759 struct device *dev = ice_pf_to_dev(vsi->back); 3760 struct ice_port_info *pi = vsi->port_info; 3761 struct ice_hw *hw = pi->hw; 3762 int status; 3763 3764 if (vsi->type != ICE_VSI_PF) 3765 return -EINVAL; 3766 3767 status = ice_aq_set_link_restart_an(pi, ena, NULL); 3768 3769 /* if link is owned by manageability, FW will return LIBIE_AQ_RC_EMODE. 3770 * this is not a fatal error, so print a warning message and return 3771 * a success code. Return an error if FW returns an error code other 3772 * than LIBIE_AQ_RC_EMODE 3773 */ 3774 if (status == -EIO) { 3775 if (hw->adminq.sq_last_status == LIBIE_AQ_RC_EMODE) 3776 dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n", 3777 (ena ? "ON" : "OFF"), status, 3778 libie_aq_str(hw->adminq.sq_last_status)); 3779 } else if (status) { 3780 dev_err(dev, "can't set link to %s, err %d aq_err %s\n", 3781 (ena ? "ON" : "OFF"), status, 3782 libie_aq_str(hw->adminq.sq_last_status)); 3783 return status; 3784 } 3785 3786 return 0; 3787 } 3788 3789 /** 3790 * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI 3791 * @vsi: VSI used to add VLAN filters 3792 * 3793 * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are based 3794 * on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x888a8) doesn't 3795 * matter. In Double VLAN Mode (DVM), outer/single VLAN filters via 3796 * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID. 3797 * 3798 * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic 3799 * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged 3800 * traffic in SVM, since the VLAN TPID isn't part of filtering. 
3801 * 3802 * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be 3803 * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is 3804 * part of filtering. 3805 */ 3806 int ice_vsi_add_vlan_zero(struct ice_vsi *vsi) 3807 { 3808 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 3809 struct ice_vlan vlan; 3810 int err; 3811 3812 vlan = ICE_VLAN(0, 0, 0); 3813 err = vlan_ops->add_vlan(vsi, &vlan); 3814 if (err && err != -EEXIST) 3815 return err; 3816 3817 /* in SVM both VLAN 0 filters are identical */ 3818 if (!ice_is_dvm_ena(&vsi->back->hw)) 3819 return 0; 3820 3821 vlan = ICE_VLAN(ETH_P_8021Q, 0, 0); 3822 err = vlan_ops->add_vlan(vsi, &vlan); 3823 if (err && err != -EEXIST) 3824 return err; 3825 3826 return 0; 3827 } 3828 3829 /** 3830 * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI 3831 * @vsi: VSI used to add VLAN filters 3832 * 3833 * Delete the VLAN 0 filters in the same manner that they were added in 3834 * ice_vsi_add_vlan_zero. 3835 */ 3836 int ice_vsi_del_vlan_zero(struct ice_vsi *vsi) 3837 { 3838 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 3839 struct ice_pf *pf = vsi->back; 3840 struct ice_vlan vlan; 3841 int err; 3842 3843 if (pf->lag && pf->lag->primary) { 3844 dev_dbg(ice_pf_to_dev(pf), "Interface is primary in aggregate - not deleting prune list\n"); 3845 } else { 3846 vlan = ICE_VLAN(0, 0, 0); 3847 err = vlan_ops->del_vlan(vsi, &vlan); 3848 if (err && err != -EEXIST) 3849 return err; 3850 } 3851 3852 /* in SVM both VLAN 0 filters are identical */ 3853 if (!ice_is_dvm_ena(&vsi->back->hw)) 3854 return 0; 3855 3856 if (pf->lag && pf->lag->primary) { 3857 dev_dbg(ice_pf_to_dev(pf), "Interface is primary in aggregate - not deleting QinQ prune list\n"); 3858 } else { 3859 vlan = ICE_VLAN(ETH_P_8021Q, 0, 0); 3860 err = vlan_ops->del_vlan(vsi, &vlan); 3861 if (err && err != -EEXIST) 3862 return err; 3863 } 3864 3865 /* when deleting the last VLAN filter, make sure to disable the VLAN 3866 * promisc mode so the filter isn't left by accident 3867 */ 3868 return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, 3869 ICE_MCAST_VLAN_PROMISC_BITS, 0); 3870 } 3871 3872 /** 3873 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode 3874 * @vsi: VSI used to get the VLAN mode 3875 * 3876 * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled 3877 * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details. 
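 *
 * Example (assumed state): in DVM with three user VLANs installed,
 * vsi->num_vlan == 5 (3 + the 2 VLAN 0 filters), so
 * ice_vsi_num_non_zero_vlans() below returns 5 - 2 = 3.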
/**
 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
 * @vsi: VSI used to get the VLAN mode
 *
 * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
 * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details.
 */
static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
{
#define ICE_DVM_NUM_ZERO_VLAN_FLTRS	2
#define ICE_SVM_NUM_ZERO_VLAN_FLTRS	1
	/* no VLAN 0 filter is created when a port VLAN is active */
	if (vsi->type == ICE_VSI_VF) {
		if (WARN_ON(!vsi->vf))
			return 0;

		if (ice_vf_is_port_vlan_ena(vsi->vf))
			return 0;
	}

	if (ice_is_dvm_ena(&vsi->back->hw))
		return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
	else
		return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
}

/**
 * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
 * @vsi: VSI used to determine if any non-zero VLANs have been added
 */
bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
{
	return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
}

/**
 * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
 * @vsi: VSI used to get the number of non-zero VLANs added
 */
u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
{
	return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
}

/**
 * ice_is_feature_supported - check if a feature is supported
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to be checked
 *
 * Returns true if the feature is supported, false otherwise.
 */
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return false;

	return test_bit(f, pf->features);
}

/**
 * ice_set_feature_support - mark a feature as supported
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to set
 */
void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return;

	set_bit(f, pf->features);
}

/**
 * ice_clear_feature_support - mark a feature as unsupported
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to clear
 */
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return;

	clear_bit(f, pf->features);
}

/**
 * ice_init_feature_support - set up the features supported by the device
 * @pf: pointer to the struct ice_pf instance
 *
 * Called during init to set up the supported features.
 */
void ice_init_feature_support(struct ice_pf *pf)
{
	switch (pf->hw.device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		ice_set_feature_support(pf, ICE_F_DSCP);
		if (ice_is_phy_rclk_in_netlist(&pf->hw))
			ice_set_feature_support(pf, ICE_F_PHY_RCLK);
		/* If we don't own the timer - don't enable other caps */
		if (!ice_pf_src_tmr_owned(pf))
			break;
		if (ice_is_cgu_in_netlist(&pf->hw))
			ice_set_feature_support(pf, ICE_F_CGU);
		if (ice_is_clock_mux_in_netlist(&pf->hw))
			ice_set_feature_support(pf, ICE_F_SMA_CTRL);
		if (ice_gnss_is_module_present(&pf->hw))
			ice_set_feature_support(pf, ICE_F_GNSS);
		break;
	default:
		break;
	}

	if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825)
		ice_set_feature_support(pf, ICE_F_PHY_RCLK);

	if (pf->hw.mac_type == ICE_MAC_E830) {
		ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
		ice_set_feature_support(pf, ICE_F_GCS);
		ice_set_feature_support(pf, ICE_F_TXTIME);
	}
}
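/* Illustrative sketch (not part of the driver): the feature bits set up in
 * ice_init_feature_support() are meant to gate optional code paths at
 * runtime, e.g. a hypothetical helper that only logs when the GNSS module
 * was detected:
 *
 *	static void example_log_gnss(struct ice_pf *pf)
 *	{
 *		if (ice_is_feature_supported(pf, ICE_F_GNSS))
 *			dev_info(ice_pf_to_dev(pf), "GNSS module present\n");
 *	}
 */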
/**
 * ice_vsi_update_security - update security block in VSI
 * @vsi: pointer to VSI structure
 * @fill: function pointer to fill ctx
 */
int
ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
{
	struct ice_vsi_ctx ctx = { 0 };

	ctx.info = vsi->info;
	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	fill(&ctx);

	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
		return -ENODEV;

	vsi->info = ctx.info;
	return 0;
}

/**
 * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
			       (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}

/**
 * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
			       ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}

/**
 * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
 * @vsi: pointer to VSI structure
 * @set: set or unset the bit
 */
int
ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
{
	struct ice_vsi_ctx ctx = {
		.info = vsi->info,
	};

	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
	if (set)
		ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
	else
		ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;

	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
		return -ENODEV;

	vsi->info = ctx.info;
	return 0;
}

/**
 * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
 * @vsi: VSI used to update l2tsel on
 * @l2tsel: l2tsel setting requested
 *
 * Use the l2tsel setting to update all of the Rx queue context bits for
 * l2tsel. This will modify which descriptor field the first offloaded VLAN
 * will be stripped into.
 */
void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
{
	struct ice_hw *hw = &vsi->back->hw;
	u32 l2tsel_bit;
	int i;

	if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
		l2tsel_bit = 0;
	else
		l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);

	for (i = 0; i < vsi->alloc_rxq; i++) {
		u16 pfq = vsi->rxq_map[i];
		u32 qrx_context_offset;
		u32 regval;

		qrx_context_offset =
			QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);

		regval = rd32(hw, qrx_context_offset);
		regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
		regval |= l2tsel_bit;
		wr32(hw, qrx_context_offset, regval);
	}
}
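/* Illustrative sketch (not part of the driver): ice_vsi_update_security()
 * above takes a fill callback so callers can flip individual security flags
 * in the VSI context, e.g. a hypothetical helper toggling MAC anti-spoof:
 *
 *	static int example_set_antispoof(struct ice_vsi *vsi, bool ena)
 *	{
 *		return ice_vsi_update_security(vsi, ena ?
 *					       ice_vsi_ctx_set_antispoof :
 *					       ice_vsi_ctx_clear_antispoof);
 *	}
 */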