1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Intel Corporation. */ 3 4 #include "ice.h" 5 #include "ice_base.h" 6 #include "ice_flow.h" 7 #include "ice_lib.h" 8 #include "ice_fltr.h" 9 #include "ice_dcb_lib.h" 10 #include "ice_devlink.h" 11 #include "ice_vsi_vlan_ops.h" 12 13 /** 14 * ice_vsi_type_str - maps VSI type enum to string equivalents 15 * @vsi_type: VSI type enum 16 */ 17 const char *ice_vsi_type_str(enum ice_vsi_type vsi_type) 18 { 19 switch (vsi_type) { 20 case ICE_VSI_PF: 21 return "ICE_VSI_PF"; 22 case ICE_VSI_VF: 23 return "ICE_VSI_VF"; 24 case ICE_VSI_CTRL: 25 return "ICE_VSI_CTRL"; 26 case ICE_VSI_CHNL: 27 return "ICE_VSI_CHNL"; 28 case ICE_VSI_LB: 29 return "ICE_VSI_LB"; 30 case ICE_VSI_SWITCHDEV_CTRL: 31 return "ICE_VSI_SWITCHDEV_CTRL"; 32 default: 33 return "unknown"; 34 } 35 } 36 37 /** 38 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings 39 * @vsi: the VSI being configured 40 * @ena: start or stop the Rx rings 41 * 42 * First enable/disable all of the Rx rings, flush any remaining writes, and 43 * then verify that they have all been enabled/disabled successfully. This will 44 * let all of the register writes complete when enabling/disabling the Rx rings 45 * before waiting for the change in hardware to complete. 46 */ 47 static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena) 48 { 49 int ret = 0; 50 u16 i; 51 52 ice_for_each_rxq(vsi, i) 53 ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false); 54 55 ice_flush(&vsi->back->hw); 56 57 ice_for_each_rxq(vsi, i) { 58 ret = ice_vsi_wait_one_rx_ring(vsi, ena, i); 59 if (ret) 60 break; 61 } 62 63 return ret; 64 } 65 66 /** 67 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI 68 * @vsi: VSI pointer 69 * 70 * On error: returns error code (negative) 71 * On success: returns 0 72 */ 73 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) 74 { 75 struct ice_pf *pf = vsi->back; 76 struct device *dev; 77 78 dev = ice_pf_to_dev(pf); 79 if (vsi->type == ICE_VSI_CHNL) 80 return 0; 81 82 /* allocate memory for both Tx and Rx ring pointers */ 83 vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq, 84 sizeof(*vsi->tx_rings), GFP_KERNEL); 85 if (!vsi->tx_rings) 86 return -ENOMEM; 87 88 vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq, 89 sizeof(*vsi->rx_rings), GFP_KERNEL); 90 if (!vsi->rx_rings) 91 goto err_rings; 92 93 /* txq_map needs to have enough space to track both Tx (stack) rings 94 * and XDP rings; at this point vsi->num_xdp_txq might not be set, 95 * so use num_possible_cpus() as we want to always provide XDP ring 96 * per CPU, regardless of queue count settings from user that might 97 * have come from ethtool's set_channels() callback; 98 */ 99 vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()), 100 sizeof(*vsi->txq_map), GFP_KERNEL); 101 102 if (!vsi->txq_map) 103 goto err_txq_map; 104 105 vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq, 106 sizeof(*vsi->rxq_map), GFP_KERNEL); 107 if (!vsi->rxq_map) 108 goto err_rxq_map; 109 110 /* There is no need to allocate q_vectors for a loopback VSI. */ 111 if (vsi->type == ICE_VSI_LB) 112 return 0; 113 114 /* allocate memory for q_vector pointers */ 115 vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors, 116 sizeof(*vsi->q_vectors), GFP_KERNEL); 117 if (!vsi->q_vectors) 118 goto err_vectors; 119 120 vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL); 121 if (!vsi->af_xdp_zc_qps) 122 goto err_zc_qps; 123 124 return 0; 125 126 err_zc_qps: 127 devm_kfree(dev, vsi->q_vectors); 128 err_vectors: 129 devm_kfree(dev, vsi->rxq_map); 130 err_rxq_map: 131 devm_kfree(dev, vsi->txq_map); 132 err_txq_map: 133 devm_kfree(dev, vsi->rx_rings); 134 err_rings: 135 devm_kfree(dev, vsi->tx_rings); 136 return -ENOMEM; 137 } 138 139 /** 140 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI 141 * @vsi: the VSI being configured 142 */ 143 static void ice_vsi_set_num_desc(struct ice_vsi *vsi) 144 { 145 switch (vsi->type) { 146 case ICE_VSI_PF: 147 case ICE_VSI_SWITCHDEV_CTRL: 148 case ICE_VSI_CTRL: 149 case ICE_VSI_LB: 150 /* a user could change the values of num_[tr]x_desc using 151 * ethtool -G so we should keep those values instead of 152 * overwriting them with the defaults. 153 */ 154 if (!vsi->num_rx_desc) 155 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; 156 if (!vsi->num_tx_desc) 157 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; 158 break; 159 default: 160 dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n", 161 vsi->type); 162 break; 163 } 164 } 165 166 /** 167 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI 168 * @vsi: the VSI being configured 169 * 170 * Return 0 on success and a negative value on error 171 */ 172 static void ice_vsi_set_num_qs(struct ice_vsi *vsi) 173 { 174 enum ice_vsi_type vsi_type = vsi->type; 175 struct ice_pf *pf = vsi->back; 176 struct ice_vf *vf = vsi->vf; 177 178 if (WARN_ON(vsi_type == ICE_VSI_VF && !vf)) 179 return; 180 181 switch (vsi_type) { 182 case ICE_VSI_PF: 183 if (vsi->req_txq) { 184 vsi->alloc_txq = vsi->req_txq; 185 vsi->num_txq = vsi->req_txq; 186 } else { 187 vsi->alloc_txq = min3(pf->num_lan_msix, 188 ice_get_avail_txq_count(pf), 189 (u16)num_online_cpus()); 190 } 191 192 pf->num_lan_tx = vsi->alloc_txq; 193 194 /* only 1 Rx queue unless RSS is enabled */ 195 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 196 vsi->alloc_rxq = 1; 197 } else { 198 if (vsi->req_rxq) { 199 vsi->alloc_rxq = vsi->req_rxq; 200 vsi->num_rxq = vsi->req_rxq; 201 } else { 202 vsi->alloc_rxq = min3(pf->num_lan_msix, 203 ice_get_avail_rxq_count(pf), 204 (u16)num_online_cpus()); 205 } 206 } 207 208 pf->num_lan_rx = vsi->alloc_rxq; 209 210 vsi->num_q_vectors = min_t(int, pf->num_lan_msix, 211 max_t(int, vsi->alloc_rxq, 212 vsi->alloc_txq)); 213 break; 214 case ICE_VSI_SWITCHDEV_CTRL: 215 /* The number of queues for ctrl VSI is equal to number of VFs. 216 * Each ring is associated to the corresponding VF_PR netdev. 217 */ 218 vsi->alloc_txq = ice_get_num_vfs(pf); 219 vsi->alloc_rxq = vsi->alloc_txq; 220 vsi->num_q_vectors = 1; 221 break; 222 case ICE_VSI_VF: 223 if (vf->num_req_qs) 224 vf->num_vf_qs = vf->num_req_qs; 225 vsi->alloc_txq = vf->num_vf_qs; 226 vsi->alloc_rxq = vf->num_vf_qs; 227 /* pf->vfs.num_msix_per includes (VF miscellaneous vector + 228 * data queue interrupts). Since vsi->num_q_vectors is number 229 * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the 230 * original vector count 231 */ 232 vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF; 233 break; 234 case ICE_VSI_CTRL: 235 vsi->alloc_txq = 1; 236 vsi->alloc_rxq = 1; 237 vsi->num_q_vectors = 1; 238 break; 239 case ICE_VSI_CHNL: 240 vsi->alloc_txq = 0; 241 vsi->alloc_rxq = 0; 242 break; 243 case ICE_VSI_LB: 244 vsi->alloc_txq = 1; 245 vsi->alloc_rxq = 1; 246 break; 247 default: 248 dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type); 249 break; 250 } 251 252 ice_vsi_set_num_desc(vsi); 253 } 254 255 /** 256 * ice_get_free_slot - get the next non-NULL location index in array 257 * @array: array to search 258 * @size: size of the array 259 * @curr: last known occupied index to be used as a search hint 260 * 261 * void * is being used to keep the functionality generic. This lets us use this 262 * function on any array of pointers. 263 */ 264 static int ice_get_free_slot(void *array, int size, int curr) 265 { 266 int **tmp_array = (int **)array; 267 int next; 268 269 if (curr < (size - 1) && !tmp_array[curr + 1]) { 270 next = curr + 1; 271 } else { 272 int i = 0; 273 274 while ((i < size) && (tmp_array[i])) 275 i++; 276 if (i == size) 277 next = ICE_NO_VSI; 278 else 279 next = i; 280 } 281 return next; 282 } 283 284 /** 285 * ice_vsi_delete_from_hw - delete a VSI from the switch 286 * @vsi: pointer to VSI being removed 287 */ 288 static void ice_vsi_delete_from_hw(struct ice_vsi *vsi) 289 { 290 struct ice_pf *pf = vsi->back; 291 struct ice_vsi_ctx *ctxt; 292 int status; 293 294 ice_fltr_remove_all(vsi); 295 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 296 if (!ctxt) 297 return; 298 299 if (vsi->type == ICE_VSI_VF) 300 ctxt->vf_num = vsi->vf->vf_id; 301 ctxt->vsi_num = vsi->vsi_num; 302 303 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info)); 304 305 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); 306 if (status) 307 dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n", 308 vsi->vsi_num, status); 309 310 kfree(ctxt); 311 } 312 313 /** 314 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI 315 * @vsi: pointer to VSI being cleared 316 */ 317 static void ice_vsi_free_arrays(struct ice_vsi *vsi) 318 { 319 struct ice_pf *pf = vsi->back; 320 struct device *dev; 321 322 dev = ice_pf_to_dev(pf); 323 324 bitmap_free(vsi->af_xdp_zc_qps); 325 vsi->af_xdp_zc_qps = NULL; 326 /* free the ring and vector containers */ 327 devm_kfree(dev, vsi->q_vectors); 328 vsi->q_vectors = NULL; 329 devm_kfree(dev, vsi->tx_rings); 330 vsi->tx_rings = NULL; 331 devm_kfree(dev, vsi->rx_rings); 332 vsi->rx_rings = NULL; 333 devm_kfree(dev, vsi->txq_map); 334 vsi->txq_map = NULL; 335 devm_kfree(dev, vsi->rxq_map); 336 vsi->rxq_map = NULL; 337 } 338 339 /** 340 * ice_vsi_free_stats - Free the ring statistics structures 341 * @vsi: VSI pointer 342 */ 343 static void ice_vsi_free_stats(struct ice_vsi *vsi) 344 { 345 struct ice_vsi_stats *vsi_stat; 346 struct ice_pf *pf = vsi->back; 347 int i; 348 349 if (vsi->type == ICE_VSI_CHNL) 350 return; 351 if (!pf->vsi_stats) 352 return; 353 354 vsi_stat = pf->vsi_stats[vsi->idx]; 355 if (!vsi_stat) 356 return; 357 358 ice_for_each_alloc_txq(vsi, i) { 359 if (vsi_stat->tx_ring_stats[i]) { 360 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); 361 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); 362 } 363 } 364 365 ice_for_each_alloc_rxq(vsi, i) { 366 if (vsi_stat->rx_ring_stats[i]) { 367 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); 368 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); 369 } 370 } 371 372 kfree(vsi_stat->tx_ring_stats); 373 kfree(vsi_stat->rx_ring_stats); 374 kfree(vsi_stat); 375 pf->vsi_stats[vsi->idx] = NULL; 376 } 377 378 /** 379 * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI 380 * @vsi: VSI which is having stats allocated 381 */ 382 static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi) 383 { 384 struct ice_ring_stats **tx_ring_stats; 385 struct ice_ring_stats **rx_ring_stats; 386 struct ice_vsi_stats *vsi_stats; 387 struct ice_pf *pf = vsi->back; 388 u16 i; 389 390 vsi_stats = pf->vsi_stats[vsi->idx]; 391 tx_ring_stats = vsi_stats->tx_ring_stats; 392 rx_ring_stats = vsi_stats->rx_ring_stats; 393 394 /* Allocate Tx ring stats */ 395 ice_for_each_alloc_txq(vsi, i) { 396 struct ice_ring_stats *ring_stats; 397 struct ice_tx_ring *ring; 398 399 ring = vsi->tx_rings[i]; 400 ring_stats = tx_ring_stats[i]; 401 402 if (!ring_stats) { 403 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); 404 if (!ring_stats) 405 goto err_out; 406 407 WRITE_ONCE(tx_ring_stats[i], ring_stats); 408 } 409 410 ring->ring_stats = ring_stats; 411 } 412 413 /* Allocate Rx ring stats */ 414 ice_for_each_alloc_rxq(vsi, i) { 415 struct ice_ring_stats *ring_stats; 416 struct ice_rx_ring *ring; 417 418 ring = vsi->rx_rings[i]; 419 ring_stats = rx_ring_stats[i]; 420 421 if (!ring_stats) { 422 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); 423 if (!ring_stats) 424 goto err_out; 425 426 WRITE_ONCE(rx_ring_stats[i], ring_stats); 427 } 428 429 ring->ring_stats = ring_stats; 430 } 431 432 return 0; 433 434 err_out: 435 ice_vsi_free_stats(vsi); 436 return -ENOMEM; 437 } 438 439 /** 440 * ice_vsi_free - clean up and deallocate the provided VSI 441 * @vsi: pointer to VSI being cleared 442 * 443 * This deallocates the VSI's queue resources, removes it from the PF's 444 * VSI array if necessary, and deallocates the VSI 445 */ 446 static void ice_vsi_free(struct ice_vsi *vsi) 447 { 448 struct ice_pf *pf = NULL; 449 struct device *dev; 450 451 if (!vsi || !vsi->back) 452 return; 453 454 pf = vsi->back; 455 dev = ice_pf_to_dev(pf); 456 457 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { 458 dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx); 459 return; 460 } 461 462 mutex_lock(&pf->sw_mutex); 463 /* updates the PF for this cleared VSI */ 464 465 pf->vsi[vsi->idx] = NULL; 466 pf->next_vsi = vsi->idx; 467 468 ice_vsi_free_stats(vsi); 469 ice_vsi_free_arrays(vsi); 470 mutex_unlock(&pf->sw_mutex); 471 devm_kfree(dev, vsi); 472 } 473 474 void ice_vsi_delete(struct ice_vsi *vsi) 475 { 476 ice_vsi_delete_from_hw(vsi); 477 ice_vsi_free(vsi); 478 } 479 480 /** 481 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI 482 * @irq: interrupt number 483 * @data: pointer to a q_vector 484 */ 485 static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data) 486 { 487 struct ice_q_vector *q_vector = (struct ice_q_vector *)data; 488 489 if (!q_vector->tx.tx_ring) 490 return IRQ_HANDLED; 491 492 #define FDIR_RX_DESC_CLEAN_BUDGET 64 493 ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET); 494 ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring); 495 496 return IRQ_HANDLED; 497 } 498 499 /** 500 * ice_msix_clean_rings - MSIX mode Interrupt Handler 501 * @irq: interrupt number 502 * @data: pointer to a q_vector 503 */ 504 static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) 505 { 506 struct ice_q_vector *q_vector = (struct ice_q_vector *)data; 507 508 if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) 509 return IRQ_HANDLED; 510 511 q_vector->total_events++; 512 513 napi_schedule(&q_vector->napi); 514 515 return IRQ_HANDLED; 516 } 517 518 static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data) 519 { 520 struct ice_q_vector *q_vector = (struct ice_q_vector *)data; 521 struct ice_pf *pf = q_vector->vsi->back; 522 struct ice_vf *vf; 523 unsigned int bkt; 524 525 if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) 526 return IRQ_HANDLED; 527 528 rcu_read_lock(); 529 ice_for_each_vf_rcu(pf, bkt, vf) 530 napi_schedule(&vf->repr->q_vector->napi); 531 rcu_read_unlock(); 532 533 return IRQ_HANDLED; 534 } 535 536 /** 537 * ice_vsi_alloc_stat_arrays - Allocate statistics arrays 538 * @vsi: VSI pointer 539 */ 540 static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi) 541 { 542 struct ice_vsi_stats *vsi_stat; 543 struct ice_pf *pf = vsi->back; 544 545 if (vsi->type == ICE_VSI_CHNL) 546 return 0; 547 if (!pf->vsi_stats) 548 return -ENOENT; 549 550 if (pf->vsi_stats[vsi->idx]) 551 /* realloc will happen in rebuild path */ 552 return 0; 553 554 vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL); 555 if (!vsi_stat) 556 return -ENOMEM; 557 558 vsi_stat->tx_ring_stats = 559 kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats), 560 GFP_KERNEL); 561 if (!vsi_stat->tx_ring_stats) 562 goto err_alloc_tx; 563 564 vsi_stat->rx_ring_stats = 565 kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats), 566 GFP_KERNEL); 567 if (!vsi_stat->rx_ring_stats) 568 goto err_alloc_rx; 569 570 pf->vsi_stats[vsi->idx] = vsi_stat; 571 572 return 0; 573 574 err_alloc_rx: 575 kfree(vsi_stat->rx_ring_stats); 576 err_alloc_tx: 577 kfree(vsi_stat->tx_ring_stats); 578 kfree(vsi_stat); 579 pf->vsi_stats[vsi->idx] = NULL; 580 return -ENOMEM; 581 } 582 583 /** 584 * ice_vsi_alloc_def - set default values for already allocated VSI 585 * @vsi: ptr to VSI 586 * @ch: ptr to channel 587 */ 588 static int 589 ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch) 590 { 591 if (vsi->type != ICE_VSI_CHNL) { 592 ice_vsi_set_num_qs(vsi); 593 if (ice_vsi_alloc_arrays(vsi)) 594 return -ENOMEM; 595 } 596 597 switch (vsi->type) { 598 case ICE_VSI_SWITCHDEV_CTRL: 599 /* Setup eswitch MSIX irq handler for VSI */ 600 vsi->irq_handler = ice_eswitch_msix_clean_rings; 601 break; 602 case ICE_VSI_PF: 603 /* Setup default MSIX irq handler for VSI */ 604 vsi->irq_handler = ice_msix_clean_rings; 605 break; 606 case ICE_VSI_CTRL: 607 /* Setup ctrl VSI MSIX irq handler */ 608 vsi->irq_handler = ice_msix_clean_ctrl_vsi; 609 break; 610 case ICE_VSI_CHNL: 611 if (!ch) 612 return -EINVAL; 613 614 vsi->num_rxq = ch->num_rxq; 615 vsi->num_txq = ch->num_txq; 616 vsi->next_base_q = ch->base_q; 617 break; 618 case ICE_VSI_VF: 619 case ICE_VSI_LB: 620 break; 621 default: 622 ice_vsi_free_arrays(vsi); 623 return -EINVAL; 624 } 625 626 return 0; 627 } 628 629 /** 630 * ice_vsi_alloc - Allocates the next available struct VSI in the PF 631 * @pf: board private structure 632 * 633 * Reserves a VSI index from the PF and allocates an empty VSI structure 634 * without a type. The VSI structure must later be initialized by calling 635 * ice_vsi_cfg(). 636 * 637 * returns a pointer to a VSI on success, NULL on failure. 638 */ 639 static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf) 640 { 641 struct device *dev = ice_pf_to_dev(pf); 642 struct ice_vsi *vsi = NULL; 643 644 /* Need to protect the allocation of the VSIs at the PF level */ 645 mutex_lock(&pf->sw_mutex); 646 647 /* If we have already allocated our maximum number of VSIs, 648 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index 649 * is available to be populated 650 */ 651 if (pf->next_vsi == ICE_NO_VSI) { 652 dev_dbg(dev, "out of VSI slots!\n"); 653 goto unlock_pf; 654 } 655 656 vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL); 657 if (!vsi) 658 goto unlock_pf; 659 660 vsi->back = pf; 661 set_bit(ICE_VSI_DOWN, vsi->state); 662 663 /* fill slot and make note of the index */ 664 vsi->idx = pf->next_vsi; 665 pf->vsi[pf->next_vsi] = vsi; 666 667 /* prepare pf->next_vsi for next use */ 668 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, 669 pf->next_vsi); 670 671 unlock_pf: 672 mutex_unlock(&pf->sw_mutex); 673 return vsi; 674 } 675 676 /** 677 * ice_alloc_fd_res - Allocate FD resource for a VSI 678 * @vsi: pointer to the ice_vsi 679 * 680 * This allocates the FD resources 681 * 682 * Returns 0 on success, -EPERM on no-op or -EIO on failure 683 */ 684 static int ice_alloc_fd_res(struct ice_vsi *vsi) 685 { 686 struct ice_pf *pf = vsi->back; 687 u32 g_val, b_val; 688 689 /* Flow Director filters are only allocated/assigned to the PF VSI or 690 * CHNL VSI which passes the traffic. The CTRL VSI is only used to 691 * add/delete filters so resources are not allocated to it 692 */ 693 if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) 694 return -EPERM; 695 696 if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF || 697 vsi->type == ICE_VSI_CHNL)) 698 return -EPERM; 699 700 /* FD filters from guaranteed pool per VSI */ 701 g_val = pf->hw.func_caps.fd_fltr_guar; 702 if (!g_val) 703 return -EPERM; 704 705 /* FD filters from best effort pool */ 706 b_val = pf->hw.func_caps.fd_fltr_best_effort; 707 if (!b_val) 708 return -EPERM; 709 710 /* PF main VSI gets only 64 FD resources from guaranteed pool 711 * when ADQ is configured. 712 */ 713 #define ICE_PF_VSI_GFLTR 64 714 715 /* determine FD filter resources per VSI from shared(best effort) and 716 * dedicated pool 717 */ 718 if (vsi->type == ICE_VSI_PF) { 719 vsi->num_gfltr = g_val; 720 /* if MQPRIO is configured, main VSI doesn't get all FD 721 * resources from guaranteed pool. PF VSI gets 64 FD resources 722 */ 723 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { 724 if (g_val < ICE_PF_VSI_GFLTR) 725 return -EPERM; 726 /* allow bare minimum entries for PF VSI */ 727 vsi->num_gfltr = ICE_PF_VSI_GFLTR; 728 } 729 730 /* each VSI gets same "best_effort" quota */ 731 vsi->num_bfltr = b_val; 732 } else if (vsi->type == ICE_VSI_VF) { 733 vsi->num_gfltr = 0; 734 735 /* each VSI gets same "best_effort" quota */ 736 vsi->num_bfltr = b_val; 737 } else { 738 struct ice_vsi *main_vsi; 739 int numtc; 740 741 main_vsi = ice_get_main_vsi(pf); 742 if (!main_vsi) 743 return -EPERM; 744 745 if (!main_vsi->all_numtc) 746 return -EINVAL; 747 748 /* figure out ADQ numtc */ 749 numtc = main_vsi->all_numtc - ICE_CHNL_START_TC; 750 751 /* only one TC but still asking resources for channels, 752 * invalid config 753 */ 754 if (numtc < ICE_CHNL_START_TC) 755 return -EPERM; 756 757 g_val -= ICE_PF_VSI_GFLTR; 758 /* channel VSIs gets equal share from guaranteed pool */ 759 vsi->num_gfltr = g_val / numtc; 760 761 /* each VSI gets same "best_effort" quota */ 762 vsi->num_bfltr = b_val; 763 } 764 765 return 0; 766 } 767 768 /** 769 * ice_vsi_get_qs - Assign queues from PF to VSI 770 * @vsi: the VSI to assign queues to 771 * 772 * Returns 0 on success and a negative value on error 773 */ 774 static int ice_vsi_get_qs(struct ice_vsi *vsi) 775 { 776 struct ice_pf *pf = vsi->back; 777 struct ice_qs_cfg tx_qs_cfg = { 778 .qs_mutex = &pf->avail_q_mutex, 779 .pf_map = pf->avail_txqs, 780 .pf_map_size = pf->max_pf_txqs, 781 .q_count = vsi->alloc_txq, 782 .scatter_count = ICE_MAX_SCATTER_TXQS, 783 .vsi_map = vsi->txq_map, 784 .vsi_map_offset = 0, 785 .mapping_mode = ICE_VSI_MAP_CONTIG 786 }; 787 struct ice_qs_cfg rx_qs_cfg = { 788 .qs_mutex = &pf->avail_q_mutex, 789 .pf_map = pf->avail_rxqs, 790 .pf_map_size = pf->max_pf_rxqs, 791 .q_count = vsi->alloc_rxq, 792 .scatter_count = ICE_MAX_SCATTER_RXQS, 793 .vsi_map = vsi->rxq_map, 794 .vsi_map_offset = 0, 795 .mapping_mode = ICE_VSI_MAP_CONTIG 796 }; 797 int ret; 798 799 if (vsi->type == ICE_VSI_CHNL) 800 return 0; 801 802 ret = __ice_vsi_get_qs(&tx_qs_cfg); 803 if (ret) 804 return ret; 805 vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode; 806 807 ret = __ice_vsi_get_qs(&rx_qs_cfg); 808 if (ret) 809 return ret; 810 vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode; 811 812 return 0; 813 } 814 815 /** 816 * ice_vsi_put_qs - Release queues from VSI to PF 817 * @vsi: the VSI that is going to release queues 818 */ 819 static void ice_vsi_put_qs(struct ice_vsi *vsi) 820 { 821 struct ice_pf *pf = vsi->back; 822 int i; 823 824 mutex_lock(&pf->avail_q_mutex); 825 826 ice_for_each_alloc_txq(vsi, i) { 827 clear_bit(vsi->txq_map[i], pf->avail_txqs); 828 vsi->txq_map[i] = ICE_INVAL_Q_INDEX; 829 } 830 831 ice_for_each_alloc_rxq(vsi, i) { 832 clear_bit(vsi->rxq_map[i], pf->avail_rxqs); 833 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; 834 } 835 836 mutex_unlock(&pf->avail_q_mutex); 837 } 838 839 /** 840 * ice_is_safe_mode 841 * @pf: pointer to the PF struct 842 * 843 * returns true if driver is in safe mode, false otherwise 844 */ 845 bool ice_is_safe_mode(struct ice_pf *pf) 846 { 847 return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 848 } 849 850 /** 851 * ice_is_rdma_ena 852 * @pf: pointer to the PF struct 853 * 854 * returns true if RDMA is currently supported, false otherwise 855 */ 856 bool ice_is_rdma_ena(struct ice_pf *pf) 857 { 858 return test_bit(ICE_FLAG_RDMA_ENA, pf->flags); 859 } 860 861 /** 862 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration 863 * @vsi: the VSI being cleaned up 864 * 865 * This function deletes RSS input set for all flows that were configured 866 * for this VSI 867 */ 868 static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi) 869 { 870 struct ice_pf *pf = vsi->back; 871 int status; 872 873 if (ice_is_safe_mode(pf)) 874 return; 875 876 status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx); 877 if (status) 878 dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n", 879 vsi->vsi_num, status); 880 } 881 882 /** 883 * ice_rss_clean - Delete RSS related VSI structures and configuration 884 * @vsi: the VSI being removed 885 */ 886 static void ice_rss_clean(struct ice_vsi *vsi) 887 { 888 struct ice_pf *pf = vsi->back; 889 struct device *dev; 890 891 dev = ice_pf_to_dev(pf); 892 893 devm_kfree(dev, vsi->rss_hkey_user); 894 devm_kfree(dev, vsi->rss_lut_user); 895 896 ice_vsi_clean_rss_flow_fld(vsi); 897 /* remove RSS replay list */ 898 if (!ice_is_safe_mode(pf)) 899 ice_rem_vsi_rss_list(&pf->hw, vsi->idx); 900 } 901 902 /** 903 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type 904 * @vsi: the VSI being configured 905 */ 906 static void ice_vsi_set_rss_params(struct ice_vsi *vsi) 907 { 908 struct ice_hw_common_caps *cap; 909 struct ice_pf *pf = vsi->back; 910 u16 max_rss_size; 911 912 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 913 vsi->rss_size = 1; 914 return; 915 } 916 917 cap = &pf->hw.func_caps.common_cap; 918 max_rss_size = BIT(cap->rss_table_entry_width); 919 switch (vsi->type) { 920 case ICE_VSI_CHNL: 921 case ICE_VSI_PF: 922 /* PF VSI will inherit RSS instance of PF */ 923 vsi->rss_table_size = (u16)cap->rss_table_size; 924 if (vsi->type == ICE_VSI_CHNL) 925 vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size); 926 else 927 vsi->rss_size = min_t(u16, num_online_cpus(), 928 max_rss_size); 929 vsi->rss_lut_type = ICE_LUT_PF; 930 break; 931 case ICE_VSI_SWITCHDEV_CTRL: 932 vsi->rss_table_size = ICE_LUT_VSI_SIZE; 933 vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size); 934 vsi->rss_lut_type = ICE_LUT_VSI; 935 break; 936 case ICE_VSI_VF: 937 /* VF VSI will get a small RSS table. 938 * For VSI_LUT, LUT size should be set to 64 bytes. 939 */ 940 vsi->rss_table_size = ICE_LUT_VSI_SIZE; 941 vsi->rss_size = ICE_MAX_RSS_QS_PER_VF; 942 vsi->rss_lut_type = ICE_LUT_VSI; 943 break; 944 case ICE_VSI_LB: 945 break; 946 default: 947 dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n", 948 ice_vsi_type_str(vsi->type)); 949 break; 950 } 951 } 952 953 /** 954 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI 955 * @hw: HW structure used to determine the VLAN mode of the device 956 * @ctxt: the VSI context being set 957 * 958 * This initializes a default VSI context for all sections except the Queues. 959 */ 960 static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt) 961 { 962 u32 table = 0; 963 964 memset(&ctxt->info, 0, sizeof(ctxt->info)); 965 /* VSI's should be allocated from shared pool */ 966 ctxt->alloc_from_pool = true; 967 /* Src pruning enabled by default */ 968 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; 969 /* Traffic from VSI can be sent to LAN */ 970 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; 971 /* allow all untagged/tagged packets by default on Tx */ 972 ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL & 973 ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >> 974 ICE_AQ_VSI_INNER_VLAN_TX_MODE_S); 975 /* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which 976 * results in legacy behavior (show VLAN, DEI, and UP) in descriptor. 977 * 978 * DVM - leave inner VLAN in packet by default 979 */ 980 if (ice_is_dvm_ena(hw)) { 981 ctxt->info.inner_vlan_flags |= 982 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING; 983 ctxt->info.outer_vlan_flags = 984 (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL << 985 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) & 986 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M; 987 ctxt->info.outer_vlan_flags |= 988 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 << 989 ICE_AQ_VSI_OUTER_TAG_TYPE_S) & 990 ICE_AQ_VSI_OUTER_TAG_TYPE_M; 991 ctxt->info.outer_vlan_flags |= 992 FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M, 993 ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING); 994 } 995 /* Have 1:1 UP mapping for both ingress/egress tables */ 996 table |= ICE_UP_TABLE_TRANSLATE(0, 0); 997 table |= ICE_UP_TABLE_TRANSLATE(1, 1); 998 table |= ICE_UP_TABLE_TRANSLATE(2, 2); 999 table |= ICE_UP_TABLE_TRANSLATE(3, 3); 1000 table |= ICE_UP_TABLE_TRANSLATE(4, 4); 1001 table |= ICE_UP_TABLE_TRANSLATE(5, 5); 1002 table |= ICE_UP_TABLE_TRANSLATE(6, 6); 1003 table |= ICE_UP_TABLE_TRANSLATE(7, 7); 1004 ctxt->info.ingress_table = cpu_to_le32(table); 1005 ctxt->info.egress_table = cpu_to_le32(table); 1006 /* Have 1:1 UP mapping for outer to inner UP table */ 1007 ctxt->info.outer_up_table = cpu_to_le32(table); 1008 /* No Outer tag support outer_tag_flags remains to zero */ 1009 } 1010 1011 /** 1012 * ice_vsi_setup_q_map - Setup a VSI queue map 1013 * @vsi: the VSI being configured 1014 * @ctxt: VSI context structure 1015 */ 1016 static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) 1017 { 1018 u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0; 1019 u16 num_txq_per_tc, num_rxq_per_tc; 1020 u16 qcount_tx = vsi->alloc_txq; 1021 u16 qcount_rx = vsi->alloc_rxq; 1022 u8 netdev_tc = 0; 1023 int i; 1024 1025 if (!vsi->tc_cfg.numtc) { 1026 /* at least TC0 should be enabled by default */ 1027 vsi->tc_cfg.numtc = 1; 1028 vsi->tc_cfg.ena_tc = 1; 1029 } 1030 1031 num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC); 1032 if (!num_rxq_per_tc) 1033 num_rxq_per_tc = 1; 1034 num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc; 1035 if (!num_txq_per_tc) 1036 num_txq_per_tc = 1; 1037 1038 /* find the (rounded up) power-of-2 of qcount */ 1039 pow = (u16)order_base_2(num_rxq_per_tc); 1040 1041 /* TC mapping is a function of the number of Rx queues assigned to the 1042 * VSI for each traffic class and the offset of these queues. 1043 * The first 10 bits are for queue offset for TC0, next 4 bits for no:of 1044 * queues allocated to TC0. No:of queues is a power-of-2. 1045 * 1046 * If TC is not enabled, the queue offset is set to 0, and allocate one 1047 * queue, this way, traffic for the given TC will be sent to the default 1048 * queue. 1049 * 1050 * Setup number and offset of Rx queues for all TCs for the VSI 1051 */ 1052 ice_for_each_traffic_class(i) { 1053 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { 1054 /* TC is not enabled */ 1055 vsi->tc_cfg.tc_info[i].qoffset = 0; 1056 vsi->tc_cfg.tc_info[i].qcount_rx = 1; 1057 vsi->tc_cfg.tc_info[i].qcount_tx = 1; 1058 vsi->tc_cfg.tc_info[i].netdev_tc = 0; 1059 ctxt->info.tc_mapping[i] = 0; 1060 continue; 1061 } 1062 1063 /* TC is enabled */ 1064 vsi->tc_cfg.tc_info[i].qoffset = offset; 1065 vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc; 1066 vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc; 1067 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; 1068 1069 qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & 1070 ICE_AQ_VSI_TC_Q_OFFSET_M) | 1071 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & 1072 ICE_AQ_VSI_TC_Q_NUM_M); 1073 offset += num_rxq_per_tc; 1074 tx_count += num_txq_per_tc; 1075 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); 1076 } 1077 1078 /* if offset is non-zero, means it is calculated correctly based on 1079 * enabled TCs for a given VSI otherwise qcount_rx will always 1080 * be correct and non-zero because it is based off - VSI's 1081 * allocated Rx queues which is at least 1 (hence qcount_tx will be 1082 * at least 1) 1083 */ 1084 if (offset) 1085 rx_count = offset; 1086 else 1087 rx_count = num_rxq_per_tc; 1088 1089 if (rx_count > vsi->alloc_rxq) { 1090 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", 1091 rx_count, vsi->alloc_rxq); 1092 return -EINVAL; 1093 } 1094 1095 if (tx_count > vsi->alloc_txq) { 1096 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", 1097 tx_count, vsi->alloc_txq); 1098 return -EINVAL; 1099 } 1100 1101 vsi->num_txq = tx_count; 1102 vsi->num_rxq = rx_count; 1103 1104 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { 1105 dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); 1106 /* since there is a chance that num_rxq could have been changed 1107 * in the above for loop, make num_txq equal to num_rxq. 1108 */ 1109 vsi->num_txq = vsi->num_rxq; 1110 } 1111 1112 /* Rx queue mapping */ 1113 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); 1114 /* q_mapping buffer holds the info for the first queue allocated for 1115 * this VSI in the PF space and also the number of queues associated 1116 * with this VSI. 1117 */ 1118 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); 1119 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); 1120 1121 return 0; 1122 } 1123 1124 /** 1125 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI 1126 * @ctxt: the VSI context being set 1127 * @vsi: the VSI being configured 1128 */ 1129 static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) 1130 { 1131 u8 dflt_q_group, dflt_q_prio; 1132 u16 dflt_q, report_q, val; 1133 1134 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL && 1135 vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL) 1136 return; 1137 1138 val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID; 1139 ctxt->info.valid_sections |= cpu_to_le16(val); 1140 dflt_q = 0; 1141 dflt_q_group = 0; 1142 report_q = 0; 1143 dflt_q_prio = 0; 1144 1145 /* enable flow director filtering/programming */ 1146 val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE; 1147 ctxt->info.fd_options = cpu_to_le16(val); 1148 /* max of allocated flow director filters */ 1149 ctxt->info.max_fd_fltr_dedicated = 1150 cpu_to_le16(vsi->num_gfltr); 1151 /* max of shared flow director filters any VSI may program */ 1152 ctxt->info.max_fd_fltr_shared = 1153 cpu_to_le16(vsi->num_bfltr); 1154 /* default queue index within the VSI of the default FD */ 1155 val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) & 1156 ICE_AQ_VSI_FD_DEF_Q_M); 1157 /* target queue or queue group to the FD filter */ 1158 val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) & 1159 ICE_AQ_VSI_FD_DEF_GRP_M); 1160 ctxt->info.fd_def_q = cpu_to_le16(val); 1161 /* queue index on which FD filter completion is reported */ 1162 val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) & 1163 ICE_AQ_VSI_FD_REPORT_Q_M); 1164 /* priority of the default qindex action */ 1165 val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) & 1166 ICE_AQ_VSI_FD_DEF_PRIORITY_M); 1167 ctxt->info.fd_report_opt = cpu_to_le16(val); 1168 } 1169 1170 /** 1171 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI 1172 * @ctxt: the VSI context being set 1173 * @vsi: the VSI being configured 1174 */ 1175 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) 1176 { 1177 u8 lut_type, hash_type; 1178 struct device *dev; 1179 struct ice_pf *pf; 1180 1181 pf = vsi->back; 1182 dev = ice_pf_to_dev(pf); 1183 1184 switch (vsi->type) { 1185 case ICE_VSI_CHNL: 1186 case ICE_VSI_PF: 1187 /* PF VSI will inherit RSS instance of PF */ 1188 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; 1189 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 1190 break; 1191 case ICE_VSI_VF: 1192 /* VF VSI will gets a small RSS table which is a VSI LUT type */ 1193 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; 1194 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 1195 break; 1196 default: 1197 dev_dbg(dev, "Unsupported VSI type %s\n", 1198 ice_vsi_type_str(vsi->type)); 1199 return; 1200 } 1201 1202 ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & 1203 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | 1204 (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M); 1205 } 1206 1207 static void 1208 ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) 1209 { 1210 struct ice_pf *pf = vsi->back; 1211 u16 qcount, qmap; 1212 u8 offset = 0; 1213 int pow; 1214 1215 qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix); 1216 1217 pow = order_base_2(qcount); 1218 qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & 1219 ICE_AQ_VSI_TC_Q_OFFSET_M) | 1220 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & 1221 ICE_AQ_VSI_TC_Q_NUM_M); 1222 1223 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); 1224 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); 1225 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q); 1226 ctxt->info.q_mapping[1] = cpu_to_le16(qcount); 1227 } 1228 1229 /** 1230 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not 1231 * @vsi: VSI to check whether or not VLAN pruning is enabled. 1232 * 1233 * returns true if Rx VLAN pruning is enabled and false otherwise. 1234 */ 1235 static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi) 1236 { 1237 return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 1238 } 1239 1240 /** 1241 * ice_vsi_init - Create and initialize a VSI 1242 * @vsi: the VSI being configured 1243 * @vsi_flags: VSI configuration flags 1244 * 1245 * Set ICE_FLAG_VSI_INIT to initialize a new VSI context, clear it to 1246 * reconfigure an existing context. 1247 * 1248 * This initializes a VSI context depending on the VSI type to be added and 1249 * passes it down to the add_vsi aq command to create a new VSI. 1250 */ 1251 static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags) 1252 { 1253 struct ice_pf *pf = vsi->back; 1254 struct ice_hw *hw = &pf->hw; 1255 struct ice_vsi_ctx *ctxt; 1256 struct device *dev; 1257 int ret = 0; 1258 1259 dev = ice_pf_to_dev(pf); 1260 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 1261 if (!ctxt) 1262 return -ENOMEM; 1263 1264 switch (vsi->type) { 1265 case ICE_VSI_CTRL: 1266 case ICE_VSI_LB: 1267 case ICE_VSI_PF: 1268 ctxt->flags = ICE_AQ_VSI_TYPE_PF; 1269 break; 1270 case ICE_VSI_SWITCHDEV_CTRL: 1271 case ICE_VSI_CHNL: 1272 ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2; 1273 break; 1274 case ICE_VSI_VF: 1275 ctxt->flags = ICE_AQ_VSI_TYPE_VF; 1276 /* VF number here is the absolute VF number (0-255) */ 1277 ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id; 1278 break; 1279 default: 1280 ret = -ENODEV; 1281 goto out; 1282 } 1283 1284 /* Handle VLAN pruning for channel VSI if main VSI has VLAN 1285 * prune enabled 1286 */ 1287 if (vsi->type == ICE_VSI_CHNL) { 1288 struct ice_vsi *main_vsi; 1289 1290 main_vsi = ice_get_main_vsi(pf); 1291 if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi)) 1292 ctxt->info.sw_flags2 |= 1293 ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 1294 else 1295 ctxt->info.sw_flags2 &= 1296 ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 1297 } 1298 1299 ice_set_dflt_vsi_ctx(hw, ctxt); 1300 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) 1301 ice_set_fd_vsi_ctx(ctxt, vsi); 1302 /* if the switch is in VEB mode, allow VSI loopback */ 1303 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) 1304 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 1305 1306 /* Set LUT type and HASH type if RSS is enabled */ 1307 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) && 1308 vsi->type != ICE_VSI_CTRL) { 1309 ice_set_rss_vsi_ctx(ctxt, vsi); 1310 /* if updating VSI context, make sure to set valid_section: 1311 * to indicate which section of VSI context being updated 1312 */ 1313 if (!(vsi_flags & ICE_VSI_FLAG_INIT)) 1314 ctxt->info.valid_sections |= 1315 cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); 1316 } 1317 1318 ctxt->info.sw_id = vsi->port_info->sw_id; 1319 if (vsi->type == ICE_VSI_CHNL) { 1320 ice_chnl_vsi_setup_q_map(vsi, ctxt); 1321 } else { 1322 ret = ice_vsi_setup_q_map(vsi, ctxt); 1323 if (ret) 1324 goto out; 1325 1326 if (!(vsi_flags & ICE_VSI_FLAG_INIT)) 1327 /* means VSI being updated */ 1328 /* must to indicate which section of VSI context are 1329 * being modified 1330 */ 1331 ctxt->info.valid_sections |= 1332 cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); 1333 } 1334 1335 /* Allow control frames out of main VSI */ 1336 if (vsi->type == ICE_VSI_PF) { 1337 ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; 1338 ctxt->info.valid_sections |= 1339 cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); 1340 } 1341 1342 if (vsi_flags & ICE_VSI_FLAG_INIT) { 1343 ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); 1344 if (ret) { 1345 dev_err(dev, "Add VSI failed, err %d\n", ret); 1346 ret = -EIO; 1347 goto out; 1348 } 1349 } else { 1350 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 1351 if (ret) { 1352 dev_err(dev, "Update VSI failed, err %d\n", ret); 1353 ret = -EIO; 1354 goto out; 1355 } 1356 } 1357 1358 /* keep context for update VSI operations */ 1359 vsi->info = ctxt->info; 1360 1361 /* record VSI number returned */ 1362 vsi->vsi_num = ctxt->vsi_num; 1363 1364 out: 1365 kfree(ctxt); 1366 return ret; 1367 } 1368 1369 /** 1370 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI 1371 * @vsi: the VSI having rings deallocated 1372 */ 1373 static void ice_vsi_clear_rings(struct ice_vsi *vsi) 1374 { 1375 int i; 1376 1377 /* Avoid stale references by clearing map from vector to ring */ 1378 if (vsi->q_vectors) { 1379 ice_for_each_q_vector(vsi, i) { 1380 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 1381 1382 if (q_vector) { 1383 q_vector->tx.tx_ring = NULL; 1384 q_vector->rx.rx_ring = NULL; 1385 } 1386 } 1387 } 1388 1389 if (vsi->tx_rings) { 1390 ice_for_each_alloc_txq(vsi, i) { 1391 if (vsi->tx_rings[i]) { 1392 kfree_rcu(vsi->tx_rings[i], rcu); 1393 WRITE_ONCE(vsi->tx_rings[i], NULL); 1394 } 1395 } 1396 } 1397 if (vsi->rx_rings) { 1398 ice_for_each_alloc_rxq(vsi, i) { 1399 if (vsi->rx_rings[i]) { 1400 kfree_rcu(vsi->rx_rings[i], rcu); 1401 WRITE_ONCE(vsi->rx_rings[i], NULL); 1402 } 1403 } 1404 } 1405 } 1406 1407 /** 1408 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI 1409 * @vsi: VSI which is having rings allocated 1410 */ 1411 static int ice_vsi_alloc_rings(struct ice_vsi *vsi) 1412 { 1413 bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw); 1414 struct ice_pf *pf = vsi->back; 1415 struct device *dev; 1416 u16 i; 1417 1418 dev = ice_pf_to_dev(pf); 1419 /* Allocate Tx rings */ 1420 ice_for_each_alloc_txq(vsi, i) { 1421 struct ice_tx_ring *ring; 1422 1423 /* allocate with kzalloc(), free with kfree_rcu() */ 1424 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1425 1426 if (!ring) 1427 goto err_out; 1428 1429 ring->q_index = i; 1430 ring->reg_idx = vsi->txq_map[i]; 1431 ring->vsi = vsi; 1432 ring->tx_tstamps = &pf->ptp.port.tx; 1433 ring->dev = dev; 1434 ring->count = vsi->num_tx_desc; 1435 ring->txq_teid = ICE_INVAL_TEID; 1436 if (dvm_ena) 1437 ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2; 1438 else 1439 ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1; 1440 WRITE_ONCE(vsi->tx_rings[i], ring); 1441 } 1442 1443 /* Allocate Rx rings */ 1444 ice_for_each_alloc_rxq(vsi, i) { 1445 struct ice_rx_ring *ring; 1446 1447 /* allocate with kzalloc(), free with kfree_rcu() */ 1448 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1449 if (!ring) 1450 goto err_out; 1451 1452 ring->q_index = i; 1453 ring->reg_idx = vsi->rxq_map[i]; 1454 ring->vsi = vsi; 1455 ring->netdev = vsi->netdev; 1456 ring->dev = dev; 1457 ring->count = vsi->num_rx_desc; 1458 ring->cached_phctime = pf->ptp.cached_phc_time; 1459 WRITE_ONCE(vsi->rx_rings[i], ring); 1460 } 1461 1462 return 0; 1463 1464 err_out: 1465 ice_vsi_clear_rings(vsi); 1466 return -ENOMEM; 1467 } 1468 1469 /** 1470 * ice_vsi_manage_rss_lut - disable/enable RSS 1471 * @vsi: the VSI being changed 1472 * @ena: boolean value indicating if this is an enable or disable request 1473 * 1474 * In the event of disable request for RSS, this function will zero out RSS 1475 * LUT, while in the event of enable request for RSS, it will reconfigure RSS 1476 * LUT. 1477 */ 1478 void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) 1479 { 1480 u8 *lut; 1481 1482 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); 1483 if (!lut) 1484 return; 1485 1486 if (ena) { 1487 if (vsi->rss_lut_user) 1488 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); 1489 else 1490 ice_fill_rss_lut(lut, vsi->rss_table_size, 1491 vsi->rss_size); 1492 } 1493 1494 ice_set_rss_lut(vsi, lut, vsi->rss_table_size); 1495 kfree(lut); 1496 } 1497 1498 /** 1499 * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI 1500 * @vsi: VSI to be configured 1501 * @disable: set to true to have FCS / CRC in the frame data 1502 */ 1503 void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable) 1504 { 1505 int i; 1506 1507 ice_for_each_rxq(vsi, i) 1508 if (disable) 1509 vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS; 1510 else 1511 vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS; 1512 } 1513 1514 /** 1515 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI 1516 * @vsi: VSI to be configured 1517 */ 1518 int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) 1519 { 1520 struct ice_pf *pf = vsi->back; 1521 struct device *dev; 1522 u8 *lut, *key; 1523 int err; 1524 1525 dev = ice_pf_to_dev(pf); 1526 if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size && 1527 (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) { 1528 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size); 1529 } else { 1530 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq); 1531 1532 /* If orig_rss_size is valid and it is less than determined 1533 * main VSI's rss_size, update main VSI's rss_size to be 1534 * orig_rss_size so that when tc-qdisc is deleted, main VSI 1535 * RSS table gets programmed to be correct (whatever it was 1536 * to begin with (prior to setup-tc for ADQ config) 1537 */ 1538 if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size && 1539 vsi->orig_rss_size <= vsi->num_rxq) { 1540 vsi->rss_size = vsi->orig_rss_size; 1541 /* now orig_rss_size is used, reset it to zero */ 1542 vsi->orig_rss_size = 0; 1543 } 1544 } 1545 1546 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); 1547 if (!lut) 1548 return -ENOMEM; 1549 1550 if (vsi->rss_lut_user) 1551 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); 1552 else 1553 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); 1554 1555 err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size); 1556 if (err) { 1557 dev_err(dev, "set_rss_lut failed, error %d\n", err); 1558 goto ice_vsi_cfg_rss_exit; 1559 } 1560 1561 key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL); 1562 if (!key) { 1563 err = -ENOMEM; 1564 goto ice_vsi_cfg_rss_exit; 1565 } 1566 1567 if (vsi->rss_hkey_user) 1568 memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); 1569 else 1570 netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); 1571 1572 err = ice_set_rss_key(vsi, key); 1573 if (err) 1574 dev_err(dev, "set_rss_key failed, error %d\n", err); 1575 1576 kfree(key); 1577 ice_vsi_cfg_rss_exit: 1578 kfree(lut); 1579 return err; 1580 } 1581 1582 /** 1583 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows 1584 * @vsi: VSI to be configured 1585 * 1586 * This function will only be called during the VF VSI setup. Upon successful 1587 * completion of package download, this function will configure default RSS 1588 * input sets for VF VSI. 1589 */ 1590 static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) 1591 { 1592 struct ice_pf *pf = vsi->back; 1593 struct device *dev; 1594 int status; 1595 1596 dev = ice_pf_to_dev(pf); 1597 if (ice_is_safe_mode(pf)) { 1598 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n", 1599 vsi->vsi_num); 1600 return; 1601 } 1602 1603 status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA); 1604 if (status) 1605 dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n", 1606 vsi->vsi_num, status); 1607 } 1608 1609 /** 1610 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows 1611 * @vsi: VSI to be configured 1612 * 1613 * This function will only be called after successful download package call 1614 * during initialization of PF. Since the downloaded package will erase the 1615 * RSS section, this function will configure RSS input sets for different 1616 * flow types. The last profile added has the highest priority, therefore 2 1617 * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles 1618 * (i.e. IPv4 src/dst TCP src/dst port). 1619 */ 1620 static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) 1621 { 1622 u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num; 1623 struct ice_pf *pf = vsi->back; 1624 struct ice_hw *hw = &pf->hw; 1625 struct device *dev; 1626 int status; 1627 1628 dev = ice_pf_to_dev(pf); 1629 if (ice_is_safe_mode(pf)) { 1630 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n", 1631 vsi_num); 1632 return; 1633 } 1634 /* configure RSS for IPv4 with input set IP src/dst */ 1635 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, 1636 ICE_FLOW_SEG_HDR_IPV4); 1637 if (status) 1638 dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n", 1639 vsi_num, status); 1640 1641 /* configure RSS for IPv6 with input set IPv6 src/dst */ 1642 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, 1643 ICE_FLOW_SEG_HDR_IPV6); 1644 if (status) 1645 dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n", 1646 vsi_num, status); 1647 1648 /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */ 1649 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4, 1650 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4); 1651 if (status) 1652 dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n", 1653 vsi_num, status); 1654 1655 /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */ 1656 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4, 1657 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4); 1658 if (status) 1659 dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n", 1660 vsi_num, status); 1661 1662 /* configure RSS for sctp4 with input set IP src/dst */ 1663 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, 1664 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4); 1665 if (status) 1666 dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n", 1667 vsi_num, status); 1668 1669 /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */ 1670 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6, 1671 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6); 1672 if (status) 1673 dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n", 1674 vsi_num, status); 1675 1676 /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */ 1677 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6, 1678 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6); 1679 if (status) 1680 dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n", 1681 vsi_num, status); 1682 1683 /* configure RSS for sctp6 with input set IPv6 src/dst */ 1684 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, 1685 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6); 1686 if (status) 1687 dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n", 1688 vsi_num, status); 1689 1690 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI, 1691 ICE_FLOW_SEG_HDR_ESP); 1692 if (status) 1693 dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n", 1694 vsi_num, status); 1695 } 1696 1697 /** 1698 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length 1699 * @vsi: VSI 1700 */ 1701 static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi) 1702 { 1703 if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { 1704 vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; 1705 vsi->rx_buf_len = ICE_RXBUF_1664; 1706 #if (PAGE_SIZE < 8192) 1707 } else if (!ICE_2K_TOO_SMALL_WITH_PADDING && 1708 (vsi->netdev->mtu <= ETH_DATA_LEN)) { 1709 vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; 1710 vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; 1711 #endif 1712 } else { 1713 vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; 1714 vsi->rx_buf_len = ICE_RXBUF_3072; 1715 } 1716 } 1717 1718 /** 1719 * ice_pf_state_is_nominal - checks the PF for nominal state 1720 * @pf: pointer to PF to check 1721 * 1722 * Check the PF's state for a collection of bits that would indicate 1723 * the PF is in a state that would inhibit normal operation for 1724 * driver functionality. 1725 * 1726 * Returns true if PF is in a nominal state, false otherwise 1727 */ 1728 bool ice_pf_state_is_nominal(struct ice_pf *pf) 1729 { 1730 DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 }; 1731 1732 if (!pf) 1733 return false; 1734 1735 bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS); 1736 if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS)) 1737 return false; 1738 1739 return true; 1740 } 1741 1742 /** 1743 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters 1744 * @vsi: the VSI to be updated 1745 */ 1746 void ice_update_eth_stats(struct ice_vsi *vsi) 1747 { 1748 struct ice_eth_stats *prev_es, *cur_es; 1749 struct ice_hw *hw = &vsi->back->hw; 1750 struct ice_pf *pf = vsi->back; 1751 u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ 1752 1753 prev_es = &vsi->eth_stats_prev; 1754 cur_es = &vsi->eth_stats; 1755 1756 if (ice_is_reset_in_progress(pf->state)) 1757 vsi->stat_offsets_loaded = false; 1758 1759 ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded, 1760 &prev_es->rx_bytes, &cur_es->rx_bytes); 1761 1762 ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded, 1763 &prev_es->rx_unicast, &cur_es->rx_unicast); 1764 1765 ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded, 1766 &prev_es->rx_multicast, &cur_es->rx_multicast); 1767 1768 ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded, 1769 &prev_es->rx_broadcast, &cur_es->rx_broadcast); 1770 1771 ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, 1772 &prev_es->rx_discards, &cur_es->rx_discards); 1773 1774 ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded, 1775 &prev_es->tx_bytes, &cur_es->tx_bytes); 1776 1777 ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded, 1778 &prev_es->tx_unicast, &cur_es->tx_unicast); 1779 1780 ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded, 1781 &prev_es->tx_multicast, &cur_es->tx_multicast); 1782 1783 ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded, 1784 &prev_es->tx_broadcast, &cur_es->tx_broadcast); 1785 1786 ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, 1787 &prev_es->tx_errors, &cur_es->tx_errors); 1788 1789 vsi->stat_offsets_loaded = true; 1790 } 1791 1792 /** 1793 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register 1794 * @hw: HW pointer 1795 * @pf_q: index of the Rx queue in the PF's queue space 1796 * @rxdid: flexible descriptor RXDID 1797 * @prio: priority for the RXDID for this queue 1798 * @ena_ts: true to enable timestamp and false to disable timestamp 1799 */ 1800 void 1801 ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, 1802 bool ena_ts) 1803 { 1804 int regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); 1805 1806 /* clear any previous values */ 1807 regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M | 1808 QRXFLXP_CNTXT_RXDID_PRIO_M | 1809 QRXFLXP_CNTXT_TS_M); 1810 1811 regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & 1812 QRXFLXP_CNTXT_RXDID_IDX_M; 1813 1814 regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) & 1815 QRXFLXP_CNTXT_RXDID_PRIO_M; 1816 1817 if (ena_ts) 1818 /* Enable TimeSync on this queue */ 1819 regval |= QRXFLXP_CNTXT_TS_M; 1820 1821 wr32(hw, QRXFLXP_CNTXT(pf_q), regval); 1822 } 1823 1824 int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) 1825 { 1826 if (q_idx >= vsi->num_rxq) 1827 return -EINVAL; 1828 1829 return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]); 1830 } 1831 1832 int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx) 1833 { 1834 DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1); 1835 1836 if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx]) 1837 return -EINVAL; 1838 1839 qg_buf->num_txqs = 1; 1840 1841 return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf); 1842 } 1843 1844 /** 1845 * ice_vsi_cfg_rxqs - Configure the VSI for Rx 1846 * @vsi: the VSI being configured 1847 * 1848 * Return 0 on success and a negative value on error 1849 * Configure the Rx VSI for operation. 1850 */ 1851 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) 1852 { 1853 u16 i; 1854 1855 if (vsi->type == ICE_VSI_VF) 1856 goto setup_rings; 1857 1858 ice_vsi_cfg_frame_size(vsi); 1859 setup_rings: 1860 /* set up individual rings */ 1861 ice_for_each_rxq(vsi, i) { 1862 int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]); 1863 1864 if (err) 1865 return err; 1866 } 1867 1868 return 0; 1869 } 1870 1871 /** 1872 * ice_vsi_cfg_txqs - Configure the VSI for Tx 1873 * @vsi: the VSI being configured 1874 * @rings: Tx ring array to be configured 1875 * @count: number of Tx ring array elements 1876 * 1877 * Return 0 on success and a negative value on error 1878 * Configure the Tx VSI for operation. 1879 */ 1880 static int 1881 ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count) 1882 { 1883 DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1); 1884 int err = 0; 1885 u16 q_idx; 1886 1887 qg_buf->num_txqs = 1; 1888 1889 for (q_idx = 0; q_idx < count; q_idx++) { 1890 err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf); 1891 if (err) 1892 break; 1893 } 1894 1895 return err; 1896 } 1897 1898 /** 1899 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx 1900 * @vsi: the VSI being configured 1901 * 1902 * Return 0 on success and a negative value on error 1903 * Configure the Tx VSI for operation. 1904 */ 1905 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi) 1906 { 1907 return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq); 1908 } 1909 1910 /** 1911 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI 1912 * @vsi: the VSI being configured 1913 * 1914 * Return 0 on success and a negative value on error 1915 * Configure the Tx queues dedicated for XDP in given VSI for operation. 1916 */ 1917 int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi) 1918 { 1919 int ret; 1920 int i; 1921 1922 ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq); 1923 if (ret) 1924 return ret; 1925 1926 ice_for_each_rxq(vsi, i) 1927 ice_tx_xsk_pool(vsi, i); 1928 1929 return 0; 1930 } 1931 1932 /** 1933 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value 1934 * @intrl: interrupt rate limit in usecs 1935 * @gran: interrupt rate limit granularity in usecs 1936 * 1937 * This function converts a decimal interrupt rate limit in usecs to the format 1938 * expected by firmware. 1939 */ 1940 static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) 1941 { 1942 u32 val = intrl / gran; 1943 1944 if (val) 1945 return val | GLINT_RATE_INTRL_ENA_M; 1946 return 0; 1947 } 1948 1949 /** 1950 * ice_write_intrl - write throttle rate limit to interrupt specific register 1951 * @q_vector: pointer to interrupt specific structure 1952 * @intrl: throttle rate limit in microseconds to write 1953 */ 1954 void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl) 1955 { 1956 struct ice_hw *hw = &q_vector->vsi->back->hw; 1957 1958 wr32(hw, GLINT_RATE(q_vector->reg_idx), 1959 ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25)); 1960 } 1961 1962 static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc) 1963 { 1964 switch (rc->type) { 1965 case ICE_RX_CONTAINER: 1966 if (rc->rx_ring) 1967 return rc->rx_ring->q_vector; 1968 break; 1969 case ICE_TX_CONTAINER: 1970 if (rc->tx_ring) 1971 return rc->tx_ring->q_vector; 1972 break; 1973 default: 1974 break; 1975 } 1976 1977 return NULL; 1978 } 1979 1980 /** 1981 * __ice_write_itr - write throttle rate to register 1982 * @q_vector: pointer to interrupt data structure 1983 * @rc: pointer to ring container 1984 * @itr: throttle rate in microseconds to write 1985 */ 1986 static void __ice_write_itr(struct ice_q_vector *q_vector, 1987 struct ice_ring_container *rc, u16 itr) 1988 { 1989 struct ice_hw *hw = &q_vector->vsi->back->hw; 1990 1991 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), 1992 ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S); 1993 } 1994 1995 /** 1996 * ice_write_itr - write throttle rate to queue specific register 1997 * @rc: pointer to ring container 1998 * @itr: throttle rate in microseconds to write 1999 */ 2000 void ice_write_itr(struct ice_ring_container *rc, u16 itr) 2001 { 2002 struct ice_q_vector *q_vector; 2003 2004 q_vector = ice_pull_qvec_from_rc(rc); 2005 if (!q_vector) 2006 return; 2007 2008 __ice_write_itr(q_vector, rc, itr); 2009 } 2010 2011 /** 2012 * ice_set_q_vector_intrl - set up interrupt rate limiting 2013 * @q_vector: the vector to be configured 2014 * 2015 * Interrupt rate limiting is local to the vector, not per-queue so we must 2016 * detect if either ring container has dynamic moderation enabled to decide 2017 * what to set the interrupt rate limit to via INTRL settings. In the case that 2018 * dynamic moderation is disabled on both, write the value with the cached 2019 * setting to make sure INTRL register matches the user visible value. 2020 */ 2021 void ice_set_q_vector_intrl(struct ice_q_vector *q_vector) 2022 { 2023 if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) { 2024 /* in the case of dynamic enabled, cap each vector to no more 2025 * than (4 us) 250,000 ints/sec, which allows low latency 2026 * but still less than 500,000 interrupts per second, which 2027 * reduces CPU a bit in the case of the lowest latency 2028 * setting. The 4 here is a value in microseconds. 2029 */ 2030 ice_write_intrl(q_vector, 4); 2031 } else { 2032 ice_write_intrl(q_vector, q_vector->intrl); 2033 } 2034 } 2035 2036 /** 2037 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW 2038 * @vsi: the VSI being configured 2039 * 2040 * This configures MSIX mode interrupts for the PF VSI, and should not be used 2041 * for the VF VSI. 2042 */ 2043 void ice_vsi_cfg_msix(struct ice_vsi *vsi) 2044 { 2045 struct ice_pf *pf = vsi->back; 2046 struct ice_hw *hw = &pf->hw; 2047 u16 txq = 0, rxq = 0; 2048 int i, q; 2049 2050 ice_for_each_q_vector(vsi, i) { 2051 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2052 u16 reg_idx = q_vector->reg_idx; 2053 2054 ice_cfg_itr(hw, q_vector); 2055 2056 /* Both Transmit Queue Interrupt Cause Control register 2057 * and Receive Queue Interrupt Cause control register 2058 * expects MSIX_INDX field to be the vector index 2059 * within the function space and not the absolute 2060 * vector index across PF or across device. 2061 * For SR-IOV VF VSIs queue vector index always starts 2062 * with 1 since first vector index(0) is used for OICR 2063 * in VF space. Since VMDq and other PF VSIs are within 2064 * the PF function space, use the vector index that is 2065 * tracked for this PF. 2066 */ 2067 for (q = 0; q < q_vector->num_ring_tx; q++) { 2068 ice_cfg_txq_interrupt(vsi, txq, reg_idx, 2069 q_vector->tx.itr_idx); 2070 txq++; 2071 } 2072 2073 for (q = 0; q < q_vector->num_ring_rx; q++) { 2074 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx, 2075 q_vector->rx.itr_idx); 2076 rxq++; 2077 } 2078 } 2079 } 2080 2081 /** 2082 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings 2083 * @vsi: the VSI whose rings are to be enabled 2084 * 2085 * Returns 0 on success and a negative value on error 2086 */ 2087 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi) 2088 { 2089 return ice_vsi_ctrl_all_rx_rings(vsi, true); 2090 } 2091 2092 /** 2093 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings 2094 * @vsi: the VSI whose rings are to be disabled 2095 * 2096 * Returns 0 on success and a negative value on error 2097 */ 2098 int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi) 2099 { 2100 return ice_vsi_ctrl_all_rx_rings(vsi, false); 2101 } 2102 2103 /** 2104 * ice_vsi_stop_tx_rings - Disable Tx rings 2105 * @vsi: the VSI being configured 2106 * @rst_src: reset source 2107 * @rel_vmvf_num: Relative ID of VF/VM 2108 * @rings: Tx ring array to be stopped 2109 * @count: number of Tx ring array elements 2110 */ 2111 static int 2112 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2113 u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count) 2114 { 2115 u16 q_idx; 2116 2117 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) 2118 return -EINVAL; 2119 2120 for (q_idx = 0; q_idx < count; q_idx++) { 2121 struct ice_txq_meta txq_meta = { }; 2122 int status; 2123 2124 if (!rings || !rings[q_idx]) 2125 return -EINVAL; 2126 2127 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); 2128 status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num, 2129 rings[q_idx], &txq_meta); 2130 2131 if (status) 2132 return status; 2133 } 2134 2135 return 0; 2136 } 2137 2138 /** 2139 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings 2140 * @vsi: the VSI being configured 2141 * @rst_src: reset source 2142 * @rel_vmvf_num: Relative ID of VF/VM 2143 */ 2144 int 2145 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2146 u16 rel_vmvf_num) 2147 { 2148 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq); 2149 } 2150 2151 /** 2152 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings 2153 * @vsi: the VSI being configured 2154 */ 2155 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi) 2156 { 2157 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq); 2158 } 2159 2160 /** 2161 * ice_vsi_is_rx_queue_active 2162 * @vsi: the VSI being configured 2163 * 2164 * Return true if at least one queue is active. 2165 */ 2166 bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi) 2167 { 2168 struct ice_pf *pf = vsi->back; 2169 struct ice_hw *hw = &pf->hw; 2170 int i; 2171 2172 ice_for_each_rxq(vsi, i) { 2173 u32 rx_reg; 2174 int pf_q; 2175 2176 pf_q = vsi->rxq_map[i]; 2177 rx_reg = rd32(hw, QRX_CTRL(pf_q)); 2178 if (rx_reg & QRX_CTRL_QENA_STAT_M) 2179 return true; 2180 } 2181 2182 return false; 2183 } 2184 2185 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) 2186 { 2187 if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { 2188 vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; 2189 vsi->tc_cfg.numtc = 1; 2190 return; 2191 } 2192 2193 /* set VSI TC information based on DCB config */ 2194 ice_vsi_set_dcb_tc_cfg(vsi); 2195 } 2196 2197 /** 2198 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling 2199 * @vsi: the VSI being configured 2200 * @tx: bool to determine Tx or Rx rule 2201 * @create: bool to determine create or remove Rule 2202 */ 2203 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) 2204 { 2205 int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, 2206 enum ice_sw_fwd_act_type act); 2207 struct ice_pf *pf = vsi->back; 2208 struct device *dev; 2209 int status; 2210 2211 dev = ice_pf_to_dev(pf); 2212 eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth; 2213 2214 if (tx) { 2215 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX, 2216 ICE_DROP_PACKET); 2217 } else { 2218 if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) { 2219 status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num, 2220 create); 2221 } else { 2222 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, 2223 ICE_FWD_TO_VSI); 2224 } 2225 } 2226 2227 if (status) 2228 dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n", 2229 create ? "adding" : "removing", tx ? "TX" : "RX", 2230 vsi->vsi_num, status); 2231 } 2232 2233 /** 2234 * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it 2235 * @vsi: pointer to the VSI 2236 * 2237 * This function will allocate new scheduler aggregator now if needed and will 2238 * move specified VSI into it. 2239 */ 2240 static void ice_set_agg_vsi(struct ice_vsi *vsi) 2241 { 2242 struct device *dev = ice_pf_to_dev(vsi->back); 2243 struct ice_agg_node *agg_node_iter = NULL; 2244 u32 agg_id = ICE_INVALID_AGG_NODE_ID; 2245 struct ice_agg_node *agg_node = NULL; 2246 int node_offset, max_agg_nodes = 0; 2247 struct ice_port_info *port_info; 2248 struct ice_pf *pf = vsi->back; 2249 u32 agg_node_id_start = 0; 2250 int status; 2251 2252 /* create (as needed) scheduler aggregator node and move VSI into 2253 * corresponding aggregator node 2254 * - PF aggregator node to contains VSIs of type _PF and _CTRL 2255 * - VF aggregator nodes will contain VF VSI 2256 */ 2257 port_info = pf->hw.port_info; 2258 if (!port_info) 2259 return; 2260 2261 switch (vsi->type) { 2262 case ICE_VSI_CTRL: 2263 case ICE_VSI_CHNL: 2264 case ICE_VSI_LB: 2265 case ICE_VSI_PF: 2266 case ICE_VSI_SWITCHDEV_CTRL: 2267 max_agg_nodes = ICE_MAX_PF_AGG_NODES; 2268 agg_node_id_start = ICE_PF_AGG_NODE_ID_START; 2269 agg_node_iter = &pf->pf_agg_node[0]; 2270 break; 2271 case ICE_VSI_VF: 2272 /* user can create 'n' VFs on a given PF, but since max children 2273 * per aggregator node can be only 64. Following code handles 2274 * aggregator(s) for VF VSIs, either selects a agg_node which 2275 * was already created provided num_vsis < 64, otherwise 2276 * select next available node, which will be created 2277 */ 2278 max_agg_nodes = ICE_MAX_VF_AGG_NODES; 2279 agg_node_id_start = ICE_VF_AGG_NODE_ID_START; 2280 agg_node_iter = &pf->vf_agg_node[0]; 2281 break; 2282 default: 2283 /* other VSI type, handle later if needed */ 2284 dev_dbg(dev, "unexpected VSI type %s\n", 2285 ice_vsi_type_str(vsi->type)); 2286 return; 2287 } 2288 2289 /* find the appropriate aggregator node */ 2290 for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) { 2291 /* see if we can find space in previously created 2292 * node if num_vsis < 64, otherwise skip 2293 */ 2294 if (agg_node_iter->num_vsis && 2295 agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { 2296 agg_node_iter++; 2297 continue; 2298 } 2299 2300 if (agg_node_iter->valid && 2301 agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) { 2302 agg_id = agg_node_iter->agg_id; 2303 agg_node = agg_node_iter; 2304 break; 2305 } 2306 2307 /* find unclaimed agg_id */ 2308 if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) { 2309 agg_id = node_offset + agg_node_id_start; 2310 agg_node = agg_node_iter; 2311 break; 2312 } 2313 /* move to next agg_node */ 2314 agg_node_iter++; 2315 } 2316 2317 if (!agg_node) 2318 return; 2319 2320 /* if selected aggregator node was not created, create it */ 2321 if (!agg_node->valid) { 2322 status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG, 2323 (u8)vsi->tc_cfg.ena_tc); 2324 if (status) { 2325 dev_err(dev, "unable to create aggregator node with agg_id %u\n", 2326 agg_id); 2327 return; 2328 } 2329 /* aggregator node is created, store the needed info */ 2330 agg_node->valid = true; 2331 agg_node->agg_id = agg_id; 2332 } 2333 2334 /* move VSI to corresponding aggregator node */ 2335 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx, 2336 (u8)vsi->tc_cfg.ena_tc); 2337 if (status) { 2338 dev_err(dev, "unable to move VSI idx %u into aggregator %u node", 2339 vsi->idx, agg_id); 2340 return; 2341 } 2342 2343 /* keep active children count for aggregator node */ 2344 agg_node->num_vsis++; 2345 2346 /* cache the 'agg_id' in VSI, so that after reset - VSI will be moved 2347 * to aggregator node 2348 */ 2349 vsi->agg_node = agg_node; 2350 dev_dbg(dev, "successfully moved VSI idx %u tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n", 2351 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id, 2352 vsi->agg_node->num_vsis); 2353 } 2354 2355 static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi) 2356 { 2357 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2358 struct device *dev = ice_pf_to_dev(pf); 2359 int ret, i; 2360 2361 /* configure VSI nodes based on number of queues and TC's */ 2362 ice_for_each_traffic_class(i) { 2363 if (!(vsi->tc_cfg.ena_tc & BIT(i))) 2364 continue; 2365 2366 if (vsi->type == ICE_VSI_CHNL) { 2367 if (!vsi->alloc_txq && vsi->num_txq) 2368 max_txqs[i] = vsi->num_txq; 2369 else 2370 max_txqs[i] = pf->num_lan_tx; 2371 } else { 2372 max_txqs[i] = vsi->alloc_txq; 2373 } 2374 } 2375 2376 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); 2377 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2378 max_txqs); 2379 if (ret) { 2380 dev_err(dev, "VSI %d failed lan queue config, error %d\n", 2381 vsi->vsi_num, ret); 2382 return ret; 2383 } 2384 2385 return 0; 2386 } 2387 2388 /** 2389 * ice_vsi_cfg_def - configure default VSI based on the type 2390 * @vsi: pointer to VSI 2391 * @params: the parameters to configure this VSI with 2392 */ 2393 static int 2394 ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) 2395 { 2396 struct device *dev = ice_pf_to_dev(vsi->back); 2397 struct ice_pf *pf = vsi->back; 2398 int ret; 2399 2400 vsi->vsw = pf->first_sw; 2401 2402 ret = ice_vsi_alloc_def(vsi, params->ch); 2403 if (ret) 2404 return ret; 2405 2406 /* allocate memory for Tx/Rx ring stat pointers */ 2407 ret = ice_vsi_alloc_stat_arrays(vsi); 2408 if (ret) 2409 goto unroll_vsi_alloc; 2410 2411 ice_alloc_fd_res(vsi); 2412 2413 ret = ice_vsi_get_qs(vsi); 2414 if (ret) { 2415 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", 2416 vsi->idx); 2417 goto unroll_vsi_alloc_stat; 2418 } 2419 2420 /* set RSS capabilities */ 2421 ice_vsi_set_rss_params(vsi); 2422 2423 /* set TC configuration */ 2424 ice_vsi_set_tc_cfg(vsi); 2425 2426 /* create the VSI */ 2427 ret = ice_vsi_init(vsi, params->flags); 2428 if (ret) 2429 goto unroll_get_qs; 2430 2431 ice_vsi_init_vlan_ops(vsi); 2432 2433 switch (vsi->type) { 2434 case ICE_VSI_CTRL: 2435 case ICE_VSI_SWITCHDEV_CTRL: 2436 case ICE_VSI_PF: 2437 ret = ice_vsi_alloc_q_vectors(vsi); 2438 if (ret) 2439 goto unroll_vsi_init; 2440 2441 ret = ice_vsi_alloc_rings(vsi); 2442 if (ret) 2443 goto unroll_vector_base; 2444 2445 ret = ice_vsi_alloc_ring_stats(vsi); 2446 if (ret) 2447 goto unroll_vector_base; 2448 2449 ice_vsi_map_rings_to_vectors(vsi); 2450 vsi->stat_offsets_loaded = false; 2451 2452 if (ice_is_xdp_ena_vsi(vsi)) { 2453 ret = ice_vsi_determine_xdp_res(vsi); 2454 if (ret) 2455 goto unroll_vector_base; 2456 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); 2457 if (ret) 2458 goto unroll_vector_base; 2459 } 2460 2461 /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ 2462 if (vsi->type != ICE_VSI_CTRL) 2463 /* Do not exit if configuring RSS had an issue, at 2464 * least receive traffic on first queue. Hence no 2465 * need to capture return value 2466 */ 2467 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2468 ice_vsi_cfg_rss_lut_key(vsi); 2469 ice_vsi_set_rss_flow_fld(vsi); 2470 } 2471 ice_init_arfs(vsi); 2472 break; 2473 case ICE_VSI_CHNL: 2474 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2475 ice_vsi_cfg_rss_lut_key(vsi); 2476 ice_vsi_set_rss_flow_fld(vsi); 2477 } 2478 break; 2479 case ICE_VSI_VF: 2480 /* VF driver will take care of creating netdev for this type and 2481 * map queues to vectors through Virtchnl, PF driver only 2482 * creates a VSI and corresponding structures for bookkeeping 2483 * purpose 2484 */ 2485 ret = ice_vsi_alloc_q_vectors(vsi); 2486 if (ret) 2487 goto unroll_vsi_init; 2488 2489 ret = ice_vsi_alloc_rings(vsi); 2490 if (ret) 2491 goto unroll_alloc_q_vector; 2492 2493 ret = ice_vsi_alloc_ring_stats(vsi); 2494 if (ret) 2495 goto unroll_vector_base; 2496 2497 vsi->stat_offsets_loaded = false; 2498 2499 /* Do not exit if configuring RSS had an issue, at least 2500 * receive traffic on first queue. Hence no need to capture 2501 * return value 2502 */ 2503 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2504 ice_vsi_cfg_rss_lut_key(vsi); 2505 ice_vsi_set_vf_rss_flow_fld(vsi); 2506 } 2507 break; 2508 case ICE_VSI_LB: 2509 ret = ice_vsi_alloc_rings(vsi); 2510 if (ret) 2511 goto unroll_vsi_init; 2512 2513 ret = ice_vsi_alloc_ring_stats(vsi); 2514 if (ret) 2515 goto unroll_vector_base; 2516 2517 break; 2518 default: 2519 /* clean up the resources and exit */ 2520 ret = -EINVAL; 2521 goto unroll_vsi_init; 2522 } 2523 2524 return 0; 2525 2526 unroll_vector_base: 2527 /* reclaim SW interrupts back to the common pool */ 2528 unroll_alloc_q_vector: 2529 ice_vsi_free_q_vectors(vsi); 2530 unroll_vsi_init: 2531 ice_vsi_delete_from_hw(vsi); 2532 unroll_get_qs: 2533 ice_vsi_put_qs(vsi); 2534 unroll_vsi_alloc_stat: 2535 ice_vsi_free_stats(vsi); 2536 unroll_vsi_alloc: 2537 ice_vsi_free_arrays(vsi); 2538 return ret; 2539 } 2540 2541 /** 2542 * ice_vsi_cfg - configure a previously allocated VSI 2543 * @vsi: pointer to VSI 2544 * @params: parameters used to configure this VSI 2545 */ 2546 int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) 2547 { 2548 struct ice_pf *pf = vsi->back; 2549 int ret; 2550 2551 if (WARN_ON(params->type == ICE_VSI_VF && !params->vf)) 2552 return -EINVAL; 2553 2554 vsi->type = params->type; 2555 vsi->port_info = params->pi; 2556 2557 /* For VSIs which don't have a connected VF, this will be NULL */ 2558 vsi->vf = params->vf; 2559 2560 ret = ice_vsi_cfg_def(vsi, params); 2561 if (ret) 2562 return ret; 2563 2564 ret = ice_vsi_cfg_tc_lan(vsi->back, vsi); 2565 if (ret) 2566 ice_vsi_decfg(vsi); 2567 2568 if (vsi->type == ICE_VSI_CTRL) { 2569 if (vsi->vf) { 2570 WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI); 2571 vsi->vf->ctrl_vsi_idx = vsi->idx; 2572 } else { 2573 WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI); 2574 pf->ctrl_vsi_idx = vsi->idx; 2575 } 2576 } 2577 2578 return ret; 2579 } 2580 2581 /** 2582 * ice_vsi_decfg - remove all VSI configuration 2583 * @vsi: pointer to VSI 2584 */ 2585 void ice_vsi_decfg(struct ice_vsi *vsi) 2586 { 2587 struct ice_pf *pf = vsi->back; 2588 int err; 2589 2590 /* The Rx rule will only exist to remove if the LLDP FW 2591 * engine is currently stopped 2592 */ 2593 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && 2594 !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) 2595 ice_cfg_sw_lldp(vsi, false, false); 2596 2597 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); 2598 err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); 2599 if (err) 2600 dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", 2601 vsi->vsi_num, err); 2602 2603 if (ice_is_xdp_ena_vsi(vsi)) 2604 /* return value check can be skipped here, it always returns 2605 * 0 if reset is in progress 2606 */ 2607 ice_destroy_xdp_rings(vsi); 2608 2609 ice_vsi_clear_rings(vsi); 2610 ice_vsi_free_q_vectors(vsi); 2611 ice_vsi_put_qs(vsi); 2612 ice_vsi_free_arrays(vsi); 2613 2614 /* SR-IOV determines needed MSIX resources all at once instead of per 2615 * VSI since when VFs are spawned we know how many VFs there are and how 2616 * many interrupts each VF needs. SR-IOV MSIX resources are also 2617 * cleared in the same manner. 2618 */ 2619 2620 if (vsi->type == ICE_VSI_VF && 2621 vsi->agg_node && vsi->agg_node->valid) 2622 vsi->agg_node->num_vsis--; 2623 if (vsi->agg_node) { 2624 vsi->agg_node->valid = false; 2625 vsi->agg_node->agg_id = 0; 2626 } 2627 } 2628 2629 /** 2630 * ice_vsi_setup - Set up a VSI by a given type 2631 * @pf: board private structure 2632 * @params: parameters to use when creating the VSI 2633 * 2634 * This allocates the sw VSI structure and its queue resources. 2635 * 2636 * Returns pointer to the successfully allocated and configured VSI sw struct on 2637 * success, NULL on failure. 2638 */ 2639 struct ice_vsi * 2640 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) 2641 { 2642 struct device *dev = ice_pf_to_dev(pf); 2643 struct ice_vsi *vsi; 2644 int ret; 2645 2646 /* ice_vsi_setup can only initialize a new VSI, and we must have 2647 * a port_info structure for it. 2648 */ 2649 if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) || 2650 WARN_ON(!params->pi)) 2651 return NULL; 2652 2653 vsi = ice_vsi_alloc(pf); 2654 if (!vsi) { 2655 dev_err(dev, "could not allocate VSI\n"); 2656 return NULL; 2657 } 2658 2659 ret = ice_vsi_cfg(vsi, params); 2660 if (ret) 2661 goto err_vsi_cfg; 2662 2663 /* Add switch rule to drop all Tx Flow Control Frames, of look up 2664 * type ETHERTYPE from VSIs, and restrict malicious VF from sending 2665 * out PAUSE or PFC frames. If enabled, FW can still send FC frames. 2666 * The rule is added once for PF VSI in order to create appropriate 2667 * recipe, since VSI/VSI list is ignored with drop action... 2668 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to 2669 * be dropped so that VFs cannot send LLDP packets to reconfig DCB 2670 * settings in the HW. 2671 */ 2672 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { 2673 ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, 2674 ICE_DROP_PACKET); 2675 ice_cfg_sw_lldp(vsi, true, true); 2676 } 2677 2678 if (!vsi->agg_node) 2679 ice_set_agg_vsi(vsi); 2680 2681 return vsi; 2682 2683 err_vsi_cfg: 2684 ice_vsi_free(vsi); 2685 2686 return NULL; 2687 } 2688 2689 /** 2690 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW 2691 * @vsi: the VSI being cleaned up 2692 */ 2693 static void ice_vsi_release_msix(struct ice_vsi *vsi) 2694 { 2695 struct ice_pf *pf = vsi->back; 2696 struct ice_hw *hw = &pf->hw; 2697 u32 txq = 0; 2698 u32 rxq = 0; 2699 int i, q; 2700 2701 ice_for_each_q_vector(vsi, i) { 2702 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2703 2704 ice_write_intrl(q_vector, 0); 2705 for (q = 0; q < q_vector->num_ring_tx; q++) { 2706 ice_write_itr(&q_vector->tx, 0); 2707 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); 2708 if (ice_is_xdp_ena_vsi(vsi)) { 2709 u32 xdp_txq = txq + vsi->num_xdp_txq; 2710 2711 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); 2712 } 2713 txq++; 2714 } 2715 2716 for (q = 0; q < q_vector->num_ring_rx; q++) { 2717 ice_write_itr(&q_vector->rx, 0); 2718 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); 2719 rxq++; 2720 } 2721 } 2722 2723 ice_flush(hw); 2724 } 2725 2726 /** 2727 * ice_vsi_free_irq - Free the IRQ association with the OS 2728 * @vsi: the VSI being configured 2729 */ 2730 void ice_vsi_free_irq(struct ice_vsi *vsi) 2731 { 2732 struct ice_pf *pf = vsi->back; 2733 int i; 2734 2735 if (!vsi->q_vectors || !vsi->irqs_ready) 2736 return; 2737 2738 ice_vsi_release_msix(vsi); 2739 if (vsi->type == ICE_VSI_VF) 2740 return; 2741 2742 vsi->irqs_ready = false; 2743 ice_free_cpu_rx_rmap(vsi); 2744 2745 ice_for_each_q_vector(vsi, i) { 2746 int irq_num; 2747 2748 irq_num = vsi->q_vectors[i]->irq.virq; 2749 2750 /* free only the irqs that were actually requested */ 2751 if (!vsi->q_vectors[i] || 2752 !(vsi->q_vectors[i]->num_ring_tx || 2753 vsi->q_vectors[i]->num_ring_rx)) 2754 continue; 2755 2756 /* clear the affinity notifier in the IRQ descriptor */ 2757 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) 2758 irq_set_affinity_notifier(irq_num, NULL); 2759 2760 /* clear the affinity_mask in the IRQ descriptor */ 2761 irq_set_affinity_hint(irq_num, NULL); 2762 synchronize_irq(irq_num); 2763 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); 2764 } 2765 } 2766 2767 /** 2768 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues 2769 * @vsi: the VSI having resources freed 2770 */ 2771 void ice_vsi_free_tx_rings(struct ice_vsi *vsi) 2772 { 2773 int i; 2774 2775 if (!vsi->tx_rings) 2776 return; 2777 2778 ice_for_each_txq(vsi, i) 2779 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 2780 ice_free_tx_ring(vsi->tx_rings[i]); 2781 } 2782 2783 /** 2784 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues 2785 * @vsi: the VSI having resources freed 2786 */ 2787 void ice_vsi_free_rx_rings(struct ice_vsi *vsi) 2788 { 2789 int i; 2790 2791 if (!vsi->rx_rings) 2792 return; 2793 2794 ice_for_each_rxq(vsi, i) 2795 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) 2796 ice_free_rx_ring(vsi->rx_rings[i]); 2797 } 2798 2799 /** 2800 * ice_vsi_close - Shut down a VSI 2801 * @vsi: the VSI being shut down 2802 */ 2803 void ice_vsi_close(struct ice_vsi *vsi) 2804 { 2805 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) 2806 ice_down(vsi); 2807 2808 ice_vsi_free_irq(vsi); 2809 ice_vsi_free_tx_rings(vsi); 2810 ice_vsi_free_rx_rings(vsi); 2811 } 2812 2813 /** 2814 * ice_ena_vsi - resume a VSI 2815 * @vsi: the VSI being resume 2816 * @locked: is the rtnl_lock already held 2817 */ 2818 int ice_ena_vsi(struct ice_vsi *vsi, bool locked) 2819 { 2820 int err = 0; 2821 2822 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state)) 2823 return 0; 2824 2825 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 2826 2827 if (vsi->netdev && vsi->type == ICE_VSI_PF) { 2828 if (netif_running(vsi->netdev)) { 2829 if (!locked) 2830 rtnl_lock(); 2831 2832 err = ice_open_internal(vsi->netdev); 2833 2834 if (!locked) 2835 rtnl_unlock(); 2836 } 2837 } else if (vsi->type == ICE_VSI_CTRL) { 2838 err = ice_vsi_open_ctrl(vsi); 2839 } 2840 2841 return err; 2842 } 2843 2844 /** 2845 * ice_dis_vsi - pause a VSI 2846 * @vsi: the VSI being paused 2847 * @locked: is the rtnl_lock already held 2848 */ 2849 void ice_dis_vsi(struct ice_vsi *vsi, bool locked) 2850 { 2851 if (test_bit(ICE_VSI_DOWN, vsi->state)) 2852 return; 2853 2854 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 2855 2856 if (vsi->type == ICE_VSI_PF && vsi->netdev) { 2857 if (netif_running(vsi->netdev)) { 2858 if (!locked) 2859 rtnl_lock(); 2860 2861 ice_vsi_close(vsi); 2862 2863 if (!locked) 2864 rtnl_unlock(); 2865 } else { 2866 ice_vsi_close(vsi); 2867 } 2868 } else if (vsi->type == ICE_VSI_CTRL || 2869 vsi->type == ICE_VSI_SWITCHDEV_CTRL) { 2870 ice_vsi_close(vsi); 2871 } 2872 } 2873 2874 /** 2875 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI 2876 * @vsi: the VSI being un-configured 2877 */ 2878 void ice_vsi_dis_irq(struct ice_vsi *vsi) 2879 { 2880 struct ice_pf *pf = vsi->back; 2881 struct ice_hw *hw = &pf->hw; 2882 u32 val; 2883 int i; 2884 2885 /* disable interrupt causation from each queue */ 2886 if (vsi->tx_rings) { 2887 ice_for_each_txq(vsi, i) { 2888 if (vsi->tx_rings[i]) { 2889 u16 reg; 2890 2891 reg = vsi->tx_rings[i]->reg_idx; 2892 val = rd32(hw, QINT_TQCTL(reg)); 2893 val &= ~QINT_TQCTL_CAUSE_ENA_M; 2894 wr32(hw, QINT_TQCTL(reg), val); 2895 } 2896 } 2897 } 2898 2899 if (vsi->rx_rings) { 2900 ice_for_each_rxq(vsi, i) { 2901 if (vsi->rx_rings[i]) { 2902 u16 reg; 2903 2904 reg = vsi->rx_rings[i]->reg_idx; 2905 val = rd32(hw, QINT_RQCTL(reg)); 2906 val &= ~QINT_RQCTL_CAUSE_ENA_M; 2907 wr32(hw, QINT_RQCTL(reg), val); 2908 } 2909 } 2910 } 2911 2912 /* disable each interrupt */ 2913 ice_for_each_q_vector(vsi, i) { 2914 if (!vsi->q_vectors[i]) 2915 continue; 2916 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); 2917 } 2918 2919 ice_flush(hw); 2920 2921 /* don't call synchronize_irq() for VF's from the host */ 2922 if (vsi->type == ICE_VSI_VF) 2923 return; 2924 2925 ice_for_each_q_vector(vsi, i) 2926 synchronize_irq(vsi->q_vectors[i]->irq.virq); 2927 } 2928 2929 /** 2930 * ice_vsi_release - Delete a VSI and free its resources 2931 * @vsi: the VSI being removed 2932 * 2933 * Returns 0 on success or < 0 on error 2934 */ 2935 int ice_vsi_release(struct ice_vsi *vsi) 2936 { 2937 struct ice_pf *pf; 2938 2939 if (!vsi->back) 2940 return -ENODEV; 2941 pf = vsi->back; 2942 2943 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2944 ice_rss_clean(vsi); 2945 2946 ice_vsi_close(vsi); 2947 ice_vsi_decfg(vsi); 2948 2949 /* retain SW VSI data structure since it is needed to unregister and 2950 * free VSI netdev when PF is not in reset recovery pending state,\ 2951 * for ex: during rmmod. 2952 */ 2953 if (!ice_is_reset_in_progress(pf->state)) 2954 ice_vsi_delete(vsi); 2955 2956 return 0; 2957 } 2958 2959 /** 2960 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors 2961 * @vsi: VSI connected with q_vectors 2962 * @coalesce: array of struct with stored coalesce 2963 * 2964 * Returns array size. 2965 */ 2966 static int 2967 ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi, 2968 struct ice_coalesce_stored *coalesce) 2969 { 2970 int i; 2971 2972 ice_for_each_q_vector(vsi, i) { 2973 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2974 2975 coalesce[i].itr_tx = q_vector->tx.itr_settings; 2976 coalesce[i].itr_rx = q_vector->rx.itr_settings; 2977 coalesce[i].intrl = q_vector->intrl; 2978 2979 if (i < vsi->num_txq) 2980 coalesce[i].tx_valid = true; 2981 if (i < vsi->num_rxq) 2982 coalesce[i].rx_valid = true; 2983 } 2984 2985 return vsi->num_q_vectors; 2986 } 2987 2988 /** 2989 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays 2990 * @vsi: VSI connected with q_vectors 2991 * @coalesce: pointer to array of struct with stored coalesce 2992 * @size: size of coalesce array 2993 * 2994 * Before this function, ice_vsi_rebuild_get_coalesce should be called to save 2995 * ITR params in arrays. If size is 0 or coalesce wasn't stored set coalesce 2996 * to default value. 2997 */ 2998 static void 2999 ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, 3000 struct ice_coalesce_stored *coalesce, int size) 3001 { 3002 struct ice_ring_container *rc; 3003 int i; 3004 3005 if ((size && !coalesce) || !vsi) 3006 return; 3007 3008 /* There are a couple of cases that have to be handled here: 3009 * 1. The case where the number of queue vectors stays the same, but 3010 * the number of Tx or Rx rings changes (the first for loop) 3011 * 2. The case where the number of queue vectors increased (the 3012 * second for loop) 3013 */ 3014 for (i = 0; i < size && i < vsi->num_q_vectors; i++) { 3015 /* There are 2 cases to handle here and they are the same for 3016 * both Tx and Rx: 3017 * if the entry was valid previously (coalesce[i].[tr]x_valid 3018 * and the loop variable is less than the number of rings 3019 * allocated, then write the previous values 3020 * 3021 * if the entry was not valid previously, but the number of 3022 * rings is less than are allocated (this means the number of 3023 * rings increased from previously), then write out the 3024 * values in the first element 3025 * 3026 * Also, always write the ITR, even if in ITR_IS_DYNAMIC 3027 * as there is no harm because the dynamic algorithm 3028 * will just overwrite. 3029 */ 3030 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) { 3031 rc = &vsi->q_vectors[i]->rx; 3032 rc->itr_settings = coalesce[i].itr_rx; 3033 ice_write_itr(rc, rc->itr_setting); 3034 } else if (i < vsi->alloc_rxq) { 3035 rc = &vsi->q_vectors[i]->rx; 3036 rc->itr_settings = coalesce[0].itr_rx; 3037 ice_write_itr(rc, rc->itr_setting); 3038 } 3039 3040 if (i < vsi->alloc_txq && coalesce[i].tx_valid) { 3041 rc = &vsi->q_vectors[i]->tx; 3042 rc->itr_settings = coalesce[i].itr_tx; 3043 ice_write_itr(rc, rc->itr_setting); 3044 } else if (i < vsi->alloc_txq) { 3045 rc = &vsi->q_vectors[i]->tx; 3046 rc->itr_settings = coalesce[0].itr_tx; 3047 ice_write_itr(rc, rc->itr_setting); 3048 } 3049 3050 vsi->q_vectors[i]->intrl = coalesce[i].intrl; 3051 ice_set_q_vector_intrl(vsi->q_vectors[i]); 3052 } 3053 3054 /* the number of queue vectors increased so write whatever is in 3055 * the first element 3056 */ 3057 for (; i < vsi->num_q_vectors; i++) { 3058 /* transmit */ 3059 rc = &vsi->q_vectors[i]->tx; 3060 rc->itr_settings = coalesce[0].itr_tx; 3061 ice_write_itr(rc, rc->itr_setting); 3062 3063 /* receive */ 3064 rc = &vsi->q_vectors[i]->rx; 3065 rc->itr_settings = coalesce[0].itr_rx; 3066 ice_write_itr(rc, rc->itr_setting); 3067 3068 vsi->q_vectors[i]->intrl = coalesce[0].intrl; 3069 ice_set_q_vector_intrl(vsi->q_vectors[i]); 3070 } 3071 } 3072 3073 /** 3074 * ice_vsi_realloc_stat_arrays - Frees unused stat structures 3075 * @vsi: VSI pointer 3076 * @prev_txq: Number of Tx rings before ring reallocation 3077 * @prev_rxq: Number of Rx rings before ring reallocation 3078 */ 3079 static void 3080 ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq) 3081 { 3082 struct ice_vsi_stats *vsi_stat; 3083 struct ice_pf *pf = vsi->back; 3084 int i; 3085 3086 if (!prev_txq || !prev_rxq) 3087 return; 3088 if (vsi->type == ICE_VSI_CHNL) 3089 return; 3090 3091 vsi_stat = pf->vsi_stats[vsi->idx]; 3092 3093 if (vsi->num_txq < prev_txq) { 3094 for (i = vsi->num_txq; i < prev_txq; i++) { 3095 if (vsi_stat->tx_ring_stats[i]) { 3096 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); 3097 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); 3098 } 3099 } 3100 } 3101 3102 if (vsi->num_rxq < prev_rxq) { 3103 for (i = vsi->num_rxq; i < prev_rxq; i++) { 3104 if (vsi_stat->rx_ring_stats[i]) { 3105 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); 3106 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); 3107 } 3108 } 3109 } 3110 } 3111 3112 /** 3113 * ice_vsi_rebuild - Rebuild VSI after reset 3114 * @vsi: VSI to be rebuild 3115 * @vsi_flags: flags used for VSI rebuild flow 3116 * 3117 * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or 3118 * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware. 3119 * 3120 * Returns 0 on success and negative value on failure 3121 */ 3122 int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) 3123 { 3124 struct ice_vsi_cfg_params params = {}; 3125 struct ice_coalesce_stored *coalesce; 3126 int ret, prev_txq, prev_rxq; 3127 int prev_num_q_vectors = 0; 3128 struct ice_pf *pf; 3129 3130 if (!vsi) 3131 return -EINVAL; 3132 3133 params = ice_vsi_to_params(vsi); 3134 params.flags = vsi_flags; 3135 3136 pf = vsi->back; 3137 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) 3138 return -EINVAL; 3139 3140 coalesce = kcalloc(vsi->num_q_vectors, 3141 sizeof(struct ice_coalesce_stored), GFP_KERNEL); 3142 if (!coalesce) 3143 return -ENOMEM; 3144 3145 prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); 3146 3147 prev_txq = vsi->num_txq; 3148 prev_rxq = vsi->num_rxq; 3149 3150 ice_vsi_decfg(vsi); 3151 ret = ice_vsi_cfg_def(vsi, ¶ms); 3152 if (ret) 3153 goto err_vsi_cfg; 3154 3155 ret = ice_vsi_cfg_tc_lan(pf, vsi); 3156 if (ret) { 3157 if (vsi_flags & ICE_VSI_FLAG_INIT) { 3158 ret = -EIO; 3159 goto err_vsi_cfg_tc_lan; 3160 } 3161 3162 kfree(coalesce); 3163 return ice_schedule_reset(pf, ICE_RESET_PFR); 3164 } 3165 3166 ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq); 3167 3168 ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors); 3169 kfree(coalesce); 3170 3171 return 0; 3172 3173 err_vsi_cfg_tc_lan: 3174 ice_vsi_decfg(vsi); 3175 err_vsi_cfg: 3176 kfree(coalesce); 3177 return ret; 3178 } 3179 3180 /** 3181 * ice_is_reset_in_progress - check for a reset in progress 3182 * @state: PF state field 3183 */ 3184 bool ice_is_reset_in_progress(unsigned long *state) 3185 { 3186 return test_bit(ICE_RESET_OICR_RECV, state) || 3187 test_bit(ICE_PFR_REQ, state) || 3188 test_bit(ICE_CORER_REQ, state) || 3189 test_bit(ICE_GLOBR_REQ, state); 3190 } 3191 3192 /** 3193 * ice_wait_for_reset - Wait for driver to finish reset and rebuild 3194 * @pf: pointer to the PF structure 3195 * @timeout: length of time to wait, in jiffies 3196 * 3197 * Wait (sleep) for a short time until the driver finishes cleaning up from 3198 * a device reset. The caller must be able to sleep. Use this to delay 3199 * operations that could fail while the driver is cleaning up after a device 3200 * reset. 3201 * 3202 * Returns 0 on success, -EBUSY if the reset is not finished within the 3203 * timeout, and -ERESTARTSYS if the thread was interrupted. 3204 */ 3205 int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout) 3206 { 3207 long ret; 3208 3209 ret = wait_event_interruptible_timeout(pf->reset_wait_queue, 3210 !ice_is_reset_in_progress(pf->state), 3211 timeout); 3212 if (ret < 0) 3213 return ret; 3214 else if (!ret) 3215 return -EBUSY; 3216 else 3217 return 0; 3218 } 3219 3220 /** 3221 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map 3222 * @vsi: VSI being configured 3223 * @ctx: the context buffer returned from AQ VSI update command 3224 */ 3225 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) 3226 { 3227 vsi->info.mapping_flags = ctx->info.mapping_flags; 3228 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, 3229 sizeof(vsi->info.q_mapping)); 3230 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, 3231 sizeof(vsi->info.tc_mapping)); 3232 } 3233 3234 /** 3235 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration 3236 * @vsi: the VSI being configured 3237 * @ena_tc: TC map to be enabled 3238 */ 3239 void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) 3240 { 3241 struct net_device *netdev = vsi->netdev; 3242 struct ice_pf *pf = vsi->back; 3243 int numtc = vsi->tc_cfg.numtc; 3244 struct ice_dcbx_cfg *dcbcfg; 3245 u8 netdev_tc; 3246 int i; 3247 3248 if (!netdev) 3249 return; 3250 3251 /* CHNL VSI doesn't have it's own netdev, hence, no netdev_tc */ 3252 if (vsi->type == ICE_VSI_CHNL) 3253 return; 3254 3255 if (!ena_tc) { 3256 netdev_reset_tc(netdev); 3257 return; 3258 } 3259 3260 if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf)) 3261 numtc = vsi->all_numtc; 3262 3263 if (netdev_set_num_tc(netdev, numtc)) 3264 return; 3265 3266 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; 3267 3268 ice_for_each_traffic_class(i) 3269 if (vsi->tc_cfg.ena_tc & BIT(i)) 3270 netdev_set_tc_queue(netdev, 3271 vsi->tc_cfg.tc_info[i].netdev_tc, 3272 vsi->tc_cfg.tc_info[i].qcount_tx, 3273 vsi->tc_cfg.tc_info[i].qoffset); 3274 /* setup TC queue map for CHNL TCs */ 3275 ice_for_each_chnl_tc(i) { 3276 if (!(vsi->all_enatc & BIT(i))) 3277 break; 3278 if (!vsi->mqprio_qopt.qopt.count[i]) 3279 break; 3280 netdev_set_tc_queue(netdev, i, 3281 vsi->mqprio_qopt.qopt.count[i], 3282 vsi->mqprio_qopt.qopt.offset[i]); 3283 } 3284 3285 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3286 return; 3287 3288 for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { 3289 u8 ets_tc = dcbcfg->etscfg.prio_table[i]; 3290 3291 /* Get the mapped netdev TC# for the UP */ 3292 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; 3293 netdev_set_prio_tc_map(netdev, i, netdev_tc); 3294 } 3295 } 3296 3297 /** 3298 * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config 3299 * @vsi: the VSI being configured, 3300 * @ctxt: VSI context structure 3301 * @ena_tc: number of traffic classes to enable 3302 * 3303 * Prepares VSI tc_config to have queue configurations based on MQPRIO options. 3304 */ 3305 static int 3306 ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, 3307 u8 ena_tc) 3308 { 3309 u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap; 3310 u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0]; 3311 int tc0_qcount = vsi->mqprio_qopt.qopt.count[0]; 3312 u16 new_txq, new_rxq; 3313 u8 netdev_tc = 0; 3314 int i; 3315 3316 vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1; 3317 3318 pow = order_base_2(tc0_qcount); 3319 qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & 3320 ICE_AQ_VSI_TC_Q_OFFSET_M) | 3321 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M); 3322 3323 ice_for_each_traffic_class(i) { 3324 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { 3325 /* TC is not enabled */ 3326 vsi->tc_cfg.tc_info[i].qoffset = 0; 3327 vsi->tc_cfg.tc_info[i].qcount_rx = 1; 3328 vsi->tc_cfg.tc_info[i].qcount_tx = 1; 3329 vsi->tc_cfg.tc_info[i].netdev_tc = 0; 3330 ctxt->info.tc_mapping[i] = 0; 3331 continue; 3332 } 3333 3334 offset = vsi->mqprio_qopt.qopt.offset[i]; 3335 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; 3336 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; 3337 vsi->tc_cfg.tc_info[i].qoffset = offset; 3338 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; 3339 vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx; 3340 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; 3341 } 3342 3343 if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) { 3344 ice_for_each_chnl_tc(i) { 3345 if (!(vsi->all_enatc & BIT(i))) 3346 continue; 3347 offset = vsi->mqprio_qopt.qopt.offset[i]; 3348 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; 3349 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; 3350 } 3351 } 3352 3353 new_txq = offset + qcount_tx; 3354 if (new_txq > vsi->alloc_txq) { 3355 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", 3356 new_txq, vsi->alloc_txq); 3357 return -EINVAL; 3358 } 3359 3360 new_rxq = offset + qcount_rx; 3361 if (new_rxq > vsi->alloc_rxq) { 3362 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", 3363 new_rxq, vsi->alloc_rxq); 3364 return -EINVAL; 3365 } 3366 3367 /* Set actual Tx/Rx queue pairs */ 3368 vsi->num_txq = new_txq; 3369 vsi->num_rxq = new_rxq; 3370 3371 /* Setup queue TC[0].qmap for given VSI context */ 3372 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); 3373 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); 3374 ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount); 3375 3376 /* Find queue count available for channel VSIs and starting offset 3377 * for channel VSIs 3378 */ 3379 if (tc0_qcount && tc0_qcount < vsi->num_rxq) { 3380 vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount; 3381 vsi->next_base_q = tc0_qcount; 3382 } 3383 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq); 3384 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq); 3385 dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n", 3386 vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc); 3387 3388 return 0; 3389 } 3390 3391 /** 3392 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map 3393 * @vsi: VSI to be configured 3394 * @ena_tc: TC bitmap 3395 * 3396 * VSI queues expected to be quiesced before calling this function 3397 */ 3398 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) 3399 { 3400 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 3401 struct ice_pf *pf = vsi->back; 3402 struct ice_tc_cfg old_tc_cfg; 3403 struct ice_vsi_ctx *ctx; 3404 struct device *dev; 3405 int i, ret = 0; 3406 u8 num_tc = 0; 3407 3408 dev = ice_pf_to_dev(pf); 3409 if (vsi->tc_cfg.ena_tc == ena_tc && 3410 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) 3411 return 0; 3412 3413 ice_for_each_traffic_class(i) { 3414 /* build bitmap of enabled TCs */ 3415 if (ena_tc & BIT(i)) 3416 num_tc++; 3417 /* populate max_txqs per TC */ 3418 max_txqs[i] = vsi->alloc_txq; 3419 /* Update max_txqs if it is CHNL VSI, because alloc_t[r]xq are 3420 * zero for CHNL VSI, hence use num_txq instead as max_txqs 3421 */ 3422 if (vsi->type == ICE_VSI_CHNL && 3423 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3424 max_txqs[i] = vsi->num_txq; 3425 } 3426 3427 memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg)); 3428 vsi->tc_cfg.ena_tc = ena_tc; 3429 vsi->tc_cfg.numtc = num_tc; 3430 3431 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 3432 if (!ctx) 3433 return -ENOMEM; 3434 3435 ctx->vf_num = 0; 3436 ctx->info = vsi->info; 3437 3438 if (vsi->type == ICE_VSI_PF && 3439 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3440 ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); 3441 else 3442 ret = ice_vsi_setup_q_map(vsi, ctx); 3443 3444 if (ret) { 3445 memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg)); 3446 goto out; 3447 } 3448 3449 /* must to indicate which section of VSI context are being modified */ 3450 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); 3451 ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); 3452 if (ret) { 3453 dev_info(dev, "Failed VSI Update\n"); 3454 goto out; 3455 } 3456 3457 if (vsi->type == ICE_VSI_PF && 3458 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3459 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); 3460 else 3461 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 3462 vsi->tc_cfg.ena_tc, max_txqs); 3463 3464 if (ret) { 3465 dev_err(dev, "VSI %d failed TC config, error %d\n", 3466 vsi->vsi_num, ret); 3467 goto out; 3468 } 3469 ice_vsi_update_q_map(vsi, ctx); 3470 vsi->info.valid_sections = 0; 3471 3472 ice_vsi_cfg_netdev_tc(vsi, ena_tc); 3473 out: 3474 kfree(ctx); 3475 return ret; 3476 } 3477 3478 /** 3479 * ice_update_ring_stats - Update ring statistics 3480 * @stats: stats to be updated 3481 * @pkts: number of processed packets 3482 * @bytes: number of processed bytes 3483 * 3484 * This function assumes that caller has acquired a u64_stats_sync lock. 3485 */ 3486 static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes) 3487 { 3488 stats->bytes += bytes; 3489 stats->pkts += pkts; 3490 } 3491 3492 /** 3493 * ice_update_tx_ring_stats - Update Tx ring specific counters 3494 * @tx_ring: ring to update 3495 * @pkts: number of processed packets 3496 * @bytes: number of processed bytes 3497 */ 3498 void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes) 3499 { 3500 u64_stats_update_begin(&tx_ring->ring_stats->syncp); 3501 ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes); 3502 u64_stats_update_end(&tx_ring->ring_stats->syncp); 3503 } 3504 3505 /** 3506 * ice_update_rx_ring_stats - Update Rx ring specific counters 3507 * @rx_ring: ring to update 3508 * @pkts: number of processed packets 3509 * @bytes: number of processed bytes 3510 */ 3511 void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes) 3512 { 3513 u64_stats_update_begin(&rx_ring->ring_stats->syncp); 3514 ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes); 3515 u64_stats_update_end(&rx_ring->ring_stats->syncp); 3516 } 3517 3518 /** 3519 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used 3520 * @pi: port info of the switch with default VSI 3521 * 3522 * Return true if the there is a single VSI in default forwarding VSI list 3523 */ 3524 bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi) 3525 { 3526 bool exists = false; 3527 3528 ice_check_if_dflt_vsi(pi, 0, &exists); 3529 return exists; 3530 } 3531 3532 /** 3533 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI 3534 * @vsi: VSI to compare against default forwarding VSI 3535 * 3536 * If this VSI passed in is the default forwarding VSI then return true, else 3537 * return false 3538 */ 3539 bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi) 3540 { 3541 return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL); 3542 } 3543 3544 /** 3545 * ice_set_dflt_vsi - set the default forwarding VSI 3546 * @vsi: VSI getting set as the default forwarding VSI on the switch 3547 * 3548 * If the VSI passed in is already the default VSI and it's enabled just return 3549 * success. 3550 * 3551 * Otherwise try to set the VSI passed in as the switch's default VSI and 3552 * return the result. 3553 */ 3554 int ice_set_dflt_vsi(struct ice_vsi *vsi) 3555 { 3556 struct device *dev; 3557 int status; 3558 3559 if (!vsi) 3560 return -EINVAL; 3561 3562 dev = ice_pf_to_dev(vsi->back); 3563 3564 if (ice_lag_is_switchdev_running(vsi->back)) { 3565 dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n", 3566 vsi->vsi_num); 3567 return 0; 3568 } 3569 3570 /* the VSI passed in is already the default VSI */ 3571 if (ice_is_vsi_dflt_vsi(vsi)) { 3572 dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n", 3573 vsi->vsi_num); 3574 return 0; 3575 } 3576 3577 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX); 3578 if (status) { 3579 dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n", 3580 vsi->vsi_num, status); 3581 return status; 3582 } 3583 3584 return 0; 3585 } 3586 3587 /** 3588 * ice_clear_dflt_vsi - clear the default forwarding VSI 3589 * @vsi: VSI to remove from filter list 3590 * 3591 * If the switch has no default VSI or it's not enabled then return error. 3592 * 3593 * Otherwise try to clear the default VSI and return the result. 3594 */ 3595 int ice_clear_dflt_vsi(struct ice_vsi *vsi) 3596 { 3597 struct device *dev; 3598 int status; 3599 3600 if (!vsi) 3601 return -EINVAL; 3602 3603 dev = ice_pf_to_dev(vsi->back); 3604 3605 /* there is no default VSI configured */ 3606 if (!ice_is_dflt_vsi_in_use(vsi->port_info)) 3607 return -ENODEV; 3608 3609 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false, 3610 ICE_FLTR_RX); 3611 if (status) { 3612 dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n", 3613 vsi->vsi_num, status); 3614 return -EIO; 3615 } 3616 3617 return 0; 3618 } 3619 3620 /** 3621 * ice_get_link_speed_mbps - get link speed in Mbps 3622 * @vsi: the VSI whose link speed is being queried 3623 * 3624 * Return current VSI link speed and 0 if the speed is unknown. 3625 */ 3626 int ice_get_link_speed_mbps(struct ice_vsi *vsi) 3627 { 3628 unsigned int link_speed; 3629 3630 link_speed = vsi->port_info->phy.link_info.link_speed; 3631 3632 return (int)ice_get_link_speed(fls(link_speed) - 1); 3633 } 3634 3635 /** 3636 * ice_get_link_speed_kbps - get link speed in Kbps 3637 * @vsi: the VSI whose link speed is being queried 3638 * 3639 * Return current VSI link speed and 0 if the speed is unknown. 3640 */ 3641 int ice_get_link_speed_kbps(struct ice_vsi *vsi) 3642 { 3643 int speed_mbps; 3644 3645 speed_mbps = ice_get_link_speed_mbps(vsi); 3646 3647 return speed_mbps * 1000; 3648 } 3649 3650 /** 3651 * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate 3652 * @vsi: VSI to be configured 3653 * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit 3654 * 3655 * If the min_tx_rate is specified as 0 that means to clear the minimum BW limit 3656 * profile, otherwise a non-zero value will force a minimum BW limit for the VSI 3657 * on TC 0. 3658 */ 3659 int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) 3660 { 3661 struct ice_pf *pf = vsi->back; 3662 struct device *dev; 3663 int status; 3664 int speed; 3665 3666 dev = ice_pf_to_dev(pf); 3667 if (!vsi->port_info) { 3668 dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n", 3669 vsi->idx, vsi->type); 3670 return -EINVAL; 3671 } 3672 3673 speed = ice_get_link_speed_kbps(vsi); 3674 if (min_tx_rate > (u64)speed) { 3675 dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n", 3676 min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, 3677 speed); 3678 return -EINVAL; 3679 } 3680 3681 /* Configure min BW for VSI limit */ 3682 if (min_tx_rate) { 3683 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, 3684 ICE_MIN_BW, min_tx_rate); 3685 if (status) { 3686 dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n", 3687 min_tx_rate, ice_vsi_type_str(vsi->type), 3688 vsi->idx); 3689 return status; 3690 } 3691 3692 dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n", 3693 min_tx_rate, ice_vsi_type_str(vsi->type)); 3694 } else { 3695 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, 3696 vsi->idx, 0, 3697 ICE_MIN_BW); 3698 if (status) { 3699 dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n", 3700 ice_vsi_type_str(vsi->type), vsi->idx); 3701 return status; 3702 } 3703 3704 dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n", 3705 ice_vsi_type_str(vsi->type), vsi->idx); 3706 } 3707 3708 return 0; 3709 } 3710 3711 /** 3712 * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate 3713 * @vsi: VSI to be configured 3714 * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit 3715 * 3716 * If the max_tx_rate is specified as 0 that means to clear the maximum BW limit 3717 * profile, otherwise a non-zero value will force a maximum BW limit for the VSI 3718 * on TC 0. 3719 */ 3720 int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) 3721 { 3722 struct ice_pf *pf = vsi->back; 3723 struct device *dev; 3724 int status; 3725 int speed; 3726 3727 dev = ice_pf_to_dev(pf); 3728 if (!vsi->port_info) { 3729 dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n", 3730 vsi->idx, vsi->type); 3731 return -EINVAL; 3732 } 3733 3734 speed = ice_get_link_speed_kbps(vsi); 3735 if (max_tx_rate > (u64)speed) { 3736 dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n", 3737 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, 3738 speed); 3739 return -EINVAL; 3740 } 3741 3742 /* Configure max BW for VSI limit */ 3743 if (max_tx_rate) { 3744 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, 3745 ICE_MAX_BW, max_tx_rate); 3746 if (status) { 3747 dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n", 3748 max_tx_rate, ice_vsi_type_str(vsi->type), 3749 vsi->idx); 3750 return status; 3751 } 3752 3753 dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n", 3754 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); 3755 } else { 3756 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, 3757 vsi->idx, 0, 3758 ICE_MAX_BW); 3759 if (status) { 3760 dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n", 3761 ice_vsi_type_str(vsi->type), vsi->idx); 3762 return status; 3763 } 3764 3765 dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n", 3766 ice_vsi_type_str(vsi->type), vsi->idx); 3767 } 3768 3769 return 0; 3770 } 3771 3772 /** 3773 * ice_set_link - turn on/off physical link 3774 * @vsi: VSI to modify physical link on 3775 * @ena: turn on/off physical link 3776 */ 3777 int ice_set_link(struct ice_vsi *vsi, bool ena) 3778 { 3779 struct device *dev = ice_pf_to_dev(vsi->back); 3780 struct ice_port_info *pi = vsi->port_info; 3781 struct ice_hw *hw = pi->hw; 3782 int status; 3783 3784 if (vsi->type != ICE_VSI_PF) 3785 return -EINVAL; 3786 3787 status = ice_aq_set_link_restart_an(pi, ena, NULL); 3788 3789 /* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE. 3790 * this is not a fatal error, so print a warning message and return 3791 * a success code. Return an error if FW returns an error code other 3792 * than ICE_AQ_RC_EMODE 3793 */ 3794 if (status == -EIO) { 3795 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3796 dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n", 3797 (ena ? "ON" : "OFF"), status, 3798 ice_aq_str(hw->adminq.sq_last_status)); 3799 } else if (status) { 3800 dev_err(dev, "can't set link to %s, err %d aq_err %s\n", 3801 (ena ? "ON" : "OFF"), status, 3802 ice_aq_str(hw->adminq.sq_last_status)); 3803 return status; 3804 } 3805 3806 return 0; 3807 } 3808 3809 /** 3810 * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI 3811 * @vsi: VSI used to add VLAN filters 3812 * 3813 * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are based 3814 * on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x888a8) doesn't 3815 * matter. In Double VLAN Mode (DVM), outer/single VLAN filters via 3816 * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID. 3817 * 3818 * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic 3819 * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged 3820 * traffic in SVM, since the VLAN TPID isn't part of filtering. 3821 * 3822 * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be 3823 * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is 3824 * part of filtering. 3825 */ 3826 int ice_vsi_add_vlan_zero(struct ice_vsi *vsi) 3827 { 3828 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 3829 struct ice_vlan vlan; 3830 int err; 3831 3832 vlan = ICE_VLAN(0, 0, 0); 3833 err = vlan_ops->add_vlan(vsi, &vlan); 3834 if (err && err != -EEXIST) 3835 return err; 3836 3837 /* in SVM both VLAN 0 filters are identical */ 3838 if (!ice_is_dvm_ena(&vsi->back->hw)) 3839 return 0; 3840 3841 vlan = ICE_VLAN(ETH_P_8021Q, 0, 0); 3842 err = vlan_ops->add_vlan(vsi, &vlan); 3843 if (err && err != -EEXIST) 3844 return err; 3845 3846 return 0; 3847 } 3848 3849 /** 3850 * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI 3851 * @vsi: VSI used to add VLAN filters 3852 * 3853 * Delete the VLAN 0 filters in the same manner that they were added in 3854 * ice_vsi_add_vlan_zero. 3855 */ 3856 int ice_vsi_del_vlan_zero(struct ice_vsi *vsi) 3857 { 3858 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 3859 struct ice_vlan vlan; 3860 int err; 3861 3862 vlan = ICE_VLAN(0, 0, 0); 3863 err = vlan_ops->del_vlan(vsi, &vlan); 3864 if (err && err != -EEXIST) 3865 return err; 3866 3867 /* in SVM both VLAN 0 filters are identical */ 3868 if (!ice_is_dvm_ena(&vsi->back->hw)) 3869 return 0; 3870 3871 vlan = ICE_VLAN(ETH_P_8021Q, 0, 0); 3872 err = vlan_ops->del_vlan(vsi, &vlan); 3873 if (err && err != -EEXIST) 3874 return err; 3875 3876 /* when deleting the last VLAN filter, make sure to disable the VLAN 3877 * promisc mode so the filter isn't left by accident 3878 */ 3879 return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, 3880 ICE_MCAST_VLAN_PROMISC_BITS, 0); 3881 } 3882 3883 /** 3884 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode 3885 * @vsi: VSI used to get the VLAN mode 3886 * 3887 * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled 3888 * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details. 3889 */ 3890 static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi) 3891 { 3892 #define ICE_DVM_NUM_ZERO_VLAN_FLTRS 2 3893 #define ICE_SVM_NUM_ZERO_VLAN_FLTRS 1 3894 /* no VLAN 0 filter is created when a port VLAN is active */ 3895 if (vsi->type == ICE_VSI_VF) { 3896 if (WARN_ON(!vsi->vf)) 3897 return 0; 3898 3899 if (ice_vf_is_port_vlan_ena(vsi->vf)) 3900 return 0; 3901 } 3902 3903 if (ice_is_dvm_ena(&vsi->back->hw)) 3904 return ICE_DVM_NUM_ZERO_VLAN_FLTRS; 3905 else 3906 return ICE_SVM_NUM_ZERO_VLAN_FLTRS; 3907 } 3908 3909 /** 3910 * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs 3911 * @vsi: VSI used to determine if any non-zero VLANs have been added 3912 */ 3913 bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi) 3914 { 3915 return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi)); 3916 } 3917 3918 /** 3919 * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI 3920 * @vsi: VSI used to get the number of non-zero VLANs added 3921 */ 3922 u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi) 3923 { 3924 return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi)); 3925 } 3926 3927 /** 3928 * ice_is_feature_supported 3929 * @pf: pointer to the struct ice_pf instance 3930 * @f: feature enum to be checked 3931 * 3932 * returns true if feature is supported, false otherwise 3933 */ 3934 bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f) 3935 { 3936 if (f < 0 || f >= ICE_F_MAX) 3937 return false; 3938 3939 return test_bit(f, pf->features); 3940 } 3941 3942 /** 3943 * ice_set_feature_support 3944 * @pf: pointer to the struct ice_pf instance 3945 * @f: feature enum to set 3946 */ 3947 void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f) 3948 { 3949 if (f < 0 || f >= ICE_F_MAX) 3950 return; 3951 3952 set_bit(f, pf->features); 3953 } 3954 3955 /** 3956 * ice_clear_feature_support 3957 * @pf: pointer to the struct ice_pf instance 3958 * @f: feature enum to clear 3959 */ 3960 void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f) 3961 { 3962 if (f < 0 || f >= ICE_F_MAX) 3963 return; 3964 3965 clear_bit(f, pf->features); 3966 } 3967 3968 /** 3969 * ice_init_feature_support 3970 * @pf: pointer to the struct ice_pf instance 3971 * 3972 * called during init to setup supported feature 3973 */ 3974 void ice_init_feature_support(struct ice_pf *pf) 3975 { 3976 switch (pf->hw.device_id) { 3977 case ICE_DEV_ID_E810C_BACKPLANE: 3978 case ICE_DEV_ID_E810C_QSFP: 3979 case ICE_DEV_ID_E810C_SFP: 3980 case ICE_DEV_ID_E810_XXV_BACKPLANE: 3981 case ICE_DEV_ID_E810_XXV_QSFP: 3982 case ICE_DEV_ID_E810_XXV_SFP: 3983 ice_set_feature_support(pf, ICE_F_DSCP); 3984 if (ice_is_phy_rclk_in_netlist(&pf->hw)) 3985 ice_set_feature_support(pf, ICE_F_PHY_RCLK); 3986 /* If we don't own the timer - don't enable other caps */ 3987 if (!ice_pf_src_tmr_owned(pf)) 3988 break; 3989 if (ice_is_cgu_in_netlist(&pf->hw)) 3990 ice_set_feature_support(pf, ICE_F_CGU); 3991 if (ice_is_clock_mux_in_netlist(&pf->hw)) 3992 ice_set_feature_support(pf, ICE_F_SMA_CTRL); 3993 if (ice_gnss_is_gps_present(&pf->hw)) 3994 ice_set_feature_support(pf, ICE_F_GNSS); 3995 break; 3996 default: 3997 break; 3998 } 3999 } 4000 4001 /** 4002 * ice_vsi_update_security - update security block in VSI 4003 * @vsi: pointer to VSI structure 4004 * @fill: function pointer to fill ctx 4005 */ 4006 int 4007 ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *)) 4008 { 4009 struct ice_vsi_ctx ctx = { 0 }; 4010 4011 ctx.info = vsi->info; 4012 ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); 4013 fill(&ctx); 4014 4015 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) 4016 return -ENODEV; 4017 4018 vsi->info = ctx.info; 4019 return 0; 4020 } 4021 4022 /** 4023 * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx 4024 * @ctx: pointer to VSI ctx structure 4025 */ 4026 void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx) 4027 { 4028 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF | 4029 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 4030 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 4031 } 4032 4033 /** 4034 * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx 4035 * @ctx: pointer to VSI ctx structure 4036 */ 4037 void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx) 4038 { 4039 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF & 4040 ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 4041 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 4042 } 4043 4044 /** 4045 * ice_vsi_ctx_set_allow_override - allow destination override on VSI 4046 * @ctx: pointer to VSI ctx structure 4047 */ 4048 void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx) 4049 { 4050 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; 4051 } 4052 4053 /** 4054 * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI 4055 * @ctx: pointer to VSI ctx structure 4056 */ 4057 void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx) 4058 { 4059 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; 4060 } 4061 4062 /** 4063 * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit 4064 * @vsi: pointer to VSI structure 4065 * @set: set or unset the bit 4066 */ 4067 int 4068 ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set) 4069 { 4070 struct ice_vsi_ctx ctx = { 4071 .info = vsi->info, 4072 }; 4073 4074 ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); 4075 if (set) 4076 ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB; 4077 else 4078 ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB; 4079 4080 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) 4081 return -ENODEV; 4082 4083 vsi->info = ctx.info; 4084 return 0; 4085 } 4086