1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Intel Corporation. */ 3 4 #include "ice.h" 5 #include "ice_lib.h" 6 #include "ice_dcb_lib.h" 7 8 /** 9 * ice_setup_rx_ctx - Configure a receive ring context 10 * @ring: The Rx ring to configure 11 * 12 * Configure the Rx descriptor ring in RLAN context. 13 */ 14 static int ice_setup_rx_ctx(struct ice_ring *ring) 15 { 16 struct ice_vsi *vsi = ring->vsi; 17 struct ice_hw *hw = &vsi->back->hw; 18 u32 rxdid = ICE_RXDID_FLEX_NIC; 19 struct ice_rlan_ctx rlan_ctx; 20 u32 regval; 21 u16 pf_q; 22 int err; 23 24 /* what is Rx queue number in global space of 2K Rx queues */ 25 pf_q = vsi->rxq_map[ring->q_index]; 26 27 /* clear the context structure first */ 28 memset(&rlan_ctx, 0, sizeof(rlan_ctx)); 29 30 rlan_ctx.base = ring->dma >> 7; 31 32 rlan_ctx.qlen = ring->count; 33 34 /* Receive Packet Data Buffer Size. 35 * The Packet Data Buffer Size is defined in 128 byte units. 36 */ 37 rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; 38 39 /* use 32 byte descriptors */ 40 rlan_ctx.dsize = 1; 41 42 /* Strip the Ethernet CRC bytes before the packet is posted to host 43 * memory. 44 */ 45 rlan_ctx.crcstrip = 1; 46 47 /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */ 48 rlan_ctx.l2tsel = 1; 49 50 rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; 51 rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; 52 rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; 53 54 /* This controls whether VLAN is stripped from inner headers 55 * The VLAN in the inner L2 header is stripped to the receive 56 * descriptor if enabled by this flag. 57 */ 58 rlan_ctx.showiv = 0; 59 60 /* Max packet size for this queue - must not be set to a larger value 61 * than 5 x DBUF 62 */ 63 rlan_ctx.rxmax = min_t(u16, vsi->max_frame, 64 ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len); 65 66 /* Rx queue threshold in units of 64 */ 67 rlan_ctx.lrxqthresh = 1; 68 69 /* Enable Flexible Descriptors in the queue context which 70 * allows this driver to select a specific receive descriptor format 71 */ 72 if (vsi->type != ICE_VSI_VF) { 73 regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); 74 regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & 75 QRXFLXP_CNTXT_RXDID_IDX_M; 76 77 /* increasing context priority to pick up profile ID; 78 * default is 0x01; setting to 0x03 to ensure profile 79 * is programming if prev context is of same priority 80 */ 81 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & 82 QRXFLXP_CNTXT_RXDID_PRIO_M; 83 84 wr32(hw, QRXFLXP_CNTXT(pf_q), regval); 85 } 86 87 /* Absolute queue number out of 2K needs to be passed */ 88 err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); 89 if (err) { 90 dev_err(&vsi->back->pdev->dev, 91 "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", 92 pf_q, err); 93 return -EIO; 94 } 95 96 if (vsi->type == ICE_VSI_VF) 97 return 0; 98 99 /* init queue specific tail register */ 100 ring->tail = hw->hw_addr + QRX_TAIL(pf_q); 101 writel(0, ring->tail); 102 ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring)); 103 104 return 0; 105 } 106 107 /** 108 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance 109 * @ring: The Tx ring to configure 110 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized 111 * @pf_q: queue index in the PF space 112 * 113 * Configure the Tx descriptor ring in TLAN context. 114 */ 115 static void 116 ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) 117 { 118 struct ice_vsi *vsi = ring->vsi; 119 struct ice_hw *hw = &vsi->back->hw; 120 121 tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; 122 123 tlan_ctx->port_num = vsi->port_info->lport; 124 125 /* Transmit Queue Length */ 126 tlan_ctx->qlen = ring->count; 127 128 ice_set_cgd_num(tlan_ctx, ring); 129 130 /* PF number */ 131 tlan_ctx->pf_num = hw->pf_id; 132 133 /* queue belongs to a specific VSI type 134 * VF / VM index should be programmed per vmvf_type setting: 135 * for vmvf_type = VF, it is VF number between 0-256 136 * for vmvf_type = VM, it is VM number between 0-767 137 * for PF or EMP this field should be set to zero 138 */ 139 switch (vsi->type) { 140 case ICE_VSI_PF: 141 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; 142 break; 143 case ICE_VSI_VF: 144 /* Firmware expects vmvf_num to be absolute VF ID */ 145 tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id; 146 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; 147 break; 148 default: 149 return; 150 } 151 152 /* make sure the context is associated with the right VSI */ 153 tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx); 154 155 tlan_ctx->tso_ena = ICE_TX_LEGACY; 156 tlan_ctx->tso_qnum = pf_q; 157 158 /* Legacy or Advanced Host Interface: 159 * 0: Advanced Host Interface 160 * 1: Legacy Host Interface 161 */ 162 tlan_ctx->legacy_int = ICE_TX_LEGACY; 163 } 164 165 /** 166 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled 167 * @pf: the PF being configured 168 * @pf_q: the PF queue 169 * @ena: enable or disable state of the queue 170 * 171 * This routine will wait for the given Rx queue of the PF to reach the 172 * enabled or disabled state. 173 * Returns -ETIMEDOUT in case of failing to reach the requested state after 174 * multiple retries; else will return 0 in case of success. 175 */ 176 static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) 177 { 178 int i; 179 180 for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) { 181 if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) & 182 QRX_CTRL_QENA_STAT_M)) 183 return 0; 184 185 usleep_range(20, 40); 186 } 187 188 return -ETIMEDOUT; 189 } 190 191 /** 192 * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings 193 * @vsi: the VSI being configured 194 * @ena: start or stop the Rx rings 195 */ 196 static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) 197 { 198 struct ice_pf *pf = vsi->back; 199 struct ice_hw *hw = &pf->hw; 200 int i, ret = 0; 201 202 for (i = 0; i < vsi->num_rxq; i++) { 203 int pf_q = vsi->rxq_map[i]; 204 u32 rx_reg; 205 206 rx_reg = rd32(hw, QRX_CTRL(pf_q)); 207 208 /* Skip if the queue is already in the requested state */ 209 if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) 210 continue; 211 212 /* turn on/off the queue */ 213 if (ena) 214 rx_reg |= QRX_CTRL_QENA_REQ_M; 215 else 216 rx_reg &= ~QRX_CTRL_QENA_REQ_M; 217 wr32(hw, QRX_CTRL(pf_q), rx_reg); 218 219 /* wait for the change to finish */ 220 ret = ice_pf_rxq_wait(pf, pf_q, ena); 221 if (ret) { 222 dev_err(&pf->pdev->dev, 223 "VSI idx %d Rx ring %d %sable timeout\n", 224 vsi->idx, pf_q, (ena ? "en" : "dis")); 225 break; 226 } 227 } 228 229 return ret; 230 } 231 232 /** 233 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI 234 * @vsi: VSI pointer 235 * 236 * On error: returns error code (negative) 237 * On success: returns 0 238 */ 239 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) 240 { 241 struct ice_pf *pf = vsi->back; 242 243 /* allocate memory for both Tx and Rx ring pointers */ 244 vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, 245 sizeof(*vsi->tx_rings), GFP_KERNEL); 246 if (!vsi->tx_rings) 247 goto err_txrings; 248 249 vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, 250 sizeof(*vsi->rx_rings), GFP_KERNEL); 251 if (!vsi->rx_rings) 252 goto err_rxrings; 253 254 /* allocate memory for q_vector pointers */ 255 vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors, 256 sizeof(*vsi->q_vectors), GFP_KERNEL); 257 if (!vsi->q_vectors) 258 goto err_vectors; 259 260 return 0; 261 262 err_vectors: 263 devm_kfree(&pf->pdev->dev, vsi->rx_rings); 264 err_rxrings: 265 devm_kfree(&pf->pdev->dev, vsi->tx_rings); 266 err_txrings: 267 return -ENOMEM; 268 } 269 270 /** 271 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI 272 * @vsi: the VSI being configured 273 */ 274 static void ice_vsi_set_num_desc(struct ice_vsi *vsi) 275 { 276 switch (vsi->type) { 277 case ICE_VSI_PF: 278 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; 279 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; 280 break; 281 default: 282 dev_dbg(&vsi->back->pdev->dev, 283 "Not setting number of Tx/Rx descriptors for VSI type %d\n", 284 vsi->type); 285 break; 286 } 287 } 288 289 /** 290 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI 291 * @vsi: the VSI being configured 292 * @vf_id: ID of the VF being configured 293 * 294 * Return 0 on success and a negative value on error 295 */ 296 static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) 297 { 298 struct ice_pf *pf = vsi->back; 299 struct ice_vf *vf = NULL; 300 301 if (vsi->type == ICE_VSI_VF) 302 vsi->vf_id = vf_id; 303 304 switch (vsi->type) { 305 case ICE_VSI_PF: 306 vsi->alloc_txq = pf->num_lan_tx; 307 vsi->alloc_rxq = pf->num_lan_rx; 308 vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); 309 break; 310 case ICE_VSI_VF: 311 vf = &pf->vf[vsi->vf_id]; 312 vsi->alloc_txq = vf->num_vf_qs; 313 vsi->alloc_rxq = vf->num_vf_qs; 314 /* pf->num_vf_msix includes (VF miscellaneous vector + 315 * data queue interrupts). Since vsi->num_q_vectors is number 316 * of queues vectors, subtract 1 from the original vector 317 * count 318 */ 319 vsi->num_q_vectors = pf->num_vf_msix - 1; 320 break; 321 default: 322 dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); 323 break; 324 } 325 326 ice_vsi_set_num_desc(vsi); 327 } 328 329 /** 330 * ice_get_free_slot - get the next non-NULL location index in array 331 * @array: array to search 332 * @size: size of the array 333 * @curr: last known occupied index to be used as a search hint 334 * 335 * void * is being used to keep the functionality generic. This lets us use this 336 * function on any array of pointers. 337 */ 338 static int ice_get_free_slot(void *array, int size, int curr) 339 { 340 int **tmp_array = (int **)array; 341 int next; 342 343 if (curr < (size - 1) && !tmp_array[curr + 1]) { 344 next = curr + 1; 345 } else { 346 int i = 0; 347 348 while ((i < size) && (tmp_array[i])) 349 i++; 350 if (i == size) 351 next = ICE_NO_VSI; 352 else 353 next = i; 354 } 355 return next; 356 } 357 358 /** 359 * ice_vsi_delete - delete a VSI from the switch 360 * @vsi: pointer to VSI being removed 361 */ 362 void ice_vsi_delete(struct ice_vsi *vsi) 363 { 364 struct ice_pf *pf = vsi->back; 365 struct ice_vsi_ctx *ctxt; 366 enum ice_status status; 367 368 ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL); 369 if (!ctxt) 370 return; 371 372 if (vsi->type == ICE_VSI_VF) 373 ctxt->vf_num = vsi->vf_id; 374 ctxt->vsi_num = vsi->vsi_num; 375 376 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info)); 377 378 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); 379 if (status) 380 dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", 381 vsi->vsi_num); 382 383 devm_kfree(&pf->pdev->dev, ctxt); 384 } 385 386 /** 387 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI 388 * @vsi: pointer to VSI being cleared 389 */ 390 static void ice_vsi_free_arrays(struct ice_vsi *vsi) 391 { 392 struct ice_pf *pf = vsi->back; 393 394 /* free the ring and vector containers */ 395 if (vsi->q_vectors) { 396 devm_kfree(&pf->pdev->dev, vsi->q_vectors); 397 vsi->q_vectors = NULL; 398 } 399 if (vsi->tx_rings) { 400 devm_kfree(&pf->pdev->dev, vsi->tx_rings); 401 vsi->tx_rings = NULL; 402 } 403 if (vsi->rx_rings) { 404 devm_kfree(&pf->pdev->dev, vsi->rx_rings); 405 vsi->rx_rings = NULL; 406 } 407 } 408 409 /** 410 * ice_vsi_clear - clean up and deallocate the provided VSI 411 * @vsi: pointer to VSI being cleared 412 * 413 * This deallocates the VSI's queue resources, removes it from the PF's 414 * VSI array if necessary, and deallocates the VSI 415 * 416 * Returns 0 on success, negative on failure 417 */ 418 int ice_vsi_clear(struct ice_vsi *vsi) 419 { 420 struct ice_pf *pf = NULL; 421 422 if (!vsi) 423 return 0; 424 425 if (!vsi->back) 426 return -EINVAL; 427 428 pf = vsi->back; 429 430 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { 431 dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n", 432 vsi->idx); 433 return -EINVAL; 434 } 435 436 mutex_lock(&pf->sw_mutex); 437 /* updates the PF for this cleared VSI */ 438 439 pf->vsi[vsi->idx] = NULL; 440 if (vsi->idx < pf->next_vsi) 441 pf->next_vsi = vsi->idx; 442 443 ice_vsi_free_arrays(vsi); 444 mutex_unlock(&pf->sw_mutex); 445 devm_kfree(&pf->pdev->dev, vsi); 446 447 return 0; 448 } 449 450 /** 451 * ice_msix_clean_rings - MSIX mode Interrupt Handler 452 * @irq: interrupt number 453 * @data: pointer to a q_vector 454 */ 455 static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) 456 { 457 struct ice_q_vector *q_vector = (struct ice_q_vector *)data; 458 459 if (!q_vector->tx.ring && !q_vector->rx.ring) 460 return IRQ_HANDLED; 461 462 napi_schedule(&q_vector->napi); 463 464 return IRQ_HANDLED; 465 } 466 467 /** 468 * ice_vsi_alloc - Allocates the next available struct VSI in the PF 469 * @pf: board private structure 470 * @type: type of VSI 471 * @vf_id: ID of the VF being configured 472 * 473 * returns a pointer to a VSI on success, NULL on failure. 474 */ 475 static struct ice_vsi * 476 ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id) 477 { 478 struct ice_vsi *vsi = NULL; 479 480 /* Need to protect the allocation of the VSIs at the PF level */ 481 mutex_lock(&pf->sw_mutex); 482 483 /* If we have already allocated our maximum number of VSIs, 484 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index 485 * is available to be populated 486 */ 487 if (pf->next_vsi == ICE_NO_VSI) { 488 dev_dbg(&pf->pdev->dev, "out of VSI slots!\n"); 489 goto unlock_pf; 490 } 491 492 vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL); 493 if (!vsi) 494 goto unlock_pf; 495 496 vsi->type = type; 497 vsi->back = pf; 498 set_bit(__ICE_DOWN, vsi->state); 499 vsi->idx = pf->next_vsi; 500 vsi->work_lmt = ICE_DFLT_IRQ_WORK; 501 502 if (type == ICE_VSI_VF) 503 ice_vsi_set_num_qs(vsi, vf_id); 504 else 505 ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); 506 507 switch (vsi->type) { 508 case ICE_VSI_PF: 509 if (ice_vsi_alloc_arrays(vsi)) 510 goto err_rings; 511 512 /* Setup default MSIX irq handler for VSI */ 513 vsi->irq_handler = ice_msix_clean_rings; 514 break; 515 case ICE_VSI_VF: 516 if (ice_vsi_alloc_arrays(vsi)) 517 goto err_rings; 518 break; 519 default: 520 dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); 521 goto unlock_pf; 522 } 523 524 /* fill VSI slot in the PF struct */ 525 pf->vsi[pf->next_vsi] = vsi; 526 527 /* prepare pf->next_vsi for next use */ 528 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, 529 pf->next_vsi); 530 goto unlock_pf; 531 532 err_rings: 533 devm_kfree(&pf->pdev->dev, vsi); 534 vsi = NULL; 535 unlock_pf: 536 mutex_unlock(&pf->sw_mutex); 537 return vsi; 538 } 539 540 /** 541 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI 542 * @qs_cfg: gathered variables needed for PF->VSI queues assignment 543 * 544 * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap 545 */ 546 static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg) 547 { 548 int offset, i; 549 550 mutex_lock(qs_cfg->qs_mutex); 551 offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size, 552 0, qs_cfg->q_count, 0); 553 if (offset >= qs_cfg->pf_map_size) { 554 mutex_unlock(qs_cfg->qs_mutex); 555 return -ENOMEM; 556 } 557 558 bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count); 559 for (i = 0; i < qs_cfg->q_count; i++) 560 qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset; 561 mutex_unlock(qs_cfg->qs_mutex); 562 563 return 0; 564 } 565 566 /** 567 * __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI 568 * @qs_cfg: gathered variables needed for pf->vsi queues assignment 569 * 570 * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap 571 */ 572 static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg) 573 { 574 int i, index = 0; 575 576 mutex_lock(qs_cfg->qs_mutex); 577 for (i = 0; i < qs_cfg->q_count; i++) { 578 index = find_next_zero_bit(qs_cfg->pf_map, 579 qs_cfg->pf_map_size, index); 580 if (index >= qs_cfg->pf_map_size) 581 goto err_scatter; 582 set_bit(index, qs_cfg->pf_map); 583 qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index; 584 } 585 mutex_unlock(qs_cfg->qs_mutex); 586 587 return 0; 588 err_scatter: 589 for (index = 0; index < i; index++) { 590 clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map); 591 qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0; 592 } 593 mutex_unlock(qs_cfg->qs_mutex); 594 595 return -ENOMEM; 596 } 597 598 /** 599 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI 600 * @qs_cfg: gathered variables needed for pf->vsi queues assignment 601 * 602 * This function first tries to find contiguous space. If it is not successful, 603 * it tries with the scatter approach. 604 * 605 * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap 606 */ 607 static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg) 608 { 609 int ret = 0; 610 611 ret = __ice_vsi_get_qs_contig(qs_cfg); 612 if (ret) { 613 /* contig failed, so try with scatter approach */ 614 qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER; 615 qs_cfg->q_count = min_t(u16, qs_cfg->q_count, 616 qs_cfg->scatter_count); 617 ret = __ice_vsi_get_qs_sc(qs_cfg); 618 } 619 return ret; 620 } 621 622 /** 623 * ice_vsi_get_qs - Assign queues from PF to VSI 624 * @vsi: the VSI to assign queues to 625 * 626 * Returns 0 on success and a negative value on error 627 */ 628 static int ice_vsi_get_qs(struct ice_vsi *vsi) 629 { 630 struct ice_pf *pf = vsi->back; 631 struct ice_qs_cfg tx_qs_cfg = { 632 .qs_mutex = &pf->avail_q_mutex, 633 .pf_map = pf->avail_txqs, 634 .pf_map_size = ICE_MAX_TXQS, 635 .q_count = vsi->alloc_txq, 636 .scatter_count = ICE_MAX_SCATTER_TXQS, 637 .vsi_map = vsi->txq_map, 638 .vsi_map_offset = 0, 639 .mapping_mode = vsi->tx_mapping_mode 640 }; 641 struct ice_qs_cfg rx_qs_cfg = { 642 .qs_mutex = &pf->avail_q_mutex, 643 .pf_map = pf->avail_rxqs, 644 .pf_map_size = ICE_MAX_RXQS, 645 .q_count = vsi->alloc_rxq, 646 .scatter_count = ICE_MAX_SCATTER_RXQS, 647 .vsi_map = vsi->rxq_map, 648 .vsi_map_offset = 0, 649 .mapping_mode = vsi->rx_mapping_mode 650 }; 651 int ret = 0; 652 653 vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG; 654 vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG; 655 656 ret = __ice_vsi_get_qs(&tx_qs_cfg); 657 if (!ret) 658 ret = __ice_vsi_get_qs(&rx_qs_cfg); 659 660 return ret; 661 } 662 663 /** 664 * ice_vsi_put_qs - Release queues from VSI to PF 665 * @vsi: the VSI that is going to release queues 666 */ 667 void ice_vsi_put_qs(struct ice_vsi *vsi) 668 { 669 struct ice_pf *pf = vsi->back; 670 int i; 671 672 mutex_lock(&pf->avail_q_mutex); 673 674 for (i = 0; i < vsi->alloc_txq; i++) { 675 clear_bit(vsi->txq_map[i], pf->avail_txqs); 676 vsi->txq_map[i] = ICE_INVAL_Q_INDEX; 677 } 678 679 for (i = 0; i < vsi->alloc_rxq; i++) { 680 clear_bit(vsi->rxq_map[i], pf->avail_rxqs); 681 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; 682 } 683 684 mutex_unlock(&pf->avail_q_mutex); 685 } 686 687 /** 688 * ice_rss_clean - Delete RSS related VSI structures that hold user inputs 689 * @vsi: the VSI being removed 690 */ 691 static void ice_rss_clean(struct ice_vsi *vsi) 692 { 693 struct ice_pf *pf; 694 695 pf = vsi->back; 696 697 if (vsi->rss_hkey_user) 698 devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user); 699 if (vsi->rss_lut_user) 700 devm_kfree(&pf->pdev->dev, vsi->rss_lut_user); 701 } 702 703 /** 704 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type 705 * @vsi: the VSI being configured 706 */ 707 static void ice_vsi_set_rss_params(struct ice_vsi *vsi) 708 { 709 struct ice_hw_common_caps *cap; 710 struct ice_pf *pf = vsi->back; 711 712 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 713 vsi->rss_size = 1; 714 return; 715 } 716 717 cap = &pf->hw.func_caps.common_cap; 718 switch (vsi->type) { 719 case ICE_VSI_PF: 720 /* PF VSI will inherit RSS instance of PF */ 721 vsi->rss_table_size = cap->rss_table_size; 722 vsi->rss_size = min_t(int, num_online_cpus(), 723 BIT(cap->rss_table_entry_width)); 724 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; 725 break; 726 case ICE_VSI_VF: 727 /* VF VSI will gets a small RSS table 728 * For VSI_LUT, LUT size should be set to 64 bytes 729 */ 730 vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; 731 vsi->rss_size = min_t(int, num_online_cpus(), 732 BIT(cap->rss_table_entry_width)); 733 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; 734 break; 735 default: 736 dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", 737 vsi->type); 738 break; 739 } 740 } 741 742 /** 743 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI 744 * @ctxt: the VSI context being set 745 * 746 * This initializes a default VSI context for all sections except the Queues. 747 */ 748 static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) 749 { 750 u32 table = 0; 751 752 memset(&ctxt->info, 0, sizeof(ctxt->info)); 753 /* VSI's should be allocated from shared pool */ 754 ctxt->alloc_from_pool = true; 755 /* Src pruning enabled by default */ 756 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; 757 /* Traffic from VSI can be sent to LAN */ 758 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; 759 /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy 760 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all 761 * packets untagged/tagged. 762 */ 763 ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & 764 ICE_AQ_VSI_VLAN_MODE_M) >> 765 ICE_AQ_VSI_VLAN_MODE_S); 766 /* Have 1:1 UP mapping for both ingress/egress tables */ 767 table |= ICE_UP_TABLE_TRANSLATE(0, 0); 768 table |= ICE_UP_TABLE_TRANSLATE(1, 1); 769 table |= ICE_UP_TABLE_TRANSLATE(2, 2); 770 table |= ICE_UP_TABLE_TRANSLATE(3, 3); 771 table |= ICE_UP_TABLE_TRANSLATE(4, 4); 772 table |= ICE_UP_TABLE_TRANSLATE(5, 5); 773 table |= ICE_UP_TABLE_TRANSLATE(6, 6); 774 table |= ICE_UP_TABLE_TRANSLATE(7, 7); 775 ctxt->info.ingress_table = cpu_to_le32(table); 776 ctxt->info.egress_table = cpu_to_le32(table); 777 /* Have 1:1 UP mapping for outer to inner UP table */ 778 ctxt->info.outer_up_table = cpu_to_le32(table); 779 /* No Outer tag support outer_tag_flags remains to zero */ 780 } 781 782 /** 783 * ice_vsi_setup_q_map - Setup a VSI queue map 784 * @vsi: the VSI being configured 785 * @ctxt: VSI context structure 786 */ 787 static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) 788 { 789 u16 offset = 0, qmap = 0, tx_count = 0; 790 u16 qcount_tx = vsi->alloc_txq; 791 u16 qcount_rx = vsi->alloc_rxq; 792 u16 tx_numq_tc, rx_numq_tc; 793 u16 pow = 0, max_rss = 0; 794 bool ena_tc0 = false; 795 u8 netdev_tc = 0; 796 int i; 797 798 /* at least TC0 should be enabled by default */ 799 if (vsi->tc_cfg.numtc) { 800 if (!(vsi->tc_cfg.ena_tc & BIT(0))) 801 ena_tc0 = true; 802 } else { 803 ena_tc0 = true; 804 } 805 806 if (ena_tc0) { 807 vsi->tc_cfg.numtc++; 808 vsi->tc_cfg.ena_tc |= 1; 809 } 810 811 rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc; 812 if (!rx_numq_tc) 813 rx_numq_tc = 1; 814 tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc; 815 if (!tx_numq_tc) 816 tx_numq_tc = 1; 817 818 /* TC mapping is a function of the number of Rx queues assigned to the 819 * VSI for each traffic class and the offset of these queues. 820 * The first 10 bits are for queue offset for TC0, next 4 bits for no:of 821 * queues allocated to TC0. No:of queues is a power-of-2. 822 * 823 * If TC is not enabled, the queue offset is set to 0, and allocate one 824 * queue, this way, traffic for the given TC will be sent to the default 825 * queue. 826 * 827 * Setup number and offset of Rx queues for all TCs for the VSI 828 */ 829 830 qcount_rx = rx_numq_tc; 831 832 /* qcount will change if RSS is enabled */ 833 if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) { 834 if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) { 835 if (vsi->type == ICE_VSI_PF) 836 max_rss = ICE_MAX_LG_RSS_QS; 837 else 838 max_rss = ICE_MAX_SMALL_RSS_QS; 839 qcount_rx = min_t(int, rx_numq_tc, max_rss); 840 qcount_rx = min_t(int, qcount_rx, vsi->rss_size); 841 } 842 } 843 844 /* find the (rounded up) power-of-2 of qcount */ 845 pow = order_base_2(qcount_rx); 846 847 ice_for_each_traffic_class(i) { 848 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { 849 /* TC is not enabled */ 850 vsi->tc_cfg.tc_info[i].qoffset = 0; 851 vsi->tc_cfg.tc_info[i].qcount_rx = 1; 852 vsi->tc_cfg.tc_info[i].qcount_tx = 1; 853 vsi->tc_cfg.tc_info[i].netdev_tc = 0; 854 ctxt->info.tc_mapping[i] = 0; 855 continue; 856 } 857 858 /* TC is enabled */ 859 vsi->tc_cfg.tc_info[i].qoffset = offset; 860 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; 861 vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc; 862 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; 863 864 qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & 865 ICE_AQ_VSI_TC_Q_OFFSET_M) | 866 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & 867 ICE_AQ_VSI_TC_Q_NUM_M); 868 offset += qcount_rx; 869 tx_count += tx_numq_tc; 870 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); 871 } 872 873 /* if offset is non-zero, means it is calculated correctly based on 874 * enabled TCs for a given VSI otherwise qcount_rx will always 875 * be correct and non-zero because it is based off - VSI's 876 * allocated Rx queues which is at least 1 (hence qcount_tx will be 877 * at least 1) 878 */ 879 if (offset) 880 vsi->num_rxq = offset; 881 else 882 vsi->num_rxq = qcount_rx; 883 884 vsi->num_txq = tx_count; 885 886 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { 887 dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); 888 /* since there is a chance that num_rxq could have been changed 889 * in the above for loop, make num_txq equal to num_rxq. 890 */ 891 vsi->num_txq = vsi->num_rxq; 892 } 893 894 /* Rx queue mapping */ 895 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); 896 /* q_mapping buffer holds the info for the first queue allocated for 897 * this VSI in the PF space and also the number of queues associated 898 * with this VSI. 899 */ 900 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); 901 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); 902 } 903 904 /** 905 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI 906 * @ctxt: the VSI context being set 907 * @vsi: the VSI being configured 908 */ 909 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) 910 { 911 u8 lut_type, hash_type; 912 struct ice_pf *pf; 913 914 pf = vsi->back; 915 916 switch (vsi->type) { 917 case ICE_VSI_PF: 918 /* PF VSI will inherit RSS instance of PF */ 919 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; 920 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 921 break; 922 case ICE_VSI_VF: 923 /* VF VSI will gets a small RSS table which is a VSI LUT type */ 924 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; 925 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 926 break; 927 default: 928 dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); 929 return; 930 } 931 932 ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & 933 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | 934 ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & 935 ICE_AQ_VSI_Q_OPT_RSS_HASH_M); 936 } 937 938 /** 939 * ice_vsi_init - Create and initialize a VSI 940 * @vsi: the VSI being configured 941 * 942 * This initializes a VSI context depending on the VSI type to be added and 943 * passes it down to the add_vsi aq command to create a new VSI. 944 */ 945 static int ice_vsi_init(struct ice_vsi *vsi) 946 { 947 struct ice_pf *pf = vsi->back; 948 struct ice_hw *hw = &pf->hw; 949 struct ice_vsi_ctx *ctxt; 950 int ret = 0; 951 952 ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL); 953 if (!ctxt) 954 return -ENOMEM; 955 956 ctxt->info = vsi->info; 957 switch (vsi->type) { 958 case ICE_VSI_PF: 959 ctxt->flags = ICE_AQ_VSI_TYPE_PF; 960 break; 961 case ICE_VSI_VF: 962 ctxt->flags = ICE_AQ_VSI_TYPE_VF; 963 /* VF number here is the absolute VF number (0-255) */ 964 ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id; 965 break; 966 default: 967 return -ENODEV; 968 } 969 970 ice_set_dflt_vsi_ctx(ctxt); 971 /* if the switch is in VEB mode, allow VSI loopback */ 972 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) 973 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 974 975 /* Set LUT type and HASH type if RSS is enabled */ 976 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 977 ice_set_rss_vsi_ctx(ctxt, vsi); 978 979 ctxt->info.sw_id = vsi->port_info->sw_id; 980 ice_vsi_setup_q_map(vsi, ctxt); 981 982 /* Enable MAC Antispoof with new VSI being initialized or updated */ 983 if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) { 984 ctxt->info.valid_sections |= 985 cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); 986 ctxt->info.sec_flags |= 987 ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; 988 } 989 990 ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); 991 if (ret) { 992 dev_err(&pf->pdev->dev, 993 "Add VSI failed, err %d\n", ret); 994 return -EIO; 995 } 996 997 /* keep context for update VSI operations */ 998 vsi->info = ctxt->info; 999 1000 /* record VSI number returned */ 1001 vsi->vsi_num = ctxt->vsi_num; 1002 1003 devm_kfree(&pf->pdev->dev, ctxt); 1004 return ret; 1005 } 1006 1007 /** 1008 * ice_free_q_vector - Free memory allocated for a specific interrupt vector 1009 * @vsi: VSI having the memory freed 1010 * @v_idx: index of the vector to be freed 1011 */ 1012 static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) 1013 { 1014 struct ice_q_vector *q_vector; 1015 struct ice_pf *pf = vsi->back; 1016 struct ice_ring *ring; 1017 1018 if (!vsi->q_vectors[v_idx]) { 1019 dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n", 1020 v_idx); 1021 return; 1022 } 1023 q_vector = vsi->q_vectors[v_idx]; 1024 1025 ice_for_each_ring(ring, q_vector->tx) 1026 ring->q_vector = NULL; 1027 ice_for_each_ring(ring, q_vector->rx) 1028 ring->q_vector = NULL; 1029 1030 /* only VSI with an associated netdev is set up with NAPI */ 1031 if (vsi->netdev) 1032 netif_napi_del(&q_vector->napi); 1033 1034 devm_kfree(&pf->pdev->dev, q_vector); 1035 vsi->q_vectors[v_idx] = NULL; 1036 } 1037 1038 /** 1039 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors 1040 * @vsi: the VSI having memory freed 1041 */ 1042 void ice_vsi_free_q_vectors(struct ice_vsi *vsi) 1043 { 1044 int v_idx; 1045 1046 ice_for_each_q_vector(vsi, v_idx) 1047 ice_free_q_vector(vsi, v_idx); 1048 } 1049 1050 /** 1051 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector 1052 * @vsi: the VSI being configured 1053 * @v_idx: index of the vector in the VSI struct 1054 * 1055 * We allocate one q_vector. If allocation fails we return -ENOMEM. 1056 */ 1057 static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) 1058 { 1059 struct ice_pf *pf = vsi->back; 1060 struct ice_q_vector *q_vector; 1061 1062 /* allocate q_vector */ 1063 q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL); 1064 if (!q_vector) 1065 return -ENOMEM; 1066 1067 q_vector->vsi = vsi; 1068 q_vector->v_idx = v_idx; 1069 if (vsi->type == ICE_VSI_VF) 1070 goto out; 1071 /* only set affinity_mask if the CPU is online */ 1072 if (cpu_online(v_idx)) 1073 cpumask_set_cpu(v_idx, &q_vector->affinity_mask); 1074 1075 /* This will not be called in the driver load path because the netdev 1076 * will not be created yet. All other cases with register the NAPI 1077 * handler here (i.e. resume, reset/rebuild, etc.) 1078 */ 1079 if (vsi->netdev) 1080 netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, 1081 NAPI_POLL_WEIGHT); 1082 1083 out: 1084 /* tie q_vector and VSI together */ 1085 vsi->q_vectors[v_idx] = q_vector; 1086 1087 return 0; 1088 } 1089 1090 /** 1091 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors 1092 * @vsi: the VSI being configured 1093 * 1094 * We allocate one q_vector per queue interrupt. If allocation fails we 1095 * return -ENOMEM. 1096 */ 1097 static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) 1098 { 1099 struct ice_pf *pf = vsi->back; 1100 int v_idx = 0, num_q_vectors; 1101 int err; 1102 1103 if (vsi->q_vectors[0]) { 1104 dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n", 1105 vsi->vsi_num); 1106 return -EEXIST; 1107 } 1108 1109 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { 1110 num_q_vectors = vsi->num_q_vectors; 1111 } else { 1112 err = -EINVAL; 1113 goto err_out; 1114 } 1115 1116 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 1117 err = ice_vsi_alloc_q_vector(vsi, v_idx); 1118 if (err) 1119 goto err_out; 1120 } 1121 1122 return 0; 1123 1124 err_out: 1125 while (v_idx--) 1126 ice_free_q_vector(vsi, v_idx); 1127 1128 dev_err(&pf->pdev->dev, 1129 "Failed to allocate %d q_vector for VSI %d, ret=%d\n", 1130 vsi->num_q_vectors, vsi->vsi_num, err); 1131 vsi->num_q_vectors = 0; 1132 return err; 1133 } 1134 1135 /** 1136 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI 1137 * @vsi: ptr to the VSI 1138 * 1139 * This should only be called after ice_vsi_alloc() which allocates the 1140 * corresponding SW VSI structure and initializes num_queue_pairs for the 1141 * newly allocated VSI. 1142 * 1143 * Returns 0 on success or negative on failure 1144 */ 1145 static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) 1146 { 1147 struct ice_pf *pf = vsi->back; 1148 int num_q_vectors = 0; 1149 1150 if (vsi->sw_base_vector || vsi->hw_base_vector) { 1151 dev_dbg(&pf->pdev->dev, "VSI %d has non-zero HW base vector %d or SW base vector %d\n", 1152 vsi->vsi_num, vsi->hw_base_vector, vsi->sw_base_vector); 1153 return -EEXIST; 1154 } 1155 1156 if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 1157 return -ENOENT; 1158 1159 switch (vsi->type) { 1160 case ICE_VSI_PF: 1161 num_q_vectors = vsi->num_q_vectors; 1162 /* reserve slots from OS requested IRQs */ 1163 vsi->sw_base_vector = ice_get_res(pf, pf->sw_irq_tracker, 1164 num_q_vectors, vsi->idx); 1165 if (vsi->sw_base_vector < 0) { 1166 dev_err(&pf->pdev->dev, 1167 "Failed to get tracking for %d SW vectors for VSI %d, err=%d\n", 1168 num_q_vectors, vsi->vsi_num, 1169 vsi->sw_base_vector); 1170 return -ENOENT; 1171 } 1172 pf->num_avail_sw_msix -= num_q_vectors; 1173 1174 /* reserve slots from HW interrupts */ 1175 vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker, 1176 num_q_vectors, vsi->idx); 1177 break; 1178 case ICE_VSI_VF: 1179 /* take VF misc vector and data vectors into account */ 1180 num_q_vectors = pf->num_vf_msix; 1181 /* For VF VSI, reserve slots only from HW interrupts */ 1182 vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker, 1183 num_q_vectors, vsi->idx); 1184 break; 1185 default: 1186 dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); 1187 break; 1188 } 1189 1190 if (vsi->hw_base_vector < 0) { 1191 dev_err(&pf->pdev->dev, 1192 "Failed to get tracking for %d HW vectors for VSI %d, err=%d\n", 1193 num_q_vectors, vsi->vsi_num, vsi->hw_base_vector); 1194 if (vsi->type != ICE_VSI_VF) { 1195 ice_free_res(pf->sw_irq_tracker, 1196 vsi->sw_base_vector, vsi->idx); 1197 pf->num_avail_sw_msix += num_q_vectors; 1198 } 1199 return -ENOENT; 1200 } 1201 1202 pf->num_avail_hw_msix -= num_q_vectors; 1203 1204 return 0; 1205 } 1206 1207 /** 1208 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI 1209 * @vsi: the VSI having rings deallocated 1210 */ 1211 static void ice_vsi_clear_rings(struct ice_vsi *vsi) 1212 { 1213 int i; 1214 1215 if (vsi->tx_rings) { 1216 for (i = 0; i < vsi->alloc_txq; i++) { 1217 if (vsi->tx_rings[i]) { 1218 kfree_rcu(vsi->tx_rings[i], rcu); 1219 vsi->tx_rings[i] = NULL; 1220 } 1221 } 1222 } 1223 if (vsi->rx_rings) { 1224 for (i = 0; i < vsi->alloc_rxq; i++) { 1225 if (vsi->rx_rings[i]) { 1226 kfree_rcu(vsi->rx_rings[i], rcu); 1227 vsi->rx_rings[i] = NULL; 1228 } 1229 } 1230 } 1231 } 1232 1233 /** 1234 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI 1235 * @vsi: VSI which is having rings allocated 1236 */ 1237 static int ice_vsi_alloc_rings(struct ice_vsi *vsi) 1238 { 1239 struct ice_pf *pf = vsi->back; 1240 int i; 1241 1242 /* Allocate Tx rings */ 1243 for (i = 0; i < vsi->alloc_txq; i++) { 1244 struct ice_ring *ring; 1245 1246 /* allocate with kzalloc(), free with kfree_rcu() */ 1247 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1248 1249 if (!ring) 1250 goto err_out; 1251 1252 ring->q_index = i; 1253 ring->reg_idx = vsi->txq_map[i]; 1254 ring->ring_active = false; 1255 ring->vsi = vsi; 1256 ring->dev = &pf->pdev->dev; 1257 ring->count = vsi->num_tx_desc; 1258 vsi->tx_rings[i] = ring; 1259 } 1260 1261 /* Allocate Rx rings */ 1262 for (i = 0; i < vsi->alloc_rxq; i++) { 1263 struct ice_ring *ring; 1264 1265 /* allocate with kzalloc(), free with kfree_rcu() */ 1266 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1267 if (!ring) 1268 goto err_out; 1269 1270 ring->q_index = i; 1271 ring->reg_idx = vsi->rxq_map[i]; 1272 ring->ring_active = false; 1273 ring->vsi = vsi; 1274 ring->netdev = vsi->netdev; 1275 ring->dev = &pf->pdev->dev; 1276 ring->count = vsi->num_rx_desc; 1277 vsi->rx_rings[i] = ring; 1278 } 1279 1280 return 0; 1281 1282 err_out: 1283 ice_vsi_clear_rings(vsi); 1284 return -ENOMEM; 1285 } 1286 1287 /** 1288 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors 1289 * @vsi: the VSI being configured 1290 * 1291 * This function maps descriptor rings to the queue-specific vectors allotted 1292 * through the MSI-X enabling code. On a constrained vector budget, we map Tx 1293 * and Rx rings to the vector as "efficiently" as possible. 1294 */ 1295 #ifdef CONFIG_DCB 1296 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) 1297 #else 1298 static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) 1299 #endif /* CONFIG_DCB */ 1300 { 1301 int q_vectors = vsi->num_q_vectors; 1302 int tx_rings_rem, rx_rings_rem; 1303 int v_id; 1304 1305 /* initially assigning remaining rings count to VSIs num queue value */ 1306 tx_rings_rem = vsi->num_txq; 1307 rx_rings_rem = vsi->num_rxq; 1308 1309 for (v_id = 0; v_id < q_vectors; v_id++) { 1310 struct ice_q_vector *q_vector = vsi->q_vectors[v_id]; 1311 int tx_rings_per_v, rx_rings_per_v, q_id, q_base; 1312 1313 /* Tx rings mapping to vector */ 1314 tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); 1315 q_vector->num_ring_tx = tx_rings_per_v; 1316 q_vector->tx.ring = NULL; 1317 q_vector->tx.itr_idx = ICE_TX_ITR; 1318 q_base = vsi->num_txq - tx_rings_rem; 1319 1320 for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { 1321 struct ice_ring *tx_ring = vsi->tx_rings[q_id]; 1322 1323 tx_ring->q_vector = q_vector; 1324 tx_ring->next = q_vector->tx.ring; 1325 q_vector->tx.ring = tx_ring; 1326 } 1327 tx_rings_rem -= tx_rings_per_v; 1328 1329 /* Rx rings mapping to vector */ 1330 rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); 1331 q_vector->num_ring_rx = rx_rings_per_v; 1332 q_vector->rx.ring = NULL; 1333 q_vector->rx.itr_idx = ICE_RX_ITR; 1334 q_base = vsi->num_rxq - rx_rings_rem; 1335 1336 for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { 1337 struct ice_ring *rx_ring = vsi->rx_rings[q_id]; 1338 1339 rx_ring->q_vector = q_vector; 1340 rx_ring->next = q_vector->rx.ring; 1341 q_vector->rx.ring = rx_ring; 1342 } 1343 rx_rings_rem -= rx_rings_per_v; 1344 } 1345 } 1346 1347 /** 1348 * ice_vsi_manage_rss_lut - disable/enable RSS 1349 * @vsi: the VSI being changed 1350 * @ena: boolean value indicating if this is an enable or disable request 1351 * 1352 * In the event of disable request for RSS, this function will zero out RSS 1353 * LUT, while in the event of enable request for RSS, it will reconfigure RSS 1354 * LUT. 1355 */ 1356 int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) 1357 { 1358 int err = 0; 1359 u8 *lut; 1360 1361 lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size, 1362 GFP_KERNEL); 1363 if (!lut) 1364 return -ENOMEM; 1365 1366 if (ena) { 1367 if (vsi->rss_lut_user) 1368 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); 1369 else 1370 ice_fill_rss_lut(lut, vsi->rss_table_size, 1371 vsi->rss_size); 1372 } 1373 1374 err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size); 1375 devm_kfree(&vsi->back->pdev->dev, lut); 1376 return err; 1377 } 1378 1379 /** 1380 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI 1381 * @vsi: VSI to be configured 1382 */ 1383 static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) 1384 { 1385 struct ice_aqc_get_set_rss_keys *key; 1386 struct ice_pf *pf = vsi->back; 1387 enum ice_status status; 1388 int err = 0; 1389 u8 *lut; 1390 1391 vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq); 1392 1393 lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL); 1394 if (!lut) 1395 return -ENOMEM; 1396 1397 if (vsi->rss_lut_user) 1398 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); 1399 else 1400 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); 1401 1402 status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut, 1403 vsi->rss_table_size); 1404 1405 if (status) { 1406 dev_err(&pf->pdev->dev, 1407 "set_rss_lut failed, error %d\n", status); 1408 err = -EIO; 1409 goto ice_vsi_cfg_rss_exit; 1410 } 1411 1412 key = devm_kzalloc(&pf->pdev->dev, sizeof(*key), GFP_KERNEL); 1413 if (!key) { 1414 err = -ENOMEM; 1415 goto ice_vsi_cfg_rss_exit; 1416 } 1417 1418 if (vsi->rss_hkey_user) 1419 memcpy(key, 1420 (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user, 1421 ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); 1422 else 1423 netdev_rss_key_fill((void *)key, 1424 ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); 1425 1426 status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key); 1427 1428 if (status) { 1429 dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n", 1430 status); 1431 err = -EIO; 1432 } 1433 1434 devm_kfree(&pf->pdev->dev, key); 1435 ice_vsi_cfg_rss_exit: 1436 devm_kfree(&pf->pdev->dev, lut); 1437 return err; 1438 } 1439 1440 /** 1441 * ice_add_mac_to_list - Add a MAC address filter entry to the list 1442 * @vsi: the VSI to be forwarded to 1443 * @add_list: pointer to the list which contains MAC filter entries 1444 * @macaddr: the MAC address to be added. 1445 * 1446 * Adds MAC address filter entry to the temp list 1447 * 1448 * Returns 0 on success or ENOMEM on failure. 1449 */ 1450 int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, 1451 const u8 *macaddr) 1452 { 1453 struct ice_fltr_list_entry *tmp; 1454 struct ice_pf *pf = vsi->back; 1455 1456 tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC); 1457 if (!tmp) 1458 return -ENOMEM; 1459 1460 tmp->fltr_info.flag = ICE_FLTR_TX; 1461 tmp->fltr_info.src_id = ICE_SRC_ID_VSI; 1462 tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC; 1463 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; 1464 tmp->fltr_info.vsi_handle = vsi->idx; 1465 ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr); 1466 1467 INIT_LIST_HEAD(&tmp->list_entry); 1468 list_add(&tmp->list_entry, add_list); 1469 1470 return 0; 1471 } 1472 1473 /** 1474 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters 1475 * @vsi: the VSI to be updated 1476 */ 1477 void ice_update_eth_stats(struct ice_vsi *vsi) 1478 { 1479 struct ice_eth_stats *prev_es, *cur_es; 1480 struct ice_hw *hw = &vsi->back->hw; 1481 u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ 1482 1483 prev_es = &vsi->eth_stats_prev; 1484 cur_es = &vsi->eth_stats; 1485 1486 ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num), 1487 vsi->stat_offsets_loaded, &prev_es->rx_bytes, 1488 &cur_es->rx_bytes); 1489 1490 ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num), 1491 vsi->stat_offsets_loaded, &prev_es->rx_unicast, 1492 &cur_es->rx_unicast); 1493 1494 ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num), 1495 vsi->stat_offsets_loaded, &prev_es->rx_multicast, 1496 &cur_es->rx_multicast); 1497 1498 ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num), 1499 vsi->stat_offsets_loaded, &prev_es->rx_broadcast, 1500 &cur_es->rx_broadcast); 1501 1502 ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, 1503 &prev_es->rx_discards, &cur_es->rx_discards); 1504 1505 ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num), 1506 vsi->stat_offsets_loaded, &prev_es->tx_bytes, 1507 &cur_es->tx_bytes); 1508 1509 ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num), 1510 vsi->stat_offsets_loaded, &prev_es->tx_unicast, 1511 &cur_es->tx_unicast); 1512 1513 ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num), 1514 vsi->stat_offsets_loaded, &prev_es->tx_multicast, 1515 &cur_es->tx_multicast); 1516 1517 ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num), 1518 vsi->stat_offsets_loaded, &prev_es->tx_broadcast, 1519 &cur_es->tx_broadcast); 1520 1521 ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, 1522 &prev_es->tx_errors, &cur_es->tx_errors); 1523 1524 vsi->stat_offsets_loaded = true; 1525 } 1526 1527 /** 1528 * ice_free_fltr_list - free filter lists helper 1529 * @dev: pointer to the device struct 1530 * @h: pointer to the list head to be freed 1531 * 1532 * Helper function to free filter lists previously created using 1533 * ice_add_mac_to_list 1534 */ 1535 void ice_free_fltr_list(struct device *dev, struct list_head *h) 1536 { 1537 struct ice_fltr_list_entry *e, *tmp; 1538 1539 list_for_each_entry_safe(e, tmp, h, list_entry) { 1540 list_del(&e->list_entry); 1541 devm_kfree(dev, e); 1542 } 1543 } 1544 1545 /** 1546 * ice_vsi_add_vlan - Add VSI membership for given VLAN 1547 * @vsi: the VSI being configured 1548 * @vid: VLAN ID to be added 1549 */ 1550 int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) 1551 { 1552 struct ice_fltr_list_entry *tmp; 1553 struct ice_pf *pf = vsi->back; 1554 LIST_HEAD(tmp_add_list); 1555 enum ice_status status; 1556 int err = 0; 1557 1558 tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL); 1559 if (!tmp) 1560 return -ENOMEM; 1561 1562 tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 1563 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; 1564 tmp->fltr_info.flag = ICE_FLTR_TX; 1565 tmp->fltr_info.src_id = ICE_SRC_ID_VSI; 1566 tmp->fltr_info.vsi_handle = vsi->idx; 1567 tmp->fltr_info.l_data.vlan.vlan_id = vid; 1568 1569 INIT_LIST_HEAD(&tmp->list_entry); 1570 list_add(&tmp->list_entry, &tmp_add_list); 1571 1572 status = ice_add_vlan(&pf->hw, &tmp_add_list); 1573 if (status) { 1574 err = -ENODEV; 1575 dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n", 1576 vid, vsi->vsi_num); 1577 } 1578 1579 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); 1580 return err; 1581 } 1582 1583 /** 1584 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN 1585 * @vsi: the VSI being configured 1586 * @vid: VLAN ID to be removed 1587 * 1588 * Returns 0 on success and negative on failure 1589 */ 1590 int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) 1591 { 1592 struct ice_fltr_list_entry *list; 1593 struct ice_pf *pf = vsi->back; 1594 LIST_HEAD(tmp_add_list); 1595 enum ice_status status; 1596 int err = 0; 1597 1598 list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); 1599 if (!list) 1600 return -ENOMEM; 1601 1602 list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 1603 list->fltr_info.vsi_handle = vsi->idx; 1604 list->fltr_info.fltr_act = ICE_FWD_TO_VSI; 1605 list->fltr_info.l_data.vlan.vlan_id = vid; 1606 list->fltr_info.flag = ICE_FLTR_TX; 1607 list->fltr_info.src_id = ICE_SRC_ID_VSI; 1608 1609 INIT_LIST_HEAD(&list->list_entry); 1610 list_add(&list->list_entry, &tmp_add_list); 1611 1612 status = ice_remove_vlan(&pf->hw, &tmp_add_list); 1613 if (status == ICE_ERR_DOES_NOT_EXIST) { 1614 dev_dbg(&pf->pdev->dev, 1615 "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n", 1616 vid, vsi->vsi_num, status); 1617 } else if (status) { 1618 dev_err(&pf->pdev->dev, 1619 "Error removing VLAN %d on vsi %i error: %d\n", 1620 vid, vsi->vsi_num, status); 1621 err = -EIO; 1622 } 1623 1624 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); 1625 return err; 1626 } 1627 1628 /** 1629 * ice_vsi_cfg_rxqs - Configure the VSI for Rx 1630 * @vsi: the VSI being configured 1631 * 1632 * Return 0 on success and a negative value on error 1633 * Configure the Rx VSI for operation. 1634 */ 1635 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) 1636 { 1637 u16 i; 1638 1639 if (vsi->type == ICE_VSI_VF) 1640 goto setup_rings; 1641 1642 if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN) 1643 vsi->max_frame = vsi->netdev->mtu + 1644 ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 1645 else 1646 vsi->max_frame = ICE_RXBUF_2048; 1647 1648 vsi->rx_buf_len = ICE_RXBUF_2048; 1649 setup_rings: 1650 /* set up individual rings */ 1651 for (i = 0; i < vsi->num_rxq; i++) { 1652 int err; 1653 1654 err = ice_setup_rx_ctx(vsi->rx_rings[i]); 1655 if (err) { 1656 dev_err(&vsi->back->pdev->dev, 1657 "ice_setup_rx_ctx failed for RxQ %d, err %d\n", 1658 i, err); 1659 return err; 1660 } 1661 } 1662 1663 return 0; 1664 } 1665 1666 /** 1667 * ice_vsi_cfg_txqs - Configure the VSI for Tx 1668 * @vsi: the VSI being configured 1669 * @rings: Tx ring array to be configured 1670 * @offset: offset within vsi->txq_map 1671 * 1672 * Return 0 on success and a negative value on error 1673 * Configure the Tx VSI for operation. 1674 */ 1675 static int 1676 ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset) 1677 { 1678 struct ice_aqc_add_tx_qgrp *qg_buf; 1679 struct ice_aqc_add_txqs_perq *txq; 1680 struct ice_pf *pf = vsi->back; 1681 u8 num_q_grps, q_idx = 0; 1682 enum ice_status status; 1683 u16 buf_len, i, pf_q; 1684 int err = 0, tc; 1685 1686 buf_len = sizeof(*qg_buf); 1687 qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); 1688 if (!qg_buf) 1689 return -ENOMEM; 1690 1691 qg_buf->num_txqs = 1; 1692 num_q_grps = 1; 1693 1694 /* set up and configure the Tx queues for each enabled TC */ 1695 ice_for_each_traffic_class(tc) { 1696 if (!(vsi->tc_cfg.ena_tc & BIT(tc))) 1697 break; 1698 1699 for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { 1700 struct ice_tlan_ctx tlan_ctx = { 0 }; 1701 1702 pf_q = vsi->txq_map[q_idx + offset]; 1703 ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q); 1704 /* copy context contents into the qg_buf */ 1705 qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); 1706 ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, 1707 ice_tlan_ctx_info); 1708 1709 /* init queue specific tail reg. It is referred as 1710 * transmit comm scheduler queue doorbell. 1711 */ 1712 rings[q_idx]->tail = 1713 pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); 1714 status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, 1715 i, num_q_grps, qg_buf, 1716 buf_len, NULL); 1717 if (status) { 1718 dev_err(&pf->pdev->dev, 1719 "Failed to set LAN Tx queue context, error: %d\n", 1720 status); 1721 err = -ENODEV; 1722 goto err_cfg_txqs; 1723 } 1724 1725 /* Add Tx Queue TEID into the VSI Tx ring from the 1726 * response. This will complete configuring and 1727 * enabling the queue. 1728 */ 1729 txq = &qg_buf->txqs[0]; 1730 if (pf_q == le16_to_cpu(txq->txq_id)) 1731 rings[q_idx]->txq_teid = 1732 le32_to_cpu(txq->q_teid); 1733 1734 q_idx++; 1735 } 1736 } 1737 err_cfg_txqs: 1738 devm_kfree(&pf->pdev->dev, qg_buf); 1739 return err; 1740 } 1741 1742 /** 1743 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx 1744 * @vsi: the VSI being configured 1745 * 1746 * Return 0 on success and a negative value on error 1747 * Configure the Tx VSI for operation. 1748 */ 1749 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi) 1750 { 1751 return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0); 1752 } 1753 1754 /** 1755 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value 1756 * @intrl: interrupt rate limit in usecs 1757 * @gran: interrupt rate limit granularity in usecs 1758 * 1759 * This function converts a decimal interrupt rate limit in usecs to the format 1760 * expected by firmware. 1761 */ 1762 u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) 1763 { 1764 u32 val = intrl / gran; 1765 1766 if (val) 1767 return val | GLINT_RATE_INTRL_ENA_M; 1768 return 0; 1769 } 1770 1771 /** 1772 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set 1773 * @hw: board specific structure 1774 */ 1775 static void ice_cfg_itr_gran(struct ice_hw *hw) 1776 { 1777 u32 regval = rd32(hw, GLINT_CTL); 1778 1779 /* no need to update global register if ITR gran is already set */ 1780 if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) && 1781 (((regval & GLINT_CTL_ITR_GRAN_200_M) >> 1782 GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) && 1783 (((regval & GLINT_CTL_ITR_GRAN_100_M) >> 1784 GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) && 1785 (((regval & GLINT_CTL_ITR_GRAN_50_M) >> 1786 GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) && 1787 (((regval & GLINT_CTL_ITR_GRAN_25_M) >> 1788 GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US)) 1789 return; 1790 1791 regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) & 1792 GLINT_CTL_ITR_GRAN_200_M) | 1793 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) & 1794 GLINT_CTL_ITR_GRAN_100_M) | 1795 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) & 1796 GLINT_CTL_ITR_GRAN_50_M) | 1797 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) & 1798 GLINT_CTL_ITR_GRAN_25_M); 1799 wr32(hw, GLINT_CTL, regval); 1800 } 1801 1802 /** 1803 * ice_cfg_itr - configure the initial interrupt throttle values 1804 * @hw: pointer to the HW structure 1805 * @q_vector: interrupt vector that's being configured 1806 * 1807 * Configure interrupt throttling values for the ring containers that are 1808 * associated with the interrupt vector passed in. 1809 */ 1810 static void 1811 ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector) 1812 { 1813 ice_cfg_itr_gran(hw); 1814 1815 if (q_vector->num_ring_rx) { 1816 struct ice_ring_container *rc = &q_vector->rx; 1817 1818 /* if this value is set then don't overwrite with default */ 1819 if (!rc->itr_setting) 1820 rc->itr_setting = ICE_DFLT_RX_ITR; 1821 1822 rc->target_itr = ITR_TO_REG(rc->itr_setting); 1823 rc->next_update = jiffies + 1; 1824 rc->current_itr = rc->target_itr; 1825 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), 1826 ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); 1827 } 1828 1829 if (q_vector->num_ring_tx) { 1830 struct ice_ring_container *rc = &q_vector->tx; 1831 1832 /* if this value is set then don't overwrite with default */ 1833 if (!rc->itr_setting) 1834 rc->itr_setting = ICE_DFLT_TX_ITR; 1835 1836 rc->target_itr = ITR_TO_REG(rc->itr_setting); 1837 rc->next_update = jiffies + 1; 1838 rc->current_itr = rc->target_itr; 1839 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), 1840 ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); 1841 } 1842 } 1843 1844 /** 1845 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW 1846 * @vsi: the VSI being configured 1847 */ 1848 void ice_vsi_cfg_msix(struct ice_vsi *vsi) 1849 { 1850 struct ice_pf *pf = vsi->back; 1851 struct ice_hw *hw = &pf->hw; 1852 u32 txq = 0, rxq = 0; 1853 int i, q; 1854 1855 for (i = 0; i < vsi->num_q_vectors; i++) { 1856 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 1857 u16 reg_idx = q_vector->reg_idx; 1858 1859 ice_cfg_itr(hw, q_vector); 1860 1861 wr32(hw, GLINT_RATE(reg_idx), 1862 ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); 1863 1864 /* Both Transmit Queue Interrupt Cause Control register 1865 * and Receive Queue Interrupt Cause control register 1866 * expects MSIX_INDX field to be the vector index 1867 * within the function space and not the absolute 1868 * vector index across PF or across device. 1869 * For SR-IOV VF VSIs queue vector index always starts 1870 * with 1 since first vector index(0) is used for OICR 1871 * in VF space. Since VMDq and other PF VSIs are within 1872 * the PF function space, use the vector index that is 1873 * tracked for this PF. 1874 */ 1875 for (q = 0; q < q_vector->num_ring_tx; q++) { 1876 int itr_idx = (q_vector->tx.itr_idx << 1877 QINT_TQCTL_ITR_INDX_S) & 1878 QINT_TQCTL_ITR_INDX_M; 1879 u32 val; 1880 1881 if (vsi->type == ICE_VSI_VF) 1882 val = QINT_TQCTL_CAUSE_ENA_M | itr_idx | 1883 (((i + 1) << QINT_TQCTL_MSIX_INDX_S) & 1884 QINT_TQCTL_MSIX_INDX_M); 1885 else 1886 val = QINT_TQCTL_CAUSE_ENA_M | itr_idx | 1887 ((reg_idx << QINT_TQCTL_MSIX_INDX_S) & 1888 QINT_TQCTL_MSIX_INDX_M); 1889 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); 1890 txq++; 1891 } 1892 1893 for (q = 0; q < q_vector->num_ring_rx; q++) { 1894 int itr_idx = (q_vector->rx.itr_idx << 1895 QINT_RQCTL_ITR_INDX_S) & 1896 QINT_RQCTL_ITR_INDX_M; 1897 u32 val; 1898 1899 if (vsi->type == ICE_VSI_VF) 1900 val = QINT_RQCTL_CAUSE_ENA_M | itr_idx | 1901 (((i + 1) << QINT_RQCTL_MSIX_INDX_S) & 1902 QINT_RQCTL_MSIX_INDX_M); 1903 else 1904 val = QINT_RQCTL_CAUSE_ENA_M | itr_idx | 1905 ((reg_idx << QINT_RQCTL_MSIX_INDX_S) & 1906 QINT_RQCTL_MSIX_INDX_M); 1907 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); 1908 rxq++; 1909 } 1910 } 1911 1912 ice_flush(hw); 1913 } 1914 1915 /** 1916 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx 1917 * @vsi: the VSI being changed 1918 */ 1919 int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) 1920 { 1921 struct device *dev = &vsi->back->pdev->dev; 1922 struct ice_hw *hw = &vsi->back->hw; 1923 struct ice_vsi_ctx *ctxt; 1924 enum ice_status status; 1925 int ret = 0; 1926 1927 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); 1928 if (!ctxt) 1929 return -ENOMEM; 1930 1931 /* Here we are configuring the VSI to let the driver add VLAN tags by 1932 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag 1933 * insertion happens in the Tx hot path, in ice_tx_map. 1934 */ 1935 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; 1936 1937 /* Preserve existing VLAN strip setting */ 1938 ctxt->info.vlan_flags |= (vsi->info.vlan_flags & 1939 ICE_AQ_VSI_VLAN_EMOD_M); 1940 1941 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 1942 1943 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 1944 if (status) { 1945 dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", 1946 status, hw->adminq.sq_last_status); 1947 ret = -EIO; 1948 goto out; 1949 } 1950 1951 vsi->info.vlan_flags = ctxt->info.vlan_flags; 1952 out: 1953 devm_kfree(dev, ctxt); 1954 return ret; 1955 } 1956 1957 /** 1958 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx 1959 * @vsi: the VSI being changed 1960 * @ena: boolean value indicating if this is a enable or disable request 1961 */ 1962 int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) 1963 { 1964 struct device *dev = &vsi->back->pdev->dev; 1965 struct ice_hw *hw = &vsi->back->hw; 1966 struct ice_vsi_ctx *ctxt; 1967 enum ice_status status; 1968 int ret = 0; 1969 1970 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); 1971 if (!ctxt) 1972 return -ENOMEM; 1973 1974 /* Here we are configuring what the VSI should do with the VLAN tag in 1975 * the Rx packet. We can either leave the tag in the packet or put it in 1976 * the Rx descriptor. 1977 */ 1978 if (ena) 1979 /* Strip VLAN tag from Rx packet and put it in the desc */ 1980 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; 1981 else 1982 /* Disable stripping. Leave tag in packet */ 1983 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; 1984 1985 /* Allow all packets untagged/tagged */ 1986 ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; 1987 1988 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 1989 1990 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 1991 if (status) { 1992 dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n", 1993 ena, status, hw->adminq.sq_last_status); 1994 ret = -EIO; 1995 goto out; 1996 } 1997 1998 vsi->info.vlan_flags = ctxt->info.vlan_flags; 1999 out: 2000 devm_kfree(dev, ctxt); 2001 return ret; 2002 } 2003 2004 /** 2005 * ice_vsi_start_rx_rings - start VSI's Rx rings 2006 * @vsi: the VSI whose rings are to be started 2007 * 2008 * Returns 0 on success and a negative value on error 2009 */ 2010 int ice_vsi_start_rx_rings(struct ice_vsi *vsi) 2011 { 2012 return ice_vsi_ctrl_rx_rings(vsi, true); 2013 } 2014 2015 /** 2016 * ice_vsi_stop_rx_rings - stop VSI's Rx rings 2017 * @vsi: the VSI 2018 * 2019 * Returns 0 on success and a negative value on error 2020 */ 2021 int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) 2022 { 2023 return ice_vsi_ctrl_rx_rings(vsi, false); 2024 } 2025 2026 /** 2027 * ice_vsi_stop_tx_rings - Disable Tx rings 2028 * @vsi: the VSI being configured 2029 * @rst_src: reset source 2030 * @rel_vmvf_num: Relative ID of VF/VM 2031 * @rings: Tx ring array to be stopped 2032 * @offset: offset within vsi->txq_map 2033 */ 2034 static int 2035 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2036 u16 rel_vmvf_num, struct ice_ring **rings, int offset) 2037 { 2038 struct ice_pf *pf = vsi->back; 2039 struct ice_hw *hw = &pf->hw; 2040 int tc, q_idx = 0, err = 0; 2041 u16 *q_ids, *q_handles, i; 2042 enum ice_status status; 2043 u32 *q_teids, val; 2044 2045 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) 2046 return -EINVAL; 2047 2048 q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), 2049 GFP_KERNEL); 2050 if (!q_teids) 2051 return -ENOMEM; 2052 2053 q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), 2054 GFP_KERNEL); 2055 if (!q_ids) { 2056 err = -ENOMEM; 2057 goto err_alloc_q_ids; 2058 } 2059 2060 q_handles = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, 2061 sizeof(*q_handles), GFP_KERNEL); 2062 if (!q_handles) { 2063 err = -ENOMEM; 2064 goto err_alloc_q_handles; 2065 } 2066 2067 /* set up the Tx queue list to be disabled for each enabled TC */ 2068 ice_for_each_traffic_class(tc) { 2069 if (!(vsi->tc_cfg.ena_tc & BIT(tc))) 2070 break; 2071 2072 for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { 2073 if (!rings || !rings[q_idx] || 2074 !rings[q_idx]->q_vector) { 2075 err = -EINVAL; 2076 goto err_out; 2077 } 2078 2079 q_ids[i] = vsi->txq_map[q_idx + offset]; 2080 q_teids[i] = rings[q_idx]->txq_teid; 2081 q_handles[i] = i; 2082 2083 /* clear cause_ena bit for disabled queues */ 2084 val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx)); 2085 val &= ~QINT_TQCTL_CAUSE_ENA_M; 2086 wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val); 2087 2088 /* software is expected to wait for 100 ns */ 2089 ndelay(100); 2090 2091 /* trigger a software interrupt for the vector 2092 * associated to the queue to schedule NAPI handler 2093 */ 2094 wr32(hw, GLINT_DYN_CTL(rings[i]->q_vector->reg_idx), 2095 GLINT_DYN_CTL_SWINT_TRIG_M | 2096 GLINT_DYN_CTL_INTENA_MSK_M); 2097 q_idx++; 2098 } 2099 status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc, 2100 vsi->num_txq, q_handles, q_ids, 2101 q_teids, rst_src, rel_vmvf_num, NULL); 2102 2103 /* if the disable queue command was exercised during an active 2104 * reset flow, ICE_ERR_RESET_ONGOING is returned. This is not 2105 * an error as the reset operation disables queues at the 2106 * hardware level anyway. 2107 */ 2108 if (status == ICE_ERR_RESET_ONGOING) { 2109 dev_dbg(&pf->pdev->dev, 2110 "Reset in progress. LAN Tx queues already disabled\n"); 2111 } else if (status) { 2112 dev_err(&pf->pdev->dev, 2113 "Failed to disable LAN Tx queues, error: %d\n", 2114 status); 2115 err = -ENODEV; 2116 } 2117 } 2118 2119 err_out: 2120 devm_kfree(&pf->pdev->dev, q_handles); 2121 2122 err_alloc_q_handles: 2123 devm_kfree(&pf->pdev->dev, q_ids); 2124 2125 err_alloc_q_ids: 2126 devm_kfree(&pf->pdev->dev, q_teids); 2127 2128 return err; 2129 } 2130 2131 /** 2132 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings 2133 * @vsi: the VSI being configured 2134 * @rst_src: reset source 2135 * @rel_vmvf_num: Relative ID of VF/VM 2136 */ 2137 int 2138 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2139 u16 rel_vmvf_num) 2140 { 2141 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, 2142 0); 2143 } 2144 2145 /** 2146 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI 2147 * @vsi: VSI to enable or disable VLAN pruning on 2148 * @ena: set to true to enable VLAN pruning and false to disable it 2149 * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode 2150 * 2151 * returns 0 if VSI is updated, negative otherwise 2152 */ 2153 int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) 2154 { 2155 struct ice_vsi_ctx *ctxt; 2156 struct device *dev; 2157 struct ice_pf *pf; 2158 int status; 2159 2160 if (!vsi) 2161 return -EINVAL; 2162 2163 pf = vsi->back; 2164 dev = &pf->pdev->dev; 2165 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); 2166 if (!ctxt) 2167 return -ENOMEM; 2168 2169 ctxt->info = vsi->info; 2170 2171 if (ena) { 2172 ctxt->info.sec_flags |= 2173 ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 2174 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S; 2175 ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 2176 } else { 2177 ctxt->info.sec_flags &= 2178 ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 2179 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 2180 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 2181 } 2182 2183 if (!vlan_promisc) 2184 ctxt->info.valid_sections = 2185 cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID | 2186 ICE_AQ_VSI_PROP_SW_VALID); 2187 2188 status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL); 2189 if (status) { 2190 netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n", 2191 ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status, 2192 pf->hw.adminq.sq_last_status); 2193 goto err_out; 2194 } 2195 2196 vsi->info.sec_flags = ctxt->info.sec_flags; 2197 vsi->info.sw_flags2 = ctxt->info.sw_flags2; 2198 2199 devm_kfree(dev, ctxt); 2200 return 0; 2201 2202 err_out: 2203 devm_kfree(dev, ctxt); 2204 return -EIO; 2205 } 2206 2207 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) 2208 { 2209 struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg; 2210 2211 vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg); 2212 vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg); 2213 } 2214 2215 /** 2216 * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors 2217 * @vsi: VSI to set the q_vectors register index on 2218 */ 2219 static int 2220 ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi) 2221 { 2222 u16 i; 2223 2224 if (!vsi || !vsi->q_vectors) 2225 return -EINVAL; 2226 2227 ice_for_each_q_vector(vsi, i) { 2228 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2229 2230 if (!q_vector) { 2231 dev_err(&vsi->back->pdev->dev, 2232 "Failed to set reg_idx on q_vector %d VSI %d\n", 2233 i, vsi->vsi_num); 2234 goto clear_reg_idx; 2235 } 2236 2237 q_vector->reg_idx = q_vector->v_idx + vsi->hw_base_vector; 2238 } 2239 2240 return 0; 2241 2242 clear_reg_idx: 2243 ice_for_each_q_vector(vsi, i) { 2244 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2245 2246 if (q_vector) 2247 q_vector->reg_idx = 0; 2248 } 2249 2250 return -EINVAL; 2251 } 2252 2253 /** 2254 * ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule 2255 * @vsi: the VSI being configured 2256 * @add_rule: boolean value to add or remove ethertype filter rule 2257 */ 2258 static void 2259 ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule) 2260 { 2261 struct ice_fltr_list_entry *list; 2262 struct ice_pf *pf = vsi->back; 2263 LIST_HEAD(tmp_add_list); 2264 enum ice_status status; 2265 2266 list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); 2267 if (!list) 2268 return; 2269 2270 list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; 2271 list->fltr_info.fltr_act = ICE_DROP_PACKET; 2272 list->fltr_info.flag = ICE_FLTR_TX; 2273 list->fltr_info.src_id = ICE_SRC_ID_VSI; 2274 list->fltr_info.vsi_handle = vsi->idx; 2275 list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype; 2276 2277 INIT_LIST_HEAD(&list->list_entry); 2278 list_add(&list->list_entry, &tmp_add_list); 2279 2280 if (add_rule) 2281 status = ice_add_eth_mac(&pf->hw, &tmp_add_list); 2282 else 2283 status = ice_remove_eth_mac(&pf->hw, &tmp_add_list); 2284 2285 if (status) 2286 dev_err(&pf->pdev->dev, 2287 "Failure Adding or Removing Ethertype on VSI %i error: %d\n", 2288 vsi->vsi_num, status); 2289 2290 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); 2291 } 2292 2293 /** 2294 * ice_vsi_setup - Set up a VSI by a given type 2295 * @pf: board private structure 2296 * @pi: pointer to the port_info instance 2297 * @type: VSI type 2298 * @vf_id: defines VF ID to which this VSI connects. This field is meant to be 2299 * used only for ICE_VSI_VF VSI type. For other VSI types, should 2300 * fill-in ICE_INVAL_VFID as input. 2301 * 2302 * This allocates the sw VSI structure and its queue resources. 2303 * 2304 * Returns pointer to the successfully allocated and configured VSI sw struct on 2305 * success, NULL on failure. 2306 */ 2307 struct ice_vsi * 2308 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, 2309 enum ice_vsi_type type, u16 vf_id) 2310 { 2311 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2312 struct device *dev = &pf->pdev->dev; 2313 struct ice_vsi *vsi; 2314 int ret, i; 2315 2316 if (type == ICE_VSI_VF) 2317 vsi = ice_vsi_alloc(pf, type, vf_id); 2318 else 2319 vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID); 2320 2321 if (!vsi) { 2322 dev_err(dev, "could not allocate VSI\n"); 2323 return NULL; 2324 } 2325 2326 vsi->port_info = pi; 2327 vsi->vsw = pf->first_sw; 2328 if (vsi->type == ICE_VSI_PF) 2329 vsi->ethtype = ETH_P_PAUSE; 2330 2331 if (vsi->type == ICE_VSI_VF) 2332 vsi->vf_id = vf_id; 2333 2334 if (ice_vsi_get_qs(vsi)) { 2335 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", 2336 vsi->idx); 2337 goto unroll_get_qs; 2338 } 2339 2340 /* set RSS capabilities */ 2341 ice_vsi_set_rss_params(vsi); 2342 2343 /* set TC configuration */ 2344 ice_vsi_set_tc_cfg(vsi); 2345 2346 /* create the VSI */ 2347 ret = ice_vsi_init(vsi); 2348 if (ret) 2349 goto unroll_get_qs; 2350 2351 switch (vsi->type) { 2352 case ICE_VSI_PF: 2353 ret = ice_vsi_alloc_q_vectors(vsi); 2354 if (ret) 2355 goto unroll_vsi_init; 2356 2357 ret = ice_vsi_setup_vector_base(vsi); 2358 if (ret) 2359 goto unroll_alloc_q_vector; 2360 2361 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 2362 if (ret) 2363 goto unroll_vector_base; 2364 2365 ret = ice_vsi_alloc_rings(vsi); 2366 if (ret) 2367 goto unroll_vector_base; 2368 2369 ice_vsi_map_rings_to_vectors(vsi); 2370 2371 /* Do not exit if configuring RSS had an issue, at least 2372 * receive traffic on first queue. Hence no need to capture 2373 * return value 2374 */ 2375 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2376 ice_vsi_cfg_rss_lut_key(vsi); 2377 break; 2378 case ICE_VSI_VF: 2379 /* VF driver will take care of creating netdev for this type and 2380 * map queues to vectors through Virtchnl, PF driver only 2381 * creates a VSI and corresponding structures for bookkeeping 2382 * purpose 2383 */ 2384 ret = ice_vsi_alloc_q_vectors(vsi); 2385 if (ret) 2386 goto unroll_vsi_init; 2387 2388 ret = ice_vsi_alloc_rings(vsi); 2389 if (ret) 2390 goto unroll_alloc_q_vector; 2391 2392 /* Setup Vector base only during VF init phase or when VF asks 2393 * for more vectors than assigned number. In all other cases, 2394 * assign hw_base_vector to the value given earlier. 2395 */ 2396 if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) { 2397 ret = ice_vsi_setup_vector_base(vsi); 2398 if (ret) 2399 goto unroll_vector_base; 2400 } else { 2401 vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx; 2402 } 2403 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 2404 if (ret) 2405 goto unroll_vector_base; 2406 2407 pf->q_left_tx -= vsi->alloc_txq; 2408 pf->q_left_rx -= vsi->alloc_rxq; 2409 break; 2410 default: 2411 /* clean up the resources and exit */ 2412 goto unroll_vsi_init; 2413 } 2414 2415 /* configure VSI nodes based on number of queues and TC's */ 2416 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2417 max_txqs[i] = pf->num_lan_tx; 2418 2419 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2420 max_txqs); 2421 if (ret) { 2422 dev_err(&pf->pdev->dev, 2423 "VSI %d failed lan queue config, error %d\n", 2424 vsi->vsi_num, ret); 2425 goto unroll_vector_base; 2426 } 2427 2428 /* Add switch rule to drop all Tx Flow Control Frames, of look up 2429 * type ETHERTYPE from VSIs, and restrict malicious VF from sending 2430 * out PAUSE or PFC frames. If enabled, FW can still send FC frames. 2431 * The rule is added once for PF VSI in order to create appropriate 2432 * recipe, since VSI/VSI list is ignored with drop action... 2433 */ 2434 if (vsi->type == ICE_VSI_PF) 2435 ice_vsi_add_rem_eth_mac(vsi, true); 2436 2437 return vsi; 2438 2439 unroll_vector_base: 2440 /* reclaim SW interrupts back to the common pool */ 2441 ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); 2442 pf->num_avail_sw_msix += vsi->num_q_vectors; 2443 /* reclaim HW interrupt back to the common pool */ 2444 ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, vsi->idx); 2445 pf->num_avail_hw_msix += vsi->num_q_vectors; 2446 unroll_alloc_q_vector: 2447 ice_vsi_free_q_vectors(vsi); 2448 unroll_vsi_init: 2449 ice_vsi_delete(vsi); 2450 unroll_get_qs: 2451 ice_vsi_put_qs(vsi); 2452 pf->q_left_tx += vsi->alloc_txq; 2453 pf->q_left_rx += vsi->alloc_rxq; 2454 ice_vsi_clear(vsi); 2455 2456 return NULL; 2457 } 2458 2459 /** 2460 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW 2461 * @vsi: the VSI being cleaned up 2462 */ 2463 static void ice_vsi_release_msix(struct ice_vsi *vsi) 2464 { 2465 struct ice_pf *pf = vsi->back; 2466 u16 vector = vsi->hw_base_vector; 2467 struct ice_hw *hw = &pf->hw; 2468 u32 txq = 0; 2469 u32 rxq = 0; 2470 int i, q; 2471 2472 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { 2473 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2474 2475 wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0); 2476 wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0); 2477 for (q = 0; q < q_vector->num_ring_tx; q++) { 2478 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); 2479 txq++; 2480 } 2481 2482 for (q = 0; q < q_vector->num_ring_rx; q++) { 2483 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); 2484 rxq++; 2485 } 2486 } 2487 2488 ice_flush(hw); 2489 } 2490 2491 /** 2492 * ice_vsi_free_irq - Free the IRQ association with the OS 2493 * @vsi: the VSI being configured 2494 */ 2495 void ice_vsi_free_irq(struct ice_vsi *vsi) 2496 { 2497 struct ice_pf *pf = vsi->back; 2498 int base = vsi->sw_base_vector; 2499 2500 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { 2501 int i; 2502 2503 if (!vsi->q_vectors || !vsi->irqs_ready) 2504 return; 2505 2506 ice_vsi_release_msix(vsi); 2507 if (vsi->type == ICE_VSI_VF) 2508 return; 2509 2510 vsi->irqs_ready = false; 2511 ice_for_each_q_vector(vsi, i) { 2512 u16 vector = i + base; 2513 int irq_num; 2514 2515 irq_num = pf->msix_entries[vector].vector; 2516 2517 /* free only the irqs that were actually requested */ 2518 if (!vsi->q_vectors[i] || 2519 !(vsi->q_vectors[i]->num_ring_tx || 2520 vsi->q_vectors[i]->num_ring_rx)) 2521 continue; 2522 2523 /* clear the affinity notifier in the IRQ descriptor */ 2524 irq_set_affinity_notifier(irq_num, NULL); 2525 2526 /* clear the affinity_mask in the IRQ descriptor */ 2527 irq_set_affinity_hint(irq_num, NULL); 2528 synchronize_irq(irq_num); 2529 devm_free_irq(&pf->pdev->dev, irq_num, 2530 vsi->q_vectors[i]); 2531 } 2532 } 2533 } 2534 2535 /** 2536 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues 2537 * @vsi: the VSI having resources freed 2538 */ 2539 void ice_vsi_free_tx_rings(struct ice_vsi *vsi) 2540 { 2541 int i; 2542 2543 if (!vsi->tx_rings) 2544 return; 2545 2546 ice_for_each_txq(vsi, i) 2547 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 2548 ice_free_tx_ring(vsi->tx_rings[i]); 2549 } 2550 2551 /** 2552 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues 2553 * @vsi: the VSI having resources freed 2554 */ 2555 void ice_vsi_free_rx_rings(struct ice_vsi *vsi) 2556 { 2557 int i; 2558 2559 if (!vsi->rx_rings) 2560 return; 2561 2562 ice_for_each_rxq(vsi, i) 2563 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) 2564 ice_free_rx_ring(vsi->rx_rings[i]); 2565 } 2566 2567 /** 2568 * ice_vsi_close - Shut down a VSI 2569 * @vsi: the VSI being shut down 2570 */ 2571 void ice_vsi_close(struct ice_vsi *vsi) 2572 { 2573 if (!test_and_set_bit(__ICE_DOWN, vsi->state)) 2574 ice_down(vsi); 2575 2576 ice_vsi_free_irq(vsi); 2577 ice_vsi_free_tx_rings(vsi); 2578 ice_vsi_free_rx_rings(vsi); 2579 } 2580 2581 /** 2582 * ice_free_res - free a block of resources 2583 * @res: pointer to the resource 2584 * @index: starting index previously returned by ice_get_res 2585 * @id: identifier to track owner 2586 * 2587 * Returns number of resources freed 2588 */ 2589 int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) 2590 { 2591 int count = 0; 2592 int i; 2593 2594 if (!res || index >= res->num_entries) 2595 return -EINVAL; 2596 2597 id |= ICE_RES_VALID_BIT; 2598 for (i = index; i < res->num_entries && res->list[i] == id; i++) { 2599 res->list[i] = 0; 2600 count++; 2601 } 2602 2603 return count; 2604 } 2605 2606 /** 2607 * ice_search_res - Search the tracker for a block of resources 2608 * @res: pointer to the resource 2609 * @needed: size of the block needed 2610 * @id: identifier to track owner 2611 * 2612 * Returns the base item index of the block, or -ENOMEM for error 2613 */ 2614 static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) 2615 { 2616 int start = res->search_hint; 2617 int end = start; 2618 2619 if ((start + needed) > res->num_entries) 2620 return -ENOMEM; 2621 2622 id |= ICE_RES_VALID_BIT; 2623 2624 do { 2625 /* skip already allocated entries */ 2626 if (res->list[end++] & ICE_RES_VALID_BIT) { 2627 start = end; 2628 if ((start + needed) > res->num_entries) 2629 break; 2630 } 2631 2632 if (end == (start + needed)) { 2633 int i = start; 2634 2635 /* there was enough, so assign it to the requestor */ 2636 while (i != end) 2637 res->list[i++] = id; 2638 2639 if (end == res->num_entries) 2640 end = 0; 2641 2642 res->search_hint = end; 2643 return start; 2644 } 2645 } while (1); 2646 2647 return -ENOMEM; 2648 } 2649 2650 /** 2651 * ice_get_res - get a block of resources 2652 * @pf: board private structure 2653 * @res: pointer to the resource 2654 * @needed: size of the block needed 2655 * @id: identifier to track owner 2656 * 2657 * Returns the base item index of the block, or -ENOMEM for error 2658 * The search_hint trick and lack of advanced fit-finding only works 2659 * because we're highly likely to have all the same sized requests. 2660 * Linear search time and any fragmentation should be minimal. 2661 */ 2662 int 2663 ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) 2664 { 2665 int ret; 2666 2667 if (!res || !pf) 2668 return -EINVAL; 2669 2670 if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { 2671 dev_err(&pf->pdev->dev, 2672 "param err: needed=%d, num_entries = %d id=0x%04x\n", 2673 needed, res->num_entries, id); 2674 return -EINVAL; 2675 } 2676 2677 /* search based on search_hint */ 2678 ret = ice_search_res(res, needed, id); 2679 2680 if (ret < 0) { 2681 /* previous search failed. Reset search hint and try again */ 2682 res->search_hint = 0; 2683 ret = ice_search_res(res, needed, id); 2684 } 2685 2686 return ret; 2687 } 2688 2689 /** 2690 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI 2691 * @vsi: the VSI being un-configured 2692 */ 2693 void ice_vsi_dis_irq(struct ice_vsi *vsi) 2694 { 2695 int base = vsi->sw_base_vector; 2696 struct ice_pf *pf = vsi->back; 2697 struct ice_hw *hw = &pf->hw; 2698 u32 val; 2699 int i; 2700 2701 /* disable interrupt causation from each queue */ 2702 if (vsi->tx_rings) { 2703 ice_for_each_txq(vsi, i) { 2704 if (vsi->tx_rings[i]) { 2705 u16 reg; 2706 2707 reg = vsi->tx_rings[i]->reg_idx; 2708 val = rd32(hw, QINT_TQCTL(reg)); 2709 val &= ~QINT_TQCTL_CAUSE_ENA_M; 2710 wr32(hw, QINT_TQCTL(reg), val); 2711 } 2712 } 2713 } 2714 2715 if (vsi->rx_rings) { 2716 ice_for_each_rxq(vsi, i) { 2717 if (vsi->rx_rings[i]) { 2718 u16 reg; 2719 2720 reg = vsi->rx_rings[i]->reg_idx; 2721 val = rd32(hw, QINT_RQCTL(reg)); 2722 val &= ~QINT_RQCTL_CAUSE_ENA_M; 2723 wr32(hw, QINT_RQCTL(reg), val); 2724 } 2725 } 2726 } 2727 2728 /* disable each interrupt */ 2729 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { 2730 ice_for_each_q_vector(vsi, i) 2731 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); 2732 2733 ice_flush(hw); 2734 2735 ice_for_each_q_vector(vsi, i) 2736 synchronize_irq(pf->msix_entries[i + base].vector); 2737 } 2738 } 2739 2740 /** 2741 * ice_vsi_release - Delete a VSI and free its resources 2742 * @vsi: the VSI being removed 2743 * 2744 * Returns 0 on success or < 0 on error 2745 */ 2746 int ice_vsi_release(struct ice_vsi *vsi) 2747 { 2748 struct ice_vf *vf = NULL; 2749 struct ice_pf *pf; 2750 2751 if (!vsi->back) 2752 return -ENODEV; 2753 pf = vsi->back; 2754 2755 if (vsi->type == ICE_VSI_VF) 2756 vf = &pf->vf[vsi->vf_id]; 2757 /* do not unregister and free netdevs while driver is in the reset 2758 * recovery pending state. Since reset/rebuild happens through PF 2759 * service task workqueue, its not a good idea to unregister netdev 2760 * that is associated to the PF that is running the work queue items 2761 * currently. This is done to avoid check_flush_dependency() warning 2762 * on this wq 2763 */ 2764 if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) { 2765 ice_napi_del(vsi); 2766 unregister_netdev(vsi->netdev); 2767 free_netdev(vsi->netdev); 2768 vsi->netdev = NULL; 2769 } 2770 2771 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2772 ice_rss_clean(vsi); 2773 2774 /* Disable VSI and free resources */ 2775 ice_vsi_dis_irq(vsi); 2776 ice_vsi_close(vsi); 2777 2778 /* reclaim interrupt vectors back to PF */ 2779 if (vsi->type != ICE_VSI_VF) { 2780 /* reclaim SW interrupts back to the common pool */ 2781 ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); 2782 pf->num_avail_sw_msix += vsi->num_q_vectors; 2783 /* reclaim HW interrupts back to the common pool */ 2784 ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, vsi->idx); 2785 pf->num_avail_hw_msix += vsi->num_q_vectors; 2786 } else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) { 2787 /* Reclaim VF resources back only while freeing all VFs or 2788 * vector reassignment is requested 2789 */ 2790 ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx, 2791 vsi->idx); 2792 pf->num_avail_hw_msix += pf->num_vf_msix; 2793 } 2794 2795 if (vsi->type == ICE_VSI_PF) 2796 ice_vsi_add_rem_eth_mac(vsi, false); 2797 2798 ice_remove_vsi_fltr(&pf->hw, vsi->idx); 2799 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); 2800 ice_vsi_delete(vsi); 2801 ice_vsi_free_q_vectors(vsi); 2802 ice_vsi_clear_rings(vsi); 2803 2804 ice_vsi_put_qs(vsi); 2805 pf->q_left_tx += vsi->alloc_txq; 2806 pf->q_left_rx += vsi->alloc_rxq; 2807 2808 /* retain SW VSI data structure since it is needed to unregister and 2809 * free VSI netdev when PF is not in reset recovery pending state,\ 2810 * for ex: during rmmod. 2811 */ 2812 if (!ice_is_reset_in_progress(pf->state)) 2813 ice_vsi_clear(vsi); 2814 2815 return 0; 2816 } 2817 2818 /** 2819 * ice_vsi_rebuild - Rebuild VSI after reset 2820 * @vsi: VSI to be rebuild 2821 * 2822 * Returns 0 on success and negative value on failure 2823 */ 2824 int ice_vsi_rebuild(struct ice_vsi *vsi) 2825 { 2826 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2827 struct ice_vf *vf = NULL; 2828 struct ice_pf *pf; 2829 int ret, i; 2830 2831 if (!vsi) 2832 return -EINVAL; 2833 2834 pf = vsi->back; 2835 if (vsi->type == ICE_VSI_VF) 2836 vf = &pf->vf[vsi->vf_id]; 2837 2838 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); 2839 ice_vsi_free_q_vectors(vsi); 2840 2841 if (vsi->type != ICE_VSI_VF) { 2842 /* reclaim SW interrupts back to the common pool */ 2843 ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); 2844 pf->num_avail_sw_msix += vsi->num_q_vectors; 2845 vsi->sw_base_vector = 0; 2846 /* reclaim HW interrupts back to the common pool */ 2847 ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, 2848 vsi->idx); 2849 pf->num_avail_hw_msix += vsi->num_q_vectors; 2850 } else { 2851 /* Reclaim VF resources back to the common pool for reset and 2852 * and rebuild, with vector reassignment 2853 */ 2854 ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx, 2855 vsi->idx); 2856 pf->num_avail_hw_msix += pf->num_vf_msix; 2857 } 2858 vsi->hw_base_vector = 0; 2859 2860 ice_vsi_clear_rings(vsi); 2861 ice_vsi_free_arrays(vsi); 2862 ice_dev_onetime_setup(&pf->hw); 2863 if (vsi->type == ICE_VSI_VF) 2864 ice_vsi_set_num_qs(vsi, vf->vf_id); 2865 else 2866 ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); 2867 ice_vsi_set_tc_cfg(vsi); 2868 2869 /* Initialize VSI struct elements and create VSI in FW */ 2870 ret = ice_vsi_init(vsi); 2871 if (ret < 0) 2872 goto err_vsi; 2873 2874 ret = ice_vsi_alloc_arrays(vsi); 2875 if (ret < 0) 2876 goto err_vsi; 2877 2878 switch (vsi->type) { 2879 case ICE_VSI_PF: 2880 ret = ice_vsi_alloc_q_vectors(vsi); 2881 if (ret) 2882 goto err_rings; 2883 2884 ret = ice_vsi_setup_vector_base(vsi); 2885 if (ret) 2886 goto err_vectors; 2887 2888 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 2889 if (ret) 2890 goto err_vectors; 2891 2892 ret = ice_vsi_alloc_rings(vsi); 2893 if (ret) 2894 goto err_vectors; 2895 2896 ice_vsi_map_rings_to_vectors(vsi); 2897 /* Do not exit if configuring RSS had an issue, at least 2898 * receive traffic on first queue. Hence no need to capture 2899 * return value 2900 */ 2901 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2902 ice_vsi_cfg_rss_lut_key(vsi); 2903 break; 2904 case ICE_VSI_VF: 2905 ret = ice_vsi_alloc_q_vectors(vsi); 2906 if (ret) 2907 goto err_rings; 2908 2909 ret = ice_vsi_setup_vector_base(vsi); 2910 if (ret) 2911 goto err_vectors; 2912 2913 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 2914 if (ret) 2915 goto err_vectors; 2916 2917 ret = ice_vsi_alloc_rings(vsi); 2918 if (ret) 2919 goto err_vectors; 2920 2921 pf->q_left_tx -= vsi->alloc_txq; 2922 pf->q_left_rx -= vsi->alloc_rxq; 2923 break; 2924 default: 2925 break; 2926 } 2927 2928 /* configure VSI nodes based on number of queues and TC's */ 2929 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2930 max_txqs[i] = pf->num_lan_tx; 2931 2932 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2933 max_txqs); 2934 if (ret) { 2935 dev_err(&pf->pdev->dev, 2936 "VSI %d failed lan queue config, error %d\n", 2937 vsi->vsi_num, ret); 2938 goto err_vectors; 2939 } 2940 return 0; 2941 2942 err_vectors: 2943 ice_vsi_free_q_vectors(vsi); 2944 err_rings: 2945 if (vsi->netdev) { 2946 vsi->current_netdev_flags = 0; 2947 unregister_netdev(vsi->netdev); 2948 free_netdev(vsi->netdev); 2949 vsi->netdev = NULL; 2950 } 2951 err_vsi: 2952 ice_vsi_clear(vsi); 2953 set_bit(__ICE_RESET_FAILED, pf->state); 2954 return ret; 2955 } 2956 2957 /** 2958 * ice_is_reset_in_progress - check for a reset in progress 2959 * @state: pf state field 2960 */ 2961 bool ice_is_reset_in_progress(unsigned long *state) 2962 { 2963 return test_bit(__ICE_RESET_OICR_RECV, state) || 2964 test_bit(__ICE_PFR_REQ, state) || 2965 test_bit(__ICE_CORER_REQ, state) || 2966 test_bit(__ICE_GLOBR_REQ, state); 2967 } 2968 2969 #ifdef CONFIG_DCB 2970 /** 2971 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map 2972 * @vsi: VSI being configured 2973 * @ctx: the context buffer returned from AQ VSI update command 2974 */ 2975 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) 2976 { 2977 vsi->info.mapping_flags = ctx->info.mapping_flags; 2978 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, 2979 sizeof(vsi->info.q_mapping)); 2980 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, 2981 sizeof(vsi->info.tc_mapping)); 2982 } 2983 2984 /** 2985 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration 2986 * @vsi: the VSI being configured 2987 * @ena_tc: TC map to be enabled 2988 */ 2989 static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) 2990 { 2991 struct net_device *netdev = vsi->netdev; 2992 struct ice_pf *pf = vsi->back; 2993 struct ice_dcbx_cfg *dcbcfg; 2994 u8 netdev_tc; 2995 int i; 2996 2997 if (!netdev) 2998 return; 2999 3000 if (!ena_tc) { 3001 netdev_reset_tc(netdev); 3002 return; 3003 } 3004 3005 if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc)) 3006 return; 3007 3008 dcbcfg = &pf->hw.port_info->local_dcbx_cfg; 3009 3010 ice_for_each_traffic_class(i) 3011 if (vsi->tc_cfg.ena_tc & BIT(i)) 3012 netdev_set_tc_queue(netdev, 3013 vsi->tc_cfg.tc_info[i].netdev_tc, 3014 vsi->tc_cfg.tc_info[i].qcount_tx, 3015 vsi->tc_cfg.tc_info[i].qoffset); 3016 3017 for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { 3018 u8 ets_tc = dcbcfg->etscfg.prio_table[i]; 3019 3020 /* Get the mapped netdev TC# for the UP */ 3021 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; 3022 netdev_set_prio_tc_map(netdev, i, netdev_tc); 3023 } 3024 } 3025 3026 /** 3027 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map 3028 * @vsi: VSI to be configured 3029 * @ena_tc: TC bitmap 3030 * 3031 * VSI queues expected to be quiesced before calling this function 3032 */ 3033 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) 3034 { 3035 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 3036 struct ice_vsi_ctx *ctx; 3037 struct ice_pf *pf = vsi->back; 3038 enum ice_status status; 3039 int i, ret = 0; 3040 u8 num_tc = 0; 3041 3042 ice_for_each_traffic_class(i) { 3043 /* build bitmap of enabled TCs */ 3044 if (ena_tc & BIT(i)) 3045 num_tc++; 3046 /* populate max_txqs per TC */ 3047 max_txqs[i] = pf->num_lan_tx; 3048 } 3049 3050 vsi->tc_cfg.ena_tc = ena_tc; 3051 vsi->tc_cfg.numtc = num_tc; 3052 3053 ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL); 3054 if (!ctx) 3055 return -ENOMEM; 3056 3057 ctx->vf_num = 0; 3058 ctx->info = vsi->info; 3059 3060 ice_vsi_setup_q_map(vsi, ctx); 3061 3062 /* must to indicate which section of VSI context are being modified */ 3063 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); 3064 status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); 3065 if (status) { 3066 dev_info(&pf->pdev->dev, "Failed VSI Update\n"); 3067 ret = -EIO; 3068 goto out; 3069 } 3070 3071 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 3072 max_txqs); 3073 3074 if (status) { 3075 dev_err(&pf->pdev->dev, 3076 "VSI %d failed TC config, error %d\n", 3077 vsi->vsi_num, status); 3078 ret = -EIO; 3079 goto out; 3080 } 3081 ice_vsi_update_q_map(vsi, ctx); 3082 vsi->info.valid_sections = 0; 3083 3084 ice_vsi_cfg_netdev_tc(vsi, ena_tc); 3085 out: 3086 devm_kfree(&pf->pdev->dev, ctx); 3087 return ret; 3088 } 3089 #endif /* CONFIG_DCB */ 3090