1 /* SPDX-License-Identifier: BSD-3-Clause */ 2 /* Copyright (c) 2020, Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Intel Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 /*$FreeBSD$*/ 32 33 #include "ice_sched.h" 34 35 /** 36 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB 37 * @pi: port information structure 38 * @info: Scheduler element information from firmware 39 * 40 * This function inserts the root node of the scheduling tree topology 41 * to the SW DB. 
 */
static enum ice_status
ice_sched_add_root_node(struct ice_port_info *pi,
			struct ice_aqc_txsched_elem_data *info)
{
	struct ice_sched_node *root;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	root = (struct ice_sched_node *)ice_malloc(hw, sizeof(*root));
	if (!root)
		return ICE_ERR_NO_MEMORY;

	/* coverity[suspicious_sizeof] */
	/* NOTE(review): the children array is sized with sizeof(*root)
	 * (a full node) rather than sizeof(*root->children) (a pointer),
	 * which over-allocates; the coverity suppression above suggests this
	 * is deliberate upstream — confirm before changing.
	 */
	root->children = (struct ice_sched_node **)
		ice_calloc(hw, hw->max_children[0], sizeof(*root));
	if (!root->children) {
		ice_free(hw, root);
		return ICE_ERR_NO_MEMORY;
	}

	/* copy the firmware element data into the new root and publish it */
	ice_memcpy(&root->info, info, sizeof(*info), ICE_DMA_TO_NONDMA);
	pi->root = root;
	return ICE_SUCCESS;
}

/**
 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
 * @teid: node TEID to search
 *
 * This function searches for a node matching the TEID in the scheduling tree
 * from the SW DB. The search is recursive and is restricted by the number of
 * layers it has searched through; stopping at the max supported layer.
 *
 * Returns the matching node, or NULL if no node in the sub-tree has the TEID.
 *
 * This function needs to be called when holding the port_info->sched_lock
 */
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
{
	u16 i;

	/* The TEID is same as that of the start_node */
	if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
		return start_node;

	/* The node has no children or is at the max layer */
	if (!start_node->num_children ||
	    start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
	    start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
		return NULL;

	/* Check if TEID matches to any of the children nodes */
	for (i = 0; i < start_node->num_children; i++)
		if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
			return start_node->children[i];

	/* Search within each child's sub-tree */
	for (i = 0; i < start_node->num_children; i++) {
		struct ice_sched_node *tmp;

		tmp = ice_sched_find_node_by_teid(start_node->children[i],
						  teid);
		if (tmp)
			return tmp;
	}

	return NULL;
}

/**
 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
 * @hw: pointer to the HW struct
 * @cmd_opc: cmd opcode
 * @elems_req: number of elements to request
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_resp: returns total number of elements response
 * @cd: pointer to command details structure or NULL
 *
 * This function sends a scheduling elements cmd (cmd_opc). On success, if
 * @elems_resp is non-NULL it receives the element count reported by firmware.
 */
static enum ice_status
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
			    u16 elems_req, void *buf, u16 buf_size,
			    u16 *elems_resp, struct ice_sq_cd *cd)
{
	struct ice_aqc_sched_elem_cmd *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.sched_elem_cmd;
	ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
	cmd->num_elem_req = CPU_TO_LE16(elems_req);
	/* buffer contents are read by firmware */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && elems_resp)
		*elems_resp = LE16_TO_CPU(cmd->num_elem_resp);

	return status;
}

/**
 * ice_aq_query_sched_elems - query scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements returned
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduling elements (0x0404)
 */
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
			 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
			 u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
 * @pi: port information structure
 * @layer: Scheduler layer of the node
 * @info: Scheduler element information from firmware
 *
 * This function inserts a scheduler node to the SW DB. The node is linked
 * under the parent identified by info->parent_teid, and its element data is
 * refreshed from firmware before insertion.
 */
enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
		   struct ice_aqc_txsched_elem_data *info)
{
	struct ice_aqc_txsched_elem_data elem;
	struct ice_sched_node *parent;
	struct ice_sched_node *node;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	/* A valid parent node should be there */
	parent = ice_sched_find_node_by_teid(pi->root,
					     LE32_TO_CPU(info->parent_teid));
	if (!parent) {
		ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
			  LE32_TO_CPU(info->parent_teid));
		return ICE_ERR_PARAM;
	}

	/* query the current node information from FW before adding it
	 * to the SW DB
	 */
	status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem);
	if (status)
		return status;
	node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node));
	if (!node)
		return ICE_ERR_NO_MEMORY;
	if (hw->max_children[layer]) {
		/* coverity[suspicious_sizeof] */
		/* NOTE(review): sized with sizeof(*node) like the root-node
		 * allocation above — over-allocates vs. sizeof a pointer;
		 * presumably intentional, confirm before changing.
		 */
		node->children = (struct ice_sched_node **)
			ice_calloc(hw, hw->max_children[layer], sizeof(*node));
		if (!node->children) {
			ice_free(hw, node);
			return ICE_ERR_NO_MEMORY;
		}
	}

	node->in_use = true;
	node->parent = parent;
	node->tx_sched_layer = layer;
	/* NOTE(review): no bounds check against hw->max_children here —
	 * callers are presumably expected to respect the per-layer limit
	 * (see ice_sched_add_nodes_to_layer); verify before adding callers.
	 */
	parent->children[parent->num_children++] = node;
	node->info = elem;
	return ICE_SUCCESS;
}

/**
 * ice_aq_delete_sched_elems - delete scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to delete
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_del: returns total number of elements deleted
 * @cd: pointer to command details structure or NULL
 *
 * Delete scheduling elements (0x040F)
 */
static enum ice_status
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
			  struct ice_aqc_delete_elem *buf, u16 buf_size,
			  u16
			  *grps_del, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_del, cd);
}

/**
 * ice_sched_remove_elems - remove nodes from HW
 * @hw: pointer to the HW struct
 * @parent: pointer to the parent node
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be deleted
 *
 * This function removes nodes from HW by sending one delete group containing
 * all the given TEIDs under the parent.
 */
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
		       u16 num_nodes, u32 *node_teids)
{
	struct ice_aqc_delete_elem *buf;
	u16 i, num_groups_removed = 0;
	enum ice_status status;
	u16 buf_size;

	/* header plus one __le32 TEID per node (flexible array member) */
	buf_size = ice_struct_size(buf, teid, num_nodes);
	buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = CPU_TO_LE32(node_teids[i]);

	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
					   &num_groups_removed, NULL);
	if (status != ICE_SUCCESS || num_groups_removed != 1)
		ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
			  hw->adminq.sq_last_status);

	ice_free(hw, buf);
	return status;
}

/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @pi: port information structure
 * @parent: pointer to the base node of the subtree
 * @layer: layer number
 *
 * This function retrieves the first node of the given layer from the subtree
 * (the per-TC sibling-list head maintained in pi->sib_head).
 */
static struct ice_sched_node *
ice_sched_get_first_node(struct ice_port_info *pi,
			 struct ice_sched_node *parent, u8 layer)
{
	return pi->sib_head[parent->tc_num][layer];
}

/**
 * ice_sched_get_tc_node - get pointer to TC node
 * @pi: port information structure
 * @tc: TC number
 *
 * This function returns the TC node pointer, or NULL if @pi has no root or
 * no child of the root carries the given TC number.
 */
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
{
	u8 i;

	if (!pi || !pi->root)
		return NULL;
	for (i = 0; i < pi->root->num_children; i++)
		if (pi->root->children[i]->tc_num == tc)
			return pi->root->children[i];
	return NULL;
}

/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node
	 * The parent array is updated below and that shifts the nodes
	 * in the array. So always pick the first child if num children > 0
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = LE32_TO_CPU(node->info.node_teid);

		ice_sched_remove_elems(hw, node->parent, 1, &teid);
	}
	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p;

		/* update the parent: shift remaining children down to keep
		 * the array dense
		 */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}

		/* unlink the node from the per-layer sibling list */
		p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}

		/* update the sibling head if head is getting removed */
		if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
			pi->sib_head[node->tc_num][node->tx_sched_layer] =
				node->sibling;
	}

	/* leaf nodes have no children */
	if (node->children)
		ice_free(hw, node->children);
	ice_free(hw, node);
}

/**
 * ice_aq_get_dflt_topo - gets default scheduler topology
 * @hw: pointer to the HW struct
 * @lport: logical port number
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_branches: returns total number of queue to port branches
 * @cd: pointer to command details structure or NULL
 *
 * Get default scheduler topology (0x0400)
 */
static enum ice_status
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
		     u8 *num_branches, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_topo *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_topo;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
	cmd->port_num = lport;
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_branches)
		*num_branches = cmd->num_branches;

	return status;
}

/**
 * ice_aq_add_sched_elems - adds scheduling element
 * @hw: pointer to the HW struct
 * @grps_req: the number of groups that are requested to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_added: returns total number of groups added
 * @cd: pointer to command details structure or NULL
 *
 * Add scheduling elements (0x0401)
 */
static enum ice_status
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
		       struct ice_aqc_add_elem *buf, u16 buf_size,
		       u16 *grps_added, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_added, cd);
}

/**
 * ice_aq_cfg_sched_elems - configures scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_cfgd: returns total number of elements configured
 * @cd: pointer to command details structure or NULL
 *
 * Configure scheduling elements (0x0403)
 */
static enum ice_status
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
		       struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
		       u16 *elems_cfgd, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_cfgd, cd);
}

/**
 * ice_aq_move_sched_elems - move scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to move
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_movd: returns total number of groups moved
 * @cd: pointer to command details structure or NULL
 *
 * Move scheduling elements (0x0408)
 */
static enum ice_status
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
			struct ice_aqc_move_elem *buf, u16 buf_size,
			u16 *grps_movd, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_movd, cd);
}

/**
 * ice_aq_suspend_sched_elems - suspend scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to suspend
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements suspended
 * @cd: pointer to command details structure or NULL
 *
 * Suspend scheduling elements (0x0409)
 */
static enum ice_status
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_resume_sched_elems - resume scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to resume
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements resumed
 * @cd: pointer to command details structure or NULL
 *
 * Resume scheduling elements (0x040A)
 */
static enum ice_status
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_query_sched_res - query scheduler resource
 * @hw: pointer to the HW struct
 * @buf_size: buffer size in bytes
 * @buf: pointer to buffer
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduler resource allocation (0x0412)
 */
static enum ice_status
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
		       struct ice_aqc_query_txsched_res_resp *buf,
		       struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
 * @hw: pointer to the HW struct
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes HW nodes. It builds a little-endian TEID
 * buffer from @node_teids and issues the matching admin queue command.
 */
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
			       bool suspend)
{
	u16 i, buf_size, num_elem_ret = 0;
	enum ice_status status;
	__le32 *buf;

	buf_size = sizeof(*buf) * num_nodes;
	buf = (__le32 *)ice_malloc(hw, buf_size);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < num_nodes; i++)
		buf[i] = CPU_TO_LE32(node_teids[i]);

	if (suspend)
		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
						    buf_size, &num_elem_ret,
						    NULL);
	else
		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
						   buf_size, &num_elem_ret,
						   NULL);
	/* partial success (fewer elements than requested) is also a failure */
	if (status != ICE_SUCCESS || num_elem_ret != num_nodes)
		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");

	ice_free(hw, buf);
	return status;
}

/**
 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 *
 * Allocates the per-TC LAN queue context array on first use, or grows it
 * (preserving existing entries) when @new_numqs exceeds the current count.
 */
static enum ice_status
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* allocate LAN queue contexts */
	if (!vsi_ctx->lan_q_ctx[tc]) {
		vsi_ctx->lan_q_ctx[tc] = (struct ice_q_ctx *)
			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
		if (!vsi_ctx->lan_q_ctx[tc])
			return ICE_ERR_NO_MEMORY;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
		return ICE_SUCCESS;
	}
	/* num queues are increased, update the queue contexts */
	if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_lan_q_entries[tc];

		q_ctx = (struct ice_q_ctx *)
			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
		if (!q_ctx)
			return ICE_ERR_NO_MEMORY;
		/* copy the old contexts over, then swap in the larger array */
		ice_memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
			   prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA);
		ice_free(hw, vsi_ctx->lan_q_ctx[tc]);
		vsi_ctx->lan_q_ctx[tc] = q_ctx;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
	}
	return ICE_SUCCESS;
}

/**
 * ice_aq_rl_profile - performs a rate limiting task
 * @hw: pointer to the HW struct
 * @opcode: opcode for add, query, or remove profile(s)
 * @num_profiles: the number of profiles
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_processed: number of processed add or remove profile(s) to return
 * @cd: pointer to command details structure
 *
 * RL profile function to add, query, or remove profile(s)
 */
static enum ice_status
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
		  u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
		  u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
	struct ice_aqc_rl_profile *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.rl_profile;

	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	cmd->num_profiles = CPU_TO_LE16(num_profiles);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_processed)
		*num_processed = LE16_TO_CPU(cmd->num_processed);
	return status;
}

/**
 * ice_aq_add_rl_profile - adds rate limiting profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to be add
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_added: total number of profiles added to return
 * @cd: pointer to command details structure
 *
 * Add RL profile (0x0410)
 */
static enum ice_status
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
		      struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
		      u16 *num_profiles_added, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles,
				 num_profiles,
				 buf, buf_size, num_profiles_added, cd);
}

/**
 * ice_aq_query_rl_profile - query rate limiting profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @cd: pointer to command details structure
 *
 * Query RL profile (0x0411)
 */
enum ice_status
ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
			struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
			struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_query_rl_profiles,
				 num_profiles, buf, buf_size, NULL, cd);
}

/**
 * ice_aq_remove_rl_profile - removes RL profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to remove
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_removed: total number of profiles removed to return
 * @cd: pointer to command details structure or NULL
 *
 * Remove RL profile (0x0415)
 */
static enum ice_status
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
			 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
			 u16 *num_profiles_removed, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
				 num_profiles, buf, buf_size,
				 num_profiles_removed, cd);
}

/**
 * ice_sched_del_rl_profile - remove RL profile
 * @hw: pointer to the HW struct
 * @rl_info: rate limit profile information
 *
 * If the profile ID is not referenced anymore, it removes profile ID with
 * its associated parameters from HW DB, and locally. The caller needs to
 * hold scheduler lock.
723 */ 724 static enum ice_status 725 ice_sched_del_rl_profile(struct ice_hw *hw, 726 struct ice_aqc_rl_profile_info *rl_info) 727 { 728 struct ice_aqc_rl_profile_elem *buf; 729 u16 num_profiles_removed; 730 enum ice_status status; 731 u16 num_profiles = 1; 732 733 if (rl_info->prof_id_ref != 0) 734 return ICE_ERR_IN_USE; 735 736 /* Safe to remove profile ID */ 737 buf = &rl_info->profile; 738 status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf), 739 &num_profiles_removed, NULL); 740 if (status || num_profiles_removed != num_profiles) 741 return ICE_ERR_CFG; 742 743 /* Delete stale entry now */ 744 LIST_DEL(&rl_info->list_entry); 745 ice_free(hw, rl_info); 746 return status; 747 } 748 749 /** 750 * ice_sched_clear_rl_prof - clears RL prof entries 751 * @pi: port information structure 752 * 753 * This function removes all RL profile from HW as well as from SW DB. 754 */ 755 static void ice_sched_clear_rl_prof(struct ice_port_info *pi) 756 { 757 u16 ln; 758 759 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) { 760 struct ice_aqc_rl_profile_info *rl_prof_elem; 761 struct ice_aqc_rl_profile_info *rl_prof_tmp; 762 763 LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp, 764 &pi->rl_prof_list[ln], 765 ice_aqc_rl_profile_info, list_entry) { 766 struct ice_hw *hw = pi->hw; 767 enum ice_status status; 768 769 rl_prof_elem->prof_id_ref = 0; 770 status = ice_sched_del_rl_profile(hw, rl_prof_elem); 771 if (status) { 772 ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); 773 /* On error, free mem required */ 774 LIST_DEL(&rl_prof_elem->list_entry); 775 ice_free(hw, rl_prof_elem); 776 } 777 } 778 } 779 } 780 781 /** 782 * ice_sched_clear_agg - clears the aggregator related information 783 * @hw: pointer to the hardware structure 784 * 785 * This function removes aggregator list and free up aggregator related memory 786 * previously allocated. 
787 */ 788 void ice_sched_clear_agg(struct ice_hw *hw) 789 { 790 struct ice_sched_agg_info *agg_info; 791 struct ice_sched_agg_info *atmp; 792 793 LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &hw->agg_list, 794 ice_sched_agg_info, 795 list_entry) { 796 struct ice_sched_agg_vsi_info *agg_vsi_info; 797 struct ice_sched_agg_vsi_info *vtmp; 798 799 LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp, 800 &agg_info->agg_vsi_list, 801 ice_sched_agg_vsi_info, list_entry) { 802 LIST_DEL(&agg_vsi_info->list_entry); 803 ice_free(hw, agg_vsi_info); 804 } 805 LIST_DEL(&agg_info->list_entry); 806 ice_free(hw, agg_info); 807 } 808 } 809 810 /** 811 * ice_sched_clear_tx_topo - clears the scheduler tree nodes 812 * @pi: port information structure 813 * 814 * This function removes all the nodes from HW as well as from SW DB. 815 */ 816 static void ice_sched_clear_tx_topo(struct ice_port_info *pi) 817 { 818 if (!pi) 819 return; 820 /* remove RL profiles related lists */ 821 ice_sched_clear_rl_prof(pi); 822 if (pi->root) { 823 ice_free_sched_node(pi, pi->root); 824 pi->root = NULL; 825 } 826 } 827 828 /** 829 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port 830 * @pi: port information structure 831 * 832 * Cleanup scheduling elements from SW DB 833 */ 834 void ice_sched_clear_port(struct ice_port_info *pi) 835 { 836 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 837 return; 838 839 pi->port_state = ICE_SCHED_PORT_STATE_INIT; 840 ice_acquire_lock(&pi->sched_lock); 841 ice_sched_clear_tx_topo(pi); 842 ice_release_lock(&pi->sched_lock); 843 ice_destroy_lock(&pi->sched_lock); 844 } 845 846 /** 847 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports 848 * @hw: pointer to the HW struct 849 * 850 * Cleanup scheduling elements from SW DB for all the ports 851 */ 852 void ice_sched_cleanup_all(struct ice_hw *hw) 853 { 854 if (!hw) 855 return; 856 857 if (hw->layer_info) { 858 ice_free(hw, hw->layer_info); 859 hw->layer_info = NULL; 860 } 861 
862 ice_sched_clear_port(hw->port_info); 863 864 hw->num_tx_sched_layers = 0; 865 hw->num_tx_sched_phys_layers = 0; 866 hw->flattened_layers = 0; 867 hw->max_cgds = 0; 868 } 869 870 /** 871 * ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping 872 * @hw: pointer to the HW struct 873 * @num_l2_nodes: the number of L2 nodes whose CGDs to configure 874 * @buf: pointer to buffer 875 * @buf_size: buffer size in bytes 876 * @cd: pointer to command details structure or NULL 877 * 878 * Configure L2 Node CGD (0x0414) 879 */ 880 enum ice_status 881 ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes, 882 struct ice_aqc_cfg_l2_node_cgd_elem *buf, 883 u16 buf_size, struct ice_sq_cd *cd) 884 { 885 struct ice_aqc_cfg_l2_node_cgd *cmd; 886 struct ice_aq_desc desc; 887 888 cmd = &desc.params.cfg_l2_node_cgd; 889 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_l2_node_cgd); 890 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 891 892 cmd->num_l2_nodes = CPU_TO_LE16(num_l2_nodes); 893 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 894 } 895 896 /** 897 * ice_sched_add_elems - add nodes to HW and SW DB 898 * @pi: port information structure 899 * @tc_node: pointer to the branch node 900 * @parent: pointer to the parent node 901 * @layer: layer number to add nodes 902 * @num_nodes: number of nodes 903 * @num_nodes_added: pointer to num nodes added 904 * @first_node_teid: if new nodes are added then return the TEID of first node 905 * 906 * This function add nodes to HW as well as to SW DB for a given layer 907 */ 908 static enum ice_status 909 ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, 910 struct ice_sched_node *parent, u8 layer, u16 num_nodes, 911 u16 *num_nodes_added, u32 *first_node_teid) 912 { 913 struct ice_sched_node *prev, *new_node; 914 struct ice_aqc_add_elem *buf; 915 u16 i, num_groups_added = 0; 916 enum ice_status status = ICE_SUCCESS; 917 struct ice_hw *hw = pi->hw; 918 u16 buf_size; 919 u32 teid; 920 921 buf_size = 
ice_struct_size(buf, generic, num_nodes); 922 buf = (struct ice_aqc_add_elem *)ice_malloc(hw, buf_size); 923 if (!buf) 924 return ICE_ERR_NO_MEMORY; 925 926 buf->hdr.parent_teid = parent->info.node_teid; 927 buf->hdr.num_elems = CPU_TO_LE16(num_nodes); 928 for (i = 0; i < num_nodes; i++) { 929 buf->generic[i].parent_teid = parent->info.node_teid; 930 buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC; 931 buf->generic[i].data.valid_sections = 932 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 933 ICE_AQC_ELEM_VALID_EIR; 934 buf->generic[i].data.generic = 0; 935 buf->generic[i].data.cir_bw.bw_profile_idx = 936 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID); 937 buf->generic[i].data.cir_bw.bw_alloc = 938 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT); 939 buf->generic[i].data.eir_bw.bw_profile_idx = 940 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID); 941 buf->generic[i].data.eir_bw.bw_alloc = 942 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT); 943 } 944 945 status = ice_aq_add_sched_elems(hw, 1, buf, buf_size, 946 &num_groups_added, NULL); 947 if (status != ICE_SUCCESS || num_groups_added != 1) { 948 ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n", 949 hw->adminq.sq_last_status); 950 ice_free(hw, buf); 951 return ICE_ERR_CFG; 952 } 953 954 *num_nodes_added = num_nodes; 955 /* add nodes to the SW DB */ 956 for (i = 0; i < num_nodes; i++) { 957 status = ice_sched_add_node(pi, layer, &buf->generic[i]); 958 if (status != ICE_SUCCESS) { 959 ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n", 960 status); 961 break; 962 } 963 964 teid = LE32_TO_CPU(buf->generic[i].node_teid); 965 new_node = ice_sched_find_node_by_teid(parent, teid); 966 if (!new_node) { 967 ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid); 968 break; 969 } 970 971 new_node->sibling = NULL; 972 new_node->tc_num = tc_node->tc_num; 973 974 /* add it to previous node sibling pointer */ 975 /* Note: siblings are not linked across branches */ 976 prev = ice_sched_get_first_node(pi, 
tc_node, layer); 977 if (prev && prev != new_node) { 978 while (prev->sibling) 979 prev = prev->sibling; 980 prev->sibling = new_node; 981 } 982 983 /* initialize the sibling head */ 984 if (!pi->sib_head[tc_node->tc_num][layer]) 985 pi->sib_head[tc_node->tc_num][layer] = new_node; 986 987 if (i == 0) 988 *first_node_teid = teid; 989 } 990 991 ice_free(hw, buf); 992 return status; 993 } 994 995 /** 996 * ice_sched_add_nodes_to_layer - Add nodes to a given layer 997 * @pi: port information structure 998 * @tc_node: pointer to TC node 999 * @parent: pointer to parent node 1000 * @layer: layer number to add nodes 1001 * @num_nodes: number of nodes to be added 1002 * @first_node_teid: pointer to the first node TEID 1003 * @num_nodes_added: pointer to number of nodes added 1004 * 1005 * This function add nodes to a given layer. 1006 */ 1007 static enum ice_status 1008 ice_sched_add_nodes_to_layer(struct ice_port_info *pi, 1009 struct ice_sched_node *tc_node, 1010 struct ice_sched_node *parent, u8 layer, 1011 u16 num_nodes, u32 *first_node_teid, 1012 u16 *num_nodes_added) 1013 { 1014 u32 *first_teid_ptr = first_node_teid; 1015 u16 new_num_nodes, max_child_nodes; 1016 enum ice_status status = ICE_SUCCESS; 1017 struct ice_hw *hw = pi->hw; 1018 u16 num_added = 0; 1019 u32 temp; 1020 1021 *num_nodes_added = 0; 1022 1023 if (!num_nodes) 1024 return status; 1025 1026 if (!parent || layer < hw->sw_entry_point_layer) 1027 return ICE_ERR_PARAM; 1028 1029 /* max children per node per layer */ 1030 max_child_nodes = hw->max_children[parent->tx_sched_layer]; 1031 1032 /* current number of children + required nodes exceed max children ? 
*/
	if ((parent->num_children + num_nodes) > max_child_nodes) {
		/* Fail if the parent is a TC node */
		if (parent == tc_node)
			return ICE_ERR_CFG;

		/* utilize all the spaces if the parent is not full */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
			/* this recursion is intentional, and wouldn't
			 * go more than 2 calls
			 */
			status = ice_sched_add_nodes_to_layer(pi, tc_node,
							      parent, layer,
							      new_num_nodes,
							      first_node_teid,
							      &num_added);
			if (status != ICE_SUCCESS)
				return status;

			*num_nodes_added += num_added;
		}
		/* Don't modify the first node TEID memory if the first node was
		 * added already in the above call. Instead send some temp
		 * memory for all other recursive calls.
		 */
		if (num_added)
			first_teid_ptr = &temp;

		new_num_nodes = num_nodes - num_added;

		/* This parent is full, try the next sibling.
		 * NOTE(review): if parent has no sibling this becomes NULL and
		 * the recursive call below returns ICE_ERR_PARAM.
		 */
		parent = parent->sibling;

		/* this recursion is intentional, for 1024 queues
		 * per VSI, it goes max of 16 iterations.
		 * 1024 / 8 = 128 layer 8 nodes
		 * 128 /8 = 16 (add 8 nodes per iteration)
		 */
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      layer, new_num_nodes,
						      first_teid_ptr,
						      &num_added);
		*num_nodes_added += num_added;
		return status;
	}

	/* parent has room for all requested nodes - add them in one shot */
	status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
				     num_nodes_added, first_node_teid);
	return status;
}

/**
 * ice_sched_get_qgrp_layer - get the current queue group layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current queue group layer number
 */
static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
{
	/* It's always total layers - 1, the array is 0 relative so -2 */
	return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
}

/**
 * ice_sched_get_vsi_layer - get the current VSI layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current VSI layer number
 */
static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
{
	/* Num Layers       VSI layer
	 *     9               6
	 *     7               4
	 *     5 or less       sw_entry_point_layer
	 */
	/* calculate the VSI layer based on number of layers. */
	if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
		u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;

		if (layer > hw->sw_entry_point_layer)
			return layer;
	}
	/* flattened topologies clamp at the SW entry point layer */
	return hw->sw_entry_point_layer;
}

/**
 * ice_sched_get_agg_layer - get the current aggregator layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current aggregator layer number
 */
static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
{
	/* Num Layers       aggregator layer
	 *     9               4
	 *     7 or less       sw_entry_point_layer
	 */
	/* calculate the aggregator layer based on number of layers.
*/
	if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
		u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;

		if (layer > hw->sw_entry_point_layer)
			return layer;
	}
	/* flattened topologies clamp at the SW entry point layer */
	return hw->sw_entry_point_layer;
}

/**
 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
 * @pi: port information structure
 *
 * This function removes the leaf node that was created by the FW
 * during initialization
 */
static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	node = pi->root;
	/* walk the left-most branch down to the bottom of the tree */
	while (node) {
		if (!node->num_children)
			break;
		node = node->children[0];
	}
	if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = LE32_TO_CPU(node->info.node_teid);
		enum ice_status status;

		/* remove the default leaf node */
		status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
		if (!status)
			ice_free_sched_node(pi, node);
	}
}

/**
 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
 * @pi: port information structure
 *
 * This function frees all the nodes except root and TC that were created by
 * the FW during initialization
 */
static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	ice_rm_dflt_leaf_node(pi);

	/* remove the default nodes except TC and root nodes */
	node = pi->root;
	while (node) {
		if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
			/* frees the node and its whole subtree */
			ice_free_sched_node(pi, node);
			break;
		}

		if (!node->num_children)
			break;
		node = node->children[0];
	}
}

/**
 * ice_sched_init_port - Initialize scheduler by querying information from FW
 * @pi: port info structure for the tree to cleanup
 *
 * This function is the initial call to find the total number of Tx scheduler
 * resources, default topology created by firmware and storing the information
 * in SW DB.
 */
enum ice_status ice_sched_init_port(struct ice_port_info *pi)
{
	struct ice_aqc_get_topo_elem *buf;
	enum ice_status status;
	struct ice_hw *hw;
	u8 num_branches;
	u16 num_elems;
	u8 i, j;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = (struct ice_aqc_get_topo_elem *)ice_malloc(hw,
							 ICE_AQ_MAX_BUF_LEN);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Query default scheduling tree topology */
	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
				      &num_branches, NULL);
	if (status)
		goto err_init_port;

	/* num_branches should be between 1-8 */
	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
			  num_branches);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* get the number of elements on the default/first branch */
	num_elems = LE16_TO_CPU(buf[0].hdr.num_elems);

	/* num_elems should always be between 1-9 */
	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
			  num_elems);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* If the last node is a leaf node then the index of the queue group
	 * layer is two less than the number of elements.
	 */
	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
	    ICE_AQC_ELEM_TYPE_LEAF)
		pi->last_node_teid =
			LE32_TO_CPU(buf[0].generic[num_elems - 2].node_teid);
	else
		pi->last_node_teid =
			LE32_TO_CPU(buf[0].generic[num_elems - 1].node_teid);

	/* Insert the Tx Sched root node */
	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
	if (status)
		goto err_init_port;

	/* Parse the default tree and cache the information */
	for (i = 0; i < num_branches; i++) {
		num_elems = LE16_TO_CPU(buf[i].hdr.num_elems);

		/* Skip root element as already inserted */
		for (j = 1; j < num_elems; j++) {
			/* update the sw entry point */
			/* NOTE(review): buf[0] (not buf[i]) is checked here,
			 * i.e. the entry point layer is derived from the
			 * first branch only - confirm this is intentional.
			 */
			if (buf[0].generic[j].data.elem_type ==
			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
				hw->sw_entry_point_layer = j;

			status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
			if (status)
				goto err_init_port;
		}
	}

	/* Remove the default nodes. */
	if (pi->root)
		ice_sched_rm_dflt_nodes(pi);

	/* initialize the port for handling the scheduler tree */
	pi->port_state = ICE_SCHED_PORT_STATE_READY;
	ice_init_lock(&pi->sched_lock);
	for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
		INIT_LIST_HEAD(&pi->rl_prof_list[i]);

err_init_port:
	/* on failure, tear down whatever part of the tree was built */
	if (status && pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}

	ice_free(hw, buf);
	return status;
}

/**
 * ice_sched_get_node - Get the struct ice_sched_node for given TEID
 * @pi: port information structure
 * @teid: Scheduler node TEID
 *
 * This function retrieves the ice_sched_node struct for given TEID from
 * the SW DB and returns it to the caller.
*/
struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid)
{
	struct ice_sched_node *node;

	if (!pi)
		return NULL;

	/* Find the node starting from root; hold the scheduler lock so the
	 * tree cannot change under the search.
	 */
	ice_acquire_lock(&pi->sched_lock);
	node = ice_sched_find_node_by_teid(pi->root, teid);
	ice_release_lock(&pi->sched_lock);

	if (!node)
		ice_debug(pi->hw, ICE_DBG_SCHED, "Node not found for teid=0x%x\n", teid);

	return node;
}

/**
 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
 * @hw: pointer to the HW struct
 *
 * query FW for allocated scheduler resources and store in HW struct
 */
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
{
	struct ice_aqc_query_txsched_res_resp *buf;
	enum ice_status status = ICE_SUCCESS;
	__le16 max_sibl;
	u8 i;

	/* already cached from a previous query - nothing to do */
	if (hw->layer_info)
		return status;

	buf = (struct ice_aqc_query_txsched_res_resp *)
		ice_malloc(hw, sizeof(*buf));
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
	if (status)
		goto sched_query_out;

	hw->num_tx_sched_layers = LE16_TO_CPU(buf->sched_props.logical_levels);
	hw->num_tx_sched_phys_layers =
		LE16_TO_CPU(buf->sched_props.phys_levels);
	hw->flattened_layers = buf->sched_props.flattening_bitmap;
	hw->max_cgds = buf->sched_props.max_pf_cgds;

	/* max sibling group size of current layer refers to the max children
	 * of the below layer node.
	 * layer 1 node max children will be layer 2 max sibling group size
	 * layer 2 node max children will be layer 3 max sibling group size
	 * and so on. This array will be populated from root (index 0) to
	 * qgroup layer 7. Leaf node has no children.
	 */
	for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
		max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
		hw->max_children[i] = LE16_TO_CPU(max_sibl);
	}

	/* keep a copy of the per-layer properties for later use */
	hw->layer_info = (struct ice_aqc_layer_props *)
			 ice_memdup(hw, buf->layer_props,
				    (hw->num_tx_sched_layers *
				     sizeof(*hw->layer_info)),
				    ICE_DMA_TO_DMA);
	if (!hw->layer_info) {
		status = ICE_ERR_NO_MEMORY;
		goto sched_query_out;
	}

sched_query_out:
	ice_free(hw, buf);
	return status;
}

/**
 * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
 * @hw: pointer to the HW struct
 *
 * Determine the PSM clock frequency and store in HW struct
 */
void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
{
	u32 val, clk_src;

	val = rd32(hw, GLGEN_CLKSTAT_SRC);
	clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
		GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;

#define PSM_CLK_SRC_367_MHZ 0x0
#define PSM_CLK_SRC_416_MHZ 0x1
#define PSM_CLK_SRC_446_MHZ 0x2
#define PSM_CLK_SRC_390_MHZ 0x3

	switch (clk_src) {
	case PSM_CLK_SRC_367_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_416_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_446_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_390_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
		break;
	default:
		ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n",
			  clk_src);
		/* fall back to a safe default */
		hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
	}
}

/**
 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
 * @hw: pointer to the HW struct
 * @base: pointer to the base node
 * @node: pointer to the node to search
 *
 * This function checks whether a given node is part of the base
node 1434 * subtree or not 1435 */ 1436 bool 1437 ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base, 1438 struct ice_sched_node *node) 1439 { 1440 u8 i; 1441 1442 for (i = 0; i < base->num_children; i++) { 1443 struct ice_sched_node *child = base->children[i]; 1444 1445 if (node == child) 1446 return true; 1447 1448 if (child->tx_sched_layer > node->tx_sched_layer) 1449 return false; 1450 1451 /* this recursion is intentional, and wouldn't 1452 * go more than 8 calls 1453 */ 1454 if (ice_sched_find_node_in_subtree(hw, child, node)) 1455 return true; 1456 } 1457 return false; 1458 } 1459 1460 /** 1461 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node 1462 * @pi: port information structure 1463 * @vsi_node: software VSI handle 1464 * @qgrp_node: first queue group node identified for scanning 1465 * @owner: LAN or RDMA 1466 * 1467 * This function retrieves a free LAN or RDMA queue group node by scanning 1468 * qgrp_node and its siblings for the queue group with the fewest number 1469 * of queues currently assigned. 1470 */ 1471 static struct ice_sched_node * 1472 ice_sched_get_free_qgrp(struct ice_port_info *pi, 1473 struct ice_sched_node *vsi_node, 1474 struct ice_sched_node *qgrp_node, u8 owner) 1475 { 1476 struct ice_sched_node *min_qgrp; 1477 u8 min_children; 1478 1479 if (!qgrp_node) 1480 return qgrp_node; 1481 min_children = qgrp_node->num_children; 1482 if (!min_children) 1483 return qgrp_node; 1484 min_qgrp = qgrp_node; 1485 /* scan all queue groups until find a node which has less than the 1486 * minimum number of children. This way all queue group nodes get 1487 * equal number of shares and active. The bandwidth will be equally 1488 * distributed across all queues. 
1489 */ 1490 while (qgrp_node) { 1491 /* make sure the qgroup node is part of the VSI subtree */ 1492 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) 1493 if (qgrp_node->num_children < min_children && 1494 qgrp_node->owner == owner) { 1495 /* replace the new min queue group node */ 1496 min_qgrp = qgrp_node; 1497 min_children = min_qgrp->num_children; 1498 /* break if it has no children, */ 1499 if (!min_children) 1500 break; 1501 } 1502 qgrp_node = qgrp_node->sibling; 1503 } 1504 return min_qgrp; 1505 } 1506 1507 /** 1508 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node 1509 * @pi: port information structure 1510 * @vsi_handle: software VSI handle 1511 * @tc: branch number 1512 * @owner: LAN or RDMA 1513 * 1514 * This function retrieves a free LAN or RDMA queue group node 1515 */ 1516 struct ice_sched_node * 1517 ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 1518 u8 owner) 1519 { 1520 struct ice_sched_node *vsi_node, *qgrp_node; 1521 struct ice_vsi_ctx *vsi_ctx; 1522 u16 max_children; 1523 u8 qgrp_layer; 1524 1525 qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); 1526 max_children = pi->hw->max_children[qgrp_layer]; 1527 1528 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 1529 if (!vsi_ctx) 1530 return NULL; 1531 vsi_node = vsi_ctx->sched.vsi_node[tc]; 1532 /* validate invalid VSI ID */ 1533 if (!vsi_node) 1534 return NULL; 1535 1536 /* get the first queue group node from VSI sub-tree */ 1537 qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); 1538 while (qgrp_node) { 1539 /* make sure the qgroup node is part of the VSI subtree */ 1540 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) 1541 if (qgrp_node->num_children < max_children && 1542 qgrp_node->owner == owner) 1543 break; 1544 qgrp_node = qgrp_node->sibling; 1545 } 1546 1547 /* Select the best queue group */ 1548 return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner); 1549 } 1550 1551 /** 1552 * 
ice_sched_get_vsi_node - Get a VSI node based on VSI ID
 * @pi: pointer to the port information structure
 * @tc_node: pointer to the TC node
 * @vsi_handle: software VSI handle
 *
 * This function retrieves a VSI node for a given VSI ID from a given
 * TC branch
 */
struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		       u16 vsi_handle)
{
	struct ice_sched_node *node;
	u8 vsi_layer;

	vsi_layer = ice_sched_get_vsi_layer(pi->hw);
	node = ice_sched_get_first_node(pi, tc_node, vsi_layer);

	/* Check whether it already exists; returns NULL when no VSI node
	 * with this handle is present in the TC branch.
	 */
	while (node) {
		if (node->vsi_handle == vsi_handle)
			return node;
		node = node->sibling;
	}

	return node;
}

/**
 * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
 * @pi: pointer to the port information structure
 * @tc_node: pointer to the TC node
 * @agg_id: aggregator ID
 *
 * This function retrieves an aggregator node for a given aggregator ID from
 * a given TC branch
 */
static struct ice_sched_node *
ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		       u32 agg_id)
{
	struct ice_sched_node *node;
	struct ice_hw *hw = pi->hw;
	u8 agg_layer;

	if (!hw)
		return NULL;
	agg_layer = ice_sched_get_agg_layer(hw);
	node = ice_sched_get_first_node(pi, tc_node, agg_layer);

	/* Check whether it already exists */
	while (node) {
		if (node->agg_id == agg_id)
			return node;
		node = node->sibling;
	}

	return node;
}

/**
 * ice_sched_check_node - Compare node parameters between SW DB and HW DB
 * @hw: pointer to the HW struct
 * @node: pointer to the ice_sched_node struct
 *
 * This function queries and compares the HW element with SW DB node parameters
 */
static bool
ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node)
{
	struct ice_aqc_txsched_elem_data buf;
	enum ice_status status;
	u32 node_teid;

	node_teid = LE32_TO_CPU(node->info.node_teid);
	status = ice_sched_query_elem(hw, node_teid, &buf);
	if (status != ICE_SUCCESS)
		return false;

	/* whole-struct compare: any field difference is a mismatch */
	if (memcmp(&buf, &node->info, sizeof(buf))) {
		ice_debug(hw, ICE_DBG_SCHED, "Node mismatch for teid=0x%x\n",
			  node_teid);
		return false;
	}

	return true;
}

/**
 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
 * @hw: pointer to the HW struct
 * @num_qs: number of queues
 * @num_nodes: num nodes array
 *
 * This function calculates the number of VSI child nodes based on the
 * number of queues.
 */
static void
ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
{
	u16 num = num_qs;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);

	/* calculate num nodes from queue group to VSI layer */
	for (i = qgl; i > vsil; i--) {
		/* round to the next integer if there is a remainder */
		num = DIVIDE_AND_ROUND_UP(num, hw->max_children[i]);

		/* need at least one node */
		num_nodes[i] = num ? num : 1;
	}
}

/**
 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to the TC node
 * @num_nodes: pointer to the num nodes that needs to be added per layer
 * @owner: node owner (LAN or RDMA)
 *
 * This function adds the VSI child nodes to tree. It gets called for
 * LAN and RDMA separately.
*/
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
			      struct ice_sched_node *tc_node, u16 *num_nodes,
			      u8 owner)
{
	struct ice_sched_node *parent, *node;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);
	parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	/* add the requested number of nodes at every layer below the VSI */
	for (i = vsil + 1; i <= qgl; i++) {
		if (!parent)
			return ICE_ERR_CFG;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* stamp ownership on the new node and its siblings */
			node = parent;
			while (node) {
				node->owner = owner;
				node = node->sibling;
			}
		} else {
			parent = parent->children[0];
		}
	}

	return ICE_SUCCESS;
}

/**
 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
 * @pi: pointer to the port info structure
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function calculates the number of supported nodes needed to add this
 * VSI into Tx tree including the VSI, parent and intermediate nodes in below
 * layers
 */
static void
ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
				 struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *node;
	u8 vsil;
	int i;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	/* walk from the VSI layer up towards the SW entry point layer */
	for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
		/* Add intermediate nodes if TC has no children and
		 * need at least one node for VSI
		 */
		if (!tc_node->num_children || i == vsil) {
			num_nodes[i]++;
		} else {
			/* If intermediate nodes are reached max children
			 * then add a new one.
			 */
			node = ice_sched_get_first_node(pi, tc_node, (u8)i);
			/* scan all the siblings */
			while (node) {
				if (node->num_children <
				    pi->hw->max_children[i])
					break;
				node = node->sibling;
			}

			/* tree has one intermediate node to add this new VSI.
			 * So no need to calculate supported nodes for below
			 * layers.
			 */
			if (node)
				break;
			/* all the nodes are full, allocate a new one */
			num_nodes[i]++;
		}
}

/**
 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function adds the VSI supported nodes into Tx tree including the
 * VSI, its parent and intermediate nodes in below layers
 */
static enum ice_status
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
				struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *parent = tc_node;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, vsil;

	if (!pi)
		return ICE_ERR_PARAM;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      i, num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;

		/* the bottom-most node added is the VSI node - tag it */
		if (i == vsil)
			parent->vsi_handle = vsi_handle;
	}

	return ICE_SUCCESS;
}

/**
 * ice_sched_add_vsi_to_topo - add a new VSI into tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 *
 * This function adds a new VSI into scheduler tree
 */
static enum ice_status
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *tc_node;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;

	/* calculate number of supported nodes needed for this VSI */
	ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);

	/* add VSI supported nodes to TC subtree */
	return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
					       num_nodes);
}

/**
 * ice_sched_update_vsi_child_nodes - update VSI child nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @new_numqs: new number of max queues
 * @owner: owner of this subtree
 *
 * This function updates the VSI child nodes based on the number of queues
 */
static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
				 u8 tc, u16 new_numqs, u8 owner)
{
	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *vsi_node;
	struct ice_sched_node *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u16 prev_numqs;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return ICE_ERR_CFG;

vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;

	prev_numqs = vsi_ctx->sched.max_lanq[tc];
	/* num queues are not changed or less than the previous number */
	if (new_numqs <= prev_numqs)
		return status;
	status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
	if (status)
		return status;

	if (new_numqs)
		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
	/* Keep the max number of queue configuration all the time. Update the
	 * tree only if number of queues > previous number of queues. This may
	 * leave some extra nodes in the tree if number of queues < previous
	 * number but that wouldn't harm anything. Removing those extra nodes
	 * may complicate the code if those nodes are part of SRL or
	 * individually rate limited.
	 */
	status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
					       new_num_nodes, owner);
	if (status)
		return status;
	/* record the new high-water mark only after a successful add */
	vsi_ctx->sched.max_lanq[tc] = new_numqs;

	return ICE_SUCCESS;
}

/**
 * ice_sched_cfg_vsi - configure the new/existing VSI
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @maxqs: max number of queues
 * @owner: LAN or RDMA
 * @enable: TC enabled or disabled
 *
 * This function adds/updates VSI nodes based on the number of queues. If TC is
 * enabled and VSI is in suspended state then resume the VSI back. If TC is
 * disabled then suspend the VSI if it is not already.
 */
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
		  u8 owner, bool enable)
{
	struct ice_sched_node *vsi_node, *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;

	ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;
	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);

	/* suspend the VSI if TC is not enabled */
	if (!enable) {
		if (vsi_node && vsi_node->in_use) {
			u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);

			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
								true);
			if (!status)
				vsi_node->in_use = false;
		}
		return status;
	}

	/* TC is enabled, if it is a new VSI then add it to the tree */
	if (!vsi_node) {
		status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
		if (status)
			return status;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			return ICE_ERR_CFG;

		vsi_ctx->sched.vsi_node[tc] = vsi_node;
		vsi_node->in_use = true;
		/* invalidate the max queues whenever VSI gets added first time
		 * into the scheduler tree (boot or after reset). We need to
		 * recreate the child nodes all the time in these cases.
		 */
		vsi_ctx->sched.max_lanq[tc] = 0;
	}

	/* update the VSI child nodes */
	status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
						  owner);
	if (status)
		return status;

	/* TC is enabled, resume the VSI if it is in the suspend state */
	if (!vsi_node->in_use) {
		u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);

		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
		if (!status)
			vsi_node->in_use = true;
	}

	return status;
}

/**
 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function removes single aggregator VSI info entry from
 * aggregator list.
 */
static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &pi->hw->agg_list,
				 ice_sched_agg_info,
				 list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list,
					 ice_sched_agg_vsi_info, list_entry)
			if (agg_vsi_info->vsi_handle == vsi_handle) {
				LIST_DEL(&agg_vsi_info->list_entry);
				ice_free(pi->hw, agg_vsi_info);
				/* at most one entry per VSI - done */
				return;
			}
	}
}

/**
 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
 * @node: pointer to the sub-tree node
 *
 * This function checks for a leaf node presence in a given sub-tree node.
*/
static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
{
	u8 i;

	/* recursively check every child subtree first */
	for (i = 0; i < node->num_children; i++)
		if (ice_sched_is_leaf_node_present(node->children[i]))
			return true;
	/* check for a leaf node */
	return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
}

/**
 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @owner: LAN or RDMA
 *
 * This function removes the VSI and its LAN or RDMA children nodes from the
 * scheduler tree.
 */
static enum ice_status
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_vsi_ctx *vsi_ctx;
	u8 i;

	ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return status;
	ice_acquire_lock(&pi->sched_lock);
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		goto exit_sched_rm_vsi_cfg;

	ice_for_each_traffic_class(i) {
		struct ice_sched_node *vsi_node, *tc_node;
		u8 j = 0;

		tc_node = ice_sched_get_tc_node(pi, i);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		/* refuse to remove a VSI that still has active queues */
		if (ice_sched_is_leaf_node_present(vsi_node)) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
			status = ICE_ERR_IN_USE;
			goto exit_sched_rm_vsi_cfg;
		}
		while (j < vsi_node->num_children) {
			if (vsi_node->children[j]->owner == owner) {
				ice_free_sched_node(pi, vsi_node->children[j]);

				/* reset the counter again since the num
				 * children will be updated after node removal
				 */
				j = 0;
			} else {
				j++;
			}
		}
		/* remove the VSI if it has no children */
		if (!vsi_node->num_children) {
			ice_free_sched_node(pi, vsi_node);
			vsi_ctx->sched.vsi_node[i] = NULL;

			/* clean up aggregator related VSI info if any */
			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
		}
		if (owner == ICE_SCHED_NODE_OWNER_LAN)
			vsi_ctx->sched.max_lanq[i] = 0;
	}
	status = ICE_SUCCESS;

exit_sched_rm_vsi_cfg:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function clears the VSI and its LAN children nodes from scheduler tree
 * for all TCs.
 */
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_sched_is_tree_balanced - Check tree nodes are identical or not
 * @hw: pointer to the HW struct
 * @node: pointer to the ice_sched_node struct
 *
 * This function compares all the nodes for a given tree against HW DB nodes
 * This function needs to be called with the port_info->sched_lock held
 */
bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node)
{
	u8 i;

	/* start from the leaf node */
	for (i = 0; i < node->num_children; i++)
		/* Fail if node doesn't match with the SW DB
		 * this recursion is intentional, and wouldn't
		 * go more than 9 calls
		 */
		if (!ice_sched_is_tree_balanced(hw, node->children[i]))
			return false;

	return ice_sched_check_node(hw, node);
}

/**
 * ice_aq_query_node_to_root - retrieve the tree topology for a given node TEID
 * @hw: pointer to the HW struct
 * @node_teid: node TEID
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @cd: pointer to command details structure or NULL
 *
 * This function retrieves the tree topology from the firmware for a given
 * node TEID to the root node.
 */
enum ice_status
ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
			  struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
			  struct ice_sq_cd *cd)
{
	struct ice_aqc_query_node_to_root *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.query_node_to_root;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_node_to_root);
	cmd->teid = CPU_TO_LE32(node_teid);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_get_agg_info - get the aggregator info by aggregator ID
 * @hw: pointer to the hardware structure
 * @agg_id: aggregator ID
 *
 * This function validates aggregator ID. The function returns info if
 * aggregator ID is present in list otherwise it returns null.
 */
static struct ice_sched_agg_info *
ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
{
	struct ice_sched_agg_info *agg_info;

	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id)
			return agg_info;

	return NULL;
}

/**
 * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
 * @hw: pointer to the HW struct
 * @node: pointer to a child node
 * @num_nodes: num nodes count array
 *
 * This function walks through the aggregator subtree to find a free parent
 * node. As a side effect it clears num_nodes[layer] for any intermediate
 * layer that turns out to have room, so the caller does not allocate there.
 */
static struct ice_sched_node *
ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
			      u16 *num_nodes)
{
	u8 l = node->tx_sched_layer;
	u8 vsil, i;

	vsil = ice_sched_get_vsi_layer(hw);

	/* Is it VSI parent layer ? */
	if (l == vsil - 1)
		return (node->num_children < hw->max_children[l]) ? node : NULL;

	/* We have intermediate nodes. Let's walk through the subtree. If the
	 * intermediate node has space to add a new node then clear the count
	 */
	if (node->num_children < hw->max_children[l])
		num_nodes[l] = 0;
	/* The below recursive call is intentional and wouldn't go more than
	 * 2 or 3 iterations.
	 */

	for (i = 0; i < node->num_children; i++) {
		struct ice_sched_node *parent;

		parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
						       num_nodes);
		if (parent)
			return parent;
	}

	return NULL;
}

/**
 * ice_sched_update_parent - update the new parent in SW DB
 * @new_parent: pointer to a new parent node
 * @node: pointer to a child node
 *
 * This function removes the child from the old parent and adds it to a new
 * parent. Only the SW DB is touched; the HW move has already been done by
 * the caller.
 */
static void
ice_sched_update_parent(struct ice_sched_node *new_parent,
			struct ice_sched_node *node)
{
	struct ice_sched_node *old_parent;
	u8 i, j;

	old_parent = node->parent;

	/* update the old parent children: find the node and compact the
	 * array over its slot
	 */
	for (i = 0; i < old_parent->num_children; i++)
		if (old_parent->children[i] == node) {
			for (j = i + 1; j < old_parent->num_children; j++)
				old_parent->children[j - 1] =
					old_parent->children[j];
			old_parent->num_children--;
			break;
		}

	/* now move the node to a new parent */
	new_parent->children[new_parent->num_children++] = node;
	node->parent = new_parent;
	node->info.parent_teid = new_parent->info.node_teid;
}

/**
 * ice_sched_move_nodes - move child nodes to a given parent
 * @pi: port information structure
 * @parent: pointer to parent node
 * @num_items: number of child nodes to be moved
 * @list: pointer to child node teids
 *
 * This function moves the child nodes to a given parent.
 */
static enum ice_status
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
		     u16 num_items, u32 *list)
{
	enum ice_status status = ICE_SUCCESS;
	struct ice_aqc_move_elem *buf;
	struct ice_sched_node *node;
	u16 i, grps_movd = 0;
	struct ice_hw *hw;
	u16 buf_len;

	hw = pi->hw;

	if (!parent || !num_items)
		return ICE_ERR_PARAM;

	/* Does parent have enough space */
	if (parent->num_children + num_items >
	    hw->max_children[parent->tx_sched_layer])
		return ICE_ERR_AQ_FULL;

	/* single-TEID buffer, reused for each element moved */
	buf_len = ice_struct_size(buf, teid, 1);
	buf = (struct ice_aqc_move_elem *)ice_malloc(hw, buf_len);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < num_items; i++) {
		node = ice_sched_find_node_by_teid(pi->root, list[i]);
		if (!node) {
			status = ICE_ERR_PARAM;
			goto move_err_exit;
		}

		buf->hdr.src_parent_teid = node->info.parent_teid;
		buf->hdr.dest_parent_teid = parent->info.node_teid;
		buf->teid[0] = node->info.node_teid;
		buf->hdr.num_elems = CPU_TO_LE16(1);
		status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
						 &grps_movd, NULL);
		/* NOTE(review): this errors out only when the AQ call failed
		 * AND the moved-group count is wrong; a zero status with
		 * grps_movd != 1 would fall through to the SW DB update —
		 * confirm this matches the firmware contract
		 */
		if (status && grps_movd != 1) {
			status = ICE_ERR_CFG;
			goto move_err_exit;
		}

		/* update the SW DB */
		ice_sched_update_parent(parent, node);
	}

move_err_exit:
	ice_free(hw, buf);
	return status;
}

/**
 * ice_sched_move_vsi_to_agg - move VSI to aggregator node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function moves a VSI to an aggregator node or its subtree.
 * Intermediate nodes may be created if required.
 */
static enum ice_status
ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
			  u8 tc)
{
	struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	u32 first_node_teid, vsi_teid;
	enum ice_status status;
	u16 num_nodes_added;
	u8 aggl, vsil, i;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		return ICE_ERR_DOES_NOT_EXIST;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Is this VSI already part of given aggregator? */
	if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
		return ICE_SUCCESS;

	aggl = ice_sched_get_agg_layer(pi->hw);
	vsil = ice_sched_get_vsi_layer(pi->hw);

	/* set intermediate node count to 1 between aggregator and VSI layers */
	for (i = aggl + 1; i < vsil; i++)
		num_nodes[i] = 1;

	/* Check if the aggregator subtree has any free node to add the VSI */
	for (i = 0; i < agg_node->num_children; i++) {
		parent = ice_sched_get_free_vsi_parent(pi->hw,
						       agg_node->children[i],
						       num_nodes);
		if (parent)
			goto move_nodes;
	}

	/* add new nodes */
	parent = agg_node;
	for (i = aggl + 1; i < vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;
	}

move_nodes:
	vsi_teid = LE32_TO_CPU(vsi_node->info.node_teid);
	return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
}

/**
 * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
 * @pi: port information structure
 * @agg_info: aggregator info
 * @tc: traffic class number
 * @rm_vsi_info: true or false
 *
 * This function moves all the VSI(s) to the default aggregator and deletes
 * the aggregator VSI info based on passed in boolean parameter rm_vsi_info.
 * The caller holds the scheduler lock.
 */
static enum ice_status
ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
			     struct ice_sched_agg_info *agg_info, u8 tc,
			     bool rm_vsi_info)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_agg_vsi_info *tmp;
	enum ice_status status = ICE_SUCCESS;

	LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
				 ice_sched_agg_vsi_info, list_entry) {
		u16 vsi_handle = agg_vsi_info->vsi_handle;

		/* Move VSI to default aggregator */
		if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
			continue;

		status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
						   ICE_DFLT_AGG_ID, tc);
		if (status)
			break;

		ice_clear_bit(tc, agg_vsi_info->tc_bitmap);
		/* entry is no longer needed once no TC references it */
		if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
			LIST_DEL(&agg_vsi_info->list_entry);
			ice_free(pi->hw, agg_vsi_info);
		}
	}

	return status;
}

/**
 * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
 * @pi: port information structure
 * @node: node pointer
 *
 * This function checks whether the aggregator is attached with any VSI or not.
 */
static bool
ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
{
	u8 vsil, i;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	if (node->tx_sched_layer < vsil - 1) {
		/* above the VSI parent layer: recurse into each child */
		for (i = 0; i < node->num_children; i++)
			if (ice_sched_is_agg_inuse(pi, node->children[i]))
				return true;
		return false;
	} else {
		/* at the VSI parent layer: any child means a VSI hangs here */
		return node->num_children ? true : false;
	}
}

/**
 * ice_sched_rm_agg_cfg - remove the aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function removes the aggregator node and intermediate nodes if any
 * from the given TC
 */
static enum ice_status
ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	struct ice_sched_node *tc_node, *agg_node;
	struct ice_hw *hw = pi->hw;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Can't remove the aggregator node if it has children */
	if (ice_sched_is_agg_inuse(pi, agg_node))
		return ICE_ERR_IN_USE;

	/* need to remove the whole subtree if aggregator node is the
	 * only child.
	 */
	while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
		struct ice_sched_node *parent = agg_node->parent;

		if (!parent)
			return ICE_ERR_CFG;

		if (parent->num_children > 1)
			break;

		agg_node = parent;
	}

	/* frees agg_node and everything below it */
	ice_free_sched_node(pi, agg_node);
	return ICE_SUCCESS;
}

/**
 * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
 * @pi: port information structure
 * @agg_info: aggregator info
 * @tc: TC number
 * @rm_vsi_info: bool value true or false
 *
 * This function removes aggregator reference to VSI of given TC. It removes
 * the aggregator configuration completely for requested TC. The caller needs
 * to hold the scheduler lock.
 */
static enum ice_status
ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
		  u8 tc, bool rm_vsi_info)
{
	enum ice_status status = ICE_SUCCESS;

	/* If nothing to remove - return success */
	if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
		goto exit_rm_agg_cfg_tc;

	/* move the aggregator's VSIs back to the default aggregator first */
	status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info);
	if (status)
		goto exit_rm_agg_cfg_tc;

	/* Delete aggregator node(s) */
	status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc);
	if (status)
		goto exit_rm_agg_cfg_tc;

	ice_clear_bit(tc, agg_info->tc_bitmap);
exit_rm_agg_cfg_tc:
	return status;
}

/**
 * ice_save_agg_tc_bitmap - save aggregator TC bitmap
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc_bitmap: 8 bits TC bitmap
 *
 * Save aggregator TC bitmap. This function needs to be called with scheduler
 * lock held.
 */
static enum ice_status
ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
		       ice_bitmap_t *tc_bitmap)
{
	struct ice_sched_agg_info *agg_info;

	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info)
		return ICE_ERR_PARAM;
	/* kept for re-applying the config after reset/replay */
	ice_cp_bitmap(agg_info->replay_tc_bitmap, tc_bitmap,
		      ICE_MAX_TRAFFIC_CLASS);
	return ICE_SUCCESS;
}

/**
 * ice_sched_add_agg_cfg - create an aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function creates an aggregator node and intermediate nodes if required
 * for the given TC
 */
static enum ice_status
ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	struct ice_sched_node *parent, *agg_node, *tc_node;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u32 first_node_teid;
	u16 num_nodes_added;
	u8 i, aggl;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	/* Does Agg node already exist ? */
	if (agg_node)
		return status;

	aggl = ice_sched_get_agg_layer(hw);

	/* need one node in Agg layer */
	num_nodes[aggl] = 1;

	/* Check whether the intermediate nodes have space to add the
	 * new aggregator. If they are full, then SW needs to allocate a new
	 * intermediate node on those layers
	 */
	for (i = hw->sw_entry_point_layer; i < aggl; i++) {
		parent = ice_sched_get_first_node(pi, tc_node, i);

		/* scan all the siblings */
		while (parent) {
			if (parent->num_children < hw->max_children[i])
				break;
			parent = parent->sibling;
		}

		/* all the nodes are full, reserve one for this layer */
		if (!parent)
			num_nodes[i]++;
	}

	/* add the aggregator node */
	parent = tc_node;
	for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
		if (!parent)
			return ICE_ERR_CFG;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* register aggregator ID with the aggregator node */
			if (parent && i == aggl)
				parent->agg_id = agg_id;
		} else {
			parent = parent->children[0];
		}
	}

	return ICE_SUCCESS;
}

/**
 * ice_sched_cfg_agg - configure aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @agg_type: aggregator type queue, VSI, or aggregator group
 * @tc_bitmap: bits TC bitmap
 *
 * It registers a unique aggregator node into scheduler services. It
 * allows a user to register with a unique ID to track it's resources.
 * The aggregator type determines if this is a queue group, VSI group
 * or aggregator group. It then creates the aggregator node(s) for requested
 * TC(s) or removes an existing aggregator node including its configuration
 * if indicated via tc_bitmap. Call ice_rm_agg_cfg to release aggregator
 * resources and remove aggregator ID.
 * This function needs to be called with scheduler lock held.
 */
static enum ice_status
ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
		  enum ice_agg_type agg_type, ice_bitmap_t *tc_bitmap)
{
	struct ice_sched_agg_info *agg_info;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u8 tc;

	agg_info = ice_get_agg_info(hw, agg_id);
	if (!agg_info) {
		/* Create new entry for new aggregator ID */
		agg_info = (struct ice_sched_agg_info *)
			ice_malloc(hw, sizeof(*agg_info));
		if (!agg_info) {
			status = ICE_ERR_NO_MEMORY;
			goto exit_reg_agg;
		}
		agg_info->agg_id = agg_id;
		agg_info->agg_type = agg_type;
		agg_info->tc_bitmap[0] = 0;

		/* Initialize the aggregator VSI list head */
		INIT_LIST_HEAD(&agg_info->agg_vsi_list);

		/* Add new entry in aggregator list */
		LIST_ADD(&agg_info->list_entry, &hw->agg_list);
	}
	/* Create aggregator node(s) for requested TC(s) */
	ice_for_each_traffic_class(tc) {
		if (!ice_is_tc_ena(*tc_bitmap, tc)) {
			/* Delete aggregator cfg TC if it exists previously */
			status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
			if (status)
				break;
			continue;
		}

		/* Check if aggregator node for TC already exists */
		if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
			continue;

		/* Create new aggregator node for TC */
		status = ice_sched_add_agg_cfg(pi, agg_id, tc);
		if (status)
			break;

		/* Save aggregator node's TC information */
		ice_set_bit(tc, agg_info->tc_bitmap);
	}
exit_reg_agg:
	return status;
}

/**
 * ice_cfg_agg - config aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @agg_type: aggregator type queue, VSI, or aggregator group
 * @tc_bitmap: bits TC bitmap
 *
 * This function configures aggregator node(s).
 */
enum ice_status
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
	    u8 tc_bitmap)
{
	/* widen the 8-bit TC bitmap to the bitmap type the helpers expect */
	ice_bitmap_t bitmap = tc_bitmap;
	enum ice_status status;

	ice_acquire_lock(&pi->sched_lock);
	status = ice_sched_cfg_agg(pi, agg_id, agg_type,
				   (ice_bitmap_t *)&bitmap);
	if (!status)
		status = ice_save_agg_tc_bitmap(pi, agg_id,
						(ice_bitmap_t *)&bitmap);
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_get_agg_vsi_info - get the aggregator VSI info entry
 * @agg_info: aggregator info
 * @vsi_handle: software VSI handle
 *
 * The function returns aggregator VSI info based on VSI handle. This function
 * needs to be called with scheduler lock held.
 */
static struct ice_sched_agg_vsi_info *
ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;

	LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
			    ice_sched_agg_vsi_info, list_entry)
		if (agg_vsi_info->vsi_handle == vsi_handle)
			return agg_vsi_info;

	return NULL;
}

/**
 * ice_get_vsi_agg_info - get the aggregator info of VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: Sw VSI handle
 *
 * The function returns aggregator info of VSI represented via vsi_handle. The
 * VSI has in this case a different aggregator than the default one. This
 * function needs to be called with scheduler lock held.
 */
static struct ice_sched_agg_info *
ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_sched_agg_info *agg_info;

	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;

		agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
		if (agg_vsi_info)
			return agg_info;
	}
	return NULL;
}

/**
 * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap of enabled TC(s)
 *
 * Save VSI to aggregator TC bitmap. This function needs to call with scheduler
 * lock held.
 */
static enum ice_status
ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
			   ice_bitmap_t *tc_bitmap)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_agg_info *agg_info;

	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info)
		return ICE_ERR_PARAM;
	/* check if entry already exist */
	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
	if (!agg_vsi_info)
		return ICE_ERR_PARAM;
	/* kept for re-applying the config after reset/replay */
	ice_cp_bitmap(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
		      ICE_MAX_TRAFFIC_CLASS);
	return ICE_SUCCESS;
}

/**
 * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap of enabled TC(s)
 *
 * This function moves VSI to a new or default aggregator node. If VSI is
 * already associated to the aggregator node then no operation is performed on
 * the tree. This function needs to be called with scheduler lock held.
 */
static enum ice_status
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
			   u16 vsi_handle, ice_bitmap_t *tc_bitmap)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_agg_info *agg_info;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	agg_info = ice_get_agg_info(hw, agg_id);
	if (!agg_info)
		return ICE_ERR_PARAM;
	/* check if entry already exist */
	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
	if (!agg_vsi_info) {
		/* Create new entry for VSI under aggregator list */
		agg_vsi_info = (struct ice_sched_agg_vsi_info *)
			ice_malloc(hw, sizeof(*agg_vsi_info));
		/* NOTE(review): allocation failure reports ICE_ERR_PARAM
		 * rather than ICE_ERR_NO_MEMORY — confirm intentional
		 */
		if (!agg_vsi_info)
			return ICE_ERR_PARAM;

		/* add VSI ID into the aggregator list */
		agg_vsi_info->vsi_handle = vsi_handle;
		LIST_ADD(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list);
	}
	/* Move VSI node to new aggregator node for requested TC(s) */
	ice_for_each_traffic_class(tc) {
		if (!ice_is_tc_ena(*tc_bitmap, tc))
			continue;

		/* Move VSI to new aggregator */
		status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc);
		if (status)
			break;

		if (agg_id != ICE_DFLT_AGG_ID)
			ice_set_bit(tc, agg_vsi_info->tc_bitmap);
		else
			ice_clear_bit(tc, agg_vsi_info->tc_bitmap);
	}
	/* If VSI moved back to default aggregator, delete agg_vsi_info. */
	if (!ice_is_any_bit_set(agg_vsi_info->tc_bitmap,
				ICE_MAX_TRAFFIC_CLASS)) {
		LIST_DEL(&agg_vsi_info->list_entry);
		ice_free(hw, agg_vsi_info);
	}
	return status;
}

/**
 * ice_sched_rm_unused_rl_prof - remove unused RL profile
 * @pi: port information structure
 *
 * This function removes unused rate limit profiles from the HW and
 * SW DB. The caller needs to hold scheduler lock.
 */
static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
{
	u16 ln;

	/* try to delete every profile on every layer; profiles still
	 * referenced are simply skipped by ice_sched_del_rl_profile
	 */
	for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
		struct ice_aqc_rl_profile_info *rl_prof_elem;
		struct ice_aqc_rl_profile_info *rl_prof_tmp;

		LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
					 &pi->rl_prof_list[ln],
					 ice_aqc_rl_profile_info, list_entry) {
			if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
				ice_debug(pi->hw, ICE_DBG_SCHED, "Removed rl profile\n");
		}
	}
}

/**
 * ice_sched_update_elem - update element
 * @hw: pointer to the HW struct
 * @node: pointer to node
 * @info: node info to update
 *
 * Update the HW DB, and local SW DB of node. Update the scheduling
 * parameters of node from argument info data buffer (Info->data buf) and
 * returns success or error on config sched element failure. The caller
 * needs to hold scheduler lock.
 */
static enum ice_status
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
		      struct ice_aqc_txsched_elem_data *info)
{
	struct ice_aqc_txsched_elem_data buf;
	enum ice_status status;
	u16 elem_cfgd = 0;
	u16 num_elems = 1;

	buf = *info;
	/* Parent TEID is reserved field in this aq call */
	buf.parent_teid = 0;
	/* Element type is reserved field in this aq call */
	buf.data.elem_type = 0;
	/* Flags is reserved field in this aq call */
	buf.data.flags = 0;

	/* Update HW DB */
	/* Configure element node */
	status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
					&elem_cfgd, NULL);
	if (status || elem_cfgd != num_elems) {
		ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
		return ICE_ERR_CFG;
	}

	/* Config success case */
	/* Now update local SW DB */
	/* Only copy the data portion of info buffer */
	node->info.data = info->data;
	return status;
}

/**
 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @rl_type: rate limit type CIR, EIR, or shared
 * @bw_alloc: BW weight/allocation
 *
 * This function configures node element's BW allocation.
 */
static enum ice_status
ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
			    enum ice_rl_type rl_type, u16 bw_alloc)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;
	enum ice_status status;

	/* work on a copy; node->info is updated only on HW success */
	buf = node->info;
	data = &buf.data;
	if (rl_type == ICE_MIN_BW) {
		data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
		data->cir_bw.bw_alloc = CPU_TO_LE16(bw_alloc);
	} else if (rl_type == ICE_MAX_BW) {
		data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
		data->eir_bw.bw_alloc = CPU_TO_LE16(bw_alloc);
	} else {
		return ICE_ERR_PARAM;
	}

	/* Configure element */
	status = ice_sched_update_elem(hw, node, &buf);
	return status;
}

/**
 * ice_move_vsi_to_agg - moves VSI to new or default aggregator
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap of enabled TC(s)
 *
 * Move or associate VSI to a new or default aggregator node.
 */
enum ice_status
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
		    u8 tc_bitmap)
{
	/* widen the 8-bit TC bitmap to the bitmap type the helpers expect */
	ice_bitmap_t bitmap = tc_bitmap;
	enum ice_status status;

	ice_acquire_lock(&pi->sched_lock);
	status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
					    (ice_bitmap_t *)&bitmap);
	if (!status)
		status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle,
						    (ice_bitmap_t *)&bitmap);
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_rm_agg_cfg - remove aggregator configuration
 * @pi: port information structure
 * @agg_id: aggregator ID
 *
 * This function removes aggregator reference to VSI and delete aggregator ID
 * info. It removes the aggregator configuration completely.
 */
enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id)
{
	struct ice_sched_agg_info *agg_info;
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	ice_acquire_lock(&pi->sched_lock);
	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit_ice_rm_agg_cfg;
	}

	ice_for_each_traffic_class(tc) {
		status = ice_rm_agg_cfg_tc(pi, agg_info, tc, true);
		if (status)
			goto exit_ice_rm_agg_cfg;
	}

	if (ice_is_any_bit_set(agg_info->tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) {
		status = ICE_ERR_IN_USE;
		goto exit_ice_rm_agg_cfg;
	}

	/* Safe to delete entry now */
	LIST_DEL(&agg_info->list_entry);
	ice_free(pi->hw, agg_info);

	/* Remove unused RL profile IDs from HW and SW DB */
	ice_sched_rm_unused_rl_prof(pi);

exit_ice_rm_agg_cfg:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_set_clear_cir_bw_alloc - set or clear CIR BW alloc information
 * @bw_t_info: bandwidth type information structure
 * @bw_alloc: Bandwidth allocation
information 3073 * 3074 * Save or clear CIR BW alloc information (bw_alloc) in the passed param 3075 * bw_t_info. 3076 */ 3077 static void 3078 ice_set_clear_cir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc) 3079 { 3080 bw_t_info->cir_bw.bw_alloc = bw_alloc; 3081 if (bw_t_info->cir_bw.bw_alloc) 3082 ice_set_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap); 3083 else 3084 ice_clear_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap); 3085 } 3086 3087 /** 3088 * ice_set_clear_eir_bw_alloc - set or clear EIR BW alloc information 3089 * @bw_t_info: bandwidth type information structure 3090 * @bw_alloc: Bandwidth allocation information 3091 * 3092 * Save or clear EIR BW alloc information (bw_alloc) in the passed param 3093 * bw_t_info. 3094 */ 3095 static void 3096 ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc) 3097 { 3098 bw_t_info->eir_bw.bw_alloc = bw_alloc; 3099 if (bw_t_info->eir_bw.bw_alloc) 3100 ice_set_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap); 3101 else 3102 ice_clear_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap); 3103 } 3104 3105 /** 3106 * ice_sched_save_vsi_bw_alloc - save VSI node's BW alloc information 3107 * @pi: port information structure 3108 * @vsi_handle: sw VSI handle 3109 * @tc: traffic class 3110 * @rl_type: rate limit type min or max 3111 * @bw_alloc: Bandwidth allocation information 3112 * 3113 * Save BW alloc information of VSI type node for post replay use. 
 */
static enum ice_status
ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			    enum ice_rl_type rl_type, u16 bw_alloc)
{
	struct ice_vsi_ctx *vsi_ctx;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* Only min (CIR) and max (EIR) BW alloc types can be saved here */
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
					   bw_alloc);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
					   bw_alloc);
		break;
	default:
		return ICE_ERR_PARAM;
	}
	return ICE_SUCCESS;
}

/**
 * ice_set_clear_cir_bw - set or clear CIR BW
 * @bw_t_info: bandwidth type information structure
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
 * ICE_SCHED_DFLT_BW acts as the "clear" sentinel.
 */
static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
	if (bw == ICE_SCHED_DFLT_BW) {
		ice_clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
		bw_t_info->cir_bw.bw = 0;
	} else {
		/* Save type of BW information */
		ice_set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
		bw_t_info->cir_bw.bw = bw;
	}
}

/**
 * ice_set_clear_eir_bw - set or clear EIR BW
 * @bw_t_info: bandwidth type information structure
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
 */
static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
	if (bw == ICE_SCHED_DFLT_BW) {
		ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
		bw_t_info->eir_bw.bw = 0;
	} else {
		/* EIR BW and Shared BW profiles are mutually exclusive and
		 * hence only one of them may be set for any given element.
		 * First clear earlier saved shared BW information.
		 */
		ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
		bw_t_info->shared_bw = 0;
		/* save EIR BW information */
		ice_set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
		bw_t_info->eir_bw.bw = bw;
	}
}

/**
 * ice_set_clear_shared_bw - set or clear shared BW
 * @bw_t_info: bandwidth type information structure
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
 */
static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
	if (bw == ICE_SCHED_DFLT_BW) {
		ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
		bw_t_info->shared_bw = 0;
	} else {
		/* EIR BW and Shared BW profiles are mutually exclusive and
		 * hence only one of them may be set for any given element.
		 * First clear earlier saved EIR BW information.
		 */
		ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
		bw_t_info->eir_bw.bw = 0;
		/* save shared BW information */
		ice_set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
		bw_t_info->shared_bw = bw;
	}
}

/**
 * ice_sched_save_vsi_bw - save VSI node's BW information
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save BW information of VSI type node for post replay use.
 */
static enum ice_status
ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      enum ice_rl_type rl_type, u32 bw)
{
	struct ice_vsi_ctx *vsi_ctx;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* Dispatch to the matching CIR/EIR/shared save helper */
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
		break;
	case ICE_SHARED_BW:
		ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
		break;
	default:
		return ICE_ERR_PARAM;
	}
	return ICE_SUCCESS;
}

/**
 * ice_set_clear_prio - set or clear priority information
 * @bw_t_info: bandwidth type information structure
 * @prio: priority to save
 *
 * Save or clear priority (prio) in the passed param bw_t_info.
 * A zero priority clears the saved state.
 */
static void ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio)
{
	bw_t_info->generic = prio;
	if (bw_t_info->generic)
		ice_set_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
	else
		ice_clear_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
}

/**
 * ice_sched_save_vsi_prio - save VSI node's priority information
 * @pi: port information structure
 * @vsi_handle: Software VSI handle
 * @tc: traffic class
 * @prio: priority to save
 *
 * Save priority information of VSI type node for post replay use.
3271 */ 3272 static enum ice_status 3273 ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 3274 u8 prio) 3275 { 3276 struct ice_vsi_ctx *vsi_ctx; 3277 3278 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 3279 return ICE_ERR_PARAM; 3280 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 3281 if (!vsi_ctx) 3282 return ICE_ERR_PARAM; 3283 if (tc >= ICE_MAX_TRAFFIC_CLASS) 3284 return ICE_ERR_PARAM; 3285 ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio); 3286 return ICE_SUCCESS; 3287 } 3288 3289 /** 3290 * ice_sched_save_agg_bw_alloc - save aggregator node's BW alloc information 3291 * @pi: port information structure 3292 * @agg_id: node aggregator ID 3293 * @tc: traffic class 3294 * @rl_type: rate limit type min or max 3295 * @bw_alloc: bandwidth alloc information 3296 * 3297 * Save BW alloc information of AGG type node for post replay use. 3298 */ 3299 static enum ice_status 3300 ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc, 3301 enum ice_rl_type rl_type, u16 bw_alloc) 3302 { 3303 struct ice_sched_agg_info *agg_info; 3304 3305 agg_info = ice_get_agg_info(pi->hw, agg_id); 3306 if (!agg_info) 3307 return ICE_ERR_PARAM; 3308 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) 3309 return ICE_ERR_PARAM; 3310 switch (rl_type) { 3311 case ICE_MIN_BW: 3312 ice_set_clear_cir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc); 3313 break; 3314 case ICE_MAX_BW: 3315 ice_set_clear_eir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc); 3316 break; 3317 default: 3318 return ICE_ERR_PARAM; 3319 } 3320 return ICE_SUCCESS; 3321 } 3322 3323 /** 3324 * ice_sched_save_agg_bw - save aggregator node's BW information 3325 * @pi: port information structure 3326 * @agg_id: node aggregator ID 3327 * @tc: traffic class 3328 * @rl_type: rate limit type min, max, or shared 3329 * @bw: bandwidth in Kbps - Kilo bits per sec 3330 * 3331 * Save BW information of AGG type node for post replay use. 
 */
static enum ice_status
ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
		      enum ice_rl_type rl_type, u32 bw)
{
	struct ice_sched_agg_info *agg_info;

	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info)
		return ICE_ERR_PARAM;
	/* The aggregator must already be enabled on this TC */
	if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
		return ICE_ERR_PARAM;
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw(&agg_info->bw_t_info[tc], bw);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw(&agg_info->bw_t_info[tc], bw);
		break;
	case ICE_SHARED_BW:
		ice_set_clear_shared_bw(&agg_info->bw_t_info[tc], bw);
		break;
	default:
		return ICE_ERR_PARAM;
	}
	return ICE_SUCCESS;
}

/**
 * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function configures BW limit of VSI scheduling node based on TC
 * information.
 */
enum ice_status
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			  enum ice_rl_type rl_type, u32 bw)
{
	enum ice_status status;

	status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
						  ICE_AGG_TYPE_VSI,
						  tc, rl_type, bw);
	if (!status) {
		/* Save the value only after HW accepted it, for replay */
		ice_acquire_lock(&pi->sched_lock);
		status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
		ice_release_lock(&pi->sched_lock);
	}
	return status;
}

/**
 * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class
 * @rl_type: min or max
 *
 * This function configures default BW limit of VSI scheduling node based on TC
 * information.
 */
enum ice_status
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			       enum ice_rl_type rl_type)
{
	enum ice_status status;

	/* ICE_SCHED_DFLT_BW restores the node to its default (no limit) */
	status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
						  ICE_AGG_TYPE_VSI,
						  tc, rl_type,
						  ICE_SCHED_DFLT_BW);
	if (!status) {
		ice_acquire_lock(&pi->sched_lock);
		status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
					       ICE_SCHED_DFLT_BW);
		ice_release_lock(&pi->sched_lock);
	}
	return status;
}

/**
 * ice_cfg_agg_bw_lmt_per_tc - configure aggregator BW limit per TC
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: traffic class
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function applies BW limit to aggregator scheduling node based on TC
 * information.
 */
enum ice_status
ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
			  enum ice_rl_type rl_type, u32 bw)
{
	enum ice_status status;

	status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
						  tc, rl_type, bw);
	if (!status) {
		/* Save the value only after HW accepted it, for replay */
		ice_acquire_lock(&pi->sched_lock);
		status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
		ice_release_lock(&pi->sched_lock);
	}
	return status;
}

/**
 * ice_cfg_agg_bw_dflt_lmt_per_tc - configure aggregator BW default limit per TC
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: traffic class
 * @rl_type: min or max
 *
 * This function applies default BW limit to aggregator scheduling node based
 * on TC information.
 */
enum ice_status
ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
			       enum ice_rl_type rl_type)
{
	enum ice_status status;

	/* ICE_SCHED_DFLT_BW restores the node to its default (no limit) */
	status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
						  tc, rl_type,
						  ICE_SCHED_DFLT_BW);
	if (!status) {
		ice_acquire_lock(&pi->sched_lock);
		status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type,
					       ICE_SCHED_DFLT_BW);
		ice_release_lock(&pi->sched_lock);
	}
	return status;
}

/**
 * ice_cfg_vsi_bw_shared_lmt - configure VSI BW shared limit
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @bw: bandwidth in Kbps
 *
 * This function Configures shared rate limiter(SRL) of all VSI type nodes
 * across all traffic classes for VSI matching handle.
 */
enum ice_status
ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 bw)
{
	return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, bw);
}

/**
 * ice_cfg_vsi_bw_no_shared_lmt - configure VSI BW for no shared limiter
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function removes the shared rate limiter(SRL) of all VSI type nodes
 * across all traffic classes for VSI matching handle.
 */
enum ice_status
ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle)
{
	/* Passing the default BW sentinel removes the SRL configuration */
	return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle,
					       ICE_SCHED_DFLT_BW);
}

/**
 * ice_cfg_agg_bw_shared_lmt - configure aggregator BW shared limit
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @bw: bandwidth in Kbps
 *
 * This function configures the shared rate limiter(SRL) of all aggregator type
 * nodes across all traffic classes for aggregator matching agg_id.
 */
enum ice_status
ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw)
{
	return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, bw);
}

/**
 * ice_cfg_agg_bw_no_shared_lmt - configure aggregator BW for no shared limiter
 * @pi: port information structure
 * @agg_id: aggregator ID
 *
 * This function removes the shared rate limiter(SRL) of all aggregator type
 * nodes across all traffic classes for aggregator matching agg_id.
 */
enum ice_status
ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
{
	/* Passing the default BW sentinel removes the SRL configuration */
	return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW);
}

/**
 * ice_cfg_vsi_q_priority - config VSI queue priority of node
 * @pi: port information structure
 * @num_qs: number of VSI queues
 * @q_ids: queue IDs array
 * @q_prio: queue priority array
 *
 * This function configures the queue node priority (Sibling Priority) of the
 * passed in VSI's queue(s) for a given traffic class (TC).
 */
enum ice_status
ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
		       u8 *q_prio)
{
	/* Note: num_qs == 0 returns ICE_ERR_PARAM (loop never runs) */
	enum ice_status status = ICE_ERR_PARAM;
	u16 i;

	ice_acquire_lock(&pi->sched_lock);

	for (i = 0; i < num_qs; i++) {
		struct ice_sched_node *node;

		/* Queue IDs are node TEIDs; only leaf nodes are queues */
		node = ice_sched_find_node_by_teid(pi->root, q_ids[i]);
		if (!node || node->info.data.elem_type !=
		    ICE_AQC_ELEM_TYPE_LEAF) {
			status = ICE_ERR_PARAM;
			break;
		}
		/* Configure Priority */
		status = ice_sched_cfg_sibl_node_prio(pi, node, q_prio[i]);
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_agg_vsi_priority_per_tc - config aggregator's VSI priority per TC
 * @pi: port information structure
 * @agg_id: Aggregator ID
 * @num_vsis: number of VSI(s)
 * @vsi_handle_arr: array of software VSI handles
 * @node_prio: pointer to node priority
 * @tc: traffic class
 *
 * This function configures the node priority (Sibling Priority) of the
 * passed in VSI's for a given traffic class (TC) of an Aggregator ID.
 */
enum ice_status
ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
				u16 num_vsis, u16 *vsi_handle_arr,
				u8 *node_prio, u8 tc)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_node *tc_node, *agg_node;
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	struct ice_hw *hw = pi->hw;
	u16 i;

	ice_acquire_lock(&pi->sched_lock);
	/* Look up the aggregator entry in the HW-wide aggregator list */
	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present)
		goto exit_agg_priority_per_tc;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		goto exit_agg_priority_per_tc;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		goto exit_agg_priority_per_tc;

	/* Can't have more sibling VSIs than the node supports children */
	if (num_vsis > hw->max_children[agg_node->tx_sched_layer])
		goto exit_agg_priority_per_tc;

	for (i = 0; i < num_vsis; i++) {
		struct ice_sched_node *vsi_node;
		bool vsi_handle_valid = false;
		u16 vsi_handle;

		status = ICE_ERR_PARAM;
		vsi_handle = vsi_handle_arr[i];
		if (!ice_is_vsi_valid(hw, vsi_handle))
			goto exit_agg_priority_per_tc;
		/* Verify child nodes before applying settings */
		LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
				    ice_sched_agg_vsi_info, list_entry)
			if (agg_vsi_info->vsi_handle == vsi_handle) {
				/* cppcheck-suppress unreadVariable */
				vsi_handle_valid = true;
				break;
			}

		if (!vsi_handle_valid)
			goto exit_agg_priority_per_tc;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			goto exit_agg_priority_per_tc;

		/* Only touch VSI nodes that live under this aggregator */
		if (ice_sched_find_node_in_subtree(hw, agg_node, vsi_node)) {
			/* Configure Priority */
			status = ice_sched_cfg_sibl_node_prio(pi, vsi_node,
							      node_prio[i]);
			if (status)
				break;
			/* Save for post-reset replay */
			status = ice_sched_save_vsi_prio(pi, vsi_handle, tc,
							 node_prio[i]);
			if (status)
				break;
		}
	}

exit_agg_priority_per_tc:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_bw_alloc - config VSI BW alloc per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @ena_tcmap: enabled TC map
 * @rl_type: Rate limit type CIR/EIR
 * @bw_alloc: Array of BW alloc
 *
 * This function configures the BW allocation of the passed in VSI's
 * node(s) for enabled traffic class.
 */
enum ice_status
ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
		     enum ice_rl_type rl_type, u8 *bw_alloc)
{
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *vsi_node;

		if (!ice_is_tc_ena(ena_tcmap, tc))
			continue;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		status = ice_sched_cfg_node_bw_alloc(pi->hw, vsi_node, rl_type,
						     bw_alloc[tc]);
		if (status)
			break;
		/* Save for post-reset replay */
		status = ice_sched_save_vsi_bw_alloc(pi, vsi_handle, tc,
						     rl_type, bw_alloc[tc]);
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_agg_bw_alloc - config aggregator BW alloc
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @ena_tcmap: enabled TC map
 * @rl_type: rate limit type CIR/EIR
 * @bw_alloc: array of BW alloc
 *
 * This function configures the BW allocation of passed in aggregator for
 * enabled traffic class(s).
 */
enum ice_status
ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap,
		     enum ice_rl_type rl_type, u8 *bw_alloc)
{
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u8 tc;

	ice_acquire_lock(&pi->sched_lock);
	/* Look up the aggregator entry in the HW-wide aggregator list */
	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present) {
		status = ICE_ERR_PARAM;
		goto exit_cfg_agg_bw_alloc;
	}

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *agg_node;

		if (!ice_is_tc_ena(ena_tcmap, tc))
			continue;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
		if (!agg_node)
			continue;

		status = ice_sched_cfg_node_bw_alloc(hw, agg_node, rl_type,
						     bw_alloc[tc]);
		if (status)
			break;
		/* Save for post-reset replay */
		status = ice_sched_save_agg_bw_alloc(pi, agg_id, tc, rl_type,
						     bw_alloc[tc]);
		if (status)
			break;
	}

exit_cfg_agg_bw_alloc:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
 * @hw: pointer to the HW struct
 * @bw: bandwidth in Kbps
 *
 * This function calculates the wakeup parameter of RL profile.
 */
static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
{
	s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
	s32 wakeup_f_int;
	u16 wakeup = 0;

	/* Get the wakeup integer value */
	bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE);
	wakeup_int = DIV_64BIT(hw->psm_clk_freq, bytes_per_sec);
	if (wakeup_int > 63) {
		/* Large interval: integer-only encoding with the high bit set
		 * (bit 15 appears to select integer mode - see HW spec)
		 */
		wakeup = (u16)((1 << 15) | wakeup_int);
	} else {
		/* Calculate fraction value up to 4 decimals
		 * Convert Integer value to a constant multiplier
		 */
		wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
		wakeup_a = DIV_64BIT((s64)ICE_RL_PROF_MULTIPLIER *
				     hw->psm_clk_freq, bytes_per_sec);

		/* Get Fraction value */
		wakeup_f = wakeup_a - wakeup_b;

		/* Round up the Fractional value via Ceil(Fractional value) */
		if (wakeup_f > DIV_64BIT(ICE_RL_PROF_MULTIPLIER, 2))
			wakeup_f += 1;

		wakeup_f_int = (s32)DIV_64BIT(wakeup_f * ICE_RL_PROF_FRACTION,
					      ICE_RL_PROF_MULTIPLIER);
		/* Pack: integer part in bits 15:9, fraction in bits 8:0 */
		wakeup |= (u16)(wakeup_int << 9);
		wakeup |= (u16)(0x1ff & wakeup_f_int);
	}

	return wakeup;
}

/**
 * ice_sched_bw_to_rl_profile - convert BW to profile parameters
 * @hw: pointer to the HW struct
 * @bw: bandwidth in Kbps
 * @profile: profile parameters to return
 *
 * This function converts the BW to profile structure format.
 */
static enum ice_status
ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
			   struct ice_aqc_rl_profile_elem *profile)
{
	enum ice_status status = ICE_ERR_PARAM;
	s64 bytes_per_sec, ts_rate, mv_tmp;
	bool found = false;
	s32 encode = 0;
	s64 mv = 0;
	s32 i;

	/* Bw settings range is from 0.5Mb/sec to 100Gb/sec */
	if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
		return status;

	/* Bytes per second from Kbps */
	bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE);

	/* encode is 6 bits but really useful are 5 bits */
	for (i = 0; i < 64; i++) {
		u64 pow_result = BIT_ULL(i);

		/* Timeslice rate for this encoding; skip degenerate values */
		ts_rate = DIV_64BIT((s64)hw->psm_clk_freq,
				    pow_result * ICE_RL_PROF_TS_MULTIPLIER);
		if (ts_rate <= 0)
			continue;

		/* Multiplier value */
		mv_tmp = DIV_64BIT(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
				   ts_rate);

		/* Round to the nearest ICE_RL_PROF_MULTIPLIER */
		mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);

		/* First multiplier value greater than the given
		 * accuracy bytes
		 */
		if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
			encode = i;
			found = true;
			break;
		}
	}
	if (found) {
		u16 wm;

		wm = ice_sched_calc_wakeup(hw, bw);
		profile->rl_multiply = CPU_TO_LE16(mv);
		profile->wake_up_calc = CPU_TO_LE16(wm);
		profile->rl_encode = CPU_TO_LE16(encode);
		status = ICE_SUCCESS;
	} else {
		status = ICE_ERR_DOES_NOT_EXIST;
	}

	return status;
}

/**
 * ice_sched_add_rl_profile - add RL profile
 * @pi: port information structure
 * @rl_type: type of rate limit BW - min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 * @layer_num: specifies in which layer to create profile
 *
 * This function first checks the existing list for corresponding BW
 * parameter.
 * If it exists, it returns the associated profile otherwise
 * it creates a new rate limit profile for requested BW, and adds it to
 * the HW DB and local list. It returns the new profile or null on error.
 * The caller needs to hold the scheduler lock.
 */
static struct ice_aqc_rl_profile_info *
ice_sched_add_rl_profile(struct ice_port_info *pi,
			 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	u16 profiles_added = 0, num_profiles = 1;
	struct ice_aqc_rl_profile_elem *buf;
	enum ice_status status;
	struct ice_hw *hw;
	u8 profile_type;

	if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
		return NULL;
	switch (rl_type) {
	case ICE_MIN_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
		break;
	case ICE_MAX_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
		break;
	case ICE_SHARED_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
		break;
	default:
		return NULL;
	}

	if (!pi)
		return NULL;
	hw = pi->hw;
	/* Reuse an existing profile of the same type and BW if present */
	LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num],
			    ice_aqc_rl_profile_info, list_entry)
		if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
		    profile_type && rl_prof_elem->bw == bw)
			/* Return existing profile ID info */
			return rl_prof_elem;

	/* Create new profile ID */
	rl_prof_elem = (struct ice_aqc_rl_profile_info *)
		ice_malloc(hw, sizeof(*rl_prof_elem));

	if (!rl_prof_elem)
		return NULL;

	status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
	if (status != ICE_SUCCESS)
		goto exit_add_rl_prof;

	rl_prof_elem->bw = bw;
	/* layer_num is zero relative, and fw expects level from 1 to 9 */
	rl_prof_elem->profile.level = layer_num + 1;
	rl_prof_elem->profile.flags = profile_type;
	rl_prof_elem->profile.max_burst_size = CPU_TO_LE16(hw->max_burst_size);

	/* Create new entry in HW DB */
	buf = &rl_prof_elem->profile;
	status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
				       &profiles_added, NULL);
	if (status || profiles_added != num_profiles)
		goto exit_add_rl_prof;

	/* Good entry - add in the list */
	rl_prof_elem->prof_id_ref = 0;
	LIST_ADD(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
	return rl_prof_elem;

exit_add_rl_prof:
	/* On any failure, free the element to avoid a leak */
	ice_free(hw, rl_prof_elem);
	return NULL;
}

/**
 * ice_sched_cfg_node_bw_lmt - configure node sched params
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @rl_type: rate limit type CIR, EIR, or shared
 * @rl_prof_id: rate limit profile ID
 *
 * This function configures node element's BW limit.
 */
static enum ice_status
ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
			  enum ice_rl_type rl_type, u16 rl_prof_id)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;

	/* Work on a local copy; only commit via ice_sched_update_elem */
	buf = node->info;
	data = &buf.data;
	switch (rl_type) {
	case ICE_MIN_BW:
		data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
		data->cir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
		break;
	case ICE_MAX_BW:
		/* EIR BW and Shared BW profiles are mutually exclusive and
		 * hence only one of them may be set for any given element
		 */
		if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
			return ICE_ERR_CFG;
		data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
		data->eir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
		break;
	case ICE_SHARED_BW:
		/* Check for removing shared BW */
		if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
			/* remove shared profile */
			data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
			data->srl_id = 0; /* clear SRL field */

			/* enable back EIR to default profile */
			data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
			data->eir_bw.bw_profile_idx =
				CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
			break;
		}
		/* EIR BW and Shared BW profiles are mutually exclusive and
		 * hence only one of them may be set for any given element
		 */
		if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
		    (LE16_TO_CPU(data->eir_bw.bw_profile_idx) !=
		     ICE_SCHED_DFLT_RL_PROF_ID))
			return ICE_ERR_CFG;
		/* EIR BW is set to default, disable it */
		data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
		/* Okay to enable shared BW now */
		data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
		data->srl_id = CPU_TO_LE16(rl_prof_id);
		break;
	default:
		/* Unknown rate limit type */
		return ICE_ERR_PARAM;
	}

	/* Configure element */
	return ice_sched_update_elem(hw, node, &buf);
}

/**
 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
 * @node: sched node
 * @rl_type: rate limit type
 *
 * If existing profile matches, it returns the corresponding rate
 * limit profile ID, otherwise it returns an invalid ID as error.
 */
static u16
ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
			      enum ice_rl_type rl_type)
{
	u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
	struct ice_aqc_txsched_elem *data;

	/* A profile ID is only meaningful if its section is marked valid */
	data = &node->info.data;
	switch (rl_type) {
	case ICE_MIN_BW:
		if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
			rl_prof_id = LE16_TO_CPU(data->cir_bw.bw_profile_idx);
		break;
	case ICE_MAX_BW:
		if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
			rl_prof_id = LE16_TO_CPU(data->eir_bw.bw_profile_idx);
		break;
	case ICE_SHARED_BW:
		if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
			rl_prof_id = LE16_TO_CPU(data->srl_id);
		break;
	default:
		break;
	}

	return rl_prof_id;
}

/**
 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
 * @pi: port information structure
 * @rl_type: type of rate limit BW - min, max, or shared
 * @layer_index: layer index
 *
 * This function returns requested profile creation layer.
 */
static u8
ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
			    u8 layer_index)
{
	struct ice_hw *hw = pi->hw;

	if (layer_index >= hw->num_tx_sched_layers)
		return ICE_SCHED_INVAL_LAYER_NUM;
	switch (rl_type) {
	case ICE_MIN_BW:
		if (hw->layer_info[layer_index].max_cir_rl_profiles)
			return layer_index;
		break;
	case ICE_MAX_BW:
		if (hw->layer_info[layer_index].max_eir_rl_profiles)
			return layer_index;
		break;
	case ICE_SHARED_BW:
		/* if current layer doesn't support SRL profile creation
		 * then try a layer up or down.
		 */
		if (hw->layer_info[layer_index].max_srl_profiles)
			return layer_index;
		else if (layer_index < hw->num_tx_sched_layers - 1 &&
			 hw->layer_info[layer_index + 1].max_srl_profiles)
			return layer_index + 1;
		else if (layer_index > 0 &&
			 hw->layer_info[layer_index - 1].max_srl_profiles)
			return layer_index - 1;
		break;
	default:
		break;
	}
	return ICE_SCHED_INVAL_LAYER_NUM;
}

/**
 * ice_sched_get_srl_node - get shared rate limit node
 * @node: tree node
 * @srl_layer: shared rate limit layer
 *
 * This function returns SRL node to be used for shared rate limit purpose.
 * The caller needs to hold scheduler lock.
 */
static struct ice_sched_node *
ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
{
	/* Pick the adjacent node in the SRL layer: first child when the SRL
	 * layer is below, parent when above, the node itself when it matches.
	 */
	if (srl_layer > node->tx_sched_layer)
		return node->children[0];
	else if (srl_layer < node->tx_sched_layer)
		/* Node can't be created without a parent. It will always
		 * have a valid parent except root node.
		 */
		return node->parent;
	else
		return node;
}

/**
 * ice_sched_rm_rl_profile - remove RL profile ID
 * @pi: port information structure
 * @layer_num: layer number where profiles are saved
 * @profile_type: profile type like EIR, CIR, or SRL
 * @profile_id: profile ID to remove
 *
 * This function removes rate limit profile from layer 'layer_num' of type
 * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
 * scheduler lock.
 */
static enum ice_status
ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
			u16 profile_id)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	enum ice_status status = ICE_SUCCESS;

	if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
		return ICE_ERR_PARAM;
	/* Check the existing list for RL profile */
	LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num],
			    ice_aqc_rl_profile_info, list_entry)
		if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
		    profile_type &&
		    LE16_TO_CPU(rl_prof_elem->profile.profile_id) ==
		    profile_id) {
			/* Drop this caller's reference before attempting
			 * removal; the profile is only freed when no
			 * references remain.
			 */
			if (rl_prof_elem->prof_id_ref)
				rl_prof_elem->prof_id_ref--;

			/* Remove old profile ID from database */
			status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
			if (status && status != ICE_ERR_IN_USE)
				ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
			break;
		}
	/* A profile still referenced by other nodes is not an error from
	 * this caller's point of view.
	 */
	if (status == ICE_ERR_IN_USE)
		status = ICE_SUCCESS;
	return status;
}

/**
 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
 * @pi: port information structure
 * @node: pointer to node structure
 * @rl_type: rate limit type min, max, or shared
 * @layer_num: layer number where RL profiles are saved
 *
 * This function configures node element's BW rate limit profile ID of
 * type CIR, EIR, or SRL to default. This function needs to be called
 * with the scheduler lock held.
 */
static enum ice_status
ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
			   struct ice_sched_node *node,
			   enum ice_rl_type rl_type, u8 layer_num)
{
	enum ice_status status;
	struct ice_hw *hw;
	u8 profile_type;
	u16 rl_prof_id;
	u16 old_id;

	hw = pi->hw;
	switch (rl_type) {
	case ICE_MIN_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
		rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
		break;
	case ICE_MAX_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
		rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
		break;
	case ICE_SHARED_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
		/* No SRL is configured for default case */
		rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	/* Save existing RL prof ID for later clean up */
	old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
	/* Configure BW scheduling parameters */
	status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
	if (status)
		return status;

	/* Remove stale RL profile ID; nothing to clean up if the node was
	 * already at the default or had no valid profile.
	 */
	if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
	    old_id == ICE_SCHED_INVAL_PROF_ID)
		return ICE_SUCCESS;

	return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
}

/**
 * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
 * @pi: port information structure
 * @node: pointer to node structure
 * @layer_num: layer number where rate limit profiles are saved
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth value
 *
 * This function prepares node element's bandwidth to SRL or EIR exclusively.
 * EIR BW and Shared BW profiles are mutually exclusive and hence only one of
 * them may be set for any given element. This function needs to be called
 * with the scheduler lock held.
 */
static enum ice_status
ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
			   struct ice_sched_node *node,
			   u8 layer_num, enum ice_rl_type rl_type, u32 bw)
{
	if (rl_type == ICE_SHARED_BW) {
		/* SRL node passed in this case, it may be different node */
		if (bw == ICE_SCHED_DFLT_BW)
			/* SRL being removed, ice_sched_cfg_node_bw_lmt()
			 * enables EIR to default. EIR is not set in this
			 * case, so no additional action is required.
			 */
			return ICE_SUCCESS;

		/* SRL being configured, set EIR to default here.
		 * ice_sched_cfg_node_bw_lmt() disables EIR when it
		 * configures SRL
		 */
		return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
						  layer_num);
	} else if (rl_type == ICE_MAX_BW &&
		   node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
		/* Remove Shared profile. Set default shared BW call
		 * removes shared profile for a node.
		 */
		return ice_sched_set_node_bw_dflt(pi, node,
						  ICE_SHARED_BW,
						  layer_num);
	}
	/* CIR (min BW) does not conflict with either profile type */
	return ICE_SUCCESS;
}

/**
 * ice_sched_set_node_bw - set node's bandwidth
 * @pi: port information structure
 * @node: tree node
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 * @layer_num: layer number
 *
 * This function adds new profile corresponding to requested BW, configures
 * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
 * ID from local database. The caller needs to hold scheduler lock.
 */
static enum ice_status
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
		      enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_info;
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_hw *hw = pi->hw;
	u16 old_id, rl_prof_id;

	rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
	if (!rl_prof_info)
		return status;

	rl_prof_id = LE16_TO_CPU(rl_prof_info->profile.profile_id);

	/* Save existing RL prof ID for later clean up */
	old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
	/* Configure BW scheduling parameters */
	status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
	if (status)
		return status;

	/* New changes have been applied */
	/* Increment the profile ID reference count */
	rl_prof_info->prof_id_ref++;

	/* Check for old ID removal: skip when the node had no real profile
	 * before, or when it is being re-pointed at the same profile.
	 */
	if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
	    old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
		return ICE_SUCCESS;

	return ice_sched_rm_rl_profile(pi, layer_num,
				       rl_prof_info->profile.flags &
				       ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}

/**
 * ice_sched_set_node_bw_lmt - set node's BW limit
 * @pi: port information structure
 * @node: tree node
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * It updates node's BW limit parameters like BW RL profile ID of type CIR,
 * EIR, or SRL. The caller needs to hold scheduler lock.
 */
static enum ice_status
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
			  enum ice_rl_type rl_type, u32 bw)
{
	struct ice_sched_node *cfg_node = node;
	enum ice_status status;
	struct ice_hw *hw;
	u8 layer_num;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	/* Remove unused RL profile IDs from HW and SW DB */
	ice_sched_rm_unused_rl_prof(pi);
	layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
						node->tx_sched_layer);
	if (layer_num >= hw->num_tx_sched_layers)
		return ICE_ERR_PARAM;

	if (rl_type == ICE_SHARED_BW) {
		/* SRL node may be different */
		cfg_node = ice_sched_get_srl_node(node, layer_num);
		if (!cfg_node)
			return ICE_ERR_CFG;
	}
	/* EIR BW and Shared BW profiles are mutually exclusive and
	 * hence only one of them may be set for any given element
	 */
	status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
					    bw);
	if (status)
		return status;
	if (bw == ICE_SCHED_DFLT_BW)
		return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
						  layer_num);
	return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
}

/**
 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
 * @pi: port information structure
 * @node: pointer to node structure
 * @rl_type: rate limit type min, max, or shared
 *
 * This function configures node element's BW rate limit profile ID of
 * type CIR, EIR, or SRL to default. This function needs to be called
 * with the scheduler lock held.
 */
static enum ice_status
ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
			       struct ice_sched_node *node,
			       enum ice_rl_type rl_type)
{
	/* Thin convenience wrapper around ice_sched_set_node_bw_lmt() */
	return ice_sched_set_node_bw_lmt(pi, node, rl_type,
					 ICE_SCHED_DFLT_BW);
}

/**
 * ice_sched_validate_srl_node - Check node for SRL applicability
 * @node: sched node to configure
 * @sel_layer: selected SRL layer
 *
 * This function checks if the SRL can be applied to a selected layer node on
 * behalf of the requested node (first argument). This function needs to be
 * called with scheduler lock held.
 */
static enum ice_status
ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
{
	/* SRL profiles are not available on all layers. Check if the
	 * SRL profile can be applied to a node above or below the
	 * requested node. SRL configuration is possible only if the
	 * selected layer's node has single child.
	 */
	if (sel_layer == node->tx_sched_layer ||
	    ((sel_layer == node->tx_sched_layer + 1) &&
	    node->num_children == 1) ||
	    ((sel_layer == node->tx_sched_layer - 1) &&
	    (node->parent && node->parent->num_children == 1)))
		return ICE_SUCCESS;

	return ICE_ERR_CFG;
}

/**
 * ice_sched_save_q_bw - save queue node's BW information
 * @q_ctx: queue context structure
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save BW information of queue type node for post replay use.
 */
static enum ice_status
ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
{
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
		break;
	case ICE_SHARED_BW:
		ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
		break;
	default:
		return ICE_ERR_PARAM;
	}
	return ICE_SUCCESS;
}

/**
 * ice_sched_set_q_bw_lmt - sets queue BW limit
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @q_handle: software queue handle
 * @rl_type: min, max, or shared
 * @bw: bandwidth in Kbps
 *
 * This function sets BW limit of queue scheduling node.
 */
static enum ice_status
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		       u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_node *node;
	struct ice_q_ctx *q_ctx;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	ice_acquire_lock(&pi->sched_lock);
	q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
	if (!q_ctx)
		goto exit_q_bw_lmt;
	node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
	if (!node) {
		ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
		goto exit_q_bw_lmt;
	}

	/* Return error if it is not a leaf node */
	if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
		goto exit_q_bw_lmt;

	/* SRL bandwidth layer selection */
	if (rl_type == ICE_SHARED_BW) {
		u8 sel_layer; /* selected layer */

		sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
							node->tx_sched_layer);
		if (sel_layer >= pi->hw->num_tx_sched_layers) {
			status = ICE_ERR_PARAM;
			goto exit_q_bw_lmt;
		}
		status = ice_sched_validate_srl_node(node, sel_layer);
		if (status)
			goto exit_q_bw_lmt;
	}

	if (bw == ICE_SCHED_DFLT_BW)
		status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
	else
		status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);

	/* Save the new BW for replay after reset */
	if (!status)
		status = ice_sched_save_q_bw(q_ctx, rl_type, bw);

exit_q_bw_lmt:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_q_bw_lmt - configure queue BW limit
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @q_handle: software queue handle
 * @rl_type: min, max, or shared
 * @bw: bandwidth in Kbps
 *
 * This function configures BW limit of queue scheduling node.
 */
enum ice_status
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
	return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
				      bw);
}

/**
 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @q_handle: software queue handle
 * @rl_type: min, max, or shared
 *
 * This function configures BW default limit of queue scheduling node.
 */
enum ice_status
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 q_handle, enum ice_rl_type rl_type)
{
	return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
				      ICE_SCHED_DFLT_BW);
}

/**
 * ice_sched_save_tc_node_bw - save TC node BW limit
 * @pi: port information structure
 * @tc: TC number
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function saves the modified values of bandwidth settings for later
 * replay purpose (restore) after reset.
 */
static enum ice_status
ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc,
			  enum ice_rl_type rl_type, u32 bw)
{
	if (tc >= ICE_MAX_TRAFFIC_CLASS)
		return ICE_ERR_PARAM;
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw(&pi->tc_node_bw_t_info[tc], bw);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw(&pi->tc_node_bw_t_info[tc], bw);
		break;
	case ICE_SHARED_BW:
		ice_set_clear_shared_bw(&pi->tc_node_bw_t_info[tc], bw);
		break;
	default:
		return ICE_ERR_PARAM;
	}
	return ICE_SUCCESS;
}

/**
 * ice_sched_set_tc_node_bw_lmt - sets TC node BW limit
 * @pi: port information structure
 * @tc: TC number
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function configures bandwidth limit of TC node.
 */
static enum ice_status
ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
			     enum ice_rl_type rl_type, u32 bw)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_node *tc_node;

	if (tc >= ICE_MAX_TRAFFIC_CLASS)
		return status;
	ice_acquire_lock(&pi->sched_lock);
	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		goto exit_set_tc_node_bw;
	if (bw == ICE_SCHED_DFLT_BW)
		status = ice_sched_set_node_bw_dflt_lmt(pi, tc_node, rl_type);
	else
		status = ice_sched_set_node_bw_lmt(pi, tc_node, rl_type, bw);
	/* Save the new BW for replay after reset */
	if (!status)
		status = ice_sched_save_tc_node_bw(pi, tc, rl_type, bw);

exit_set_tc_node_bw:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_tc_node_bw_lmt - configure TC node BW limit
 * @pi: port information structure
 * @tc: TC number
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function configures BW limit of TC node.
 * Note: The minimum guaranteed reservation is done via DCBX.
 */
enum ice_status
ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
		       enum ice_rl_type rl_type, u32 bw)
{
	return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, bw);
}

/**
 * ice_cfg_tc_node_bw_dflt_lmt - configure TC node BW default limit
 * @pi: port information structure
 * @tc: TC number
 * @rl_type: min or max
 *
 * This function configures BW default limit of TC node.
 */
enum ice_status
ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc,
			    enum ice_rl_type rl_type)
{
	return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, ICE_SCHED_DFLT_BW);
}

/**
 * ice_sched_save_tc_node_bw_alloc - save TC node's BW alloc information
 * @pi: port information structure
 * @tc: traffic class
 * @rl_type: rate limit type min or max
 * @bw_alloc: Bandwidth allocation information
 *
 * Save BW alloc information of VSI type node for post replay use.
 */
static enum ice_status
ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
				enum ice_rl_type rl_type, u16 bw_alloc)
{
	if (tc >= ICE_MAX_TRAFFIC_CLASS)
		return ICE_ERR_PARAM;
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw_alloc(&pi->tc_node_bw_t_info[tc],
					   bw_alloc);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw_alloc(&pi->tc_node_bw_t_info[tc],
					   bw_alloc);
		break;
	default:
		/* BW alloc applies to CIR/EIR only, not shared */
		return ICE_ERR_PARAM;
	}
	return ICE_SUCCESS;
}

/**
 * ice_sched_set_tc_node_bw_alloc - set TC node BW alloc
 * @pi: port information structure
 * @tc: TC number
 * @rl_type: min or max
 * @bw_alloc: bandwidth alloc
 *
 * This function configures bandwidth alloc of TC node, also saves the
 * changed settings for replay purpose, and return success if it succeeds
 * in modifying bandwidth alloc setting.
 */
static enum ice_status
ice_sched_set_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
			       enum ice_rl_type rl_type, u8 bw_alloc)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_node *tc_node;

	if (tc >= ICE_MAX_TRAFFIC_CLASS)
		return status;
	ice_acquire_lock(&pi->sched_lock);
	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		goto exit_set_tc_node_bw_alloc;
	status = ice_sched_cfg_node_bw_alloc(pi->hw, tc_node, rl_type,
					     bw_alloc);
	if (status)
		goto exit_set_tc_node_bw_alloc;
	/* Save the new BW alloc for replay after reset */
	status = ice_sched_save_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc);

exit_set_tc_node_bw_alloc:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_tc_node_bw_alloc - configure TC node BW alloc
 * @pi: port information structure
 * @tc: TC number
 * @rl_type: min or max
 * @bw_alloc: bandwidth alloc
 *
 * This function configures BW alloc of TC node.
 * Note: The minimum guaranteed reservation is done via DCBX.
 */
enum ice_status
ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
			 enum ice_rl_type rl_type, u8 bw_alloc)
{
	return ice_sched_set_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc);
}

/**
 * ice_sched_set_agg_bw_dflt_lmt - set aggregator node's BW limit to default
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function retrieves the aggregator ID based on VSI ID and TC,
 * and sets node's BW limit to default. This function needs to be
 * called with the scheduler lock held.
4733 */ 4734 enum ice_status 4735 ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle) 4736 { 4737 struct ice_vsi_ctx *vsi_ctx; 4738 enum ice_status status = ICE_SUCCESS; 4739 u8 tc; 4740 4741 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4742 return ICE_ERR_PARAM; 4743 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 4744 if (!vsi_ctx) 4745 return ICE_ERR_PARAM; 4746 4747 ice_for_each_traffic_class(tc) { 4748 struct ice_sched_node *node; 4749 4750 node = vsi_ctx->sched.ag_node[tc]; 4751 if (!node) 4752 continue; 4753 4754 /* Set min profile to default */ 4755 status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MIN_BW); 4756 if (status) 4757 break; 4758 4759 /* Set max profile to default */ 4760 status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MAX_BW); 4761 if (status) 4762 break; 4763 4764 /* Remove shared profile, if there is one */ 4765 status = ice_sched_set_node_bw_dflt_lmt(pi, node, 4766 ICE_SHARED_BW); 4767 if (status) 4768 break; 4769 } 4770 4771 return status; 4772 } 4773 4774 /** 4775 * ice_sched_get_node_by_id_type - get node from ID type 4776 * @pi: port information structure 4777 * @id: identifier 4778 * @agg_type: type of aggregator 4779 * @tc: traffic class 4780 * 4781 * This function returns node identified by ID of type aggregator, and 4782 * based on traffic class (TC). This function needs to be called with 4783 * the scheduler lock held. 
 */
static struct ice_sched_node *
ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
			      enum ice_agg_type agg_type, u8 tc)
{
	struct ice_sched_node *node = NULL;
	struct ice_sched_node *child_node;

	switch (agg_type) {
	case ICE_AGG_TYPE_VSI: {
		struct ice_vsi_ctx *vsi_ctx;
		/* For the VSI case 'id' carries a software VSI handle */
		u16 vsi_handle = (u16)id;

		if (!ice_is_vsi_valid(pi->hw, vsi_handle))
			break;
		/* Get sched_vsi_info */
		vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
		if (!vsi_ctx)
			break;
		node = vsi_ctx->sched.vsi_node[tc];
		break;
	}

	case ICE_AGG_TYPE_AGG: {
		struct ice_sched_node *tc_node;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (tc_node)
			node = ice_sched_get_agg_node(pi, tc_node, id);
		break;
	}

	case ICE_AGG_TYPE_Q:
		/* The current implementation allows single queue to modify */
		node = ice_sched_get_node(pi, id);
		break;

	case ICE_AGG_TYPE_QG:
		/* The current implementation allows single qg to modify */
		child_node = ice_sched_get_node(pi, id);
		if (!child_node)
			break;
		node = child_node->parent;
		break;

	default:
		/* Unknown aggregator type - return NULL */
		break;
	}

	return node;
}

/**
 * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC
 * @pi: port information structure
 * @id: ID (software VSI handle or AGG ID)
 * @agg_type: aggregator type (VSI or AGG type node)
 * @tc: traffic class
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function sets BW limit of VSI or Aggregator scheduling node
 * based on TC information from passed in argument BW.
 */
enum ice_status
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
				 enum ice_agg_type agg_type, u8 tc,
				 enum ice_rl_type rl_type, u32 bw)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_node *node;

	if (!pi)
		return status;

	if (rl_type == ICE_UNKNOWN_BW)
		return status;

	ice_acquire_lock(&pi->sched_lock);
	node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc);
	if (!node) {
		ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n");
		goto exit_set_node_bw_lmt_per_tc;
	}
	if (bw == ICE_SCHED_DFLT_BW)
		status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
	else
		status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);

exit_set_node_bw_lmt_per_tc:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_sched_validate_vsi_srl_node - validate VSI SRL node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function validates SRL node of the VSI node if available SRL layer is
 * different than the VSI node layer on all TC(s). This function needs to be
 * called with scheduler lock held.
 */
static enum ice_status
ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
{
	u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *vsi_node;
		enum ice_rl_type rl_type = ICE_SHARED_BW;
		enum ice_status status;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		/* SRL bandwidth layer selection - resolved once on the
		 * first TC that has a VSI node, then reused for the rest
		 */
		if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) {
			u8 node_layer = vsi_node->tx_sched_layer;
			u8 layer_num;

			layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
								node_layer);
			if (layer_num >= pi->hw->num_tx_sched_layers)
				return ICE_ERR_PARAM;
			sel_layer = layer_num;
		}

		status = ice_sched_validate_srl_node(vsi_node, sel_layer);
		if (status)
			return status;
	}
	return ICE_SUCCESS;
}

/**
 * ice_sched_set_vsi_bw_shared_lmt - set VSI BW shared limit
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @bw: bandwidth in Kbps
 *
 * This function configures shared rate limiter(SRL) of all VSI type nodes
 * across all traffic classes for VSI matching handle. When BW value of
 * ICE_SCHED_DFLT_BW is passed, it removes the SRL from the node.
 */
enum ice_status
ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
				u32 bw)
{
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	if (!pi)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);
	status = ice_sched_validate_vsi_srl_node(pi, vsi_handle);
	if (status)
		goto exit_set_vsi_bw_shared_lmt;
	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *vsi_node;
		enum ice_rl_type rl_type = ICE_SHARED_BW;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		if (bw == ICE_SCHED_DFLT_BW)
			/* It removes existing SRL from the node */
			status = ice_sched_set_node_bw_dflt_lmt(pi, vsi_node,
								rl_type);
		else
			status = ice_sched_set_node_bw_lmt(pi, vsi_node,
							   rl_type, bw);
		if (status)
			break;
		/* Save the new BW for replay after reset */
		status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
		if (status)
			break;
	}

exit_set_vsi_bw_shared_lmt:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_sched_validate_agg_srl_node - validate AGG SRL node
 * @pi: port information structure
 * @agg_id: aggregator ID
 *
 * This function validates SRL node of the AGG node if available SRL layer is
 * different than the AGG node layer on all TC(s). This function needs to be
 * called with scheduler lock held.
 */
static enum ice_status
ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id)
{
	u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	/* Make sure the aggregator ID is known before validating nodes */
	LIST_FOR_EACH_ENTRY(agg_info, &pi->hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present)
		return ICE_ERR_PARAM;
	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *agg_node;
		enum ice_rl_type rl_type = ICE_SHARED_BW;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
		if (!agg_node)
			continue;
		/* SRL bandwidth layer selection - resolved once on the
		 * first TC that has an AGG node, then reused for the rest
		 */
		if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) {
			u8 node_layer = agg_node->tx_sched_layer;
			u8 layer_num;

			layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
								node_layer);
			if (layer_num >= pi->hw->num_tx_sched_layers)
				return ICE_ERR_PARAM;
			sel_layer = layer_num;
		}

		status = ice_sched_validate_srl_node(agg_node, sel_layer);
		if (status)
			break;
	}
	return status;
}

/**
 * ice_sched_set_agg_bw_shared_lmt - set aggregator BW shared limit
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @bw: bandwidth in Kbps
 *
 * This function configures the shared rate limiter(SRL) of all aggregator type
 * nodes across all traffic classes for aggregator matching agg_id. When
 * BW value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the
 * node(s).
 */
enum ice_status
ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *tmp;
	bool agg_id_present = false;
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	if (!pi)
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);
	status = ice_sched_validate_agg_srl_node(pi, agg_id);
	if (status)
		goto exit_agg_bw_shared_lmt;

	LIST_FOR_EACH_ENTRY_SAFE(agg_info, tmp, &pi->hw->agg_list,
				 ice_sched_agg_info, list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}

	if (!agg_id_present) {
		status = ICE_ERR_PARAM;
		goto exit_agg_bw_shared_lmt;
	}

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		enum ice_rl_type rl_type = ICE_SHARED_BW;
		struct ice_sched_node *tc_node, *agg_node;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
		if (!agg_node)
			continue;

		if (bw == ICE_SCHED_DFLT_BW)
			/* It removes existing SRL from the node */
			status = ice_sched_set_node_bw_dflt_lmt(pi, agg_node,
								rl_type);
		else
			status = ice_sched_set_node_bw_lmt(pi, agg_node,
							   rl_type, bw);
		if (status)
			break;
		/* Save the new BW for replay after reset */
		status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
		if (status)
			break;
	}

exit_agg_bw_shared_lmt:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_sched_cfg_sibl_node_prio - configure node sibling priority
 * @pi: port information structure
 * @node: sched node to configure
 * @priority: sibling priority
 *
 * This function configures node element's sibling priority only. This
 * function needs to be called with scheduler lock held.
 */
enum ice_status
ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
			     struct ice_sched_node *node, u8 priority)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	if (!hw)
		return ICE_ERR_PARAM;
	/* Work on a local copy of the element data so the node's cached
	 * info is only updated by the element-update helper on success.
	 */
	buf = node->info;
	data = &buf.data;
	data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
	/* Shift priority into its field and replace the old value */
	priority = (priority << ICE_AQC_ELEM_GENERIC_PRIO_S) &
		   ICE_AQC_ELEM_GENERIC_PRIO_M;
	data->generic &= ~ICE_AQC_ELEM_GENERIC_PRIO_M;
	data->generic |= priority;

	/* Configure element */
	status = ice_sched_update_elem(hw, node, &buf);
	return status;
}

/**
 * ice_cfg_rl_burst_size - Set burst size value
 * @hw: pointer to the HW struct
 * @bytes: burst size in bytes
 *
 * This function configures/set the burst size to requested new value. The new
 * burst size value is used for future rate limit calls. It doesn't change the
 * existing or previously created RL profiles.
5158 */ 5159 enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) 5160 { 5161 u16 burst_size_to_prog; 5162 5163 if (bytes < ICE_MIN_BURST_SIZE_ALLOWED || 5164 bytes > ICE_MAX_BURST_SIZE_ALLOWED) 5165 return ICE_ERR_PARAM; 5166 if (ice_round_to_num(bytes, 64) <= 5167 ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) { 5168 /* 64 byte granularity case */ 5169 /* Disable MSB granularity bit */ 5170 burst_size_to_prog = ICE_64_BYTE_GRANULARITY; 5171 /* round number to nearest 64 byte granularity */ 5172 bytes = ice_round_to_num(bytes, 64); 5173 /* The value is in 64 byte chunks */ 5174 burst_size_to_prog |= (u16)(bytes / 64); 5175 } else { 5176 /* k bytes granularity case */ 5177 /* Enable MSB granularity bit */ 5178 burst_size_to_prog = ICE_KBYTE_GRANULARITY; 5179 /* round number to nearest 1024 granularity */ 5180 bytes = ice_round_to_num(bytes, 1024); 5181 /* check rounding doesn't go beyond allowed */ 5182 if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY) 5183 bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY; 5184 /* The value is in k bytes */ 5185 burst_size_to_prog |= (u16)(bytes / 1024); 5186 } 5187 hw->max_burst_size = burst_size_to_prog; 5188 return ICE_SUCCESS; 5189 } 5190 5191 /** 5192 * ice_sched_replay_node_prio - re-configure node priority 5193 * @hw: pointer to the HW struct 5194 * @node: sched node to configure 5195 * @priority: priority value 5196 * 5197 * This function configures node element's priority value. It 5198 * needs to be called with scheduler lock held. 
5199 */ 5200 static enum ice_status 5201 ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, 5202 u8 priority) 5203 { 5204 struct ice_aqc_txsched_elem_data buf; 5205 struct ice_aqc_txsched_elem *data; 5206 enum ice_status status; 5207 5208 buf = node->info; 5209 data = &buf.data; 5210 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; 5211 data->generic = priority; 5212 5213 /* Configure element */ 5214 status = ice_sched_update_elem(hw, node, &buf); 5215 return status; 5216 } 5217 5218 /** 5219 * ice_sched_replay_node_bw - replay node(s) BW 5220 * @hw: pointer to the HW struct 5221 * @node: sched node to configure 5222 * @bw_t_info: BW type information 5223 * 5224 * This function restores node's BW from bw_t_info. The caller needs 5225 * to hold the scheduler lock. 5226 */ 5227 static enum ice_status 5228 ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, 5229 struct ice_bw_type_info *bw_t_info) 5230 { 5231 struct ice_port_info *pi = hw->port_info; 5232 enum ice_status status = ICE_ERR_PARAM; 5233 u16 bw_alloc; 5234 5235 if (!node) 5236 return status; 5237 if (!ice_is_any_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT)) 5238 return ICE_SUCCESS; 5239 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_PRIO)) { 5240 status = ice_sched_replay_node_prio(hw, node, 5241 bw_t_info->generic); 5242 if (status) 5243 return status; 5244 } 5245 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR)) { 5246 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, 5247 bw_t_info->cir_bw.bw); 5248 if (status) 5249 return status; 5250 } 5251 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR_WT)) { 5252 bw_alloc = bw_t_info->cir_bw.bw_alloc; 5253 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW, 5254 bw_alloc); 5255 if (status) 5256 return status; 5257 } 5258 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR)) { 5259 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, 5260 bw_t_info->eir_bw.bw); 
5261 if (status) 5262 return status; 5263 } 5264 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR_WT)) { 5265 bw_alloc = bw_t_info->eir_bw.bw_alloc; 5266 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW, 5267 bw_alloc); 5268 if (status) 5269 return status; 5270 } 5271 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_SHARED)) 5272 status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW, 5273 bw_t_info->shared_bw); 5274 return status; 5275 } 5276 5277 /** 5278 * ice_sched_replay_agg_bw - replay aggregator node(s) BW 5279 * @hw: pointer to the HW struct 5280 * @agg_info: aggregator data structure 5281 * 5282 * This function re-creates aggregator type nodes. The caller needs to hold 5283 * the scheduler lock. 5284 */ 5285 static enum ice_status 5286 ice_sched_replay_agg_bw(struct ice_hw *hw, struct ice_sched_agg_info *agg_info) 5287 { 5288 struct ice_sched_node *tc_node, *agg_node; 5289 enum ice_status status = ICE_SUCCESS; 5290 u8 tc; 5291 5292 if (!agg_info) 5293 return ICE_ERR_PARAM; 5294 ice_for_each_traffic_class(tc) { 5295 if (!ice_is_any_bit_set(agg_info->bw_t_info[tc].bw_t_bitmap, 5296 ICE_BW_TYPE_CNT)) 5297 continue; 5298 tc_node = ice_sched_get_tc_node(hw->port_info, tc); 5299 if (!tc_node) { 5300 status = ICE_ERR_PARAM; 5301 break; 5302 } 5303 agg_node = ice_sched_get_agg_node(hw->port_info, tc_node, 5304 agg_info->agg_id); 5305 if (!agg_node) { 5306 status = ICE_ERR_PARAM; 5307 break; 5308 } 5309 status = ice_sched_replay_node_bw(hw, agg_node, 5310 &agg_info->bw_t_info[tc]); 5311 if (status) 5312 break; 5313 } 5314 return status; 5315 } 5316 5317 /** 5318 * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap 5319 * @pi: port info struct 5320 * @tc_bitmap: 8 bits TC bitmap to check 5321 * @ena_tc_bitmap: 8 bits enabled TC bitmap to return 5322 * 5323 * This function returns enabled TC bitmap in variable ena_tc_bitmap. Some TCs 5324 * may be missing, it returns enabled TCs. 
This function needs to be called with 5325 * scheduler lock held. 5326 */ 5327 static void 5328 ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi, ice_bitmap_t *tc_bitmap, 5329 ice_bitmap_t *ena_tc_bitmap) 5330 { 5331 u8 tc; 5332 5333 /* Some TC(s) may be missing after reset, adjust for replay */ 5334 ice_for_each_traffic_class(tc) 5335 if (ice_is_tc_ena(*tc_bitmap, tc) && 5336 (ice_sched_get_tc_node(pi, tc))) 5337 ice_set_bit(tc, ena_tc_bitmap); 5338 } 5339 5340 /** 5341 * ice_sched_replay_agg - recreate aggregator node(s) 5342 * @hw: pointer to the HW struct 5343 * 5344 * This function recreate aggregator type nodes which are not replayed earlier. 5345 * It also replay aggregator BW information. These aggregator nodes are not 5346 * associated with VSI type node yet. 5347 */ 5348 void ice_sched_replay_agg(struct ice_hw *hw) 5349 { 5350 struct ice_port_info *pi = hw->port_info; 5351 struct ice_sched_agg_info *agg_info; 5352 5353 ice_acquire_lock(&pi->sched_lock); 5354 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, 5355 list_entry) 5356 /* replay aggregator (re-create aggregator node) */ 5357 if (!ice_cmp_bitmap(agg_info->tc_bitmap, 5358 agg_info->replay_tc_bitmap, 5359 ICE_MAX_TRAFFIC_CLASS)) { 5360 ice_declare_bitmap(replay_bitmap, 5361 ICE_MAX_TRAFFIC_CLASS); 5362 enum ice_status status; 5363 5364 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5365 ice_sched_get_ena_tc_bitmap(pi, 5366 agg_info->replay_tc_bitmap, 5367 replay_bitmap); 5368 status = ice_sched_cfg_agg(hw->port_info, 5369 agg_info->agg_id, 5370 ICE_AGG_TYPE_AGG, 5371 replay_bitmap); 5372 if (status) { 5373 ice_info(hw, "Replay agg id[%d] failed\n", 5374 agg_info->agg_id); 5375 /* Move on to next one */ 5376 continue; 5377 } 5378 /* Replay aggregator node BW (restore aggregator BW) */ 5379 status = ice_sched_replay_agg_bw(hw, agg_info); 5380 if (status) 5381 ice_info(hw, "Replay agg bw [id=%d] failed\n", 5382 agg_info->agg_id); 5383 } 5384 
ice_release_lock(&pi->sched_lock); 5385 } 5386 5387 /** 5388 * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization 5389 * @hw: pointer to the HW struct 5390 * 5391 * This function initialize aggregator(s) TC bitmap to zero. A required 5392 * preinit step for replaying aggregators. 5393 */ 5394 void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw) 5395 { 5396 struct ice_port_info *pi = hw->port_info; 5397 struct ice_sched_agg_info *agg_info; 5398 5399 ice_acquire_lock(&pi->sched_lock); 5400 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, 5401 list_entry) { 5402 struct ice_sched_agg_vsi_info *agg_vsi_info; 5403 5404 agg_info->tc_bitmap[0] = 0; 5405 LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list, 5406 ice_sched_agg_vsi_info, list_entry) 5407 agg_vsi_info->tc_bitmap[0] = 0; 5408 } 5409 ice_release_lock(&pi->sched_lock); 5410 } 5411 5412 /** 5413 * ice_sched_replay_root_node_bw - replay root node BW 5414 * @pi: port information structure 5415 * 5416 * Replay root node BW settings. 5417 */ 5418 enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi) 5419 { 5420 enum ice_status status = ICE_SUCCESS; 5421 5422 if (!pi->hw) 5423 return ICE_ERR_PARAM; 5424 ice_acquire_lock(&pi->sched_lock); 5425 5426 status = ice_sched_replay_node_bw(pi->hw, pi->root, 5427 &pi->root_node_bw_t_info); 5428 ice_release_lock(&pi->sched_lock); 5429 return status; 5430 } 5431 5432 /** 5433 * ice_sched_replay_tc_node_bw - replay TC node(s) BW 5434 * @pi: port information structure 5435 * 5436 * This function replay TC nodes. 
5437 */ 5438 enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi) 5439 { 5440 enum ice_status status = ICE_SUCCESS; 5441 u8 tc; 5442 5443 if (!pi->hw) 5444 return ICE_ERR_PARAM; 5445 ice_acquire_lock(&pi->sched_lock); 5446 ice_for_each_traffic_class(tc) { 5447 struct ice_sched_node *tc_node; 5448 5449 tc_node = ice_sched_get_tc_node(pi, tc); 5450 if (!tc_node) 5451 continue; /* TC not present */ 5452 status = ice_sched_replay_node_bw(pi->hw, tc_node, 5453 &pi->tc_node_bw_t_info[tc]); 5454 if (status) 5455 break; 5456 } 5457 ice_release_lock(&pi->sched_lock); 5458 return status; 5459 } 5460 5461 /** 5462 * ice_sched_replay_vsi_bw - replay VSI type node(s) BW 5463 * @hw: pointer to the HW struct 5464 * @vsi_handle: software VSI handle 5465 * @tc_bitmap: 8 bits TC bitmap 5466 * 5467 * This function replays VSI type nodes bandwidth. This function needs to be 5468 * called with scheduler lock held. 5469 */ 5470 static enum ice_status 5471 ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle, 5472 ice_bitmap_t *tc_bitmap) 5473 { 5474 struct ice_sched_node *vsi_node, *tc_node; 5475 struct ice_port_info *pi = hw->port_info; 5476 struct ice_bw_type_info *bw_t_info; 5477 struct ice_vsi_ctx *vsi_ctx; 5478 enum ice_status status = ICE_SUCCESS; 5479 u8 tc; 5480 5481 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 5482 if (!vsi_ctx) 5483 return ICE_ERR_PARAM; 5484 ice_for_each_traffic_class(tc) { 5485 if (!ice_is_tc_ena(*tc_bitmap, tc)) 5486 continue; 5487 tc_node = ice_sched_get_tc_node(pi, tc); 5488 if (!tc_node) 5489 continue; 5490 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); 5491 if (!vsi_node) 5492 continue; 5493 bw_t_info = &vsi_ctx->sched.bw_t_info[tc]; 5494 status = ice_sched_replay_node_bw(hw, vsi_node, bw_t_info); 5495 if (status) 5496 break; 5497 } 5498 return status; 5499 } 5500 5501 /** 5502 * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s) 5503 * @hw: pointer to the HW struct 5504 * @vsi_handle: software 
VSI handle 5505 * 5506 * This function replays aggregator node, VSI to aggregator type nodes, and 5507 * their node bandwidth information. This function needs to be called with 5508 * scheduler lock held. 5509 */ 5510 static enum ice_status 5511 ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) 5512 { 5513 ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5514 struct ice_sched_agg_vsi_info *agg_vsi_info; 5515 struct ice_port_info *pi = hw->port_info; 5516 struct ice_sched_agg_info *agg_info; 5517 enum ice_status status; 5518 5519 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5520 if (!ice_is_vsi_valid(hw, vsi_handle)) 5521 return ICE_ERR_PARAM; 5522 agg_info = ice_get_vsi_agg_info(hw, vsi_handle); 5523 if (!agg_info) 5524 return ICE_SUCCESS; /* Not present in list - default Agg case */ 5525 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); 5526 if (!agg_vsi_info) 5527 return ICE_SUCCESS; /* Not present in list - default Agg case */ 5528 ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap, 5529 replay_bitmap); 5530 /* Replay aggregator node associated to vsi_handle */ 5531 status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id, 5532 ICE_AGG_TYPE_AGG, replay_bitmap); 5533 if (status) 5534 return status; 5535 /* Replay aggregator node BW (restore aggregator BW) */ 5536 status = ice_sched_replay_agg_bw(hw, agg_info); 5537 if (status) 5538 return status; 5539 5540 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5541 ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap, 5542 replay_bitmap); 5543 /* Move this VSI (vsi_handle) to above aggregator */ 5544 status = ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle, 5545 replay_bitmap); 5546 if (status) 5547 return status; 5548 /* Replay VSI BW (restore VSI BW) */ 5549 return ice_sched_replay_vsi_bw(hw, vsi_handle, 5550 agg_vsi_info->tc_bitmap); 5551 } 5552 5553 /** 5554 * ice_replay_vsi_agg - replay VSI to aggregator node 5555 * @hw: pointer to the HW 
struct 5556 * @vsi_handle: software VSI handle 5557 * 5558 * This function replays association of VSI to aggregator type nodes, and 5559 * node bandwidth information. 5560 */ 5561 enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) 5562 { 5563 struct ice_port_info *pi = hw->port_info; 5564 enum ice_status status; 5565 5566 ice_acquire_lock(&pi->sched_lock); 5567 status = ice_sched_replay_vsi_agg(hw, vsi_handle); 5568 ice_release_lock(&pi->sched_lock); 5569 return status; 5570 } 5571 5572 /** 5573 * ice_sched_replay_q_bw - replay queue type node BW 5574 * @pi: port information structure 5575 * @q_ctx: queue context structure 5576 * 5577 * This function replays queue type node bandwidth. This function needs to be 5578 * called with scheduler lock held. 5579 */ 5580 enum ice_status 5581 ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) 5582 { 5583 struct ice_sched_node *q_node; 5584 5585 /* Following also checks the presence of node in tree */ 5586 q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); 5587 if (!q_node) 5588 return ICE_ERR_PARAM; 5589 return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); 5590 } 5591