1 /* SPDX-License-Identifier: BSD-3-Clause */ 2 /* Copyright (c) 2021, Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Intel Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 /*$FreeBSD$*/ 32 33 #include "ice_sched.h" 34 35 /** 36 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB 37 * @pi: port information structure 38 * @info: Scheduler element information from firmware 39 * 40 * This function inserts the root node of the scheduling tree topology 41 * to the SW DB. 
 */
static enum ice_status
ice_sched_add_root_node(struct ice_port_info *pi,
			struct ice_aqc_txsched_elem_data *info)
{
	struct ice_sched_node *root;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	root = (struct ice_sched_node *)ice_malloc(hw, sizeof(*root));
	if (!root)
		return ICE_ERR_NO_MEMORY;

	/* coverity[suspicious_sizeof] */
	/* NOTE(review): each slot of the children array only holds a pointer,
	 * yet sizeof(*root) (a full node) is used as the element size, so this
	 * over-allocates; the coverity suppression above suggests this is
	 * intentional shared-code behavior — confirm before "fixing".
	 */
	root->children = (struct ice_sched_node **)
		ice_calloc(hw, hw->max_children[0], sizeof(*root));
	if (!root->children) {
		/* don't leak the node itself when the child array fails */
		ice_free(hw, root);
		return ICE_ERR_NO_MEMORY;
	}

	/* cache the FW element data and publish the root in the port info */
	ice_memcpy(&root->info, info, sizeof(*info), ICE_DMA_TO_NONDMA);
	pi->root = root;
	return ICE_SUCCESS;
}

/**
 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
 * @teid: node TEID to search
 *
 * This function searches for a node matching the TEID in the scheduling tree
 * from the SW DB. The search is recursive and is restricted by the number of
 * layers it has searched through; stopping at the max supported layer.
80 * 81 * This function needs to be called when holding the port_info->sched_lock 82 */ 83 struct ice_sched_node * 84 ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) 85 { 86 u16 i; 87 88 /* The TEID is same as that of the start_node */ 89 if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid) 90 return start_node; 91 92 /* The node has no children or is at the max layer */ 93 if (!start_node->num_children || 94 start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM || 95 start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) 96 return NULL; 97 98 /* Check if TEID matches to any of the children nodes */ 99 for (i = 0; i < start_node->num_children; i++) 100 if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid) 101 return start_node->children[i]; 102 103 /* Search within each child's sub-tree */ 104 for (i = 0; i < start_node->num_children; i++) { 105 struct ice_sched_node *tmp; 106 107 tmp = ice_sched_find_node_by_teid(start_node->children[i], 108 teid); 109 if (tmp) 110 return tmp; 111 } 112 113 return NULL; 114 } 115 116 /** 117 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd 118 * @hw: pointer to the HW struct 119 * @cmd_opc: cmd opcode 120 * @elems_req: number of elements to request 121 * @buf: pointer to buffer 122 * @buf_size: buffer size in bytes 123 * @elems_resp: returns total number of elements response 124 * @cd: pointer to command details structure or NULL 125 * 126 * This function sends a scheduling elements cmd (cmd_opc) 127 */ 128 static enum ice_status 129 ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, 130 u16 elems_req, void *buf, u16 buf_size, 131 u16 *elems_resp, struct ice_sq_cd *cd) 132 { 133 struct ice_aqc_sched_elem_cmd *cmd; 134 struct ice_aq_desc desc; 135 enum ice_status status; 136 137 cmd = &desc.params.sched_elem_cmd; 138 ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc); 139 cmd->num_elem_req = CPU_TO_LE16(elems_req); 140 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 
141 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 142 if (!status && elems_resp) 143 *elems_resp = LE16_TO_CPU(cmd->num_elem_resp); 144 145 return status; 146 } 147 148 /** 149 * ice_aq_query_sched_elems - query scheduler elements 150 * @hw: pointer to the HW struct 151 * @elems_req: number of elements to query 152 * @buf: pointer to buffer 153 * @buf_size: buffer size in bytes 154 * @elems_ret: returns total number of elements returned 155 * @cd: pointer to command details structure or NULL 156 * 157 * Query scheduling elements (0x0404) 158 */ 159 enum ice_status 160 ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, 161 struct ice_aqc_txsched_elem_data *buf, u16 buf_size, 162 u16 *elems_ret, struct ice_sq_cd *cd) 163 { 164 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems, 165 elems_req, (void *)buf, buf_size, 166 elems_ret, cd); 167 } 168 169 /** 170 * ice_sched_add_node - Insert the Tx scheduler node in SW DB 171 * @pi: port information structure 172 * @layer: Scheduler layer of the node 173 * @info: Scheduler element information from firmware 174 * 175 * This function inserts a scheduler node to the SW DB. 
 */
enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
		   struct ice_aqc_txsched_elem_data *info)
{
	struct ice_aqc_txsched_elem_data elem;
	struct ice_sched_node *parent;
	struct ice_sched_node *node;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	/* A valid parent node should be there */
	parent = ice_sched_find_node_by_teid(pi->root,
					     LE32_TO_CPU(info->parent_teid));
	if (!parent) {
		ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
			  LE32_TO_CPU(info->parent_teid));
		return ICE_ERR_PARAM;
	}

	/* query the current node information from FW before adding it
	 * to the SW DB
	 */
	status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem);
	if (status)
		return status;
	node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node));
	if (!node)
		return ICE_ERR_NO_MEMORY;
	if (hw->max_children[layer]) {
		/* coverity[suspicious_sizeof] */
		/* NOTE(review): element size is sizeof(*node) rather than a
		 * pointer size — same deliberate over-allocation pattern as
		 * ice_sched_add_root_node; confirm before changing.
		 */
		node->children = (struct ice_sched_node **)
			ice_calloc(hw, hw->max_children[layer], sizeof(*node));
		if (!node->children) {
			ice_free(hw, node);
			return ICE_ERR_NO_MEMORY;
		}
	}

	/* link the node into the SW DB tree; NOTE(review): no bounds check of
	 * parent->num_children against hw->max_children here — callers
	 * presumably guarantee capacity (see ice_sched_add_nodes_to_layer);
	 * confirm.
	 */
	node->in_use = true;
	node->parent = parent;
	node->tx_sched_layer = layer;
	parent->children[parent->num_children++] = node;
	node->info = elem;
	return ICE_SUCCESS;
}

/**
 * ice_aq_delete_sched_elems - delete scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to delete
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_del: returns total number of elements deleted
 * @cd: pointer to command details structure or NULL
 *
 * Delete scheduling elements (0x040F)
 */
static enum ice_status
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
			  struct ice_aqc_delete_elem *buf, u16 buf_size,
			  u16 *grps_del, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_del, cd);
}

/**
 * ice_sched_remove_elems - remove nodes from HW
 * @hw: pointer to the HW struct
 * @parent: pointer to the parent node
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be deleted
 *
 * This function remove nodes from HW
 */
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
		       u16 num_nodes, u32 *node_teids)
{
	struct ice_aqc_delete_elem *buf;
	u16 i, num_groups_removed = 0;
	enum ice_status status;
	u16 buf_size;

	/* variable-size buffer: header plus num_nodes trailing TEIDs */
	buf_size = ice_struct_size(buf, teid, num_nodes);
	buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = CPU_TO_LE32(node_teids[i]);

	/* one group (this parent) containing all the TEIDs */
	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
					   &num_groups_removed, NULL);
	if (status != ICE_SUCCESS || num_groups_removed != 1)
		ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
			  hw->adminq.sq_last_status);

	ice_free(hw, buf);
	return status;
}

/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @pi: port information structure
 * @parent: pointer the base node of the subtree
 * @layer: layer number
 *
 * This function retrieves the first node of the given layer from the subtree
 */
static struct ice_sched_node *
ice_sched_get_first_node(struct ice_port_info *pi,
			 struct ice_sched_node *parent, u8 layer)
{
	/* sibling-list head per (TC, layer); parent only supplies the TC */
	return pi->sib_head[parent->tc_num][layer];
}

/**
 * ice_sched_get_tc_node - get pointer to TC node
 * @pi: port information structure
 * @tc: TC number
 *
 * This function returns the TC node pointer
 */
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
{
	u8 i;

	if (!pi || !pi->root)
		return NULL;
	/* TC nodes are the direct children of the root */
	for (i = 0; i < pi->root->num_children; i++)
		if (pi->root->children[i]->tc_num == tc)
			return pi->root->children[i];
	return NULL;
}

/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node
	 * The parent array is updated below and that shifts the nodes
	 * in the array. So always pick the first child if num children > 0
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = LE32_TO_CPU(node->info.node_teid);

		/* best effort: HW removal status is intentionally ignored */
		ice_sched_remove_elems(hw, node->parent, 1, &teid);
	}
	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p;

		/* update the parent: shift the remaining children left to
		 * close the gap left by this node
		 */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}

		/* unlink this node from the singly linked sibling list */
		p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}

		/* update the sibling head if head is getting removed */
		if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
			pi->sib_head[node->tc_num][node->tx_sched_layer] =
				node->sibling;
	}

	/* leaf nodes have no children */
	if (node->children)
		ice_free(hw, node->children);
	ice_free(hw, node);
}

/**
 * ice_aq_get_dflt_topo - gets default scheduler topology
 * @hw: pointer to the HW struct
 * @lport: logical port number
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_branches: returns total number of queue to port branches
 * @cd: pointer to command details structure or NULL
 *
 * Get default scheduler topology (0x400)
 */
static enum ice_status
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
		     u8 *num_branches, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_topo *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_topo;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
	cmd->port_num = lport;
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_branches)
		*num_branches = cmd->num_branches;

	return status;
}

/**
 * ice_aq_add_sched_elems - adds scheduling element
 * @hw: pointer to the HW struct
 * @grps_req: the number of groups that are requested to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_added: returns total number of groups added
 * @cd: pointer to command details structure or NULL
 *
 * Add scheduling elements (0x0401)
 */
static enum ice_status
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
		       struct ice_aqc_add_elem *buf, u16 buf_size,
		       u16 *grps_added, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_added, cd);
}

/**
 * ice_aq_cfg_sched_elems - configures scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_cfgd: returns total number of elements configured
 * @cd: pointer to command details structure or NULL
 *
 * Configure scheduling elements (0x0403)
 */
static enum ice_status
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
		       struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
		       u16 *elems_cfgd, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_cfgd, cd);
}

/**
 * ice_aq_move_sched_elems - move scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to move
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_movd: returns total number of groups moved
 * @cd: pointer to command details structure or NULL
 *
 * Move scheduling elements (0x0408)
 */
static enum ice_status
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
			struct ice_aqc_move_elem *buf, u16 buf_size,
			u16 *grps_movd, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_movd, cd);
}

/**
 * ice_aq_suspend_sched_elems - suspend scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to suspend
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements suspended
 * @cd: pointer to command details structure or NULL
 *
 * Suspend scheduling elements (0x0409)
 */

static enum ice_status
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_resume_sched_elems - resume scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to resume
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements resumed
 * @cd: pointer to command details structure or NULL
 *
 * resume scheduling elements (0x040A)
 */
static enum ice_status
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_query_sched_res - query scheduler resource
 * @hw: pointer to the HW struct
 * @buf_size: buffer size in bytes
 * @buf: pointer to buffer
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduler resource allocation (0x0412)
 */
static enum ice_status
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
		       struct ice_aqc_query_txsched_res_resp *buf,
		       struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
 * @hw: pointer to the HW struct
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes HW nodes
 */
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
			       bool suspend)
{
	u16 i, buf_size, num_elem_ret = 0;
	enum ice_status status;
	__le32 *buf;

	/* build a little-endian TEID array for the AQ command */
	buf_size = sizeof(*buf) * num_nodes;
	buf = (__le32 *)ice_malloc(hw, buf_size);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < num_nodes; i++)
		buf[i] = CPU_TO_LE32(node_teids[i]);

	if (suspend)
		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
						    buf_size, &num_elem_ret,
						    NULL);
	else
		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
						   buf_size, &num_elem_ret,
						   NULL);
	/* a partial count from FW is logged but not turned into an error */
	if (status != ICE_SUCCESS || num_elem_ret != num_nodes)
		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");

	ice_free(hw, buf);
	return status;
}

/**
 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 */
static enum ice_status
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* allocate LAN queue contexts */
	if (!vsi_ctx->lan_q_ctx[tc]) {
		vsi_ctx->lan_q_ctx[tc] = (struct ice_q_ctx *)
			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
		if (!vsi_ctx->lan_q_ctx[tc])
			return ICE_ERR_NO_MEMORY;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
		return ICE_SUCCESS;
	}
	/* num queues are increased, update the queue contexts
	 * (shrinking is a no-op here; existing entries are kept as-is)
	 */
	if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_lan_q_entries[tc];

		q_ctx = (struct ice_q_ctx *)
			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
		if (!q_ctx)
			return ICE_ERR_NO_MEMORY;
		/* copy the old entries, then swap in the larger array */
		ice_memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
			   prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA);
		ice_free(hw, vsi_ctx->lan_q_ctx[tc]);
		vsi_ctx->lan_q_ctx[tc] = q_ctx;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
	}
	return ICE_SUCCESS;
}

/**
 * ice_aq_rl_profile - performs a rate limiting task
 * @hw: pointer to the HW struct
 * @opcode: opcode for add, query, or remove profile(s)
 * @num_profiles: the number of profiles
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_processed: number of processed add or remove profile(s) to return
 * @cd: pointer to command details structure
 *
 * RL profile function to add, query, or remove profile(s)
 */
static enum ice_status
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
		  u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
		  u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
	struct ice_aqc_rl_profile *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.rl_profile;

	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	cmd->num_profiles = CPU_TO_LE16(num_profiles);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_processed)
		*num_processed = LE16_TO_CPU(cmd->num_processed);
	return status;
}

/**
 * ice_aq_add_rl_profile - adds rate limiting profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to be add
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_added: total number of profiles added to return
 * @cd: pointer to command details structure
 *
 * Add RL profile (0x0410)
 */
static enum ice_status
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
		      struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
		      u16 *num_profiles_added, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
				 buf, buf_size, num_profiles_added, cd);
}

/**
 * ice_aq_query_rl_profile - query rate limiting profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @cd: pointer to command details structure
 *
 * Query RL profile (0x0411)
 */
enum ice_status
ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
			struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
			struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_query_rl_profiles,
				 num_profiles, buf, buf_size, NULL, cd);
}

/**
 * ice_aq_remove_rl_profile - removes RL profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to remove
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_removed: total number of profiles removed to return
 * @cd: pointer to command details structure or NULL
 *
 * Remove RL profile (0x0415)
 */
static enum ice_status
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
			 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
			 u16 *num_profiles_removed, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
				 num_profiles, buf, buf_size,
				 num_profiles_removed, cd);
}

/**
 * ice_sched_del_rl_profile - remove RL profile
 * @hw: pointer to the HW struct
 * @rl_info: rate limit profile information
 *
 * If the profile ID is not referenced anymore, it removes profile ID with
 * its associated parameters from HW DB,and locally. The caller needs to
 * hold scheduler lock.
 */
static enum ice_status
ice_sched_del_rl_profile(struct ice_hw *hw,
			 struct ice_aqc_rl_profile_info *rl_info)
{
	struct ice_aqc_rl_profile_elem *buf;
	u16 num_profiles_removed;
	enum ice_status status;
	u16 num_profiles = 1;

	/* still referenced by at least one node — refuse to remove */
	if (rl_info->prof_id_ref != 0)
		return ICE_ERR_IN_USE;

	/* Safe to remove profile ID */
	buf = &rl_info->profile;
	status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
					  &num_profiles_removed, NULL);
	if (status || num_profiles_removed != num_profiles)
		return ICE_ERR_CFG;

	/* Delete stale entry now: on success this function takes ownership of
	 * rl_info and frees it
	 */
	LIST_DEL(&rl_info->list_entry);
	ice_free(hw, rl_info);
	return status;
}

/**
 * ice_sched_clear_rl_prof - clears RL prof entries
 * @pi: port information structure
 *
 * This function removes all RL profile from HW as well as from SW DB.
 */
static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
{
	u16 ln;
	struct ice_hw *hw = pi->hw;

	for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
		struct ice_aqc_rl_profile_info *rl_prof_elem;
		struct ice_aqc_rl_profile_info *rl_prof_tmp;

		LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
					 &hw->rl_prof_list[ln],
					 ice_aqc_rl_profile_info, list_entry) {
			enum ice_status status;

			/* force the ref count to 0 so removal proceeds */
			rl_prof_elem->prof_id_ref = 0;
			status = ice_sched_del_rl_profile(hw, rl_prof_elem);
			if (status) {
				ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
				/* On error, free mem required: the delete
				 * helper only frees the entry on success
				 */
				LIST_DEL(&rl_prof_elem->list_entry);
				ice_free(hw, rl_prof_elem);
			}
		}
	}
}

/**
 * ice_sched_clear_agg - clears the aggregator related information
 * @hw: pointer to the hardware structure
 *
 * This function removes aggregator list and free up aggregator related memory
 * previously allocated.
 */
void ice_sched_clear_agg(struct ice_hw *hw)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &hw->agg_list,
				 ice_sched_agg_info,
				 list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		/* free the per-aggregator VSI list before the aggregator */
		LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list,
					 ice_sched_agg_vsi_info, list_entry) {
			LIST_DEL(&agg_vsi_info->list_entry);
			ice_free(hw, agg_vsi_info);
		}
		LIST_DEL(&agg_info->list_entry);
		ice_free(hw, agg_info);
	}
}

/**
 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 * @pi: port information structure
 *
 * This function removes all the nodes from HW as well as from SW DB.
 */
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
	if (!pi)
		return;
	/* remove RL profiles related lists */
	ice_sched_clear_rl_prof(pi);
	if (pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}
}

/**
 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 * @pi: port information structure
 *
 * Cleanup scheduling elements from SW DB
 */
void ice_sched_clear_port(struct ice_port_info *pi)
{
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return;

	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
	ice_acquire_lock(&pi->sched_lock);
	ice_sched_clear_tx_topo(pi);
	ice_release_lock(&pi->sched_lock);
	ice_destroy_lock(&pi->sched_lock);
}

/**
 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 * @hw: pointer to the HW struct
 *
 * Cleanup scheduling elements from SW DB for all the ports
 */
void ice_sched_cleanup_all(struct ice_hw *hw)
{
	if (!hw)
		return;

	if (hw->layer_info) {
		ice_free(hw, hw->layer_info);
		hw->layer_info = NULL;
	}

	ice_sched_clear_port(hw->port_info);

	hw->num_tx_sched_layers = 0;
	hw->num_tx_sched_phys_layers = 0;
	hw->flattened_layers = 0;
	hw->max_cgds = 0;
}

/**
 * ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping
 * @hw: pointer to the HW struct
 * @num_l2_nodes: the number of L2 nodes whose CGDs to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Configure L2 Node CGD (0x0414)
 */
enum ice_status
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
		       struct ice_aqc_cfg_l2_node_cgd_elem *buf,
		       u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_cfg_l2_node_cgd *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.cfg_l2_node_cgd;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_l2_node_cgd);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_l2_nodes = CPU_TO_LE16(num_l2_nodes);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_sched_add_elems - add nodes to HW and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the TEID of first node
 *
 * This function add nodes to HW as well as to SW DB for a given layer
 */
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
		    u16 *num_nodes_added, u32 *first_node_teid)
{
	struct ice_sched_node *prev, *new_node;
	struct ice_aqc_add_elem *buf;
	u16 i, num_groups_added = 0;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u16 buf_size;
	u32 teid;

	/* variable-size buffer: header plus num_nodes generic elements */
	buf_size = ice_struct_size(buf, generic, num_nodes);
	buf = (struct ice_aqc_add_elem *)ice_malloc(hw, buf_size);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
	/* every new node starts as a generic SE with default CIR/EIR
	 * bandwidth profile and weight
	 */
	for (i = 0; i < num_nodes; i++) {
		buf->generic[i].parent_teid = parent->info.node_teid;
		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
		buf->generic[i].data.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->generic[i].data.generic = 0;
		buf->generic[i].data.cir_bw.bw_profile_idx =
			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.cir_bw.bw_alloc =
			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
		buf->generic[i].data.eir_bw.bw_profile_idx =
			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.eir_bw.bw_alloc =
			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
	}

	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
					&num_groups_added, NULL);
	if (status != ICE_SUCCESS || num_groups_added != 1) {
		ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
			  hw->adminq.sq_last_status);
		ice_free(hw, buf);
		return ICE_ERR_CFG;
	}

	*num_nodes_added = num_nodes;
	/* add nodes to the SW DB */
	for (i = 0; i < num_nodes; i++) {
		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
		if (status != ICE_SUCCESS) {
			ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
				  status);
			break;
		}

		/* locate the SW node just created for this FW element */
		teid = LE32_TO_CPU(buf->generic[i].node_teid);
		new_node = ice_sched_find_node_by_teid(parent, teid);
		if (!new_node) {
			ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid);
			break;
		}

		new_node->sibling = NULL;
		new_node->tc_num = tc_node->tc_num;

		/* add it to previous node sibling pointer */
		/* Note: siblings are not linked across branches */
		prev = ice_sched_get_first_node(pi, tc_node, layer);
		if (prev && prev != new_node) {
			while (prev->sibling)
				prev = prev->sibling;
			prev->sibling = new_node;
		}

		/* initialize the sibling head */
		if (!pi->sib_head[tc_node->tc_num][layer])
			pi->sib_head[tc_node->tc_num][layer] = new_node;

		if (i == 0)
			*first_node_teid = teid;
	}

	ice_free(hw, buf);
	return status;
}

/**
 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * This function add nodes to a given layer.
 */
static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
			     struct ice_sched_node *tc_node,
			     struct ice_sched_node *parent, u8 layer,
			     u16 num_nodes, u32 *first_node_teid,
			     u16 *num_nodes_added)
{
	u32 *first_teid_ptr = first_node_teid;
	u16 new_num_nodes, max_child_nodes;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u16 num_added = 0;
	u32 temp;

	*num_nodes_added = 0;

	if (!num_nodes)
		return status;

	if (!parent || layer < hw->sw_entry_point_layer)
		return ICE_ERR_PARAM;

	/* max children per node per layer */
	max_child_nodes = hw->max_children[parent->tx_sched_layer];

	/* current number of children + required nodes exceed max children ? */
	if ((parent->num_children + num_nodes) > max_child_nodes) {
		/* Fail if the parent is a TC node */
		if (parent == tc_node)
			return ICE_ERR_CFG;

		/* utilize all the spaces if the parent is not full */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
			/* this recursion is intentional, and wouldn't
			 * go more than 2 calls
			 */
			status = ice_sched_add_nodes_to_layer(pi, tc_node,
							      parent, layer,
							      new_num_nodes,
							      first_node_teid,
							      &num_added);
			if (status != ICE_SUCCESS)
				return status;

			*num_nodes_added += num_added;
		}
		/* Don't modify the first node TEID memory if the first node was
		 * added already in the above call. Instead send some temp
		 * memory for all other recursive calls.
		 */
		if (num_added)
			first_teid_ptr = &temp;

		new_num_nodes = num_nodes - num_added;

		/* This parent is full, try the next sibling; if the sibling is
		 * NULL the recursive call below returns ICE_ERR_PARAM
		 */
		parent = parent->sibling;

		/* this recursion is intentional, for 1024 queues
		 * per VSI, it goes max of 16 iterations.
		 * 1024 / 8 = 128 layer 8 nodes
		 * 128 /8 = 16 (add 8 nodes per iteration)
		 */
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      layer, new_num_nodes,
						      first_teid_ptr,
						      &num_added);
		*num_nodes_added += num_added;
		return status;
	}

	status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
				     num_nodes_added, first_node_teid);
	return status;
}

/**
 * ice_sched_get_qgrp_layer - get the current queue group layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current queue group layer number
 */
static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
{
	/* It's always total layers - 1, the array is 0 relative so -2 */
	return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
}

/**
 * ice_sched_get_vsi_layer - get the current VSI layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current VSI layer number
 */
static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
{
	/* Num Layers       VSI layer
	 *     9               6
	 *     7               4
	 *     5 or less       sw_entry_point_layer
	 */
	/* calculate the VSI layer based on number of layers. */
	if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
		u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;

		if (layer > hw->sw_entry_point_layer)
			return layer;
	}
	return hw->sw_entry_point_layer;
}

/**
 * ice_sched_get_agg_layer - get the current aggregator layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current aggregator layer number
 */
static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
{
	/* Num Layers       aggregator layer
	 *     9               4
	 *     7 or less       sw_entry_point_layer
	 */
	/* calculate the aggregator layer based on number of layers.
*/ 1132 if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) { 1133 u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; 1134 1135 if (layer > hw->sw_entry_point_layer) 1136 return layer; 1137 } 1138 return hw->sw_entry_point_layer; 1139 } 1140 1141 /** 1142 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree 1143 * @pi: port information structure 1144 * 1145 * This function removes the leaf node that was created by the FW 1146 * during initialization 1147 */ 1148 static void ice_rm_dflt_leaf_node(struct ice_port_info *pi) 1149 { 1150 struct ice_sched_node *node; 1151 1152 node = pi->root; 1153 while (node) { 1154 if (!node->num_children) 1155 break; 1156 node = node->children[0]; 1157 } 1158 if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) { 1159 u32 teid = LE32_TO_CPU(node->info.node_teid); 1160 enum ice_status status; 1161 1162 /* remove the default leaf node */ 1163 status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid); 1164 if (!status) 1165 ice_free_sched_node(pi, node); 1166 } 1167 } 1168 1169 /** 1170 * ice_sched_rm_dflt_nodes - free the default nodes in the tree 1171 * @pi: port information structure 1172 * 1173 * This function frees all the nodes except root and TC that were created by 1174 * the FW during initialization 1175 */ 1176 static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi) 1177 { 1178 struct ice_sched_node *node; 1179 1180 ice_rm_dflt_leaf_node(pi); 1181 1182 /* remove the default nodes except TC and root nodes */ 1183 node = pi->root; 1184 while (node) { 1185 if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer && 1186 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC && 1187 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) { 1188 ice_free_sched_node(pi, node); 1189 break; 1190 } 1191 1192 if (!node->num_children) 1193 break; 1194 node = node->children[0]; 1195 } 1196 } 1197 1198 /** 1199 * ice_sched_init_port - Initialize scheduler by querying information from FW 1200 * 
@pi: port info structure for the tree to cleanup 1201 * 1202 * This function is the initial call to find the total number of Tx scheduler 1203 * resources, default topology created by firmware and storing the information 1204 * in SW DB. 1205 */ 1206 enum ice_status ice_sched_init_port(struct ice_port_info *pi) 1207 { 1208 struct ice_aqc_get_topo_elem *buf; 1209 enum ice_status status; 1210 struct ice_hw *hw; 1211 u8 num_branches; 1212 u16 num_elems; 1213 u8 i, j; 1214 1215 if (!pi) 1216 return ICE_ERR_PARAM; 1217 hw = pi->hw; 1218 1219 /* Query the Default Topology from FW */ 1220 buf = (struct ice_aqc_get_topo_elem *)ice_malloc(hw, 1221 ICE_AQ_MAX_BUF_LEN); 1222 if (!buf) 1223 return ICE_ERR_NO_MEMORY; 1224 1225 /* Query default scheduling tree topology */ 1226 status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN, 1227 &num_branches, NULL); 1228 if (status) 1229 goto err_init_port; 1230 1231 /* num_branches should be between 1-8 */ 1232 if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) { 1233 ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n", 1234 num_branches); 1235 status = ICE_ERR_PARAM; 1236 goto err_init_port; 1237 } 1238 1239 /* get the number of elements on the default/first branch */ 1240 num_elems = LE16_TO_CPU(buf[0].hdr.num_elems); 1241 1242 /* num_elems should always be between 1-9 */ 1243 if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) { 1244 ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n", 1245 num_elems); 1246 status = ICE_ERR_PARAM; 1247 goto err_init_port; 1248 } 1249 1250 /* If the last node is a leaf node then the index of the queue group 1251 * layer is two less than the number of elements. 
1252 */ 1253 if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type == 1254 ICE_AQC_ELEM_TYPE_LEAF) 1255 pi->last_node_teid = 1256 LE32_TO_CPU(buf[0].generic[num_elems - 2].node_teid); 1257 else 1258 pi->last_node_teid = 1259 LE32_TO_CPU(buf[0].generic[num_elems - 1].node_teid); 1260 1261 /* Insert the Tx Sched root node */ 1262 status = ice_sched_add_root_node(pi, &buf[0].generic[0]); 1263 if (status) 1264 goto err_init_port; 1265 1266 /* Parse the default tree and cache the information */ 1267 for (i = 0; i < num_branches; i++) { 1268 num_elems = LE16_TO_CPU(buf[i].hdr.num_elems); 1269 1270 /* Skip root element as already inserted */ 1271 for (j = 1; j < num_elems; j++) { 1272 /* update the sw entry point */ 1273 if (buf[0].generic[j].data.elem_type == 1274 ICE_AQC_ELEM_TYPE_ENTRY_POINT) 1275 hw->sw_entry_point_layer = j; 1276 1277 status = ice_sched_add_node(pi, j, &buf[i].generic[j]); 1278 if (status) 1279 goto err_init_port; 1280 } 1281 } 1282 1283 /* Remove the default nodes. */ 1284 if (pi->root) 1285 ice_sched_rm_dflt_nodes(pi); 1286 1287 /* initialize the port for handling the scheduler tree */ 1288 pi->port_state = ICE_SCHED_PORT_STATE_READY; 1289 ice_init_lock(&pi->sched_lock); 1290 for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) 1291 INIT_LIST_HEAD(&hw->rl_prof_list[i]); 1292 1293 err_init_port: 1294 if (status && pi->root) { 1295 ice_free_sched_node(pi, pi->root); 1296 pi->root = NULL; 1297 } 1298 1299 ice_free(hw, buf); 1300 return status; 1301 } 1302 1303 /** 1304 * ice_sched_get_node - Get the struct ice_sched_node for given TEID 1305 * @pi: port information structure 1306 * @teid: Scheduler node TEID 1307 * 1308 * This function retrieves the ice_sched_node struct for given TEID from 1309 * the SW DB and returns it to the caller. 
 */
struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid)
{
	struct ice_sched_node *node;

	if (!pi)
		return NULL;

	/* Find the node starting from root; traversal is done under the
	 * scheduler lock so the tree cannot change underneath us.
	 */
	ice_acquire_lock(&pi->sched_lock);
	node = ice_sched_find_node_by_teid(pi->root, teid);
	ice_release_lock(&pi->sched_lock);

	if (!node)
		ice_debug(pi->hw, ICE_DBG_SCHED, "Node not found for teid=0x%x\n", teid);

	return node;
}

/**
 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
 * @hw: pointer to the HW struct
 *
 * query FW for allocated scheduler resources and store in HW struct
 */
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
{
	struct ice_aqc_query_txsched_res_resp *buf;
	enum ice_status status = ICE_SUCCESS;
	__le16 max_sibl;
	u8 i;

	/* already queried and cached; nothing to do */
	if (hw->layer_info)
		return status;

	buf = (struct ice_aqc_query_txsched_res_resp *)
		ice_malloc(hw, sizeof(*buf));
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
	if (status)
		goto sched_query_out;

	hw->num_tx_sched_layers = LE16_TO_CPU(buf->sched_props.logical_levels);
	hw->num_tx_sched_phys_layers =
		LE16_TO_CPU(buf->sched_props.phys_levels);
	hw->flattened_layers = buf->sched_props.flattening_bitmap;
	hw->max_cgds = buf->sched_props.max_pf_cgds;

	/* max sibling group size of current layer refers to the max children
	 * of the below layer node.
	 * layer 1 node max children will be layer 2 max sibling group size
	 * layer 2 node max children will be layer 3 max sibling group size
	 * and so on. This array will be populated from root (index 0) to
	 * qgroup layer 7. Leaf node has no children.
	 */
	for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
		max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
		hw->max_children[i] = LE16_TO_CPU(max_sibl);
	}

	/* cache the per-layer properties for later use */
	hw->layer_info = (struct ice_aqc_layer_props *)
			 ice_memdup(hw, buf->layer_props,
				    (hw->num_tx_sched_layers *
				     sizeof(*hw->layer_info)),
				    ICE_DMA_TO_DMA);
	if (!hw->layer_info) {
		status = ICE_ERR_NO_MEMORY;
		goto sched_query_out;
	}

sched_query_out:
	ice_free(hw, buf);
	return status;
}

/**
 * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
 * @hw: pointer to the HW struct
 *
 * Determine the PSM clock frequency and store in HW struct
 */
void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
{
	u32 val, clk_src;

	val = rd32(hw, GLGEN_CLKSTAT_SRC);
	clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
		GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;

#define PSM_CLK_SRC_367_MHZ 0x0
#define PSM_CLK_SRC_416_MHZ 0x1
#define PSM_CLK_SRC_446_MHZ 0x2
#define PSM_CLK_SRC_390_MHZ 0x3

	switch (clk_src) {
	case PSM_CLK_SRC_367_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_416_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_446_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_390_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
		break;
	default:
		ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n",
			  clk_src);
		/* fall back to a safe default */
		hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
	}
}

/**
 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
 * @hw: pointer to the HW struct
 * @base: pointer to the base node
 * @node: pointer to the node to search
 *
 * This function checks whether a given node is part of the base node
 * subtree or not
 */
bool
ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
			       struct ice_sched_node *node)
{
	u8 i;

	for (i = 0; i < base->num_children; i++) {
		struct ice_sched_node *child = base->children[i];

		if (node == child)
			return true;

		/* the subtree only goes downward; once we pass the target's
		 * layer it cannot be below this child
		 */
		if (child->tx_sched_layer > node->tx_sched_layer)
			return false;

		/* this recursion is intentional, and wouldn't
		 * go more than 8 calls
		 */
		if (ice_sched_find_node_in_subtree(hw, child, node))
			return true;
	}
	return false;
}

/**
 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
 * @pi: port information structure
 * @vsi_node: software VSI handle
 * @qgrp_node: first queue group node identified for scanning
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node by scanning
 * qgrp_node and its siblings for the queue group with the fewest number
 * of queues currently assigned.
 */
static struct ice_sched_node *
ice_sched_get_free_qgrp(struct ice_port_info *pi,
			struct ice_sched_node *vsi_node,
			struct ice_sched_node *qgrp_node, u8 owner)
{
	struct ice_sched_node *min_qgrp;
	u8 min_children;

	if (!qgrp_node)
		return qgrp_node;
	min_children = qgrp_node->num_children;
	if (!min_children)
		return qgrp_node;
	min_qgrp = qgrp_node;
	/* scan all queue groups until find a node which has less than the
	 * minimum number of children. This way all queue group nodes get
	 * equal number of shares and active. The bandwidth will be equally
	 * distributed across all queues.
	 */
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < min_children &&
			    qgrp_node->owner == owner) {
				/* replace the new min queue group node */
				min_qgrp = qgrp_node;
				min_children = min_qgrp->num_children;
				/* break if it has no children, */
				if (!min_children)
					break;
			}
		qgrp_node = qgrp_node->sibling;
	}
	return min_qgrp;
}

/**
 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: branch number
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node
 */
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			   u8 owner)
{
	struct ice_sched_node *vsi_node, *qgrp_node;
	struct ice_vsi_ctx *vsi_ctx;
	u16 max_children;
	u8 qgrp_layer;

	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
	max_children = pi->hw->max_children[qgrp_layer];

	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return NULL;
	vsi_node = vsi_ctx->sched.vsi_node[tc];
	/* validate invalid VSI ID */
	if (!vsi_node)
		return NULL;

	/* get the first queue group node from VSI sub-tree */
	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < max_children &&
			    qgrp_node->owner == owner)
				break;
		qgrp_node = qgrp_node->sibling;
	}

	/* Select the best queue group */
	return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
}

/**
 * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
 * @pi: pointer to the port information structure
 * @tc_node: pointer to the TC node
 * @vsi_handle: software VSI handle
 *
 * This function retrieves a VSI node for a given VSI ID from a given
 * TC branch
 */
struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		       u16 vsi_handle)
{
	struct ice_sched_node *node;
	u8 vsi_layer;

	vsi_layer = ice_sched_get_vsi_layer(pi->hw);
	node = ice_sched_get_first_node(pi, tc_node, vsi_layer);

	/* Check whether it already exists */
	while (node) {
		if (node->vsi_handle == vsi_handle)
			return node;
		node = node->sibling;
	}

	return node;
}

/**
 * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
 * @pi: pointer to the port information structure
 * @tc_node: pointer to the TC node
 * @agg_id: aggregator ID
 *
 * This function retrieves an aggregator node for a given aggregator ID from
 * a given TC branch
 */
static struct ice_sched_node *
ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		       u32 agg_id)
{
	struct ice_sched_node *node;
	struct ice_hw *hw = pi->hw;
	u8 agg_layer;

	if (!hw)
		return NULL;
	agg_layer = ice_sched_get_agg_layer(hw);
	node = ice_sched_get_first_node(pi, tc_node, agg_layer);

	/* Check whether it already exists */
	while (node) {
		if (node->agg_id == agg_id)
			return node;
		node = node->sibling;
	}

	return node;
}

/**
 * ice_sched_check_node - Compare node parameters between SW DB and HW DB
 * @hw: pointer to the HW struct
 * @node: pointer to the ice_sched_node struct
 *
 * This function queries and compares the HW element with SW DB node parameters
 */
static bool
ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node)
{
	struct ice_aqc_txsched_elem_data buf;
	enum ice_status status;
	u32 node_teid;

	node_teid = LE32_TO_CPU(node->info.node_teid);
	status = ice_sched_query_elem(hw, node_teid, &buf);
	if (status != ICE_SUCCESS)
		return false;

	if (memcmp(&buf, &node->info, sizeof(buf))) {
		ice_debug(hw, ICE_DBG_SCHED, "Node mismatch for teid=0x%x\n",
			  node_teid);
		return false;
	}

	return true;
}

/**
 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
 * @hw: pointer to the HW struct
 * @num_qs: number of queues
 * @num_nodes: num nodes array
 *
 * This function calculates the number of VSI child nodes based on the
 * number of queues.
 */
static void
ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
{
	u16 num = num_qs;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);

	/* calculate num nodes from queue group to VSI layer */
	for (i = qgl; i > vsil; i--) {
		/* round to the next integer if there is a remainder */
		num = DIVIDE_AND_ROUND_UP(num, hw->max_children[i]);

		/* need at least one node */
		num_nodes[i] = num ? num : 1;
	}
}

/**
 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to the TC node
 * @num_nodes: pointer to the num nodes that needs to be added per layer
 * @owner: node owner (LAN or RDMA)
 *
 * This function adds the VSI child nodes to tree. It gets called for
 * LAN and RDMA separately.
 */
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
			      struct ice_sched_node *tc_node, u16 *num_nodes,
			      u8 owner)
{
	struct ice_sched_node *parent, *node;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);
	parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	for (i = vsil + 1; i <= qgl; i++) {
		if (!parent)
			return ICE_ERR_CFG;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* tag the new node and its siblings with the owner */
			node = parent;
			while (node) {
				node->owner = owner;
				node = node->sibling;
			}
		} else {
			/* nothing added at this layer; descend into the
			 * existing first child for the next layer
			 */
			parent = parent->children[0];
		}
	}

	return ICE_SUCCESS;
}

/**
 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
 * @pi: pointer to the port info structure
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function calculates the number of supported nodes needed to add this
 * VSI into Tx tree including the VSI, parent and intermediate nodes in below
 * layers
 */
static void
ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
				 struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *node;
	u8 vsil;
	int i;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
		/* Add intermediate nodes if TC has no children and
		 * need at least one node for VSI
		 */
		if (!tc_node->num_children || i == vsil) {
			num_nodes[i]++;
		} else {
			/* If intermediate nodes are reached max children
			 * then add a new one.
			 */
			node = ice_sched_get_first_node(pi, tc_node, (u8)i);
			/* scan all the siblings */
			while (node) {
				if (node->num_children <
				    pi->hw->max_children[i])
					break;
				node = node->sibling;
			}

			/* tree has one intermediate node to add this new VSI.
			 * So no need to calculate supported nodes for below
			 * layers.
			 */
			if (node)
				break;
			/* all the nodes are full, allocate a new one */
			num_nodes[i]++;
		}
}

/**
 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function adds the VSI supported nodes into Tx tree including the
 * VSI, its parent and intermediate nodes in below layers
 */
static enum ice_status
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
				struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *parent = tc_node;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, vsil;

	if (!pi)
		return ICE_ERR_PARAM;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      i, num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;

		/* the node at the VSI layer carries the VSI handle */
		if (i == vsil)
			parent->vsi_handle = vsi_handle;
	}

	return ICE_SUCCESS;
}

/**
 * ice_sched_add_vsi_to_topo - add a new VSI into tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 *
 * This function adds a new VSI into scheduler tree
 */
static enum ice_status
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *tc_node;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;

	/* calculate number of supported nodes needed for this VSI */
	ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);

	/* add VSI supported nodes to TC subtree */
	return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
					       num_nodes);
}

/**
 * ice_sched_update_vsi_child_nodes - update VSI child nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @new_numqs: new number of max queues
 * @owner: owner of this subtree
 *
 * This function updates the VSI child nodes based on the number of queues
 */
static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
				 u8 tc, u16 new_numqs, u8 owner)
{
	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *vsi_node;
	struct ice_sched_node *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u16 prev_numqs;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return ICE_ERR_CFG;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;

	prev_numqs = vsi_ctx->sched.max_lanq[tc];
	/* num queues are not changed or less than the previous number */
	if (new_numqs <= prev_numqs)
		return status;
	status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
	if (status)
		return status;

	if (new_numqs)
		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
	/* Keep the max number of queue configuration all the time. Update the
	 * tree only if number of queues > previous number of queues. This may
	 * leave some extra nodes in the tree if number of queues < previous
	 * number but that wouldn't harm anything. Removing those extra nodes
	 * may complicate the code if those nodes are part of SRL or
	 * individually rate limited.
	 */
	status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
					       new_num_nodes, owner);
	if (status)
		return status;
	vsi_ctx->sched.max_lanq[tc] = new_numqs;

	return ICE_SUCCESS;
}

/**
 * ice_sched_cfg_vsi - configure the new/existing VSI
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @maxqs: max number of queues
 * @owner: LAN or RDMA
 * @enable: TC enabled or disabled
 *
 * This function adds/updates VSI nodes based on the number of queues. If TC is
 * enabled and VSI is in suspended state then resume the VSI back. If TC is
 * disabled then suspend the VSI if it is not already.
 */
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
		  u8 owner, bool enable)
{
	struct ice_sched_node *vsi_node, *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;

	ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;
	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);

	/* suspend the VSI if TC is not enabled */
	if (!enable) {
		if (vsi_node && vsi_node->in_use) {
			u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);

			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
								true);
			if (!status)
				vsi_node->in_use = false;
		}
		return status;
	}

	/* TC is enabled, if it is a new VSI then add it to the tree */
	if (!vsi_node) {
		status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
		if (status)
			return status;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			return ICE_ERR_CFG;

		vsi_ctx->sched.vsi_node[tc] = vsi_node;
		vsi_node->in_use = true;
		/* invalidate the max queues whenever VSI gets added first time
		 * into the scheduler tree (boot or after reset). We need to
		 * recreate the child nodes all the time in these cases.
		 */
		vsi_ctx->sched.max_lanq[tc] = 0;
	}

	/* update the VSI child nodes */
	status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
						  owner);
	if (status)
		return status;

	/* TC is enabled, resume the VSI if it is in the suspend state */
	if (!vsi_node->in_use) {
		u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);

		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
		if (!status)
			vsi_node->in_use = true;
	}

	return status;
}

/**
 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function removes single aggregator VSI info entry from
 * aggregator list.
 */
static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &pi->hw->agg_list,
				 ice_sched_agg_info,
				 list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list,
					 ice_sched_agg_vsi_info, list_entry)
			if (agg_vsi_info->vsi_handle == vsi_handle) {
				LIST_DEL(&agg_vsi_info->list_entry);
				ice_free(pi->hw, agg_vsi_info);
				return;
			}
	}
}

/**
 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
 * @node: pointer to the sub-tree node
 *
 * This function checks for a leaf node presence in a given sub-tree node.
 */
static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
{
	u8 i;

	for (i = 0; i < node->num_children; i++)
		if (ice_sched_is_leaf_node_present(node->children[i]))
			return true;
	/* check for a leaf node */
	return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
}

/**
 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @owner: LAN or RDMA
 *
 * This function removes the VSI and its LAN or RDMA children nodes from the
 * scheduler tree.
 */
static enum ice_status
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_vsi_ctx *vsi_ctx;
	u8 i;

	ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return status;
	ice_acquire_lock(&pi->sched_lock);
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		goto exit_sched_rm_vsi_cfg;

	ice_for_each_traffic_class(i) {
		struct ice_sched_node *vsi_node, *tc_node;
		u8 j = 0;

		tc_node = ice_sched_get_tc_node(pi, i);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		/* refuse removal while queues are still attached below */
		if (ice_sched_is_leaf_node_present(vsi_node)) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
			status = ICE_ERR_IN_USE;
			goto exit_sched_rm_vsi_cfg;
		}
		while (j < vsi_node->num_children) {
			if (vsi_node->children[j]->owner == owner) {
				ice_free_sched_node(pi, vsi_node->children[j]);

				/* reset the counter again since the num
				 * children will be updated after node removal
				 */
				j = 0;
			} else {
				j++;
			}
		}
		/* remove the VSI if it has no children */
 * This function retrieves the tree topology from the firmware for a given
 * node TEID to the root node.
 */
enum ice_status
ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
			  struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
			  struct ice_sq_cd *cd)
{
	struct ice_aqc_query_node_to_root *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.query_node_to_root;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_node_to_root);
	cmd->teid = CPU_TO_LE32(node_teid);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_get_agg_info - get the aggregator ID
 * @hw: pointer to the hardware structure
 * @agg_id: aggregator ID
 *
 * This function validates aggregator ID. The function returns info if
 * aggregator ID is present in list otherwise it returns null.
 */
static struct ice_sched_agg_info *
ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
{
	struct ice_sched_agg_info *agg_info;

	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id)
			return agg_info;

	return NULL;
}

/**
 * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
 * @hw: pointer to the HW struct
 * @node: pointer to a child node
 * @num_nodes: num nodes count array
 *
 * This function walks through the aggregator subtree to find a free parent
 * node. As a side effect it clears num_nodes[layer] for layers that already
 * have space, so the caller does not allocate new nodes there.
 */
static struct ice_sched_node *
ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
			      u16 *num_nodes)
{
	u8 l = node->tx_sched_layer;
	u8 vsil, i;

	vsil = ice_sched_get_vsi_layer(hw);

	/* Is it VSI parent layer ? */
	if (l == vsil - 1)
		return (node->num_children < hw->max_children[l]) ? node : NULL;

	/* We have intermediate nodes. Let's walk through the subtree. If the
	 * intermediate node has space to add a new node then clear the count
	 */
	if (node->num_children < hw->max_children[l])
		num_nodes[l] = 0;
	/* The below recursive call is intentional and wouldn't go more than
	 * 2 or 3 iterations.
	 */

	for (i = 0; i < node->num_children; i++) {
		struct ice_sched_node *parent;

		parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
						       num_nodes);
		if (parent)
			return parent;
	}

	return NULL;
}

/**
 * ice_sched_update_parent - update the new parent in SW DB
 * @new_parent: pointer to a new parent node
 * @node: pointer to a child node
 *
 * This function removes the child from the old parent and adds it to a new
 * parent
 */
static void
ice_sched_update_parent(struct ice_sched_node *new_parent,
			struct ice_sched_node *node)
{
	struct ice_sched_node *old_parent;
	u8 i, j;

	old_parent = node->parent;

	/* update the old parent children */
	for (i = 0; i < old_parent->num_children; i++)
		if (old_parent->children[i] == node) {
			/* shift remaining children left to fill the gap */
			for (j = i + 1; j < old_parent->num_children; j++)
				old_parent->children[j - 1] =
					old_parent->children[j];
			old_parent->num_children--;
			break;
		}

	/* now move the node to a new parent */
	new_parent->children[new_parent->num_children++] = node;
	node->parent = new_parent;
	node->info.parent_teid = new_parent->info.node_teid;
}

/**
 * ice_sched_move_nodes - move child nodes to a given parent
 * @pi: port information structure
 * @parent: pointer to parent node
 * @num_items: number of child nodes to be moved
 * @list: pointer to child node teids
 *
 * This function moves the child nodes to a given parent.
 */
static enum ice_status
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
		     u16 num_items, u32 *list)
{
	struct ice_aqc_move_elem *buf;
	struct ice_sched_node *node;
	enum ice_status status = ICE_SUCCESS;
	u16 i, grps_movd = 0;
	struct ice_hw *hw;
	u16 buf_len;

	hw = pi->hw;

	if (!parent || !num_items)
		return ICE_ERR_PARAM;

	/* Does parent have enough space */
	if (parent->num_children + num_items >
	    hw->max_children[parent->tx_sched_layer])
		return ICE_ERR_AQ_FULL;

	/* one TEID per AQ move command; the buffer is reused per item */
	buf_len = ice_struct_size(buf, teid, 1);
	buf = (struct ice_aqc_move_elem *)ice_malloc(hw, buf_len);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < num_items; i++) {
		node = ice_sched_find_node_by_teid(pi->root, list[i]);
		if (!node) {
			status = ICE_ERR_PARAM;
			goto move_err_exit;
		}

		buf->hdr.src_parent_teid = node->info.parent_teid;
		buf->hdr.dest_parent_teid = parent->info.node_teid;
		buf->teid[0] = node->info.node_teid;
		buf->hdr.num_elems = CPU_TO_LE16(1);
		status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
						 &grps_movd, NULL);
		/* NOTE(review): this only bails out when the AQ call failed
		 * AND grps_movd != 1; an AQ error with grps_movd == 1 falls
		 * through and updates the SW DB - confirm this is intended
		 */
		if (status && grps_movd != 1) {
			status = ICE_ERR_CFG;
			goto move_err_exit;
		}

		/* update the SW DB */
		ice_sched_update_parent(parent, node);
	}

move_err_exit:
	ice_free(hw, buf);
	return status;
}

/**
 * ice_sched_move_vsi_to_agg - move VSI to aggregator node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function moves a VSI to an aggregator node or its subtree.
 * Intermediate nodes may be created if required.
 */
static enum ice_status
ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
			  u8 tc)
{
	struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	u32 first_node_teid, vsi_teid;
	enum ice_status status;
	u16 num_nodes_added;
	u8 aggl, vsil, i;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		return ICE_ERR_DOES_NOT_EXIST;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Is this VSI already part of given aggregator? */
	if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
		return ICE_SUCCESS;

	aggl = ice_sched_get_agg_layer(pi->hw);
	vsil = ice_sched_get_vsi_layer(pi->hw);

	/* set intermediate node count to 1 between aggregator and VSI layers */
	for (i = aggl + 1; i < vsil; i++)
		num_nodes[i] = 1;

	/* Check if the aggregator subtree has any free node to add the VSI */
	for (i = 0; i < agg_node->num_children; i++) {
		parent = ice_sched_get_free_vsi_parent(pi->hw,
						       agg_node->children[i],
						       num_nodes);
		if (parent)
			goto move_nodes;
	}

	/* add new nodes */
	parent = agg_node;
	for (i = aggl + 1; i < vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;
	}

move_nodes:
	vsi_teid = LE32_TO_CPU(vsi_node->info.node_teid);
	return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
}

/**
 * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
 * @pi: port information structure
 * @agg_info: aggregator info
 * @tc: traffic class number
 * @rm_vsi_info: true or false
 *
 * This function move all the VSI(s) to the default aggregator and delete
 * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The
 * caller holds the scheduler lock.
 */
static enum ice_status
ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
			     struct ice_sched_agg_info *agg_info, u8 tc,
			     bool rm_vsi_info)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_agg_vsi_info *tmp;
	enum ice_status status = ICE_SUCCESS;

	LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
				 ice_sched_agg_vsi_info, list_entry) {
		u16 vsi_handle = agg_vsi_info->vsi_handle;

		/* Move VSI to default aggregator */
		if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
			continue;

		status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
						   ICE_DFLT_AGG_ID, tc);
		if (status)
			break;

		ice_clear_bit(tc, agg_vsi_info->tc_bitmap);
		/* free the entry once it no longer covers any TC */
		if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
			LIST_DEL(&agg_vsi_info->list_entry);
			ice_free(pi->hw, agg_vsi_info);
		}
	}

	return status;
}

/**
 * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
 * @pi: port information structure
 * @node: node pointer
 *
 * This function checks whether the aggregator is attached with any VSI or not.
 */
static bool
ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
{
	u8 vsil, i;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	if (node->tx_sched_layer < vsil - 1) {
		/* intermediate layer: recurse into all children */
		for (i = 0; i < node->num_children; i++)
			if (ice_sched_is_agg_inuse(pi, node->children[i]))
				return true;
		return false;
	} else {
		/* VSI parent layer: any child means a VSI is attached */
		return node->num_children ? true : false;
	}
}

/**
 * ice_sched_rm_agg_cfg - remove the aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function removes the aggregator node and intermediate nodes if any
 * from the given TC
 */
static enum ice_status
ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	struct ice_sched_node *tc_node, *agg_node;
	struct ice_hw *hw = pi->hw;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Can't remove the aggregator node if it has children */
	if (ice_sched_is_agg_inuse(pi, agg_node))
		return ICE_ERR_IN_USE;

	/* need to remove the whole subtree if aggregator node is the
	 * only child.
	 */
	while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
		struct ice_sched_node *parent = agg_node->parent;

		if (!parent)
			return ICE_ERR_CFG;

		/* parent has other children - stop climbing here */
		if (parent->num_children > 1)
			break;

		agg_node = parent;
	}

	/* frees agg_node and everything below it */
	ice_free_sched_node(pi, agg_node);
	return ICE_SUCCESS;
}

/**
 * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
 * @pi: port information structure
 * @agg_info: aggregator info
 * @tc: TC number
 * @rm_vsi_info: bool value true or false
 *
 * This function removes aggregator reference to VSI of given TC. It removes
 * the aggregator configuration completely for requested TC. The caller needs
 * to hold the scheduler lock.
 */
static enum ice_status
ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
		  u8 tc, bool rm_vsi_info)
{
	enum ice_status status = ICE_SUCCESS;

	/* If nothing to remove - return success */
	if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
		goto exit_rm_agg_cfg_tc;

	status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info);
	if (status)
		goto exit_rm_agg_cfg_tc;

	/* Delete aggregator node(s) */
	status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc);
	if (status)
		goto exit_rm_agg_cfg_tc;

	ice_clear_bit(tc, agg_info->tc_bitmap);
exit_rm_agg_cfg_tc:
	return status;
}

/**
 * ice_save_agg_tc_bitmap - save aggregator TC bitmap
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc_bitmap: 8 bits TC bitmap
 *
 * Save aggregator TC bitmap. This function needs to be called with scheduler
 * lock held.
 */
static enum ice_status
ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
		       ice_bitmap_t *tc_bitmap)
{
	struct ice_sched_agg_info *agg_info;

	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info)
		return ICE_ERR_PARAM;
	/* stored in the replay bitmap (per field name) for later replay use */
	ice_cp_bitmap(agg_info->replay_tc_bitmap, tc_bitmap,
		      ICE_MAX_TRAFFIC_CLASS);
	return ICE_SUCCESS;
}

/**
 * ice_sched_add_agg_cfg - create an aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function creates an aggregator node and intermediate nodes if required
 * for the given TC
 */
static enum ice_status
ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	struct ice_sched_node *parent, *agg_node, *tc_node;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u32 first_node_teid;
	u16 num_nodes_added;
	u8 i, aggl;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	/* Does Agg node already exist ? */
	if (agg_node)
		return status;

	aggl = ice_sched_get_agg_layer(hw);

	/* need one node in Agg layer */
	num_nodes[aggl] = 1;

	/* Check whether the intermediate nodes have space to add the
	 * new aggregator. If they are full, then SW needs to allocate a new
	 * intermediate node on those layers
	 */
	for (i = hw->sw_entry_point_layer; i < aggl; i++) {
		parent = ice_sched_get_first_node(pi, tc_node, i);

		/* scan all the siblings */
		while (parent) {
			if (parent->num_children < hw->max_children[i])
				break;
			parent = parent->sibling;
		}

		/* all the nodes are full, reserve one for this layer */
		if (!parent)
			num_nodes[i]++;
	}

	/* add the aggregator node */
	parent = tc_node;
	for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
		if (!parent)
			return ICE_ERR_CFG;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* register aggregator ID with the aggregator node */
			if (parent && i == aggl)
				parent->agg_id = agg_id;
		} else {
			parent = parent->children[0];
		}
	}

	return ICE_SUCCESS;
}

/**
 * ice_sched_cfg_agg - configure aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @agg_type: aggregator type queue, VSI, or aggregator group
 * @tc_bitmap: bits TC bitmap
 *
 * It registers a unique aggregator node into scheduler services. It
 * allows a user to register with a unique ID to track it's resources.
 * The aggregator type determines if this is a queue group, VSI group
 * or aggregator group. It then creates the aggregator node(s) for requested
 * TC(s) or removes an existing aggregator node including its configuration
 * if indicated via tc_bitmap.
Call ice_rm_agg_cfg to release aggregator
 * resources and remove aggregator ID.
 * This function needs to be called with scheduler lock held.
 */
static enum ice_status
ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
		  enum ice_agg_type agg_type, ice_bitmap_t *tc_bitmap)
{
	struct ice_sched_agg_info *agg_info;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u8 tc;

	agg_info = ice_get_agg_info(hw, agg_id);
	if (!agg_info) {
		/* Create new entry for new aggregator ID */
		agg_info = (struct ice_sched_agg_info *)
			ice_malloc(hw, sizeof(*agg_info));
		if (!agg_info)
			return ICE_ERR_NO_MEMORY;

		agg_info->agg_id = agg_id;
		agg_info->agg_type = agg_type;
		agg_info->tc_bitmap[0] = 0;

		/* Initialize the aggregator VSI list head */
		INIT_LIST_HEAD(&agg_info->agg_vsi_list);

		/* Add new entry in aggregator list */
		LIST_ADD(&agg_info->list_entry, &hw->agg_list);
	}
	/* Create aggregator node(s) for requested TC(s) */
	ice_for_each_traffic_class(tc) {
		if (!ice_is_tc_ena(*tc_bitmap, tc)) {
			/* Delete aggregator cfg TC if it exists previously */
			status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
			if (status)
				break;
			continue;
		}

		/* Check if aggregator node for TC already exists */
		if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
			continue;

		/* Create new aggregator node for TC */
		status = ice_sched_add_agg_cfg(pi, agg_id, tc);
		if (status)
			break;

		/* Save aggregator node's TC information */
		ice_set_bit(tc, agg_info->tc_bitmap);
	}

	return status;
}

/**
 * ice_cfg_agg - config aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @agg_type: aggregator type queue, VSI, or aggregator group
 * @tc_bitmap: bits TC bitmap
 *
 * This function configures aggregator node(s).
 */
enum ice_status
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
	    u8 tc_bitmap)
{
	ice_bitmap_t bitmap = tc_bitmap;
	enum ice_status status;

	ice_acquire_lock(&pi->sched_lock);
	status = ice_sched_cfg_agg(pi, agg_id, agg_type,
				   (ice_bitmap_t *)&bitmap);
	if (!status)
		status = ice_save_agg_tc_bitmap(pi, agg_id,
						(ice_bitmap_t *)&bitmap);
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_get_agg_vsi_info - get the aggregator VSI info
 * @agg_info: aggregator info
 * @vsi_handle: software VSI handle
 *
 * The function returns aggregator VSI info based on VSI handle. This function
 * needs to be called with scheduler lock held.
 */
static struct ice_sched_agg_vsi_info *
ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;

	LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
			    ice_sched_agg_vsi_info, list_entry)
		if (agg_vsi_info->vsi_handle == vsi_handle)
			return agg_vsi_info;

	return NULL;
}

/**
 * ice_get_vsi_agg_info - get the aggregator info of VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: Sw VSI handle
 *
 * The function returns aggregator info of VSI represented via vsi_handle. The
 * VSI has in this case a different aggregator than the default one. This
 * function needs to be called with scheduler lock held.
 */
static struct ice_sched_agg_info *
ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_sched_agg_info *agg_info;

	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;

		agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
		if (agg_vsi_info)
			return agg_info;
	}
	return NULL;
}

/**
 * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap of enabled TC(s)
 *
 * Save VSI to aggregator TC bitmap. This function needs to call with scheduler
 * lock held.
 */
static enum ice_status
ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
			   ice_bitmap_t *tc_bitmap)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_agg_info *agg_info;

	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info)
		return ICE_ERR_PARAM;
	/* check if entry already exist */
	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
	if (!agg_vsi_info)
		return ICE_ERR_PARAM;
	ice_cp_bitmap(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
		      ICE_MAX_TRAFFIC_CLASS);
	return ICE_SUCCESS;
}

/**
 * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap of enabled TC(s)
 *
 * This function moves VSI to a new or default aggregator node. If VSI is
 * already associated to the aggregator node then no operation is performed on
 * the tree. This function needs to be called with scheduler lock held.
 */
static enum ice_status
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
			   u16 vsi_handle, ice_bitmap_t *tc_bitmap)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_agg_info *agg_info;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	agg_info = ice_get_agg_info(hw, agg_id);
	if (!agg_info)
		return ICE_ERR_PARAM;
	/* check if entry already exist */
	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
	if (!agg_vsi_info) {
		/* Create new entry for VSI under aggregator list */
		agg_vsi_info = (struct ice_sched_agg_vsi_info *)
			ice_malloc(hw, sizeof(*agg_vsi_info));
		/* NOTE(review): ICE_ERR_NO_MEMORY would describe this
		 * allocation failure better than ICE_ERR_PARAM
		 */
		if (!agg_vsi_info)
			return ICE_ERR_PARAM;

		/* add VSI ID into the aggregator list */
		agg_vsi_info->vsi_handle = vsi_handle;
		LIST_ADD(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list);
	}
	/* Move VSI node to new aggregator node for requested TC(s) */
	ice_for_each_traffic_class(tc) {
		if (!ice_is_tc_ena(*tc_bitmap, tc))
			continue;

		/* Move VSI to new aggregator */
		status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc);
		if (status)
			break;

		ice_set_bit(tc, agg_vsi_info->tc_bitmap);
	}
	return status;
}

/**
 * ice_sched_rm_unused_rl_prof - remove unused RL profile
 * @hw: pointer to the hardware structure
 *
 * This function removes unused rate limit profiles from the HW and
 * SW DB. The caller needs to hold scheduler lock.
 */
static void ice_sched_rm_unused_rl_prof(struct ice_hw *hw)
{
	u16 ln;

	for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
		struct ice_aqc_rl_profile_info *rl_prof_elem;
		struct ice_aqc_rl_profile_info *rl_prof_tmp;

		LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
					 &hw->rl_prof_list[ln],
					 ice_aqc_rl_profile_info, list_entry) {
			/* best effort: a non-zero return is silently skipped,
			 * presumably because the profile is still referenced
			 */
			if (!ice_sched_del_rl_profile(hw, rl_prof_elem))
				ice_debug(hw, ICE_DBG_SCHED, "Removed rl profile\n");
		}
	}
}

/**
 * ice_sched_update_elem - update element
 * @hw: pointer to the HW struct
 * @node: pointer to node
 * @info: node info to update
 *
 * Update the HW DB, and local SW DB of node. Update the scheduling
 * parameters of node from argument info data buffer (Info->data buf) and
 * returns success or error on config sched element failure. The caller
 * needs to hold scheduler lock.
 */
static enum ice_status
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
		      struct ice_aqc_txsched_elem_data *info)
{
	struct ice_aqc_txsched_elem_data buf;
	enum ice_status status;
	u16 elem_cfgd = 0;
	u16 num_elems = 1;

	buf = *info;
	/* Parent TEID is reserved field in this aq call */
	buf.parent_teid = 0;
	/* Element type is reserved field in this aq call */
	buf.data.elem_type = 0;
	/* Flags is reserved field in this aq call */
	buf.data.flags = 0;

	/* Update HW DB */
	/* Configure element node */
	status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
					&elem_cfgd, NULL);
	if (status || elem_cfgd != num_elems) {
		ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
		return ICE_ERR_CFG;
	}

	/* Config success case */
	/* Now update local SW DB */
	/* Only copy the data portion of info buffer */
	node->info.data = info->data;
	return status;
}

/** 2956 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params 2957 * @hw: pointer to the HW struct 2958 * @node: sched node to configure 2959 * @rl_type: rate limit type CIR, EIR, or shared 2960 * @bw_alloc: BW weight/allocation 2961 * 2962 * This function configures node element's BW allocation. 2963 */ 2964 static enum ice_status 2965 ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, 2966 enum ice_rl_type rl_type, u16 bw_alloc) 2967 { 2968 struct ice_aqc_txsched_elem_data buf; 2969 struct ice_aqc_txsched_elem *data; 2970 enum ice_status status; 2971 2972 buf = node->info; 2973 data = &buf.data; 2974 if (rl_type == ICE_MIN_BW) { 2975 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; 2976 data->cir_bw.bw_alloc = CPU_TO_LE16(bw_alloc); 2977 } else if (rl_type == ICE_MAX_BW) { 2978 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; 2979 data->eir_bw.bw_alloc = CPU_TO_LE16(bw_alloc); 2980 } else { 2981 return ICE_ERR_PARAM; 2982 } 2983 2984 /* Configure element */ 2985 status = ice_sched_update_elem(hw, node, &buf); 2986 return status; 2987 } 2988 2989 /** 2990 * ice_move_vsi_to_agg - moves VSI to new or default aggregator 2991 * @pi: port information structure 2992 * @agg_id: aggregator ID 2993 * @vsi_handle: software VSI handle 2994 * @tc_bitmap: TC bitmap of enabled TC(s) 2995 * 2996 * Move or associate VSI to a new or default aggregator node. 
 */
enum ice_status
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
		    u8 tc_bitmap)
{
	ice_bitmap_t bitmap = tc_bitmap;
	enum ice_status status;

	ice_acquire_lock(&pi->sched_lock);
	status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
					    (ice_bitmap_t *)&bitmap);
	if (!status)
		status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle,
						    (ice_bitmap_t *)&bitmap);
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_rm_agg_cfg - remove aggregator configuration
 * @pi: port information structure
 * @agg_id: aggregator ID
 *
 * This function removes aggregator reference to VSI and delete aggregator ID
 * info. It removes the aggregator configuration completely.
 */
enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id)
{
	struct ice_sched_agg_info *agg_info;
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	ice_acquire_lock(&pi->sched_lock);
	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit_ice_rm_agg_cfg;
	}

	/* remove per-TC config; also frees the VSI info entries (true) */
	ice_for_each_traffic_class(tc) {
		status = ice_rm_agg_cfg_tc(pi, agg_info, tc, true);
		if (status)
			goto exit_ice_rm_agg_cfg;
	}

	if (ice_is_any_bit_set(agg_info->tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) {
		status = ICE_ERR_IN_USE;
		goto exit_ice_rm_agg_cfg;
	}

	/* Safe to delete entry now */
	LIST_DEL(&agg_info->list_entry);
	ice_free(pi->hw, agg_info);

	/* Remove unused RL profile IDs from HW and SW DB */
	ice_sched_rm_unused_rl_prof(pi->hw);

exit_ice_rm_agg_cfg:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_set_clear_cir_bw_alloc - set or clear CIR BW alloc information
 * @bw_t_info: bandwidth type information structure
 * @bw_alloc: Bandwidth allocation
information 3063 * 3064 * Save or clear CIR BW alloc information (bw_alloc) in the passed param 3065 * bw_t_info. 3066 */ 3067 static void 3068 ice_set_clear_cir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc) 3069 { 3070 bw_t_info->cir_bw.bw_alloc = bw_alloc; 3071 if (bw_t_info->cir_bw.bw_alloc) 3072 ice_set_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap); 3073 else 3074 ice_clear_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap); 3075 } 3076 3077 /** 3078 * ice_set_clear_eir_bw_alloc - set or clear EIR BW alloc information 3079 * @bw_t_info: bandwidth type information structure 3080 * @bw_alloc: Bandwidth allocation information 3081 * 3082 * Save or clear EIR BW alloc information (bw_alloc) in the passed param 3083 * bw_t_info. 3084 */ 3085 static void 3086 ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc) 3087 { 3088 bw_t_info->eir_bw.bw_alloc = bw_alloc; 3089 if (bw_t_info->eir_bw.bw_alloc) 3090 ice_set_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap); 3091 else 3092 ice_clear_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap); 3093 } 3094 3095 /** 3096 * ice_sched_save_vsi_bw_alloc - save VSI node's BW alloc information 3097 * @pi: port information structure 3098 * @vsi_handle: sw VSI handle 3099 * @tc: traffic class 3100 * @rl_type: rate limit type min or max 3101 * @bw_alloc: Bandwidth allocation information 3102 * 3103 * Save BW alloc information of VSI type node for post replay use. 
3104 */ 3105 static enum ice_status 3106 ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 3107 enum ice_rl_type rl_type, u16 bw_alloc) 3108 { 3109 struct ice_vsi_ctx *vsi_ctx; 3110 3111 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 3112 return ICE_ERR_PARAM; 3113 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 3114 if (!vsi_ctx) 3115 return ICE_ERR_PARAM; 3116 switch (rl_type) { 3117 case ICE_MIN_BW: 3118 ice_set_clear_cir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc], 3119 bw_alloc); 3120 break; 3121 case ICE_MAX_BW: 3122 ice_set_clear_eir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc], 3123 bw_alloc); 3124 break; 3125 default: 3126 return ICE_ERR_PARAM; 3127 } 3128 return ICE_SUCCESS; 3129 } 3130 3131 /** 3132 * ice_set_clear_cir_bw - set or clear CIR BW 3133 * @bw_t_info: bandwidth type information structure 3134 * @bw: bandwidth in Kbps - Kilo bits per sec 3135 * 3136 * Save or clear CIR bandwidth (BW) in the passed param bw_t_info. 3137 */ 3138 static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) 3139 { 3140 if (bw == ICE_SCHED_DFLT_BW) { 3141 ice_clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); 3142 bw_t_info->cir_bw.bw = 0; 3143 } else { 3144 /* Save type of BW information */ 3145 ice_set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); 3146 bw_t_info->cir_bw.bw = bw; 3147 } 3148 } 3149 3150 /** 3151 * ice_set_clear_eir_bw - set or clear EIR BW 3152 * @bw_t_info: bandwidth type information structure 3153 * @bw: bandwidth in Kbps - Kilo bits per sec 3154 * 3155 * Save or clear EIR bandwidth (BW) in the passed param bw_t_info. 
3156 */ 3157 static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) 3158 { 3159 if (bw == ICE_SCHED_DFLT_BW) { 3160 ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); 3161 bw_t_info->eir_bw.bw = 0; 3162 } else { 3163 /* save EIR BW information */ 3164 ice_set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); 3165 bw_t_info->eir_bw.bw = bw; 3166 } 3167 } 3168 3169 /** 3170 * ice_set_clear_shared_bw - set or clear shared BW 3171 * @bw_t_info: bandwidth type information structure 3172 * @bw: bandwidth in Kbps - Kilo bits per sec 3173 * 3174 * Save or clear shared bandwidth (BW) in the passed param bw_t_info. 3175 */ 3176 static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw) 3177 { 3178 if (bw == ICE_SCHED_DFLT_BW) { 3179 ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); 3180 bw_t_info->shared_bw = 0; 3181 } else { 3182 /* save shared BW information */ 3183 ice_set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); 3184 bw_t_info->shared_bw = bw; 3185 } 3186 } 3187 3188 /** 3189 * ice_sched_save_vsi_bw - save VSI node's BW information 3190 * @pi: port information structure 3191 * @vsi_handle: sw VSI handle 3192 * @tc: traffic class 3193 * @rl_type: rate limit type min, max, or shared 3194 * @bw: bandwidth in Kbps - Kilo bits per sec 3195 * 3196 * Save BW information of VSI type node for post replay use. 
3197 */ 3198 static enum ice_status 3199 ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 3200 enum ice_rl_type rl_type, u32 bw) 3201 { 3202 struct ice_vsi_ctx *vsi_ctx; 3203 3204 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 3205 return ICE_ERR_PARAM; 3206 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 3207 if (!vsi_ctx) 3208 return ICE_ERR_PARAM; 3209 switch (rl_type) { 3210 case ICE_MIN_BW: 3211 ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); 3212 break; 3213 case ICE_MAX_BW: 3214 ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); 3215 break; 3216 case ICE_SHARED_BW: 3217 ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw); 3218 break; 3219 default: 3220 return ICE_ERR_PARAM; 3221 } 3222 return ICE_SUCCESS; 3223 } 3224 3225 /** 3226 * ice_set_clear_prio - set or clear priority information 3227 * @bw_t_info: bandwidth type information structure 3228 * @prio: priority to save 3229 * 3230 * Save or clear priority (prio) in the passed param bw_t_info. 3231 */ 3232 static void ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio) 3233 { 3234 bw_t_info->generic = prio; 3235 if (bw_t_info->generic) 3236 ice_set_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap); 3237 else 3238 ice_clear_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap); 3239 } 3240 3241 /** 3242 * ice_sched_save_vsi_prio - save VSI node's priority information 3243 * @pi: port information structure 3244 * @vsi_handle: Software VSI handle 3245 * @tc: traffic class 3246 * @prio: priority to save 3247 * 3248 * Save priority information of VSI type node for post replay use. 
 */
static enum ice_status
ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			u8 prio)
{
	struct ice_vsi_ctx *vsi_ctx;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* Bound the TC index before indexing bw_t_info[] */
	if (tc >= ICE_MAX_TRAFFIC_CLASS)
		return ICE_ERR_PARAM;
	ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio);
	return ICE_SUCCESS;
}

/**
 * ice_sched_save_agg_bw_alloc - save aggregator node's BW alloc information
 * @pi: port information structure
 * @agg_id: node aggregator ID
 * @tc: traffic class
 * @rl_type: rate limit type min or max
 * @bw_alloc: bandwidth alloc information
 *
 * Save BW alloc information of AGG type node for post replay use.
 */
static enum ice_status
ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc,
			    enum ice_rl_type rl_type, u16 bw_alloc)
{
	struct ice_sched_agg_info *agg_info;

	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info)
		return ICE_ERR_PARAM;
	/* The TC must be enabled for this aggregator */
	if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
		return ICE_ERR_PARAM;
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc);
		break;
	default:
		return ICE_ERR_PARAM;
	}
	return ICE_SUCCESS;
}

/**
 * ice_sched_save_agg_bw - save aggregator node's BW information
 * @pi: port information structure
 * @agg_id: node aggregator ID
 * @tc: traffic class
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save BW information of AGG type node for post replay use.
 */
static enum ice_status
ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
		      enum ice_rl_type rl_type, u32 bw)
{
	struct ice_sched_agg_info *agg_info;

	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info)
		return ICE_ERR_PARAM;
	/* The TC must be enabled for this aggregator */
	if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
		return ICE_ERR_PARAM;
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw(&agg_info->bw_t_info[tc], bw);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw(&agg_info->bw_t_info[tc], bw);
		break;
	case ICE_SHARED_BW:
		ice_set_clear_shared_bw(&agg_info->bw_t_info[tc], bw);
		break;
	default:
		return ICE_ERR_PARAM;
	}
	return ICE_SUCCESS;
}

/**
 * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function configures BW limit of VSI scheduling node based on TC
 * information.
 */
enum ice_status
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			  enum ice_rl_type rl_type, u32 bw)
{
	enum ice_status status;

	status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
						  ICE_AGG_TYPE_VSI,
						  tc, rl_type, bw);
	if (!status) {
		/* Save the applied limit for post-reset replay */
		ice_acquire_lock(&pi->sched_lock);
		status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
		ice_release_lock(&pi->sched_lock);
	}
	return status;
}

/**
 * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class
 * @rl_type: min or max
 *
 * This function configures default BW limit of VSI scheduling node based on TC
 * information.
 */
enum ice_status
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			       enum ice_rl_type rl_type)
{
	enum ice_status status;

	status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
						  ICE_AGG_TYPE_VSI,
						  tc, rl_type,
						  ICE_SCHED_DFLT_BW);
	if (!status) {
		/* Record the default setting for post-reset replay */
		ice_acquire_lock(&pi->sched_lock);
		status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
					       ICE_SCHED_DFLT_BW);
		ice_release_lock(&pi->sched_lock);
	}
	return status;
}

/**
 * ice_cfg_agg_bw_lmt_per_tc - configure aggregator BW limit per TC
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: traffic class
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function applies BW limit to aggregator scheduling node based on TC
 * information.
 */
enum ice_status
ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
			  enum ice_rl_type rl_type, u32 bw)
{
	enum ice_status status;

	status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
						  tc, rl_type, bw);
	if (!status) {
		/* Save the applied limit for post-reset replay */
		ice_acquire_lock(&pi->sched_lock);
		status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
		ice_release_lock(&pi->sched_lock);
	}
	return status;
}

/**
 * ice_cfg_agg_bw_dflt_lmt_per_tc - configure aggregator BW default limit per TC
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: traffic class
 * @rl_type: min or max
 *
 * This function applies default BW limit to aggregator scheduling node based
 * on TC information.
 */
enum ice_status
ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
			       enum ice_rl_type rl_type)
{
	enum ice_status status;

	status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
						  tc, rl_type,
						  ICE_SCHED_DFLT_BW);
	if (!status) {
		/* Record the default setting for post-reset replay */
		ice_acquire_lock(&pi->sched_lock);
		status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type,
					       ICE_SCHED_DFLT_BW);
		ice_release_lock(&pi->sched_lock);
	}
	return status;
}

/**
 * ice_cfg_vsi_bw_shared_lmt - configure VSI BW shared limit
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @min_bw: minimum bandwidth in Kbps
 * @max_bw: maximum bandwidth in Kbps
 * @shared_bw: shared bandwidth in Kbps
 *
 * Configure shared rate limiter(SRL) of all VSI type nodes across all traffic
 * classes for VSI matching handle.
 */
enum ice_status
ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw,
			  u32 max_bw, u32 shared_bw)
{
	return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, min_bw, max_bw,
					       shared_bw);
}

/**
 * ice_cfg_vsi_bw_no_shared_lmt - configure VSI BW for no shared limiter
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function removes the shared rate limiter(SRL) of all VSI type nodes
 * across all traffic classes for VSI matching handle.
3476 */ 3477 enum ice_status 3478 ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle) 3479 { 3480 return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, 3481 ICE_SCHED_DFLT_BW, 3482 ICE_SCHED_DFLT_BW, 3483 ICE_SCHED_DFLT_BW); 3484 } 3485 3486 /** 3487 * ice_cfg_agg_bw_shared_lmt - configure aggregator BW shared limit 3488 * @pi: port information structure 3489 * @agg_id: aggregator ID 3490 * @min_bw: minimum bandwidth in Kbps 3491 * @max_bw: maximum bandwidth in Kbps 3492 * @shared_bw: shared bandwidth in Kbps 3493 * 3494 * This function configures the shared rate limiter(SRL) of all aggregator type 3495 * nodes across all traffic classes for aggregator matching agg_id. 3496 */ 3497 enum ice_status 3498 ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw, 3499 u32 max_bw, u32 shared_bw) 3500 { 3501 return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, min_bw, max_bw, 3502 shared_bw); 3503 } 3504 3505 /** 3506 * ice_cfg_agg_bw_no_shared_lmt - configure aggregator BW for no shared limiter 3507 * @pi: port information structure 3508 * @agg_id: aggregator ID 3509 * 3510 * This function removes the shared rate limiter(SRL) of all aggregator type 3511 * nodes across all traffic classes for aggregator matching agg_id. 3512 */ 3513 enum ice_status 3514 ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id) 3515 { 3516 return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW, 3517 ICE_SCHED_DFLT_BW, 3518 ICE_SCHED_DFLT_BW); 3519 } 3520 3521 /** 3522 * ice_cfg_agg_bw_shared_lmt_per_tc - configure aggregator BW shared limit per tc 3523 * @pi: port information structure 3524 * @agg_id: aggregator ID 3525 * @tc: traffic class 3526 * @min_bw: minimum bandwidth in Kbps 3527 * @max_bw: maximum bandwidth in Kbps 3528 * @shared_bw: shared bandwidth in Kbps 3529 * 3530 * This function configures the shared rate limiter(SRL) of all aggregator type 3531 * nodes across all traffic classes for aggregator matching agg_id. 
 */
enum ice_status
ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
				 u32 min_bw, u32 max_bw, u32 shared_bw)
{
	return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc, min_bw,
						      max_bw, shared_bw);
}

/**
 * ice_cfg_agg_bw_no_shared_lmt_per_tc - remove aggregator BW shared limit per tc
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: traffic class
 *
 * This function removes the shared rate limiter(SRL) of aggregator type
 * nodes for the given traffic class for aggregator matching agg_id.
 */
enum ice_status
ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	/* Passing the default BW for min/max/shared removes the SRL */
	return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc,
						      ICE_SCHED_DFLT_BW,
						      ICE_SCHED_DFLT_BW,
						      ICE_SCHED_DFLT_BW);
}

/**
 * ice_cfg_vsi_q_priority - config VSI queue priority of node
 * @pi: port information structure
 * @num_qs: number of VSI queues
 * @q_ids: queue IDs array
 * @q_prio: queue priority array
 *
 * This function configures the queue node priority (Sibling Priority) of the
 * passed in VSI's queue(s) for a given traffic class (TC).
 */
enum ice_status
ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
		       u8 *q_prio)
{
	enum ice_status status = ICE_ERR_PARAM;
	u16 i;

	ice_acquire_lock(&pi->sched_lock);

	for (i = 0; i < num_qs; i++) {
		struct ice_sched_node *node;

		/* Queue IDs are node TEIDs; only leaf nodes are queues */
		node = ice_sched_find_node_by_teid(pi->root, q_ids[i]);
		if (!node || node->info.data.elem_type !=
		    ICE_AQC_ELEM_TYPE_LEAF) {
			status = ICE_ERR_PARAM;
			break;
		}
		/* Configure Priority */
		status = ice_sched_cfg_sibl_node_prio(pi, node, q_prio[i]);
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_agg_vsi_priority_per_tc - config aggregator's VSI priority per TC
 * @pi: port information structure
 * @agg_id: Aggregator ID
 * @num_vsis: number of VSI(s)
 * @vsi_handle_arr: array of software VSI handles
 * @node_prio: pointer to node priority
 * @tc: traffic class
 *
 * This function configures the node priority (Sibling Priority) of the
 * passed in VSI's for a given traffic class (TC) of an Aggregator ID.
 */
enum ice_status
ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
				u16 num_vsis, u16 *vsi_handle_arr,
				u8 *node_prio, u8 tc)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_node *tc_node, *agg_node;
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	struct ice_hw *hw = pi->hw;
	u16 i;

	ice_acquire_lock(&pi->sched_lock);
	/* The aggregator must already be known to the SW DB */
	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present)
		goto exit_agg_priority_per_tc;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		goto exit_agg_priority_per_tc;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		goto exit_agg_priority_per_tc;

	/* Cannot set priority for more VSIs than the node can parent */
	if (num_vsis > hw->max_children[agg_node->tx_sched_layer])
		goto exit_agg_priority_per_tc;

	for (i = 0; i < num_vsis; i++) {
		struct ice_sched_node *vsi_node;
		bool vsi_handle_valid = false;
		u16 vsi_handle;

		status = ICE_ERR_PARAM;
		vsi_handle = vsi_handle_arr[i];
		if (!ice_is_vsi_valid(hw, vsi_handle))
			goto exit_agg_priority_per_tc;
		/* Verify child nodes before applying settings */
		LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
				    ice_sched_agg_vsi_info, list_entry)
			if (agg_vsi_info->vsi_handle == vsi_handle) {
				/* cppcheck-suppress unreadVariable */
				vsi_handle_valid = true;
				break;
			}

		if (!vsi_handle_valid)
			goto exit_agg_priority_per_tc;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			goto exit_agg_priority_per_tc;

		/* Only act on VSI nodes under this aggregator's subtree */
		if (ice_sched_find_node_in_subtree(hw, agg_node, vsi_node)) {
			/* Configure Priority */
			status = ice_sched_cfg_sibl_node_prio(pi, vsi_node,
							      node_prio[i]);
			if (status)
				break;
			/* Save for post-reset replay */
			status = ice_sched_save_vsi_prio(pi, vsi_handle, tc,
							 node_prio[i]);
			if (status)
				break;
		}
	}

exit_agg_priority_per_tc:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_bw_alloc - config VSI BW alloc per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @ena_tcmap: enabled TC map
 * @rl_type: Rate limit type CIR/EIR
 * @bw_alloc: Array of BW alloc
 *
 * This function configures the BW allocation of the passed in VSI's
 * node(s) for enabled traffic class.
 */
enum ice_status
ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
		     enum ice_rl_type rl_type, u8 *bw_alloc)
{
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *vsi_node;

		if (!ice_is_tc_ena(ena_tcmap, tc))
			continue;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		status = ice_sched_cfg_node_bw_alloc(pi->hw, vsi_node, rl_type,
						     bw_alloc[tc]);
		if (status)
			break;
		/* Save for post-reset replay */
		status = ice_sched_save_vsi_bw_alloc(pi, vsi_handle, tc,
						     rl_type, bw_alloc[tc]);
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_agg_bw_alloc - config aggregator BW alloc
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @ena_tcmap: enabled TC map
 * @rl_type: rate limit type CIR/EIR
 * @bw_alloc: array of BW alloc
 *
 * This function configures the BW allocation of passed in aggregator for
 * enabled traffic class(s).
 */
enum ice_status
ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap,
		     enum ice_rl_type rl_type, u8 *bw_alloc)
{
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u8 tc;

	ice_acquire_lock(&pi->sched_lock);
	/* The aggregator must already be known to the SW DB */
	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present) {
		status = ICE_ERR_PARAM;
		goto exit_cfg_agg_bw_alloc;
	}

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *agg_node;

		if (!ice_is_tc_ena(ena_tcmap, tc))
			continue;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
		if (!agg_node)
			continue;

		status = ice_sched_cfg_node_bw_alloc(hw, agg_node, rl_type,
						     bw_alloc[tc]);
		if (status)
			break;
		/* Save for post-reset replay */
		status = ice_sched_save_agg_bw_alloc(pi, agg_id, tc, rl_type,
						     bw_alloc[tc]);
		if (status)
			break;
	}

exit_cfg_agg_bw_alloc:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
 * @hw: pointer to the HW struct
 * @bw: bandwidth in Kbps
 *
 * This function calculates the wakeup parameter of RL profile.
 */
static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
{
	s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
	s32 wakeup_f_int;
	u16 wakeup = 0;

	/* Get the wakeup integer value */
	bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE);
	wakeup_int = DIV_64BIT(hw->psm_clk_freq, bytes_per_sec);
	if (wakeup_int > 63) {
		/* Large integer part: bit 15 flags integer-only encoding */
		wakeup = (u16)((1 << 15) | wakeup_int);
	} else {
		/* Calculate fraction value up to 4 decimals
		 * Convert Integer value to a constant multiplier
		 */
		wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
		wakeup_a = DIV_64BIT((s64)ICE_RL_PROF_MULTIPLIER *
				     hw->psm_clk_freq, bytes_per_sec);

		/* Get Fraction value */
		wakeup_f = wakeup_a - wakeup_b;

		/* Round up the Fractional value via Ceil(Fractional value) */
		if (wakeup_f > DIV_64BIT(ICE_RL_PROF_MULTIPLIER, 2))
			wakeup_f += 1;

		wakeup_f_int = (s32)DIV_64BIT(wakeup_f * ICE_RL_PROF_FRACTION,
					      ICE_RL_PROF_MULTIPLIER);
		/* Pack integer part into bits 15:9, fraction into bits 8:0 */
		wakeup |= (u16)(wakeup_int << 9);
		wakeup |= (u16)(0x1ff & wakeup_f_int);
	}

	return wakeup;
}

/**
 * ice_sched_bw_to_rl_profile - convert BW to profile parameters
 * @hw: pointer to the HW struct
 * @bw: bandwidth in Kbps
 * @profile: profile parameters to return
 *
 * This function converts the BW to profile structure format.
 */
static enum ice_status
ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
			   struct ice_aqc_rl_profile_elem *profile)
{
	enum ice_status status = ICE_ERR_PARAM;
	s64 bytes_per_sec, ts_rate, mv_tmp;
	bool found = false;
	s32 encode = 0;
	s64 mv = 0;
	s32 i;

	/* Bw settings range is from 0.5Mb/sec to 100Gb/sec */
	if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
		return status;

	/* Bytes per second from Kbps */
	bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE);

	/* encode is 6 bits but really useful are 5 bits */
	for (i = 0; i < 64; i++) {
		u64 pow_result = BIT_ULL(i);

		ts_rate = DIV_64BIT((s64)hw->psm_clk_freq,
				    pow_result * ICE_RL_PROF_TS_MULTIPLIER);
		if (ts_rate <= 0)
			continue;

		/* Multiplier value */
		mv_tmp = DIV_64BIT(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
				   ts_rate);

		/* Round to the nearest ICE_RL_PROF_MULTIPLIER */
		mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);

		/* First multiplier value greater than the given
		 * accuracy bytes
		 */
		if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
			encode = i;
			found = true;
			break;
		}
	}
	if (found) {
		u16 wm;

		wm = ice_sched_calc_wakeup(hw, bw);
		profile->rl_multiply = CPU_TO_LE16(mv);
		profile->wake_up_calc = CPU_TO_LE16(wm);
		profile->rl_encode = CPU_TO_LE16(encode);
		status = ICE_SUCCESS;
	} else {
		/* No encoding satisfied the accuracy requirement */
		status = ICE_ERR_DOES_NOT_EXIST;
	}

	return status;
}

/**
 * ice_sched_add_rl_profile - add RL profile
 * @hw: pointer to the hardware structure
 * @rl_type: type of rate limit BW - min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 * @layer_num: specifies in which layer to create profile
 *
 * This function first checks the existing list for corresponding BW
 * parameter. If it exists, it returns the associated profile otherwise
 * it creates a new rate limit profile for requested BW, and adds it to
 * the HW DB and local list. It returns the new profile or null on error.
 * The caller needs to hold the scheduler lock.
 */
static struct ice_aqc_rl_profile_info *
ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
			 u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	u16 profiles_added = 0, num_profiles = 1;
	struct ice_aqc_rl_profile_elem *buf;
	enum ice_status status;
	u8 profile_type;

	if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
		return NULL;
	switch (rl_type) {
	case ICE_MIN_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
		break;
	case ICE_MAX_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
		break;
	case ICE_SHARED_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
		break;
	default:
		return NULL;
	}

	if (!hw)
		return NULL;
	/* Reuse an existing profile of the same type and BW if present */
	LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
			    ice_aqc_rl_profile_info, list_entry)
		if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
		    profile_type && rl_prof_elem->bw == bw)
			/* Return existing profile ID info */
			return rl_prof_elem;

	/* Create new profile ID */
	rl_prof_elem = (struct ice_aqc_rl_profile_info *)
		ice_malloc(hw, sizeof(*rl_prof_elem));

	if (!rl_prof_elem)
		return NULL;

	status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
	if (status != ICE_SUCCESS)
		goto exit_add_rl_prof;

	rl_prof_elem->bw = bw;
	/* layer_num is zero relative, and fw expects level from 1 to 9 */
	rl_prof_elem->profile.level = layer_num + 1;
	rl_prof_elem->profile.flags = profile_type;
	rl_prof_elem->profile.max_burst_size = CPU_TO_LE16(hw->max_burst_size);

	/* Create new entry in HW DB */
	buf = &rl_prof_elem->profile;
	status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
				       &profiles_added, NULL);
	if (status || profiles_added != num_profiles)
		goto exit_add_rl_prof;

	/* Good entry - add in the list */
	rl_prof_elem->prof_id_ref = 0;
	LIST_ADD(&rl_prof_elem->list_entry, &hw->rl_prof_list[layer_num]);
	return rl_prof_elem;

exit_add_rl_prof:
	ice_free(hw, rl_prof_elem);
	return NULL;
}

/**
 * ice_sched_cfg_node_bw_lmt - configure node sched params
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @rl_type: rate limit type CIR, EIR, or shared
 * @rl_prof_id: rate limit profile ID
 *
 * This function configures node element's BW limit.
 */
static enum ice_status
ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
			  enum ice_rl_type rl_type, u16 rl_prof_id)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;

	/* Work on a local copy; ice_sched_update_elem commits it to FW */
	buf = node->info;
	data = &buf.data;
	switch (rl_type) {
	case ICE_MIN_BW:
		data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
		data->cir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
		break;
	case ICE_MAX_BW:
		data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
		data->eir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
		break;
	case ICE_SHARED_BW:
		data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
		data->srl_id = CPU_TO_LE16(rl_prof_id);
		break;
	default:
		/* Unknown rate limit type */
		return ICE_ERR_PARAM;
	}

	/* Configure element */
	return ice_sched_update_elem(hw, node, &buf);
}

/**
 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
 * @node: sched node
 * @rl_type: rate limit type
 *
 * If existing profile matches, it returns the corresponding rate
 * limit profile ID, otherwise it returns an invalid ID as
 * error.
 */
static u16
ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
			      enum ice_rl_type rl_type)
{
	u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
	struct ice_aqc_txsched_elem *data;

	data = &node->info.data;
	switch (rl_type) {
	case ICE_MIN_BW:
		/* Profile ID is valid only if its section bit is set */
		if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
			rl_prof_id = LE16_TO_CPU(data->cir_bw.bw_profile_idx);
		break;
	case ICE_MAX_BW:
		if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
			rl_prof_id = LE16_TO_CPU(data->eir_bw.bw_profile_idx);
		break;
	case ICE_SHARED_BW:
		if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
			rl_prof_id = LE16_TO_CPU(data->srl_id);
		break;
	default:
		break;
	}

	return rl_prof_id;
}

/**
 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
 * @pi: port information structure
 * @rl_type: type of rate limit BW - min, max, or shared
 * @layer_index: layer index
 *
 * This function returns requested profile creation layer.
 */
static u8
ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
			    u8 layer_index)
{
	struct ice_hw *hw = pi->hw;

	if (layer_index >= hw->num_tx_sched_layers)
		return ICE_SCHED_INVAL_LAYER_NUM;
	switch (rl_type) {
	case ICE_MIN_BW:
		if (hw->layer_info[layer_index].max_cir_rl_profiles)
			return layer_index;
		break;
	case ICE_MAX_BW:
		if (hw->layer_info[layer_index].max_eir_rl_profiles)
			return layer_index;
		break;
	case ICE_SHARED_BW:
		/* if current layer doesn't support SRL profile creation
		 * then try a layer up or down.
		 */
		if (hw->layer_info[layer_index].max_srl_profiles)
			return layer_index;
		else if (layer_index < hw->num_tx_sched_layers - 1 &&
			 hw->layer_info[layer_index + 1].max_srl_profiles)
			return layer_index + 1;
		else if (layer_index > 0 &&
			 hw->layer_info[layer_index - 1].max_srl_profiles)
			return layer_index - 1;
		break;
	default:
		break;
	}
	return ICE_SCHED_INVAL_LAYER_NUM;
}

/**
 * ice_sched_get_srl_node - get shared rate limit node
 * @node: tree node
 * @srl_layer: shared rate limit layer
 *
 * This function returns SRL node to be used for shared rate limit purpose.
 * The caller needs to hold scheduler lock.
 */
static struct ice_sched_node *
ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
{
	if (srl_layer > node->tx_sched_layer)
		/* SRL layer is below this node: use its first child */
		return node->children[0];
	else if (srl_layer < node->tx_sched_layer)
		/* Node can't be created without a parent. It will always
		 * have a valid parent except root node.
		 */
		return node->parent;
	else
		return node;
}

/**
 * ice_sched_rm_rl_profile - remove RL profile ID
 * @hw: pointer to the hardware structure
 * @layer_num: layer number where profiles are saved
 * @profile_type: profile type like EIR, CIR, or SRL
 * @profile_id: profile ID to remove
 *
 * This function removes rate limit profile from layer 'layer_num' of type
 * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
 * scheduler lock.
 */
static enum ice_status
ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
			u16 profile_id)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	enum ice_status status = ICE_SUCCESS;

	if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
		return ICE_ERR_PARAM;
	/* Check the existing list for RL profile */
	LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
			    ice_aqc_rl_profile_info, list_entry)
		if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
		    profile_type &&
		    LE16_TO_CPU(rl_prof_elem->profile.profile_id) ==
		    profile_id) {
			/* Drop one reference; the profile is only deleted by
			 * ice_sched_del_rl_profile once no references remain.
			 */
			if (rl_prof_elem->prof_id_ref)
				rl_prof_elem->prof_id_ref--;

			/* Remove old profile ID from database */
			status = ice_sched_del_rl_profile(hw, rl_prof_elem);
			if (status && status != ICE_ERR_IN_USE)
				ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
			break;
		}
	/* A still-referenced profile is not an error for the caller */
	if (status == ICE_ERR_IN_USE)
		status = ICE_SUCCESS;
	return status;
}

/**
 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
 * @pi: port information structure
 * @node: pointer to node structure
 * @rl_type: rate limit type min, max, or shared
 * @layer_num: layer number where RL profiles are saved
 *
 * This function configures node element's BW rate limit profile ID of
 * type CIR, EIR, or SRL to default. This function needs to be called
 * with the scheduler lock held.
4184 */ 4185 static enum ice_status 4186 ice_sched_set_node_bw_dflt(struct ice_port_info *pi, 4187 struct ice_sched_node *node, 4188 enum ice_rl_type rl_type, u8 layer_num) 4189 { 4190 enum ice_status status; 4191 struct ice_hw *hw; 4192 u8 profile_type; 4193 u16 rl_prof_id; 4194 u16 old_id; 4195 4196 hw = pi->hw; 4197 switch (rl_type) { 4198 case ICE_MIN_BW: 4199 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; 4200 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; 4201 break; 4202 case ICE_MAX_BW: 4203 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; 4204 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; 4205 break; 4206 case ICE_SHARED_BW: 4207 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; 4208 /* No SRL is configured for default case */ 4209 rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID; 4210 break; 4211 default: 4212 return ICE_ERR_PARAM; 4213 } 4214 /* Save existing RL prof ID for later clean up */ 4215 old_id = ice_sched_get_node_rl_prof_id(node, rl_type); 4216 /* Configure BW scheduling parameters */ 4217 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); 4218 if (status) 4219 return status; 4220 4221 /* Remove stale RL profile ID */ 4222 if (old_id == ICE_SCHED_DFLT_RL_PROF_ID || 4223 old_id == ICE_SCHED_INVAL_PROF_ID) 4224 return ICE_SUCCESS; 4225 4226 return ice_sched_rm_rl_profile(hw, layer_num, profile_type, old_id); 4227 } 4228 4229 /** 4230 * ice_sched_set_node_bw - set node's bandwidth 4231 * @pi: port information structure 4232 * @node: tree node 4233 * @rl_type: rate limit type min, max, or shared 4234 * @bw: bandwidth in Kbps - Kilo bits per sec 4235 * @layer_num: layer number 4236 * 4237 * This function adds new profile corresponding to requested BW, configures 4238 * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile 4239 * ID from local database. The caller needs to hold scheduler lock. 
 */
static enum ice_status
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
		      enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_info;
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_hw *hw = pi->hw;
	u16 old_id, rl_prof_id;

	/* Find or create an RL profile matching the requested BW */
	rl_prof_info = ice_sched_add_rl_profile(hw, rl_type, bw, layer_num);
	if (!rl_prof_info)
		return status;

	rl_prof_id = LE16_TO_CPU(rl_prof_info->profile.profile_id);

	/* Save existing RL prof ID for later clean up */
	old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
	/* Configure BW scheduling parameters */
	status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
	if (status)
		return status;

	/* New changes have been applied */
	/* Increment the profile ID reference count */
	rl_prof_info->prof_id_ref++;

	/* Check for old ID removal: skip if the node had the default
	 * (non-shared), an invalid, or the very same profile.
	 */
	if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
	    old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
		return ICE_SUCCESS;

	return ice_sched_rm_rl_profile(hw, layer_num,
				       rl_prof_info->profile.flags &
				       ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}

/**
 * ice_sched_set_node_bw_lmt - set node's BW limit
 * @pi: port information structure
 * @node: tree node
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * It updates node's BW limit parameters like BW RL profile ID of type CIR,
 * EIR, or SRL. The caller needs to hold scheduler lock.
 *
 * NOTE: Caller provides the correct SRL node in case of shared profile
 * settings.
 */
static enum ice_status
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
			  enum ice_rl_type rl_type, u32 bw)
{
	struct ice_hw *hw;
	u8 layer_num;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	/* Remove unused RL profile IDs from HW and SW DB */
	ice_sched_rm_unused_rl_prof(hw);

	layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
						node->tx_sched_layer);
	if (layer_num >= hw->num_tx_sched_layers)
		return ICE_ERR_PARAM;

	if (bw == ICE_SCHED_DFLT_BW)
		return ice_sched_set_node_bw_dflt(pi, node, rl_type, layer_num);
	return ice_sched_set_node_bw(pi, node, rl_type, bw, layer_num);
}

/**
 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
 * @pi: port information structure
 * @node: pointer to node structure
 * @rl_type: rate limit type min, max, or shared
 *
 * This function configures node element's BW rate limit profile ID of
 * type CIR, EIR, or SRL to default. This function needs to be called
 * with the scheduler lock held.
 */
static enum ice_status
ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
			       struct ice_sched_node *node,
			       enum ice_rl_type rl_type)
{
	return ice_sched_set_node_bw_lmt(pi, node, rl_type,
					 ICE_SCHED_DFLT_BW);
}

/**
 * ice_sched_validate_srl_node - Check node for SRL applicability
 * @node: sched node to configure
 * @sel_layer: selected SRL layer
 *
 * This function checks if the SRL can be applied to a selected layer node on
 * behalf of the requested node (first argument). This function needs to be
 * called with scheduler lock held.
 */
static enum ice_status
ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
{
	/* SRL profiles are not available on all layers. Check if the
	 * SRL profile can be applied to a node above or below the
	 * requested node. SRL configuration is possible only if the
	 * selected layer's node has single child.
	 */
	if (sel_layer == node->tx_sched_layer ||
	    ((sel_layer == node->tx_sched_layer + 1) &&
	    node->num_children == 1) ||
	    ((sel_layer == node->tx_sched_layer - 1) &&
	    (node->parent && node->parent->num_children == 1)))
		return ICE_SUCCESS;

	return ICE_ERR_CFG;
}

/**
 * ice_sched_save_q_bw - save queue node's BW information
 * @q_ctx: queue context structure
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save BW information of queue type node for post replay use.
 */
static enum ice_status
ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
{
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
		break;
	case ICE_SHARED_BW:
		ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
		break;
	default:
		return ICE_ERR_PARAM;
	}
	return ICE_SUCCESS;
}

/**
 * ice_sched_set_q_bw_lmt - sets queue BW limit
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @q_handle: software queue handle
 * @rl_type: min, max, or shared
 * @bw: bandwidth in Kbps
 *
 * This function sets BW limit of queue scheduling node.
 */
static enum ice_status
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		       u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_node *node;
	struct ice_q_ctx *q_ctx;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	ice_acquire_lock(&pi->sched_lock);
	q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
	if (!q_ctx)
		goto exit_q_bw_lmt;
	node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
	if (!node) {
		ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
		goto exit_q_bw_lmt;
	}

	/* Return error if it is not a leaf node */
	if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
		goto exit_q_bw_lmt;

	/* SRL bandwidth layer selection */
	if (rl_type == ICE_SHARED_BW) {
		u8 sel_layer; /* selected layer */

		sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
							node->tx_sched_layer);
		if (sel_layer >= pi->hw->num_tx_sched_layers) {
			status = ICE_ERR_PARAM;
			goto exit_q_bw_lmt;
		}
		status = ice_sched_validate_srl_node(node, sel_layer);
		if (status)
			goto exit_q_bw_lmt;
	}

	if (bw == ICE_SCHED_DFLT_BW)
		status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
	else
		status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);

	/* Save the BW for replay after reset only on success */
	if (!status)
		status = ice_sched_save_q_bw(q_ctx, rl_type, bw);

exit_q_bw_lmt:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_q_bw_lmt - configure queue BW limit
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @q_handle: software queue handle
 * @rl_type: min, max, or shared
 * @bw: bandwidth in Kbps
 *
 * This function configures BW limit of queue scheduling node.
4459 */ 4460 enum ice_status 4461 ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4462 u16 q_handle, enum ice_rl_type rl_type, u32 bw) 4463 { 4464 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, 4465 bw); 4466 } 4467 4468 /** 4469 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit 4470 * @pi: port information structure 4471 * @vsi_handle: sw VSI handle 4472 * @tc: traffic class 4473 * @q_handle: software queue handle 4474 * @rl_type: min, max, or shared 4475 * 4476 * This function configures BW default limit of queue scheduling node. 4477 */ 4478 enum ice_status 4479 ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4480 u16 q_handle, enum ice_rl_type rl_type) 4481 { 4482 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, 4483 ICE_SCHED_DFLT_BW); 4484 } 4485 4486 /** 4487 * ice_sched_save_tc_node_bw - save TC node BW limit 4488 * @pi: port information structure 4489 * @tc: TC number 4490 * @rl_type: min or max 4491 * @bw: bandwidth in Kbps 4492 * 4493 * This function saves the modified values of bandwidth settings for later 4494 * replay purpose (restore) after reset. 
 */
static enum ice_status
ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc,
			  enum ice_rl_type rl_type, u32 bw)
{
	if (tc >= ICE_MAX_TRAFFIC_CLASS)
		return ICE_ERR_PARAM;
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw(&pi->tc_node_bw_t_info[tc], bw);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw(&pi->tc_node_bw_t_info[tc], bw);
		break;
	case ICE_SHARED_BW:
		ice_set_clear_shared_bw(&pi->tc_node_bw_t_info[tc], bw);
		break;
	default:
		return ICE_ERR_PARAM;
	}
	return ICE_SUCCESS;
}

/**
 * ice_sched_set_tc_node_bw_lmt - sets TC node BW limit
 * @pi: port information structure
 * @tc: TC number
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function configures bandwidth limit of TC node.
 */
static enum ice_status
ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
			     enum ice_rl_type rl_type, u32 bw)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_node *tc_node;

	if (tc >= ICE_MAX_TRAFFIC_CLASS)
		return status;
	ice_acquire_lock(&pi->sched_lock);
	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		goto exit_set_tc_node_bw;
	if (bw == ICE_SCHED_DFLT_BW)
		status = ice_sched_set_node_bw_dflt_lmt(pi, tc_node, rl_type);
	else
		status = ice_sched_set_node_bw_lmt(pi, tc_node, rl_type, bw);
	/* Save the BW for replay after reset only on success */
	if (!status)
		status = ice_sched_save_tc_node_bw(pi, tc, rl_type, bw);

exit_set_tc_node_bw:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_tc_node_bw_lmt - configure TC node BW limit
 * @pi: port information structure
 * @tc: TC number
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function configures BW limit of TC node.
 * Note: The minimum guaranteed reservation is done via DCBX.
 */
enum ice_status
ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
		       enum ice_rl_type rl_type, u32 bw)
{
	return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, bw);
}

/**
 * ice_cfg_tc_node_bw_dflt_lmt - configure TC node BW default limit
 * @pi: port information structure
 * @tc: TC number
 * @rl_type: min or max
 *
 * This function configures BW default limit of TC node.
 */
enum ice_status
ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc,
			    enum ice_rl_type rl_type)
{
	return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, ICE_SCHED_DFLT_BW);
}

/**
 * ice_sched_save_tc_node_bw_alloc - save TC node's BW alloc information
 * @pi: port information structure
 * @tc: traffic class
 * @rl_type: rate limit type min or max
 * @bw_alloc: Bandwidth allocation information
 *
 * Save BW alloc information of VSI type node for post replay use.
 */
static enum ice_status
ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
				enum ice_rl_type rl_type, u16 bw_alloc)
{
	if (tc >= ICE_MAX_TRAFFIC_CLASS)
		return ICE_ERR_PARAM;
	/* BW allocation is only defined for CIR and EIR; shared (SRL) has
	 * no alloc concept and falls to the default error case.
	 */
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw_alloc(&pi->tc_node_bw_t_info[tc],
					   bw_alloc);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw_alloc(&pi->tc_node_bw_t_info[tc],
					   bw_alloc);
		break;
	default:
		return ICE_ERR_PARAM;
	}
	return ICE_SUCCESS;
}

/**
 * ice_sched_set_tc_node_bw_alloc - set TC node BW alloc
 * @pi: port information structure
 * @tc: TC number
 * @rl_type: min or max
 * @bw_alloc: bandwidth alloc
 *
 * This function configures bandwidth alloc of TC node, also saves the
 * changed settings for replay purpose, and return success if it succeeds
 * in modifying bandwidth alloc setting.
 */
static enum ice_status
ice_sched_set_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
			       enum ice_rl_type rl_type, u8 bw_alloc)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_node *tc_node;

	if (tc >= ICE_MAX_TRAFFIC_CLASS)
		return status;
	ice_acquire_lock(&pi->sched_lock);
	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		goto exit_set_tc_node_bw_alloc;
	status = ice_sched_cfg_node_bw_alloc(pi->hw, tc_node, rl_type,
					     bw_alloc);
	if (status)
		goto exit_set_tc_node_bw_alloc;
	/* Record the setting for replay after reset */
	status = ice_sched_save_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc);

exit_set_tc_node_bw_alloc:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_tc_node_bw_alloc - configure TC node BW alloc
 * @pi: port information structure
 * @tc: TC number
 * @rl_type: min or max
 * @bw_alloc: bandwidth alloc
 *
 * This function configures BW limit of TC node.
 * Note: The minimum guaranteed reservation is done via DCBX.
 */
enum ice_status
ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
			 enum ice_rl_type rl_type, u8 bw_alloc)
{
	return ice_sched_set_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc);
}

/**
 * ice_sched_set_agg_bw_dflt_lmt - set aggregator node's BW limit to default
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function retrieves the aggregator ID based on VSI ID and TC,
 * and sets node's BW limit to default. This function needs to be
 * called with the scheduler lock held.
 */
enum ice_status
ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;

	/* Reset min, max, and shared limits on the aggregator node of
	 * every TC this VSI participates in; stop at the first failure.
	 */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *node;

		node = vsi_ctx->sched.ag_node[tc];
		if (!node)
			continue;

		/* Set min profile to default */
		status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MIN_BW);
		if (status)
			break;

		/* Set max profile to default */
		status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MAX_BW);
		if (status)
			break;

		/* Remove shared profile, if there is one */
		status = ice_sched_set_node_bw_dflt_lmt(pi, node,
							ICE_SHARED_BW);
		if (status)
			break;
	}

	return status;
}

/**
 * ice_sched_get_node_by_id_type - get node from ID type
 * @pi: port information structure
 * @id: identifier
 * @agg_type: type of aggregator
 * @tc: traffic class
 *
 * This function returns node identified by ID of type aggregator, and
 * based on traffic class (TC). This function needs to be called with
 * the scheduler lock held.
 */
static struct ice_sched_node *
ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
			      enum ice_agg_type agg_type, u8 tc)
{
	struct ice_sched_node *node = NULL;
	struct ice_sched_node *child_node;

	/* How 'id' is interpreted depends on agg_type: VSI handle, AGG ID,
	 * or a node TEID for queue/queue-group types.
	 */
	switch (agg_type) {
	case ICE_AGG_TYPE_VSI: {
		struct ice_vsi_ctx *vsi_ctx;
		u16 vsi_handle = (u16)id;

		if (!ice_is_vsi_valid(pi->hw, vsi_handle))
			break;
		/* Get sched_vsi_info */
		vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
		if (!vsi_ctx)
			break;
		node = vsi_ctx->sched.vsi_node[tc];
		break;
	}

	case ICE_AGG_TYPE_AGG: {
		struct ice_sched_node *tc_node;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (tc_node)
			node = ice_sched_get_agg_node(pi, tc_node, id);
		break;
	}

	case ICE_AGG_TYPE_Q:
		/* The current implementation allows single queue to modify */
		node = ice_sched_get_node(pi, id);
		break;

	case ICE_AGG_TYPE_QG:
		/* The current implementation allows single qg to modify */
		child_node = ice_sched_get_node(pi, id);
		if (!child_node)
			break;
		/* Queue-group node is the parent of the queue node */
		node = child_node->parent;
		break;

	default:
		break;
	}

	return node;
}

/**
 * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC
 * @pi: port information structure
 * @id: ID (software VSI handle or AGG ID)
 * @agg_type: aggregator type (VSI or AGG type node)
 * @tc: traffic class
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function sets BW limit of VSI or Aggregator scheduling node
 * based on TC information from passed in argument BW.
4788 */ 4789 enum ice_status 4790 ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, 4791 enum ice_agg_type agg_type, u8 tc, 4792 enum ice_rl_type rl_type, u32 bw) 4793 { 4794 enum ice_status status = ICE_ERR_PARAM; 4795 struct ice_sched_node *node; 4796 4797 if (!pi) 4798 return status; 4799 4800 if (rl_type == ICE_UNKNOWN_BW) 4801 return status; 4802 4803 ice_acquire_lock(&pi->sched_lock); 4804 node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc); 4805 if (!node) { 4806 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n"); 4807 goto exit_set_node_bw_lmt_per_tc; 4808 } 4809 if (bw == ICE_SCHED_DFLT_BW) 4810 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type); 4811 else 4812 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw); 4813 4814 exit_set_node_bw_lmt_per_tc: 4815 ice_release_lock(&pi->sched_lock); 4816 return status; 4817 } 4818 4819 /** 4820 * ice_sched_validate_vsi_srl_node - validate VSI SRL node 4821 * @pi: port information structure 4822 * @vsi_handle: software VSI handle 4823 * 4824 * This function validates SRL node of the VSI node if available SRL layer is 4825 * different than the VSI node layer on all TC(s).This function needs to be 4826 * called with scheduler lock held. 
 */
static enum ice_status
ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
{
	u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *vsi_node;
		enum ice_rl_type rl_type = ICE_SHARED_BW;
		enum ice_status status;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		/* SRL bandwidth layer selection - resolved once on the
		 * first TC that has a VSI node, then reused for the rest.
		 */
		if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) {
			u8 node_layer = vsi_node->tx_sched_layer;
			u8 layer_num;

			layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
								node_layer);
			if (layer_num >= pi->hw->num_tx_sched_layers)
				return ICE_ERR_PARAM;
			sel_layer = layer_num;
		}

		status = ice_sched_validate_srl_node(vsi_node, sel_layer);
		if (status)
			return status;
	}
	return ICE_SUCCESS;
}

/**
 * ice_sched_set_save_vsi_srl_node_bw - set VSI shared limit values
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class
 * @srl_node: sched node to configure
 * @rl_type: rate limit type minimum, maximum, or shared
 * @bw: minimum, maximum, or shared bandwidth in Kbps
 *
 * Configure shared rate limiter(SRL) of VSI type nodes across given traffic
 * class, and saves those value for later use for replaying purposes. The
 * caller holds the scheduler lock.
4882 */ 4883 static enum ice_status 4884 ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle, 4885 u8 tc, struct ice_sched_node *srl_node, 4886 enum ice_rl_type rl_type, u32 bw) 4887 { 4888 enum ice_status status; 4889 4890 if (bw == ICE_SCHED_DFLT_BW) { 4891 status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type); 4892 } else { 4893 status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw); 4894 if (status) 4895 return status; 4896 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw); 4897 } 4898 return status; 4899 } 4900 4901 /** 4902 * ice_sched_set_vsi_node_srl_per_tc - set VSI node BW shared limit for tc 4903 * @pi: port information structure 4904 * @vsi_handle: software VSI handle 4905 * @tc: traffic class 4906 * @min_bw: minimum bandwidth in Kbps 4907 * @max_bw: maximum bandwidth in Kbps 4908 * @shared_bw: shared bandwidth in Kbps 4909 * 4910 * Configure shared rate limiter(SRL) of VSI type nodes across requested 4911 * traffic class for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW 4912 * is passed, it removes the corresponding bw from the node. The caller 4913 * holds scheduler lock. 
 */
static enum ice_status
ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle,
				  u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw)
{
	struct ice_sched_node *tc_node, *vsi_node, *cfg_node;
	enum ice_status status;
	u8 layer_num;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return ICE_ERR_CFG;

	layer_num = ice_sched_get_rl_prof_layer(pi, ICE_SHARED_BW,
						vsi_node->tx_sched_layer);
	if (layer_num >= pi->hw->num_tx_sched_layers)
		return ICE_ERR_PARAM;

	/* SRL node may be different */
	cfg_node = ice_sched_get_srl_node(vsi_node, layer_num);
	if (!cfg_node)
		return ICE_ERR_CFG;

	/* Apply min, max, and shared limits in turn; bail on first error */
	status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc,
						    cfg_node, ICE_MIN_BW,
						    min_bw);
	if (status)
		return status;

	status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc,
						    cfg_node, ICE_MAX_BW,
						    max_bw);
	if (status)
		return status;

	return ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc, cfg_node,
						  ICE_SHARED_BW, shared_bw);
}

/**
 * ice_sched_set_vsi_bw_shared_lmt - set VSI BW shared limit
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @min_bw: minimum bandwidth in Kbps
 * @max_bw: maximum bandwidth in Kbps
 * @shared_bw: shared bandwidth in Kbps
 *
 * Configure shared rate limiter(SRL) of all VSI type nodes across all traffic
 * classes for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW is
 * passed, it removes those value(s) from the node.
 */
enum ice_status
ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
				u32 min_bw, u32 max_bw, u32 shared_bw)
{
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	if (!pi)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);
	/* Verify SRL applicability on every TC before touching any node */
	status = ice_sched_validate_vsi_srl_node(pi, vsi_handle);
	if (status)
		goto exit_set_vsi_bw_shared_lmt;
	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *vsi_node;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		status = ice_sched_set_vsi_node_srl_per_tc(pi, vsi_handle, tc,
							   min_bw, max_bw,
							   shared_bw);
		if (status)
			break;
	}

exit_set_vsi_bw_shared_lmt:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_sched_validate_agg_srl_node - validate AGG SRL node
 * @pi: port information structure
 * @agg_id: aggregator ID
 *
 * This function validates the SRL node of the AGG node if the available SRL
 * layer is different than the AGG node layer on all TC(s). This function
 * needs to be called with scheduler lock held.
 */
static enum ice_status
ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id)
{
	u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	/* The aggregator must be known to the SW DB */
	LIST_FOR_EACH_ENTRY(agg_info, &pi->hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present)
		return ICE_ERR_PARAM;
	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *agg_node;
		enum ice_rl_type rl_type = ICE_SHARED_BW;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
		if (!agg_node)
			continue;
		/* SRL bandwidth layer selection - resolved once on the
		 * first TC that has an AGG node, then reused for the rest.
		 */
		if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) {
			u8 node_layer = agg_node->tx_sched_layer;
			u8 layer_num;

			layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
								node_layer);
			if (layer_num >= pi->hw->num_tx_sched_layers)
				return ICE_ERR_PARAM;
			sel_layer = layer_num;
		}

		status = ice_sched_validate_srl_node(agg_node, sel_layer);
		if (status)
			break;
	}
	return status;
}

/**
 * ice_sched_validate_agg_id - Validate aggregator id
 * @pi: port information structure
 * @agg_id: aggregator ID
 *
 * This function validates aggregator id. Caller holds the scheduler lock.
5073 */ 5074 static enum ice_status 5075 ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id) 5076 { 5077 struct ice_sched_agg_info *agg_info; 5078 struct ice_sched_agg_info *tmp; 5079 bool agg_id_present = false; 5080 enum ice_status status; 5081 5082 status = ice_sched_validate_agg_srl_node(pi, agg_id); 5083 if (status) 5084 return status; 5085 5086 LIST_FOR_EACH_ENTRY_SAFE(agg_info, tmp, &pi->hw->agg_list, 5087 ice_sched_agg_info, list_entry) 5088 if (agg_info->agg_id == agg_id) { 5089 agg_id_present = true; 5090 break; 5091 } 5092 5093 if (!agg_id_present) 5094 return ICE_ERR_PARAM; 5095 5096 return ICE_SUCCESS; 5097 } 5098 5099 /** 5100 * ice_sched_set_save_agg_srl_node_bw - set aggregator shared limit values 5101 * @pi: port information structure 5102 * @agg_id: aggregator ID 5103 * @tc: traffic class 5104 * @srl_node: sched node to configure 5105 * @rl_type: rate limit type minimum, maximum, or shared 5106 * @bw: minimum, maximum, or shared bandwidth in Kbps 5107 * 5108 * Configure shared rate limiter(SRL) of aggregator type nodes across 5109 * requested traffic class, and saves those value for later use for 5110 * replaying purposes. The caller holds the scheduler lock. 
5111 */ 5112 static enum ice_status 5113 ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, 5114 struct ice_sched_node *srl_node, 5115 enum ice_rl_type rl_type, u32 bw) 5116 { 5117 enum ice_status status; 5118 5119 if (bw == ICE_SCHED_DFLT_BW) { 5120 status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type); 5121 } else { 5122 status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw); 5123 if (status) 5124 return status; 5125 status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw); 5126 } 5127 return status; 5128 } 5129 5130 /** 5131 * ice_sched_set_agg_node_srl_per_tc - set aggregator SRL per tc 5132 * @pi: port information structure 5133 * @agg_id: aggregator ID 5134 * @tc: traffic class 5135 * @min_bw: minimum bandwidth in Kbps 5136 * @max_bw: maximum bandwidth in Kbps 5137 * @shared_bw: shared bandwidth in Kbps 5138 * 5139 * This function configures the shared rate limiter(SRL) of aggregator type 5140 * node for a given traffic class for aggregator matching agg_id. When BW 5141 * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. Caller 5142 * holds the scheduler lock. 
5143 */ 5144 static enum ice_status 5145 ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id, 5146 u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) 5147 { 5148 struct ice_sched_node *tc_node, *agg_node, *cfg_node; 5149 enum ice_rl_type rl_type = ICE_SHARED_BW; 5150 enum ice_status status = ICE_ERR_CFG; 5151 u8 layer_num; 5152 5153 tc_node = ice_sched_get_tc_node(pi, tc); 5154 if (!tc_node) 5155 return ICE_ERR_CFG; 5156 5157 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); 5158 if (!agg_node) 5159 return ICE_ERR_CFG; 5160 5161 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, 5162 agg_node->tx_sched_layer); 5163 if (layer_num >= pi->hw->num_tx_sched_layers) 5164 return ICE_ERR_PARAM; 5165 5166 /* SRL node may be different */ 5167 cfg_node = ice_sched_get_srl_node(agg_node, layer_num); 5168 if (!cfg_node) 5169 return ICE_ERR_CFG; 5170 5171 status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node, 5172 ICE_MIN_BW, min_bw); 5173 if (status) 5174 return status; 5175 5176 status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node, 5177 ICE_MAX_BW, max_bw); 5178 if (status) 5179 return status; 5180 5181 status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node, 5182 ICE_SHARED_BW, shared_bw); 5183 return status; 5184 } 5185 5186 /** 5187 * ice_sched_set_agg_bw_shared_lmt - set aggregator BW shared limit 5188 * @pi: port information structure 5189 * @agg_id: aggregator ID 5190 * @min_bw: minimum bandwidth in Kbps 5191 * @max_bw: maximum bandwidth in Kbps 5192 * @shared_bw: shared bandwidth in Kbps 5193 * 5194 * This function configures the shared rate limiter(SRL) of all aggregator type 5195 * nodes across all traffic classes for aggregator matching agg_id. When 5196 * BW value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the 5197 * node(s). 
5198 */ 5199 enum ice_status 5200 ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, 5201 u32 min_bw, u32 max_bw, u32 shared_bw) 5202 { 5203 enum ice_status status; 5204 u8 tc; 5205 5206 if (!pi) 5207 return ICE_ERR_PARAM; 5208 5209 ice_acquire_lock(&pi->sched_lock); 5210 status = ice_sched_validate_agg_id(pi, agg_id); 5211 if (status) 5212 goto exit_agg_bw_shared_lmt; 5213 5214 /* Return success if no nodes are present across TC */ 5215 ice_for_each_traffic_class(tc) { 5216 struct ice_sched_node *tc_node, *agg_node; 5217 5218 tc_node = ice_sched_get_tc_node(pi, tc); 5219 if (!tc_node) 5220 continue; 5221 5222 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); 5223 if (!agg_node) 5224 continue; 5225 5226 status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc, 5227 min_bw, max_bw, 5228 shared_bw); 5229 if (status) 5230 break; 5231 } 5232 5233 exit_agg_bw_shared_lmt: 5234 ice_release_lock(&pi->sched_lock); 5235 return status; 5236 } 5237 5238 /** 5239 * ice_sched_set_agg_bw_shared_lmt_per_tc - set aggregator BW shared lmt per tc 5240 * @pi: port information structure 5241 * @agg_id: aggregator ID 5242 * @tc: traffic class 5243 * @min_bw: minimum bandwidth in Kbps 5244 * @max_bw: maximum bandwidth in Kbps 5245 * @shared_bw: shared bandwidth in Kbps 5246 * 5247 * This function configures the shared rate limiter(SRL) of aggregator type 5248 * node for a given traffic class for aggregator matching agg_id. When BW 5249 * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. 
5250 */ 5251 enum ice_status 5252 ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, 5253 u8 tc, u32 min_bw, u32 max_bw, 5254 u32 shared_bw) 5255 { 5256 enum ice_status status; 5257 5258 if (!pi) 5259 return ICE_ERR_PARAM; 5260 ice_acquire_lock(&pi->sched_lock); 5261 status = ice_sched_validate_agg_id(pi, agg_id); 5262 if (status) 5263 goto exit_agg_bw_shared_lmt_per_tc; 5264 5265 status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc, min_bw, 5266 max_bw, shared_bw); 5267 5268 exit_agg_bw_shared_lmt_per_tc: 5269 ice_release_lock(&pi->sched_lock); 5270 return status; 5271 } 5272 5273 /** 5274 * ice_sched_cfg_sibl_node_prio - configure node sibling priority 5275 * @pi: port information structure 5276 * @node: sched node to configure 5277 * @priority: sibling priority 5278 * 5279 * This function configures node element's sibling priority only. This 5280 * function needs to be called with scheduler lock held. 5281 */ 5282 enum ice_status 5283 ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi, 5284 struct ice_sched_node *node, u8 priority) 5285 { 5286 struct ice_aqc_txsched_elem_data buf; 5287 struct ice_aqc_txsched_elem *data; 5288 struct ice_hw *hw = pi->hw; 5289 enum ice_status status; 5290 5291 if (!hw) 5292 return ICE_ERR_PARAM; 5293 buf = node->info; 5294 data = &buf.data; 5295 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; 5296 priority = (priority << ICE_AQC_ELEM_GENERIC_PRIO_S) & 5297 ICE_AQC_ELEM_GENERIC_PRIO_M; 5298 data->generic &= ~ICE_AQC_ELEM_GENERIC_PRIO_M; 5299 data->generic |= priority; 5300 5301 /* Configure element */ 5302 status = ice_sched_update_elem(hw, node, &buf); 5303 return status; 5304 } 5305 5306 /** 5307 * ice_cfg_rl_burst_size - Set burst size value 5308 * @hw: pointer to the HW struct 5309 * @bytes: burst size in bytes 5310 * 5311 * This function configures/set the burst size to requested new value. The new 5312 * burst size value is used for future rate limit calls. 
It doesn't change the 5313 * existing or previously created RL profiles. 5314 */ 5315 enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) 5316 { 5317 u16 burst_size_to_prog; 5318 5319 if (bytes < ICE_MIN_BURST_SIZE_ALLOWED || 5320 bytes > ICE_MAX_BURST_SIZE_ALLOWED) 5321 return ICE_ERR_PARAM; 5322 if (ice_round_to_num(bytes, 64) <= 5323 ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) { 5324 /* 64 byte granularity case */ 5325 /* Disable MSB granularity bit */ 5326 burst_size_to_prog = ICE_64_BYTE_GRANULARITY; 5327 /* round number to nearest 64 byte granularity */ 5328 bytes = ice_round_to_num(bytes, 64); 5329 /* The value is in 64 byte chunks */ 5330 burst_size_to_prog |= (u16)(bytes / 64); 5331 } else { 5332 /* k bytes granularity case */ 5333 /* Enable MSB granularity bit */ 5334 burst_size_to_prog = ICE_KBYTE_GRANULARITY; 5335 /* round number to nearest 1024 granularity */ 5336 bytes = ice_round_to_num(bytes, 1024); 5337 /* check rounding doesn't go beyond allowed */ 5338 if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY) 5339 bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY; 5340 /* The value is in k bytes */ 5341 burst_size_to_prog |= (u16)(bytes / 1024); 5342 } 5343 hw->max_burst_size = burst_size_to_prog; 5344 return ICE_SUCCESS; 5345 } 5346 5347 /** 5348 * ice_sched_replay_node_prio - re-configure node priority 5349 * @hw: pointer to the HW struct 5350 * @node: sched node to configure 5351 * @priority: priority value 5352 * 5353 * This function configures node element's priority value. It 5354 * needs to be called with scheduler lock held. 
5355 */ 5356 static enum ice_status 5357 ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, 5358 u8 priority) 5359 { 5360 struct ice_aqc_txsched_elem_data buf; 5361 struct ice_aqc_txsched_elem *data; 5362 enum ice_status status; 5363 5364 buf = node->info; 5365 data = &buf.data; 5366 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; 5367 data->generic = priority; 5368 5369 /* Configure element */ 5370 status = ice_sched_update_elem(hw, node, &buf); 5371 return status; 5372 } 5373 5374 /** 5375 * ice_sched_replay_node_bw - replay node(s) BW 5376 * @hw: pointer to the HW struct 5377 * @node: sched node to configure 5378 * @bw_t_info: BW type information 5379 * 5380 * This function restores node's BW from bw_t_info. The caller needs 5381 * to hold the scheduler lock. 5382 */ 5383 static enum ice_status 5384 ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, 5385 struct ice_bw_type_info *bw_t_info) 5386 { 5387 struct ice_port_info *pi = hw->port_info; 5388 enum ice_status status = ICE_ERR_PARAM; 5389 u16 bw_alloc; 5390 5391 if (!node) 5392 return status; 5393 if (!ice_is_any_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT)) 5394 return ICE_SUCCESS; 5395 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_PRIO)) { 5396 status = ice_sched_replay_node_prio(hw, node, 5397 bw_t_info->generic); 5398 if (status) 5399 return status; 5400 } 5401 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR)) { 5402 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, 5403 bw_t_info->cir_bw.bw); 5404 if (status) 5405 return status; 5406 } 5407 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR_WT)) { 5408 bw_alloc = bw_t_info->cir_bw.bw_alloc; 5409 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW, 5410 bw_alloc); 5411 if (status) 5412 return status; 5413 } 5414 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR)) { 5415 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, 5416 bw_t_info->eir_bw.bw); 
5417 if (status) 5418 return status; 5419 } 5420 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR_WT)) { 5421 bw_alloc = bw_t_info->eir_bw.bw_alloc; 5422 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW, 5423 bw_alloc); 5424 if (status) 5425 return status; 5426 } 5427 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_SHARED)) 5428 status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW, 5429 bw_t_info->shared_bw); 5430 return status; 5431 } 5432 5433 /** 5434 * ice_sched_replay_agg_bw - replay aggregator node(s) BW 5435 * @hw: pointer to the HW struct 5436 * @agg_info: aggregator data structure 5437 * 5438 * This function re-creates aggregator type nodes. The caller needs to hold 5439 * the scheduler lock. 5440 */ 5441 static enum ice_status 5442 ice_sched_replay_agg_bw(struct ice_hw *hw, struct ice_sched_agg_info *agg_info) 5443 { 5444 struct ice_sched_node *tc_node, *agg_node; 5445 enum ice_status status = ICE_SUCCESS; 5446 u8 tc; 5447 5448 if (!agg_info) 5449 return ICE_ERR_PARAM; 5450 ice_for_each_traffic_class(tc) { 5451 if (!ice_is_any_bit_set(agg_info->bw_t_info[tc].bw_t_bitmap, 5452 ICE_BW_TYPE_CNT)) 5453 continue; 5454 tc_node = ice_sched_get_tc_node(hw->port_info, tc); 5455 if (!tc_node) { 5456 status = ICE_ERR_PARAM; 5457 break; 5458 } 5459 agg_node = ice_sched_get_agg_node(hw->port_info, tc_node, 5460 agg_info->agg_id); 5461 if (!agg_node) { 5462 status = ICE_ERR_PARAM; 5463 break; 5464 } 5465 status = ice_sched_replay_node_bw(hw, agg_node, 5466 &agg_info->bw_t_info[tc]); 5467 if (status) 5468 break; 5469 } 5470 return status; 5471 } 5472 5473 /** 5474 * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap 5475 * @pi: port info struct 5476 * @tc_bitmap: 8 bits TC bitmap to check 5477 * @ena_tc_bitmap: 8 bits enabled TC bitmap to return 5478 * 5479 * This function returns enabled TC bitmap in variable ena_tc_bitmap. Some TCs 5480 * may be missing, it returns enabled TCs. 
This function needs to be called with 5481 * scheduler lock held. 5482 */ 5483 static void 5484 ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi, ice_bitmap_t *tc_bitmap, 5485 ice_bitmap_t *ena_tc_bitmap) 5486 { 5487 u8 tc; 5488 5489 /* Some TC(s) may be missing after reset, adjust for replay */ 5490 ice_for_each_traffic_class(tc) 5491 if (ice_is_tc_ena(*tc_bitmap, tc) && 5492 (ice_sched_get_tc_node(pi, tc))) 5493 ice_set_bit(tc, ena_tc_bitmap); 5494 } 5495 5496 /** 5497 * ice_sched_replay_agg - recreate aggregator node(s) 5498 * @hw: pointer to the HW struct 5499 * 5500 * This function recreate aggregator type nodes which are not replayed earlier. 5501 * It also replay aggregator BW information. These aggregator nodes are not 5502 * associated with VSI type node yet. 5503 */ 5504 void ice_sched_replay_agg(struct ice_hw *hw) 5505 { 5506 struct ice_port_info *pi = hw->port_info; 5507 struct ice_sched_agg_info *agg_info; 5508 5509 ice_acquire_lock(&pi->sched_lock); 5510 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, 5511 list_entry) 5512 /* replay aggregator (re-create aggregator node) */ 5513 if (!ice_cmp_bitmap(agg_info->tc_bitmap, 5514 agg_info->replay_tc_bitmap, 5515 ICE_MAX_TRAFFIC_CLASS)) { 5516 ice_declare_bitmap(replay_bitmap, 5517 ICE_MAX_TRAFFIC_CLASS); 5518 enum ice_status status; 5519 5520 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5521 ice_sched_get_ena_tc_bitmap(pi, 5522 agg_info->replay_tc_bitmap, 5523 replay_bitmap); 5524 status = ice_sched_cfg_agg(hw->port_info, 5525 agg_info->agg_id, 5526 ICE_AGG_TYPE_AGG, 5527 replay_bitmap); 5528 if (status) { 5529 ice_info(hw, "Replay agg id[%d] failed\n", 5530 agg_info->agg_id); 5531 /* Move on to next one */ 5532 continue; 5533 } 5534 /* Replay aggregator node BW (restore aggregator BW) */ 5535 status = ice_sched_replay_agg_bw(hw, agg_info); 5536 if (status) 5537 ice_info(hw, "Replay agg bw [id=%d] failed\n", 5538 agg_info->agg_id); 5539 } 5540 
ice_release_lock(&pi->sched_lock); 5541 } 5542 5543 /** 5544 * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization 5545 * @hw: pointer to the HW struct 5546 * 5547 * This function initialize aggregator(s) TC bitmap to zero. A required 5548 * preinit step for replaying aggregators. 5549 */ 5550 void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw) 5551 { 5552 struct ice_port_info *pi = hw->port_info; 5553 struct ice_sched_agg_info *agg_info; 5554 5555 ice_acquire_lock(&pi->sched_lock); 5556 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, 5557 list_entry) { 5558 struct ice_sched_agg_vsi_info *agg_vsi_info; 5559 5560 agg_info->tc_bitmap[0] = 0; 5561 LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list, 5562 ice_sched_agg_vsi_info, list_entry) 5563 agg_vsi_info->tc_bitmap[0] = 0; 5564 } 5565 ice_release_lock(&pi->sched_lock); 5566 } 5567 5568 /** 5569 * ice_sched_replay_root_node_bw - replay root node BW 5570 * @pi: port information structure 5571 * 5572 * Replay root node BW settings. 5573 */ 5574 enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi) 5575 { 5576 enum ice_status status = ICE_SUCCESS; 5577 5578 if (!pi->hw) 5579 return ICE_ERR_PARAM; 5580 ice_acquire_lock(&pi->sched_lock); 5581 5582 status = ice_sched_replay_node_bw(pi->hw, pi->root, 5583 &pi->root_node_bw_t_info); 5584 ice_release_lock(&pi->sched_lock); 5585 return status; 5586 } 5587 5588 /** 5589 * ice_sched_replay_tc_node_bw - replay TC node(s) BW 5590 * @pi: port information structure 5591 * 5592 * This function replay TC nodes. 
5593 */ 5594 enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi) 5595 { 5596 enum ice_status status = ICE_SUCCESS; 5597 u8 tc; 5598 5599 if (!pi->hw) 5600 return ICE_ERR_PARAM; 5601 ice_acquire_lock(&pi->sched_lock); 5602 ice_for_each_traffic_class(tc) { 5603 struct ice_sched_node *tc_node; 5604 5605 tc_node = ice_sched_get_tc_node(pi, tc); 5606 if (!tc_node) 5607 continue; /* TC not present */ 5608 status = ice_sched_replay_node_bw(pi->hw, tc_node, 5609 &pi->tc_node_bw_t_info[tc]); 5610 if (status) 5611 break; 5612 } 5613 ice_release_lock(&pi->sched_lock); 5614 return status; 5615 } 5616 5617 /** 5618 * ice_sched_replay_vsi_bw - replay VSI type node(s) BW 5619 * @hw: pointer to the HW struct 5620 * @vsi_handle: software VSI handle 5621 * @tc_bitmap: 8 bits TC bitmap 5622 * 5623 * This function replays VSI type nodes bandwidth. This function needs to be 5624 * called with scheduler lock held. 5625 */ 5626 static enum ice_status 5627 ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle, 5628 ice_bitmap_t *tc_bitmap) 5629 { 5630 struct ice_sched_node *vsi_node, *tc_node; 5631 struct ice_port_info *pi = hw->port_info; 5632 struct ice_bw_type_info *bw_t_info; 5633 struct ice_vsi_ctx *vsi_ctx; 5634 enum ice_status status = ICE_SUCCESS; 5635 u8 tc; 5636 5637 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 5638 if (!vsi_ctx) 5639 return ICE_ERR_PARAM; 5640 ice_for_each_traffic_class(tc) { 5641 if (!ice_is_tc_ena(*tc_bitmap, tc)) 5642 continue; 5643 tc_node = ice_sched_get_tc_node(pi, tc); 5644 if (!tc_node) 5645 continue; 5646 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); 5647 if (!vsi_node) 5648 continue; 5649 bw_t_info = &vsi_ctx->sched.bw_t_info[tc]; 5650 status = ice_sched_replay_node_bw(hw, vsi_node, bw_t_info); 5651 if (status) 5652 break; 5653 } 5654 return status; 5655 } 5656 5657 /** 5658 * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s) 5659 * @hw: pointer to the HW struct 5660 * @vsi_handle: software 
VSI handle 5661 * 5662 * This function replays aggregator node, VSI to aggregator type nodes, and 5663 * their node bandwidth information. This function needs to be called with 5664 * scheduler lock held. 5665 */ 5666 static enum ice_status 5667 ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) 5668 { 5669 ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5670 struct ice_sched_agg_vsi_info *agg_vsi_info; 5671 struct ice_port_info *pi = hw->port_info; 5672 struct ice_sched_agg_info *agg_info; 5673 enum ice_status status; 5674 5675 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5676 if (!ice_is_vsi_valid(hw, vsi_handle)) 5677 return ICE_ERR_PARAM; 5678 agg_info = ice_get_vsi_agg_info(hw, vsi_handle); 5679 if (!agg_info) 5680 return ICE_SUCCESS; /* Not present in list - default Agg case */ 5681 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); 5682 if (!agg_vsi_info) 5683 return ICE_SUCCESS; /* Not present in list - default Agg case */ 5684 ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap, 5685 replay_bitmap); 5686 /* Replay aggregator node associated to vsi_handle */ 5687 status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id, 5688 ICE_AGG_TYPE_AGG, replay_bitmap); 5689 if (status) 5690 return status; 5691 /* Replay aggregator node BW (restore aggregator BW) */ 5692 status = ice_sched_replay_agg_bw(hw, agg_info); 5693 if (status) 5694 return status; 5695 5696 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5697 ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap, 5698 replay_bitmap); 5699 /* Move this VSI (vsi_handle) to above aggregator */ 5700 status = ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle, 5701 replay_bitmap); 5702 if (status) 5703 return status; 5704 /* Replay VSI BW (restore VSI BW) */ 5705 return ice_sched_replay_vsi_bw(hw, vsi_handle, 5706 agg_vsi_info->tc_bitmap); 5707 } 5708 5709 /** 5710 * ice_replay_vsi_agg - replay VSI to aggregator node 5711 * @hw: pointer to the HW 
struct 5712 * @vsi_handle: software VSI handle 5713 * 5714 * This function replays association of VSI to aggregator type nodes, and 5715 * node bandwidth information. 5716 */ 5717 enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) 5718 { 5719 struct ice_port_info *pi = hw->port_info; 5720 enum ice_status status; 5721 5722 ice_acquire_lock(&pi->sched_lock); 5723 status = ice_sched_replay_vsi_agg(hw, vsi_handle); 5724 ice_release_lock(&pi->sched_lock); 5725 return status; 5726 } 5727 5728 /** 5729 * ice_sched_replay_q_bw - replay queue type node BW 5730 * @pi: port information structure 5731 * @q_ctx: queue context structure 5732 * 5733 * This function replays queue type node bandwidth. This function needs to be 5734 * called with scheduler lock held. 5735 */ 5736 enum ice_status 5737 ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) 5738 { 5739 struct ice_sched_node *q_node; 5740 5741 /* Following also checks the presence of node in tree */ 5742 q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); 5743 if (!q_node) 5744 return ICE_ERR_PARAM; 5745 return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); 5746 } 5747