/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2023, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ice_sched.h"

/**
 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
 * @pi: port information structure
 * @info: Scheduler element information from firmware
 *
 * This function inserts the root node of the scheduling tree topology
 * to the SW DB. Returns ICE_SUCCESS, or an error code on allocation or
 * parameter failure.
 */
static enum ice_status
ice_sched_add_root_node(struct ice_port_info *pi,
			struct ice_aqc_txsched_elem_data *info)
{
	struct ice_sched_node *root;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	root = (struct ice_sched_node *)ice_malloc(hw, sizeof(*root));
	if (!root)
		return ICE_ERR_NO_MEMORY;

	/* coverity[suspicious_sizeof] */
	/* NOTE(review): sizeof(*root) allocates a full node per slot where a
	 * pointer (sizeof(*root->children)) would suffice; this over-allocation
	 * appears intentional upstream (hence the coverity annotation) — do not
	 * "fix" without confirming against the shared-code baseline.
	 */
	root->children = (struct ice_sched_node **)
		ice_calloc(hw, hw->max_children[0], sizeof(*root));
	if (!root->children) {
		ice_free(hw, root);
		return ICE_ERR_NO_MEMORY;
	}

	ice_memcpy(&root->info, info, sizeof(*info), ICE_DMA_TO_NONDMA);
	pi->root = root;
	return ICE_SUCCESS;
}

/**
 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
 * @teid: node TEID to search
 *
 * This function searches for a node matching the TEID in the scheduling tree
 * from the SW DB. The search is recursive and is restricted by the number of
 * layers it has searched through; stopping at the max supported layer.
 *
 * This function needs to be called when holding the port_info->sched_lock
 */
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
{
	u16 i;

	/* The TEID is same as that of the start_node */
	if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
		return start_node;

	/* The node has no children or is at the max layer */
	if (!start_node->num_children ||
	    start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
	    start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
		return NULL;

	/* Check if TEID matches to any of the children nodes */
	for (i = 0; i < start_node->num_children; i++)
		if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
			return start_node->children[i];

	/* Search within each child's sub-tree */
	for (i = 0; i < start_node->num_children; i++) {
		struct ice_sched_node *tmp;

		tmp = ice_sched_find_node_by_teid(start_node->children[i],
						  teid);
		if (tmp)
			return tmp;
	}

	return NULL;
}

/**
 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
 * @hw: pointer to the HW struct
 * @cmd_opc: cmd opcode
 * @elems_req: number of elements to request
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_resp: returns total number of elements response
 * @cd: pointer to command details structure or NULL
 *
 * This function sends a scheduling elements cmd (cmd_opc). It is the common
 * transport used by the add/cfg/move/suspend/resume/delete/query wrappers
 * below; on success, the firmware-reported element count is written to
 * @elems_resp when the caller provided it.
 */
static enum ice_status
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
			    u16 elems_req, void *buf, u16 buf_size,
			    u16 *elems_resp, struct ice_sq_cd *cd)
{
	struct ice_aqc_sched_elem_cmd *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.sched_elem_cmd;
	ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
	cmd->num_elem_req = CPU_TO_LE16(elems_req);
	/* buffer contains data read by firmware */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && elems_resp)
		*elems_resp = LE16_TO_CPU(cmd->num_elem_resp);

	return status;
}

/**
 * ice_aq_query_sched_elems - query scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements returned
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduling elements (0x0404)
 */
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
			 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
			 u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
 * @pi: port information structure
 * @layer: Scheduler layer of the node
 * @info: Scheduler element information from firmware
 * @prealloc_node: preallocated ice_sched_node struct for SW DB
 *
 * This function inserts a scheduler node to the SW DB.
 */
enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
		   struct ice_aqc_txsched_elem_data *info,
		   struct ice_sched_node *prealloc_node)
{
	struct ice_aqc_txsched_elem_data elem;
	struct ice_sched_node *parent;
	struct ice_sched_node *node;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	/* A valid parent node should be there */
	parent = ice_sched_find_node_by_teid(pi->root,
					     LE32_TO_CPU(info->parent_teid));
	if (!parent) {
		ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
			  LE32_TO_CPU(info->parent_teid));
		return ICE_ERR_PARAM;
	}

	/* query the current node information from FW before adding it
	 * to the SW DB
	 */
	status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem);
	if (status)
		return status;

	if (prealloc_node)
		node = prealloc_node;
	else
		node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node));
	if (!node)
		return ICE_ERR_NO_MEMORY;
	if (hw->max_children[layer]) {
		/* coverity[suspicious_sizeof] */
		/* NOTE(review): as in ice_sched_add_root_node, sizeof(*node)
		 * over-allocates the children pointer array — kept to match
		 * upstream. Also note the failure path below frees @node even
		 * when it came from @prealloc_node (caller-owned); confirm
		 * ownership with callers before changing.
		 */
		node->children = (struct ice_sched_node **)
			ice_calloc(hw, hw->max_children[layer], sizeof(*node));
		if (!node->children) {
			ice_free(hw, node);
			return ICE_ERR_NO_MEMORY;
		}
	}

	node->in_use = true;
	node->parent = parent;
	node->tx_sched_layer = layer;
	parent->children[parent->num_children++] = node;
	node->info = elem;
	return ICE_SUCCESS;
}

/**
 * ice_aq_delete_sched_elems - delete scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to delete
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_del: returns total number of elements deleted
 * @cd: pointer to command details structure or NULL
 *
 * Delete scheduling elements (0x040F)
 */
static enum ice_status
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
			  struct ice_aqc_delete_elem *buf, u16 buf_size,
			  u16 *grps_del, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_del, cd);
}

/**
 * ice_sched_remove_elems - remove nodes from HW
 * @hw: pointer to the HW struct
 * @parent: pointer to the parent node
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be deleted
 *
 * This function removes nodes from HW. The status is returned to the caller
 * even on failure so it can decide whether to tear down the SW DB entry.
 */
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
		       u16 num_nodes, u32 *node_teids)
{
	struct ice_aqc_delete_elem *buf;
	u16 i, num_groups_removed = 0;
	enum ice_status status;
	u16 buf_size;

	buf_size = ice_struct_size(buf, teid, num_nodes);
	buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = CPU_TO_LE32(node_teids[i]);

	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
					   &num_groups_removed, NULL);
	if (status != ICE_SUCCESS || num_groups_removed != 1)
		ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
			  hw->adminq.sq_last_status);

	ice_free(hw, buf);
	return status;
}

/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @pi: port information structure
 * @parent: pointer the base node of the subtree
 * @layer: layer number
 *
 * This function retrieves the first node of the given layer from the subtree
 */
static struct ice_sched_node *
ice_sched_get_first_node(struct ice_port_info *pi,
			 struct ice_sched_node *parent, u8 layer)
{
	return pi->sib_head[parent->tc_num][layer];
}

/**
 * ice_sched_get_tc_node - get pointer to TC node
 * @pi: port information structure
 * @tc: TC number
 *
 * This function returns the TC node pointer, or NULL when @pi, the tree root,
 * or a matching TC child is absent.
 */
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
{
	u8 i;

	if (!pi || !pi->root)
		return NULL;
	for (i = 0; i < pi->root->num_children; i++)
		if (pi->root->children[i]->tc_num == tc)
			return pi->root->children[i];
	return NULL;
}

/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node
	 * The parent array is updated below and that shifts the nodes
	 * in the array. So always pick the first child if num children > 0
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = LE32_TO_CPU(node->info.node_teid);

		ice_sched_remove_elems(hw, node->parent, 1, &teid);
	}
	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p;

		/* update the parent: close the gap left by @node in the
		 * children array
		 */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}

		/* unlink @node from the singly-linked sibling list */
		p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}

		/* update the sibling head if head is getting removed */
		if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
			pi->sib_head[node->tc_num][node->tx_sched_layer] =
				node->sibling;
	}

	/* leaf nodes have no children */
	if (node->children)
		ice_free(hw, node->children);
	ice_free(hw, node);
}

/**
 * ice_aq_get_dflt_topo - gets default scheduler topology
 * @hw: pointer to the HW struct
 * @lport: logical port number
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_branches: returns total number of queue to port branches
 * @cd: pointer to command details structure or NULL
 *
 * Get default scheduler topology (0x400)
 */
static enum ice_status
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
		     u8 *num_branches, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_topo *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_topo;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
	cmd->port_num = lport;
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_branches)
		*num_branches = cmd->num_branches;

	return status;
}

/**
 * ice_aq_add_sched_elems - adds scheduling element
 * @hw: pointer to the HW struct
 * @grps_req: the number of groups that are requested to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_added: returns total number of groups added
 * @cd: pointer to command details structure or NULL
 *
 * Add scheduling elements (0x0401)
 */
static enum ice_status
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
		       struct ice_aqc_add_elem *buf, u16 buf_size,
		       u16 *grps_added, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_added, cd);
}

/**
 * ice_aq_cfg_sched_elems - configures scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_cfgd: returns total number of elements configured
 * @cd: pointer to command details structure or NULL
 *
 * Configure scheduling elements (0x0403)
 */
static enum ice_status
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
		       struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
		       u16 *elems_cfgd, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_cfgd, cd);
}

/**
 * ice_aq_move_sched_elems - move scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to move
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_movd: returns total number of groups moved
 * @cd: pointer to command details structure or NULL
 *
 * Move scheduling elements (0x0408)
 */
enum ice_status
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
			struct ice_aqc_move_elem *buf, u16 buf_size,
			u16 *grps_movd, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_movd, cd);
}

/**
 * ice_aq_suspend_sched_elems - suspend scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to suspend
 * @buf: pointer to buffer of node TEIDs (little-endian)
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements suspended
 * @cd: pointer to command details structure or NULL
 *
 * Suspend scheduling elements (0x0409)
 */
static enum ice_status
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_resume_sched_elems - resume scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to resume
 * @buf: pointer to buffer of node TEIDs (little-endian)
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements resumed
 * @cd: pointer to command details structure or NULL
 *
 * Resume scheduling elements (0x040A)
 */
static enum ice_status
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_query_sched_res - query scheduler resource
 * @hw: pointer to the HW struct
 * @buf_size: buffer size in bytes
 * @buf: pointer to buffer
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduler resource allocation (0x0412)
 */
static enum ice_status
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
		       struct ice_aqc_query_txsched_res_resp *buf,
		       struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
 * @hw: pointer to the HW struct
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes HW nodes. The TEIDs are converted to
 * little-endian in a temporary buffer that is freed before returning.
 */
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
			       bool suspend)
{
	u16 i, buf_size, num_elem_ret = 0;
	enum ice_status status;
	__le32 *buf;

	buf_size = sizeof(*buf) * num_nodes;
	buf = (__le32 *)ice_malloc(hw, buf_size);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < num_nodes; i++)
		buf[i] = CPU_TO_LE32(node_teids[i]);

	if (suspend)
		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
						    buf_size, &num_elem_ret,
						    NULL);
	else
		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
						   buf_size, &num_elem_ret,
						   NULL);
	if (status != ICE_SUCCESS || num_elem_ret != num_nodes)
		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");

	ice_free(hw, buf);
	return status;
}

/**
 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 */
static enum ice_status
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* allocate LAN queue contexts */
	if (!vsi_ctx->lan_q_ctx[tc]) {
		vsi_ctx->lan_q_ctx[tc] = (struct ice_q_ctx *)
			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
		if (!vsi_ctx->lan_q_ctx[tc])
			return ICE_ERR_NO_MEMORY;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
		return ICE_SUCCESS;
	}
	/* num queues are increased, update the queue contexts by copying the
	 * old entries into a larger zeroed array
	 */
	if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_lan_q_entries[tc];

		q_ctx = (struct ice_q_ctx *)
			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
		if (!q_ctx)
			return ICE_ERR_NO_MEMORY;
		ice_memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
			   prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA);
		ice_free(hw, vsi_ctx->lan_q_ctx[tc]);
		vsi_ctx->lan_q_ctx[tc] = q_ctx;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
	}
	return ICE_SUCCESS;
}

/**
 * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 */
static enum ice_status
ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* allocate RDMA queue contexts */
	if (!vsi_ctx->rdma_q_ctx[tc]) {
		vsi_ctx->rdma_q_ctx[tc] = (struct ice_q_ctx *)
			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
		if (!vsi_ctx->rdma_q_ctx[tc])
			return ICE_ERR_NO_MEMORY;
		vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
		return ICE_SUCCESS;
	}
	/* num queues are increased, update the queue contexts (mirrors
	 * ice_alloc_lan_q_ctx above)
	 */
	if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_rdma_q_entries[tc];

		q_ctx = (struct ice_q_ctx *)
			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
		if (!q_ctx)
			return ICE_ERR_NO_MEMORY;
		ice_memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
			   prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA);
		ice_free(hw, vsi_ctx->rdma_q_ctx[tc]);
		vsi_ctx->rdma_q_ctx[tc] = q_ctx;
		vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
	}
	return ICE_SUCCESS;
}

/**
 * ice_aq_rl_profile - performs a rate limiting task
 * @hw: pointer to the HW struct
 * @opcode: opcode for add, query, or remove profile(s)
 * @num_profiles: the number of profiles
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_processed: number of processed add or remove profile(s) to return
 * @cd: pointer to command details structure
 *
 * RL profile function to add, query, or remove profile(s)
 */
static enum ice_status
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
		  u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
		  u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
	struct ice_aqc_rl_profile *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.rl_profile;

	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	cmd->num_profiles = CPU_TO_LE16(num_profiles);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_processed)
		*num_processed = LE16_TO_CPU(cmd->num_processed);
	return status;
}

/**
 * ice_aq_add_rl_profile - adds rate limiting profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to be add
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_added: total number of profiles added to return
 * @cd: pointer to command details structure
 *
 * Add RL profile (0x0410)
 */
static enum ice_status
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
		      struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
		      u16 *num_profiles_added, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
				 buf, buf_size, num_profiles_added, cd);
}

/**
 * ice_aq_query_rl_profile - query rate limiting profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @cd: pointer to command details structure
 *
 * Query RL profile (0x0411)
 */
enum ice_status
ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
			struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
			struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_query_rl_profiles,
				 num_profiles, buf, buf_size, NULL, cd);
}

/**
 * ice_aq_remove_rl_profile - removes RL profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to remove
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_removed: total number of profiles removed to return
 * @cd: pointer to command details structure or NULL
 *
 * Remove RL profile (0x0415)
 */
static enum ice_status
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
			 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
			 u16 *num_profiles_removed, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
				 num_profiles, buf, buf_size,
				 num_profiles_removed, cd);
}

/**
 * ice_sched_del_rl_profile - remove RL profile
 * @hw: pointer to the HW struct
 * @rl_info: rate limit profile information
 *
 * If the profile ID is not referenced anymore, it removes profile ID with
 * its
 * associated parameters from HW DB, and locally. The caller needs to
 * hold scheduler lock.
 */
static enum ice_status
ice_sched_del_rl_profile(struct ice_hw *hw,
			 struct ice_aqc_rl_profile_info *rl_info)
{
	struct ice_aqc_rl_profile_elem *buf;
	u16 num_profiles_removed;
	enum ice_status status;
	u16 num_profiles = 1;

	/* still referenced by some scheduler node — refuse to delete */
	if (rl_info->prof_id_ref != 0)
		return ICE_ERR_IN_USE;

	/* Safe to remove profile ID */
	buf = &rl_info->profile;
	status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
					  &num_profiles_removed, NULL);
	if (status || num_profiles_removed != num_profiles)
		return ICE_ERR_CFG;

	/* Delete stale entry now */
	LIST_DEL(&rl_info->list_entry);
	ice_free(hw, rl_info);
	return status;
}

/**
 * ice_sched_clear_rl_prof - clears RL prof entries
 * @pi: port information structure
 *
 * This function removes all RL profiles from HW as well as from SW DB.
 */
static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
{
	u16 ln;
	struct ice_hw *hw = pi->hw;

	for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
		struct ice_aqc_rl_profile_info *rl_prof_elem;
		struct ice_aqc_rl_profile_info *rl_prof_tmp;

		LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
					 &hw->rl_prof_list[ln],
					 ice_aqc_rl_profile_info, list_entry) {
			enum ice_status status;

			/* force the refcount to zero so deletion proceeds */
			rl_prof_elem->prof_id_ref = 0;
			status = ice_sched_del_rl_profile(hw, rl_prof_elem);
			if (status) {
				ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
				/* On error, free mem required */
				LIST_DEL(&rl_prof_elem->list_entry);
				ice_free(hw, rl_prof_elem);
			}
		}
	}
}

/**
 * ice_sched_clear_agg - clears the aggregator related information
 * @hw: pointer to the hardware structure
 *
 * This function removes aggregator list and free up aggregator related memory
 * previously allocated.
 */
void ice_sched_clear_agg(struct ice_hw *hw)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &hw->agg_list,
				 ice_sched_agg_info,
				 list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list,
					 ice_sched_agg_vsi_info, list_entry) {
			LIST_DEL(&agg_vsi_info->list_entry);
			ice_free(hw, agg_vsi_info);
		}
		LIST_DEL(&agg_info->list_entry);
		ice_free(hw, agg_info);
	}
}

/**
 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 * @pi: port information structure
 *
 * This function removes all the nodes from HW as well as from SW DB.
 */
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
	if (!pi)
		return;
	/* remove RL profiles related lists */
	ice_sched_clear_rl_prof(pi);
	if (pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}
}

/**
 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 * @pi: port information structure
 *
 * Cleanup scheduling elements from SW DB
 */
void ice_sched_clear_port(struct ice_port_info *pi)
{
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return;

	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
	ice_acquire_lock(&pi->sched_lock);
	ice_sched_clear_tx_topo(pi);
	ice_release_lock(&pi->sched_lock);
	ice_destroy_lock(&pi->sched_lock);
}

/**
 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 * @hw: pointer to the HW struct
 *
 * Cleanup scheduling elements from SW DB for all the ports
 */
void ice_sched_cleanup_all(struct ice_hw *hw)
{
	if (!hw)
		return;

	if (hw->layer_info) {
		ice_free(hw, hw->layer_info);
		hw->layer_info = NULL;
	}

	ice_sched_clear_port(hw->port_info);

	hw->num_tx_sched_layers = 0;
	hw->num_tx_sched_phys_layers = 0;
	hw->flattened_layers = 0;
	hw->max_cgds = 0;
}

/**
 * ice_aq_cfg_node_attr - configure nodes' per-cone flattening attributes
 * @hw: pointer to the HW struct
 * @num_nodes: the number of nodes whose attributes to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Configure Node Attributes (0x0417)
 */
enum ice_status
ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
		     struct ice_aqc_node_attr_elem *buf, u16 buf_size,
		     struct ice_sq_cd *cd)
{
	struct ice_aqc_node_attr *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.node_attr;
	ice_fill_dflt_direct_cmd_desc(&desc,
				      ice_aqc_opc_cfg_node_attr);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_entries = CPU_TO_LE16(num_nodes);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping
 * @hw: pointer to the HW struct
 * @num_l2_nodes: the number of L2 nodes whose CGDs to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Configure L2 Node CGD (0x0414)
 */
enum ice_status
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
		       struct ice_aqc_cfg_l2_node_cgd_elem *buf,
		       u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_cfg_l2_node_cgd *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.cfg_l2_node_cgd;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_l2_node_cgd);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_l2_nodes = CPU_TO_LE16(num_l2_nodes);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_sched_add_elems - add nodes to HW and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the TEID of first node
 * @prealloc_nodes: preallocated nodes struct for software DB
 *
 * This function adds nodes to HW as well as to SW DB for a given layer
 */
enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
		    u16 *num_nodes_added, u32 *first_node_teid,
		    struct ice_sched_node **prealloc_nodes)
{
	struct ice_sched_node *prev, *new_node;
	struct ice_aqc_add_elem *buf;
	u16 i, num_groups_added = 0;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u16 buf_size;
	u32 teid;

	buf_size = ice_struct_size(buf, generic, num_nodes);
	buf = (struct ice_aqc_add_elem *)ice_malloc(hw, buf_size);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* build one add-group request holding all new generic SEs with
	 * default CIR/EIR bandwidth profiles
	 */
	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
	for (i = 0; i < num_nodes; i++) {
		buf->generic[i].parent_teid = parent->info.node_teid;
		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
		buf->generic[i].data.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->generic[i].data.generic = 0;
		buf->generic[i].data.cir_bw.bw_profile_idx =
			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.cir_bw.bw_alloc =
			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
		buf->generic[i].data.eir_bw.bw_profile_idx =
			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.eir_bw.bw_alloc =
			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
	}

	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
					&num_groups_added, NULL);
	if (status != ICE_SUCCESS || num_groups_added != 1) {
		ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
			  hw->adminq.sq_last_status);
		ice_free(hw, buf);
		return ICE_ERR_CFG;
	}

	*num_nodes_added = num_nodes;
	/* add nodes to the SW DB */
	for (i = 0; i < num_nodes; i++) {
		if (prealloc_nodes)
			status = ice_sched_add_node(pi, layer, &buf->generic[i], prealloc_nodes[i]);
		else
			status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL);

		if (status != ICE_SUCCESS) {
			ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
				  status);
			break;
		}

		teid = LE32_TO_CPU(buf->generic[i].node_teid);
		new_node = ice_sched_find_node_by_teid(parent, teid);
		if (!new_node) {
			ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid);
			break;
		}

		new_node->sibling = NULL;
		new_node->tc_num = tc_node->tc_num;

		/* add it to previous node sibling pointer */
		/* Note: siblings are not linked across branches */
		prev = ice_sched_get_first_node(pi, tc_node, layer);
		if (prev && prev != new_node) {
			while (prev->sibling)
				prev = prev->sibling;
			prev->sibling = new_node;
		}

		/* initialize the sibling head */
		if (!pi->sib_head[tc_node->tc_num][layer])
			pi->sib_head[tc_node->tc_num][layer] = new_node;

		if (i == 0)
			*first_node_teid = teid;
	}

	ice_free(hw, buf);
	return status;
}

/**
 * ice_sched_add_nodes_to_hw_layer - Add nodes to hw layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * Add nodes into specific hw
layer. 1086 */ 1087 static enum ice_status 1088 ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, 1089 struct ice_sched_node *tc_node, 1090 struct ice_sched_node *parent, u8 layer, 1091 u16 num_nodes, u32 *first_node_teid, 1092 u16 *num_nodes_added) 1093 { 1094 u16 max_child_nodes; 1095 1096 *num_nodes_added = 0; 1097 1098 if (!num_nodes) 1099 return ICE_SUCCESS; 1100 1101 if (!parent || layer < pi->hw->sw_entry_point_layer) 1102 return ICE_ERR_PARAM; 1103 1104 /* max children per node per layer */ 1105 max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; 1106 1107 /* current number of children + required nodes exceed max children */ 1108 if ((parent->num_children + num_nodes) > max_child_nodes) { 1109 /* Fail if the parent is a TC node */ 1110 if (parent == tc_node) 1111 return ICE_ERR_CFG; 1112 return ICE_ERR_MAX_LIMIT; 1113 } 1114 1115 return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, 1116 num_nodes_added, first_node_teid, NULL); 1117 } 1118 1119 /** 1120 * ice_sched_add_nodes_to_layer - Add nodes to a given layer 1121 * @pi: port information structure 1122 * @tc_node: pointer to TC node 1123 * @parent: pointer to parent node 1124 * @layer: layer number to add nodes 1125 * @num_nodes: number of nodes to be added 1126 * @first_node_teid: pointer to the first node TEID 1127 * @num_nodes_added: pointer to number of nodes added 1128 * 1129 * This function add nodes to a given layer. 
 */
static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
			     struct ice_sched_node *tc_node,
			     struct ice_sched_node *parent, u8 layer,
			     u16 num_nodes, u32 *first_node_teid,
			     u16 *num_nodes_added)
{
	u32 *first_teid_ptr = first_node_teid;
	u16 new_num_nodes = num_nodes;
	enum ice_status status = ICE_SUCCESS;
	u32 temp;

	*num_nodes_added = 0;
	/* keep requesting nodes, moving across sibling parents as each one
	 * fills up, until the full request is satisfied or a hard error
	 */
	while (*num_nodes_added < num_nodes) {
		u16 max_child_nodes, num_added = 0;

		status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
							 layer, new_num_nodes,
							 first_teid_ptr,
							 &num_added);
		if (status == ICE_SUCCESS)
			*num_nodes_added += num_added;
		/* added more nodes than requested ? */
		if (*num_nodes_added > num_nodes) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
				  *num_nodes_added);
			status = ICE_ERR_CFG;
			break;
		}
		/* break if all the nodes are added successfully */
		if (status == ICE_SUCCESS && (*num_nodes_added == num_nodes))
			break;
		/* break if the error is not max limit */
		if (status != ICE_SUCCESS && status != ICE_ERR_MAX_LIMIT)
			break;
		/* Exceeded the max children */
		max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
		/* utilize all the spaces if the parent is not full */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
		} else {
			/* This parent is full, try the next sibling */
			/* NOTE(review): parent->sibling may be NULL here; the
			 * next ice_sched_add_nodes_to_hw_layer call rejects a
			 * NULL parent with ICE_ERR_PARAM, which exits the loop
			 */
			parent = parent->sibling;
			/* Don't modify the first node TEID memory if the
			 * first node was added already in the above call.
			 * Instead send some temp memory for all other
			 * recursive calls.
			 */
			if (num_added)
				first_teid_ptr = &temp;

			new_num_nodes = num_nodes - *num_nodes_added;
		}
	}
	return status;
}

/**
 * ice_sched_get_qgrp_layer - get the current queue group layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current queue group layer number
 */
static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
{
	/* It's always total layers - 1, the array is 0 relative so -2 */
	return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
}

/**
 * ice_sched_get_vsi_layer - get the current VSI layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current VSI layer number
 */
static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
{
	/* Num Layers       VSI layer
	 *     9               6
	 *     7               4
	 *     5 or less       sw_entry_point_layer
	 */
	/* calculate the VSI layer based on number of layers. */
	if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
		return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
	else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS)
		/* qgroup and VSI layers are same */
		return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
	return hw->sw_entry_point_layer;
}

/**
 * ice_sched_get_agg_layer - get the current aggregator layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current aggregator layer number
 */
static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
{
	/* Num Layers       aggregator layer
	 *     9               4
	 *     7 or less       sw_entry_point_layer
	 */
	/* calculate the aggregator layer based on number of layers.
	 */
	if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
		return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
	return hw->sw_entry_point_layer;
}

/**
 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
 * @pi: port information structure
 *
 * This function removes the leaf node that was created by the FW
 * during initialization
 */
static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	node = pi->root;
	/* the default topology is a single chain: walk first children down
	 * to the bottom-most node
	 */
	while (node) {
		if (!node->num_children)
			break;
		node = node->children[0];
	}
	if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = LE32_TO_CPU(node->info.node_teid);
		enum ice_status status;

		/* remove the default leaf node */
		status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
		if (!status)
			ice_free_sched_node(pi, node);
	}
}

/**
 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
 * @pi: port information structure
 *
 * This function frees all the nodes except root and TC that were created by
 * the FW during initialization
 */
static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	/* the leaf must go first; it needs an explicit FW remove */
	ice_rm_dflt_leaf_node(pi);

	/* remove the default nodes except TC and root nodes */
	node = pi->root;
	while (node) {
		/* ice_free_sched_node frees the whole subtree, so one match
		 * below the SW entry point is enough
		 */
		if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
			ice_free_sched_node(pi, node);
			break;
		}

		if (!node->num_children)
			break;
		node = node->children[0];
	}
}

/**
 * ice_sched_init_port - Initialize scheduler by querying information from FW
 * @pi: port info structure for the tree to cleanup
 *
 * This function is the
 initial call to find the total number of Tx scheduler
 * resources, default topology created by firmware and storing the information
 * in SW DB.
 */
enum ice_status ice_sched_init_port(struct ice_port_info *pi)
{
	struct ice_aqc_get_topo_elem *buf;
	enum ice_status status;
	struct ice_hw *hw;
	u8 num_branches;
	u16 num_elems;
	u8 i, j;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = (struct ice_aqc_get_topo_elem *)ice_malloc(hw,
							 ICE_AQ_MAX_BUF_LEN);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Query default scheduling tree topology */
	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
				      &num_branches, NULL);
	if (status)
		goto err_init_port;

	/* num_branches should be between 1-8 */
	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
			  num_branches);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* get the number of elements on the default/first branch */
	num_elems = LE16_TO_CPU(buf[0].hdr.num_elems);

	/* num_elems should always be between 1-9 */
	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
			  num_elems);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* If the last node is a leaf node then the index of the queue group
	 * layer is two less than the number of elements.
	 */
	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
	    ICE_AQC_ELEM_TYPE_LEAF)
		pi->last_node_teid =
			LE32_TO_CPU(buf[0].generic[num_elems - 2].node_teid);
	else
		pi->last_node_teid =
			LE32_TO_CPU(buf[0].generic[num_elems - 1].node_teid);

	/* Insert the Tx Sched root node */
	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
	if (status)
		goto err_init_port;

	/* Parse the default tree and cache the information */
	for (i = 0; i < num_branches; i++) {
		num_elems = LE16_TO_CPU(buf[i].hdr.num_elems);

		/* Skip root element as already inserted */
		for (j = 1; j < num_elems; j++) {
			/* update the sw entry point */
			/* NOTE(review): entry point is probed from branch 0
			 * while nodes are added from branch i - presumably
			 * all branches share the same layer layout; confirm
			 */
			if (buf[0].generic[j].data.elem_type ==
			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
				hw->sw_entry_point_layer = j;

			status = ice_sched_add_node(pi, j, &buf[i].generic[j], NULL);
			if (status)
				goto err_init_port;
		}
	}

	/* Remove the default nodes. */
	if (pi->root)
		ice_sched_rm_dflt_nodes(pi);

	/* initialize the port for handling the scheduler tree */
	pi->port_state = ICE_SCHED_PORT_STATE_READY;
	ice_init_lock(&pi->sched_lock);
	for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
		INIT_LIST_HEAD(&hw->rl_prof_list[i]);

err_init_port:
	/* on the success path status is 0, so the root is kept */
	if (status && pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}

	ice_free(hw, buf);
	return status;
}

/**
 * ice_sched_get_node - Get the struct ice_sched_node for given TEID
 * @pi: port information structure
 * @teid: Scheduler node TEID
 *
 * This function retrieves the ice_sched_node struct for given TEID from
 * the SW DB and returns it to the caller.
 */
struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid)
{
	struct ice_sched_node *node;

	if (!pi)
		return NULL;

	/* Find the node starting from root */
	ice_acquire_lock(&pi->sched_lock);
	node = ice_sched_find_node_by_teid(pi->root, teid);
	ice_release_lock(&pi->sched_lock);

	if (!node)
		ice_debug(pi->hw, ICE_DBG_SCHED, "Node not found for teid=0x%x\n", teid);

	return node;
}

/**
 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
 * @hw: pointer to the HW struct
 *
 * query FW for allocated scheduler resources and store in HW struct
 */
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
{
	struct ice_aqc_query_txsched_res_resp *buf;
	enum ice_status status = ICE_SUCCESS;
	__le16 max_sibl;
	u8 i;

	/* already queried once - the cached copy is authoritative */
	if (hw->layer_info)
		return status;

	buf = (struct ice_aqc_query_txsched_res_resp *)
		ice_malloc(hw, sizeof(*buf));
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
	if (status)
		goto sched_query_out;

	hw->num_tx_sched_layers =
		(u8)LE16_TO_CPU(buf->sched_props.logical_levels);
	hw->num_tx_sched_phys_layers =
		(u8)LE16_TO_CPU(buf->sched_props.phys_levels);
	hw->flattened_layers = buf->sched_props.flattening_bitmap;
	hw->max_cgds = buf->sched_props.max_pf_cgds;

	/* max sibling group size of current layer refers to the max children
	 * of the below layer node.
	 * layer 1 node max children will be layer 2 max sibling group size
	 * layer 2 node max children will be layer 3 max sibling group size
	 * and so on. This array will be populated from root (index 0) to
	 * qgroup layer 7. Leaf node has no children.
	 */
	for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
		max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
		hw->max_children[i] = LE16_TO_CPU(max_sibl);
	}

	/* cache the full per-layer property table for later lookups */
	hw->layer_info = (struct ice_aqc_layer_props *)
			 ice_memdup(hw, buf->layer_props,
				    (hw->num_tx_sched_layers *
				     sizeof(*hw->layer_info)),
				    ICE_NONDMA_TO_NONDMA);
	if (!hw->layer_info) {
		status = ICE_ERR_NO_MEMORY;
		goto sched_query_out;
	}

sched_query_out:
	ice_free(hw, buf);
	return status;
}

/**
 * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
 * @hw: pointer to the HW struct
 *
 * Determine the PSM clock frequency and store in HW struct
 */
void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
{
	u32 val, clk_src;

	val = rd32(hw, GLGEN_CLKSTAT_SRC);
	clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
		GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;

	switch (clk_src) {
	case PSM_CLK_SRC_367_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_416_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_446_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_390_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
		break;

	/* default condition is not required as clk_src is restricted
	 * to a 2-bit value from GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M mask.
	 * The above switch statements cover the possible values of
	 * this variable.
	 */
	}
}

/**
 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
 * @hw: pointer to the HW struct
 * @base: pointer to the base node
 * @node: pointer to the node to search
 *
 * This function checks whether a given node is part of the base node
 * subtree or not
 */
bool
ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
			       struct ice_sched_node *node)
{
	u8 i;

	for (i = 0; i < base->num_children; i++) {
		struct ice_sched_node *child = base->children[i];

		if (node == child)
			return true;

		/* children of this child are at deeper layers; once the
		 * child is already below the target's layer the target
		 * cannot be in that subtree
		 */
		if (child->tx_sched_layer > node->tx_sched_layer)
			return false;

		/* this recursion is intentional, and wouldn't
		 * go more than 8 calls
		 */
		if (ice_sched_find_node_in_subtree(hw, child, node))
			return true;
	}
	return false;
}

/**
 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
 * @pi: port information structure
 * @vsi_node: software VSI handle
 * @qgrp_node: first queue group node identified for scanning
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node by scanning
 * qgrp_node and its siblings for the queue group with the fewest number
 * of queues currently assigned.
 */
static struct ice_sched_node *
ice_sched_get_free_qgrp(struct ice_port_info *pi,
			struct ice_sched_node *vsi_node,
			struct ice_sched_node *qgrp_node, u8 owner)
{
	struct ice_sched_node *min_qgrp;
	u8 min_children;

	if (!qgrp_node)
		return qgrp_node;
	min_children = qgrp_node->num_children;
	/* an empty queue group cannot be beaten - return it immediately */
	if (!min_children)
		return qgrp_node;
	min_qgrp = qgrp_node;
	/* scan all queue groups until find a node which has less than the
	 * minimum number of children.
 This way all queue group nodes get
	 * equal number of shares and active. The bandwidth will be equally
	 * distributed across all queues.
	 */
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < min_children &&
			    qgrp_node->owner == owner) {
				/* replace the new min queue group node */
				min_qgrp = qgrp_node;
				min_children = min_qgrp->num_children;
				/* break if it has no children, */
				if (!min_children)
					break;
			}
		qgrp_node = qgrp_node->sibling;
	}
	return min_qgrp;
}

/**
 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: branch number
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node
 */
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			   u8 owner)
{
	struct ice_sched_node *vsi_node, *qgrp_node;
	struct ice_vsi_ctx *vsi_ctx;
	u8 qgrp_layer, vsi_layer;
	u16 max_children;

	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
	vsi_layer = ice_sched_get_vsi_layer(pi->hw);
	max_children = pi->hw->max_children[qgrp_layer];

	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return NULL;
	vsi_node = vsi_ctx->sched.vsi_node[tc];
	/* validate invalid VSI ID */
	if (!vsi_node)
		return NULL;

	/* If the queue group and vsi layer are same then queues
	 * are all attached directly to VSI
	 */
	if (qgrp_layer == vsi_layer)
		return vsi_node;

	/* get the first queue group node from VSI sub-tree */
	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
	/* find any qgroup with spare capacity owned by the caller */
	while (qgrp_node) {
		/* make sure the qgroup node is part of the
 VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < max_children &&
			    qgrp_node->owner == owner)
				break;
		qgrp_node = qgrp_node->sibling;
	}

	/* Select the best queue group */
	return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
}

/**
 * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
 * @pi: pointer to the port information structure
 * @tc_node: pointer to the TC node
 * @vsi_handle: software VSI handle
 *
 * This function retrieves a VSI node for a given VSI ID from a given
 * TC branch
 */
struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		       u16 vsi_handle)
{
	struct ice_sched_node *node;
	u8 vsi_layer;

	vsi_layer = ice_sched_get_vsi_layer(pi->hw);
	node = ice_sched_get_first_node(pi, tc_node, vsi_layer);

	/* Check whether it already exists */
	/* walk the sibling list at the VSI layer; NULL when not found */
	while (node) {
		if (node->vsi_handle == vsi_handle)
			return node;
		node = node->sibling;
	}

	return node;
}

/**
 * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
 * @pi: pointer to the port information structure
 * @tc_node: pointer to the TC node
 * @agg_id: aggregator ID
 *
 * This function retrieves an aggregator node for a given aggregator ID from
 * a given TC branch
 */
static struct ice_sched_node *
ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		       u32 agg_id)
{
	struct ice_sched_node *node;
	struct ice_hw *hw = pi->hw;
	u8 agg_layer;

	if (!hw)
		return NULL;
	agg_layer = ice_sched_get_agg_layer(hw);
	node = ice_sched_get_first_node(pi, tc_node, agg_layer);

	/* Check whether it already exists */
	while (node) {
		if (node->agg_id == agg_id)
			return node;
		node = node->sibling;
	}

	return node;
}

/**
 * ice_sched_check_node - Compare node parameters between SW DB and HW DB
 * @hw: pointer to the HW struct
 * @node: pointer to the ice_sched_node struct
 *
 * This function queries and compares the HW element with SW DB node parameters
 */
static bool ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node)
{
	struct ice_aqc_txsched_elem_data buf;
	enum ice_status status;
	u32 node_teid;

	node_teid = LE32_TO_CPU(node->info.node_teid);
	/* re-read the element from FW so the comparison is against live HW */
	status = ice_sched_query_elem(hw, node_teid, &buf);
	if (status != ICE_SUCCESS)
		return false;

	if (memcmp(&buf, &node->info, sizeof(buf))) {
		ice_debug(hw, ICE_DBG_SCHED, "Node mismatch for teid=0x%x\n",
			  node_teid);
		return false;
	}

	return true;
}

/**
 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
 * @hw: pointer to the HW struct
 * @num_qs: number of queues
 * @num_nodes: num nodes array
 *
 * This function calculates the number of VSI child nodes based on the
 * number of queues.
 */
static void
ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
{
	u16 num = num_qs;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);

	/* calculate num nodes from queue group to VSI layer */
	/* walk bottom-up: each layer needs enough parents for the layer
	 * below it
	 */
	for (i = qgl; i > vsil; i--) {
		/* round to the next integer if there is a remainder */
		num = DIVIDE_AND_ROUND_UP(num, hw->max_children[i]);

		/* need at least one node */
		num_nodes[i] = num ?
 num : 1;
	}
}

/**
 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to the TC node
 * @num_nodes: pointer to the num nodes that needs to be added per layer
 * @owner: node owner (LAN or RDMA)
 *
 * This function adds the VSI child nodes to tree. It gets called for
 * LAN and RDMA separately.
 */
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
			      struct ice_sched_node *tc_node, u16 *num_nodes,
			      u8 owner)
{
	struct ice_sched_node *parent, *node;
	struct ice_hw *hw = pi->hw;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);
	parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	/* build top-down from the layer just below the VSI to the qgroup
	 * layer
	 */
	for (i = vsil + 1; i <= qgl; i++) {
		enum ice_status status;

		if (!parent)
			return ICE_ERR_CFG;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* stamp ownership on every new sibling at this layer */
			node = parent;
			while (node) {
				node->owner = owner;
				node = node->sibling;
			}
		} else {
			parent = parent->children[0];
		}
	}

	return ICE_SUCCESS;
}

/**
 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
 * @pi: pointer to the port info structure
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function calculates the number of supported nodes needed to add this
 * VSI into Tx tree
 including the VSI, parent and intermediate nodes in below
 * layers
 */
static void
ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
				 struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *node;
	u8 vsil;
	int i;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	/* walk upward from the VSI layer toward the SW entry point */
	for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
		/* Add intermediate nodes if TC has no children and
		 * need at least one node for VSI
		 */
		if (!tc_node->num_children || i == vsil) {
			num_nodes[i]++;
		} else {
			/* If intermediate nodes are reached max children
			 * then add a new one.
			 */
			node = ice_sched_get_first_node(pi, tc_node, (u8)i);
			/* scan all the siblings */
			while (node) {
				if (node->num_children <
				    pi->hw->max_children[i])
					break;
				node = node->sibling;
			}

			/* tree has one intermediate node to add this new VSI.
			 * So no need to calculate supported nodes for below
			 * layers.
			 */
			if (node)
				break;
			/* all the nodes are full, allocate a new one */
			num_nodes[i]++;
		}
}

/**
 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function adds the VSI supported nodes into Tx tree including the
 * VSI, its parent and intermediate nodes in below layers
 */
static enum ice_status
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
				struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *parent = tc_node;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, vsil;

	if (!pi)
		return ICE_ERR_PARAM;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	/* build top-down from the SW entry point layer to the VSI layer */
	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
		enum ice_status status;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      i, num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;

		/* only the bottom-most (VSI layer) node carries the handle */
		if (i == vsil)
			parent->vsi_handle = vsi_handle;
	}

	return ICE_SUCCESS;
}

/**
 * ice_sched_add_vsi_to_topo - add a new VSI into tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 *
 * This function adds a new VSI into scheduler tree
 */
static enum ice_status
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
	u16
 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *tc_node;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;

	/* calculate number of supported nodes needed for this VSI */
	ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);

	/* add VSI supported nodes to TC subtree */
	return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
					       num_nodes);
}

/**
 * ice_sched_update_vsi_child_nodes - update VSI child nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @new_numqs: new number of max queues
 * @owner: owner of this subtree
 *
 * This function updates the VSI child nodes based on the number of queues
 */
static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
				 u8 tc, u16 new_numqs, u8 owner)
{
	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *vsi_node;
	struct ice_sched_node *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u16 prev_numqs;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return ICE_ERR_CFG;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;

	if (owner == ICE_SCHED_NODE_OWNER_LAN)
		prev_numqs = vsi_ctx->sched.max_lanq[tc];
	else
		prev_numqs = vsi_ctx->sched.max_rdmaq[tc];
	/* num queues are not changed or less than the previous number */
	if (new_numqs <= prev_numqs)
		return status;
	if (owner == ICE_SCHED_NODE_OWNER_LAN) {
		status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
		if (status)
			return status;
	} else {
		status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);
		if (status)
			return status;
	}

	if (new_numqs)
		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
	/* Keep the max number of queue configuration all the time. Update the
	 * tree only if number of queues > previous number of queues. This may
	 * leave some extra nodes in the tree if number of queues < previous
	 * number but that wouldn't harm anything. Removing those extra nodes
	 * may complicate the code if those nodes are part of SRL or
	 * individually rate limited.
	 */
	status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
					       new_num_nodes, owner);
	if (status)
		return status;
	/* record the new high-water mark only after the tree update succeeds */
	if (owner == ICE_SCHED_NODE_OWNER_LAN)
		vsi_ctx->sched.max_lanq[tc] = new_numqs;
	else
		vsi_ctx->sched.max_rdmaq[tc] = new_numqs;

	return ICE_SUCCESS;
}

/**
 * ice_sched_cfg_vsi - configure the new/existing VSI
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @maxqs: max number of queues
 * @owner: LAN or RDMA
 * @enable: TC enabled or disabled
 *
 * This function adds/updates VSI nodes based on the number of queues. If TC is
 * enabled and VSI is in suspended state then resume the VSI back. If TC is
 * disabled then suspend the VSI if it is not already.
 */
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
		  u8 owner, bool enable)
{
	struct ice_sched_node *vsi_node, *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;

	ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;
	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);

	/* suspend the VSI if TC is not enabled */
	if (!enable) {
		if (vsi_node && vsi_node->in_use) {
			u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);

			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
								true);
			/* only mark suspended after FW confirms */
			if (!status)
				vsi_node->in_use = false;
		}
		return status;
	}

	/* TC is enabled, if it is a new VSI then add it to the tree */
	if (!vsi_node) {
		status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
		if (status)
			return status;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			return ICE_ERR_CFG;

		vsi_ctx->sched.vsi_node[tc] = vsi_node;
		vsi_node->in_use = true;
		/* invalidate the max queues whenever VSI gets added first time
		 * into the scheduler tree (boot or after reset). We need to
		 * recreate the child nodes all the time in these cases.
		 */
		vsi_ctx->sched.max_lanq[tc] = 0;
		vsi_ctx->sched.max_rdmaq[tc] = 0;
	}

	/* update the VSI child nodes */
	status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
						  owner);
	if (status)
		return status;

	/* TC is enabled, resume the VSI if it is in the suspend state */
	if (!vsi_node->in_use) {
		u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);

		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
		if (!status)
			vsi_node->in_use = true;
	}

	return status;
}

/**
 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function removes single aggregator VSI info entry from
 * aggregator list.
 */
static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	/* safe iterators: the matching entry is unlinked inside the loop */
	LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &pi->hw->agg_list,
				 ice_sched_agg_info,
				 list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list,
					 ice_sched_agg_vsi_info, list_entry)
			if (agg_vsi_info->vsi_handle == vsi_handle) {
				LIST_DEL(&agg_vsi_info->list_entry);
				ice_free(pi->hw, agg_vsi_info);
				return;
			}
	}
}

/**
 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
 * @node: pointer to the sub-tree node
 *
 * This function checks for a leaf node presence in a given sub-tree node.
 */
static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
{
	u8 i;

	/* recursive walk; depth is bounded by the scheduler layer count */
	for (i = 0; i < node->num_children; i++)
		if (ice_sched_is_leaf_node_present(node->children[i]))
			return true;
	/* check for a leaf node */
	return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
}

/**
 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @owner: LAN or RDMA
 *
 * This function removes the VSI and its LAN or RDMA children nodes from the
 * scheduler tree. It fails with ICE_ERR_IN_USE if any leaf (queue) node is
 * still present under the VSI in any TC.
 */
static enum ice_status
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_vsi_ctx *vsi_ctx;
	u8 i;

	ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return status;
	ice_acquire_lock(&pi->sched_lock);
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		goto exit_sched_rm_vsi_cfg;

	ice_for_each_traffic_class(i) {
		struct ice_sched_node *vsi_node, *tc_node;
		u8 j = 0;

		tc_node = ice_sched_get_tc_node(pi, i);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		/* refuse to remove a VSI that still has queues attached */
		if (ice_sched_is_leaf_node_present(vsi_node)) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
			status = ICE_ERR_IN_USE;
			goto exit_sched_rm_vsi_cfg;
		}
		while (j < vsi_node->num_children) {
			if (vsi_node->children[j]->owner == owner) {
				ice_free_sched_node(pi, vsi_node->children[j]);

				/* reset the counter again since the num
				 * children will be updated after node removal
				 */
				j = 0;
			} else {
				j++;
			}
		}
		/* remove the VSI if it has no children */
		if (!vsi_node->num_children) {
			ice_free_sched_node(pi, vsi_node);
			vsi_ctx->sched.vsi_node[i] = NULL;

			/* clean up aggregator related VSI info if any */
			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
		}
		if (owner == ICE_SCHED_NODE_OWNER_LAN)
			vsi_ctx->sched.max_lanq[i] = 0;
		else
			vsi_ctx->sched.max_rdmaq[i] = 0;
	}
	status = ICE_SUCCESS;

exit_sched_rm_vsi_cfg:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function clears the VSI and its LAN children nodes from scheduler tree
 * for all TCs.
 */
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function clears the VSI and its RDMA children nodes from scheduler tree
 * for all TCs.
2248 */ 2249 enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle) 2250 { 2251 return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA); 2252 } 2253 2254 /** 2255 * ice_sched_is_tree_balanced - Check tree nodes are identical or not 2256 * @hw: pointer to the HW struct 2257 * @node: pointer to the ice_sched_node struct 2258 * 2259 * This function compares all the nodes for a given tree against HW DB nodes 2260 * This function needs to be called with the port_info->sched_lock held 2261 */ 2262 bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node) 2263 { 2264 u8 i; 2265 2266 /* start from the leaf node */ 2267 for (i = 0; i < node->num_children; i++) 2268 /* Fail if node doesn't match with the SW DB 2269 * this recursion is intentional, and wouldn't 2270 * go more than 9 calls 2271 */ 2272 if (!ice_sched_is_tree_balanced(hw, node->children[i])) 2273 return false; 2274 2275 return ice_sched_check_node(hw, node); 2276 } 2277 2278 /** 2279 * ice_aq_query_node_to_root - retrieve the tree topology for a given node TEID 2280 * @hw: pointer to the HW struct 2281 * @node_teid: node TEID 2282 * @buf: pointer to buffer 2283 * @buf_size: buffer size in bytes 2284 * @cd: pointer to command details structure or NULL 2285 * 2286 * This function retrieves the tree topology from the firmware for a given 2287 * node TEID to the root node. 
 */
enum ice_status
ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
			  struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
			  struct ice_sq_cd *cd)
{
	struct ice_aqc_query_node_to_root *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.query_node_to_root;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_node_to_root);
	cmd->teid = CPU_TO_LE32(node_teid);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_get_agg_info - get the aggregator ID
 * @hw: pointer to the hardware structure
 * @agg_id: aggregator ID
 *
 * This function validates aggregator ID. The function returns info if
 * aggregator ID is present in list otherwise it returns null.
 */
static struct ice_sched_agg_info *
ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
{
	struct ice_sched_agg_info *agg_info;

	/* linear scan of the per-HW aggregator list */
	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id)
			return agg_info;

	return NULL;
}

/**
 * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
 * @hw: pointer to the HW struct
 * @node: pointer to a child node
 * @num_nodes: num nodes count array
 *
 * This function walks through the aggregator subtree to find a free parent
 * node for a VSI. As a side effect it clears num_nodes[] entries for layers
 * that already have spare capacity.
 */
static struct ice_sched_node *
ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
			      u16 *num_nodes)
{
	u8 l = node->tx_sched_layer;
	u8 vsil, i;

	vsil = ice_sched_get_vsi_layer(hw);

	/* Is it VSI parent layer ? */
	if (l == vsil - 1)
		return (node->num_children < hw->max_children[l]) ? node : NULL;

	/* We have intermediate nodes. Let's walk through the subtree. If the
	 * intermediate node has space to add a new node then clear the count
	 */
	if (node->num_children < hw->max_children[l])
		num_nodes[l] = 0;
	/* The below recursive call is intentional and wouldn't go more than
	 * 2 or 3 iterations.
	 */

	for (i = 0; i < node->num_children; i++) {
		struct ice_sched_node *parent;

		parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
						       num_nodes);
		if (parent)
			return parent;
	}

	return NULL;
}

/**
 * ice_sched_update_parent - update the new parent in SW DB
 * @new_parent: pointer to a new parent node
 * @node: pointer to a child node
 *
 * This function removes the child from the old parent and adds it to a new
 * parent
 */
void
ice_sched_update_parent(struct ice_sched_node *new_parent,
			struct ice_sched_node *node)
{
	struct ice_sched_node *old_parent;
	u8 i, j;

	old_parent = node->parent;

	/* update the old parent children */
	for (i = 0; i < old_parent->num_children; i++)
		if (old_parent->children[i] == node) {
			/* shift remaining children left to fill the hole */
			for (j = i + 1; j < old_parent->num_children; j++)
				old_parent->children[j - 1] =
					old_parent->children[j];
			old_parent->num_children--;
			break;
		}

	/* now move the node to a new parent */
	new_parent->children[new_parent->num_children++] = node;
	node->parent = new_parent;
	node->info.parent_teid = new_parent->info.node_teid;
}

/**
 * ice_sched_move_nodes - move child nodes to a given parent
 * @pi: port information structure
 * @parent: pointer to parent node
 * @num_items: number of child nodes to be moved
 * @list: pointer to child node teids
 *
 * This function moves the child nodes to a given parent.
2408 */ 2409 enum ice_status 2410 ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, 2411 u16 num_items, u32 *list) 2412 { 2413 struct ice_aqc_move_elem *buf; 2414 struct ice_sched_node *node; 2415 enum ice_status status = ICE_SUCCESS; 2416 u16 i, grps_movd = 0; 2417 struct ice_hw *hw; 2418 u16 buf_len; 2419 2420 hw = pi->hw; 2421 2422 if (!parent || !num_items) 2423 return ICE_ERR_PARAM; 2424 2425 /* Does parent have enough space */ 2426 if (parent->num_children + num_items > 2427 hw->max_children[parent->tx_sched_layer]) 2428 return ICE_ERR_AQ_FULL; 2429 2430 buf_len = ice_struct_size(buf, teid, 1); 2431 buf = (struct ice_aqc_move_elem *)ice_malloc(hw, buf_len); 2432 if (!buf) 2433 return ICE_ERR_NO_MEMORY; 2434 2435 for (i = 0; i < num_items; i++) { 2436 node = ice_sched_find_node_by_teid(pi->root, list[i]); 2437 if (!node) { 2438 status = ICE_ERR_PARAM; 2439 goto move_err_exit; 2440 } 2441 2442 buf->hdr.src_parent_teid = node->info.parent_teid; 2443 buf->hdr.dest_parent_teid = parent->info.node_teid; 2444 buf->teid[0] = node->info.node_teid; 2445 buf->hdr.num_elems = CPU_TO_LE16(1); 2446 status = ice_aq_move_sched_elems(hw, 1, buf, buf_len, 2447 &grps_movd, NULL); 2448 if (status && grps_movd != 1) { 2449 status = ICE_ERR_CFG; 2450 goto move_err_exit; 2451 } 2452 2453 /* update the SW DB */ 2454 ice_sched_update_parent(parent, node); 2455 } 2456 2457 move_err_exit: 2458 ice_free(hw, buf); 2459 return status; 2460 } 2461 2462 /** 2463 * ice_sched_move_vsi_to_agg - move VSI to aggregator node 2464 * @pi: port information structure 2465 * @vsi_handle: software VSI handle 2466 * @agg_id: aggregator ID 2467 * @tc: TC number 2468 * 2469 * This function moves a VSI to an aggregator node or its subtree. 2470 * Intermediate nodes may be created if required. 
 */
static enum ice_status
ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
			  u8 tc)
{
	struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	u32 first_node_teid, vsi_teid;
	enum ice_status status;
	u16 num_nodes_added;
	u8 aggl, vsil, i;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		return ICE_ERR_DOES_NOT_EXIST;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Is this VSI already part of given aggregator? */
	if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
		return ICE_SUCCESS;

	aggl = ice_sched_get_agg_layer(pi->hw);
	vsil = ice_sched_get_vsi_layer(pi->hw);

	/* set intermediate node count to 1 between aggregator and VSI layers */
	for (i = aggl + 1; i < vsil; i++)
		num_nodes[i] = 1;

	/* Check if the aggregator subtree has any free node to add the VSI */
	for (i = 0; i < agg_node->num_children; i++) {
		parent = ice_sched_get_free_vsi_parent(pi->hw,
						       agg_node->children[i],
						       num_nodes);
		if (parent)
			goto move_nodes;
	}

	/* add new nodes; no free parent found, so build the intermediate
	 * chain from the aggregator layer down to the VSI parent layer
	 */
	parent = agg_node;
	for (i = aggl + 1; i < vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;
	}

move_nodes:
	vsi_teid = LE32_TO_CPU(vsi_node->info.node_teid);
	return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
}

/**
 * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
 * @pi: port information structure
 * @agg_info: aggregator info
 * @tc: traffic class number
 * @rm_vsi_info: true or false
 *
 * This function moves all the VSI(s) to the default aggregator and deletes
 * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The
 * caller holds the scheduler lock.
 */
static enum ice_status
ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
			     struct ice_sched_agg_info *agg_info, u8 tc,
			     bool rm_vsi_info)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_agg_vsi_info *tmp;
	enum ice_status status = ICE_SUCCESS;

	/* _SAFE variant: entries may be deleted while iterating */
	LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
				 ice_sched_agg_vsi_info, list_entry) {
		u16 vsi_handle = agg_vsi_info->vsi_handle;

		/* Move VSI to default aggregator */
		if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
			continue;

		status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
						   ICE_DFLT_AGG_ID, tc);
		if (status)
			break;

		ice_clear_bit(tc, agg_vsi_info->tc_bitmap);
		/* drop the entry once it no longer covers any TC */
		if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
			LIST_DEL(&agg_vsi_info->list_entry);
			ice_free(pi->hw, agg_vsi_info);
		}
	}

	return status;
}

/**
 * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
 * @pi: port information structure
 * @node: node pointer
 *
 * This function checks whether the aggregator is attached with any VSI or not.
 */
static bool
ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
{
	u8 vsil, i;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	if (node->tx_sched_layer < vsil - 1) {
		/* intermediate layer: recurse into each child subtree */
		for (i = 0; i < node->num_children; i++)
			if (ice_sched_is_agg_inuse(pi, node->children[i]))
				return true;
		return false;
	} else {
		/* VSI parent layer: any child means a VSI is attached */
		return node->num_children ? true : false;
	}
}

/**
 * ice_sched_rm_agg_cfg - remove the aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function removes the aggregator node and intermediate nodes if any
 * from the given TC
 */
static enum ice_status
ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	struct ice_sched_node *tc_node, *agg_node;
	struct ice_hw *hw = pi->hw;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Can't remove the aggregator node if it has children */
	if (ice_sched_is_agg_inuse(pi, agg_node))
		return ICE_ERR_IN_USE;

	/* need to remove the whole subtree if aggregator node is the
	 * only child. Climb toward the SW entry point while each parent
	 * has no other children, then free that topmost single-child chain.
	 */
	while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
		struct ice_sched_node *parent = agg_node->parent;

		if (!parent)
			return ICE_ERR_CFG;

		if (parent->num_children > 1)
			break;

		agg_node = parent;
	}

	ice_free_sched_node(pi, agg_node);
	return ICE_SUCCESS;
}

/**
 * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
 * @pi: port information structure
 * @agg_info: aggregator info structure
 * @tc: TC number
 * @rm_vsi_info: bool value true or false
 *
 * This function removes aggregator reference to VSI of given TC. It removes
 * the aggregator configuration completely for requested TC. The caller needs
 * to hold the scheduler lock.
 */
static enum ice_status
ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
		  u8 tc, bool rm_vsi_info)
{
	enum ice_status status = ICE_SUCCESS;

	/* If nothing to remove - return success */
	if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
		goto exit_rm_agg_cfg_tc;

	/* member VSIs go back to the default aggregator first */
	status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info);
	if (status)
		goto exit_rm_agg_cfg_tc;

	/* Delete aggregator node(s) */
	status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc);
	if (status)
		goto exit_rm_agg_cfg_tc;

	ice_clear_bit(tc, agg_info->tc_bitmap);
exit_rm_agg_cfg_tc:
	return status;
}

/**
 * ice_save_agg_tc_bitmap - save aggregator TC bitmap
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc_bitmap: 8 bits TC bitmap
 *
 * Save aggregator TC bitmap. This function needs to be called with scheduler
 * lock held.
 */
static enum ice_status
ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
		       ice_bitmap_t *tc_bitmap)
{
	struct ice_sched_agg_info *agg_info;

	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info)
		return ICE_ERR_PARAM;
	/* keep the requested bitmap in replay_tc_bitmap */
	ice_cp_bitmap(agg_info->replay_tc_bitmap, tc_bitmap,
		      ICE_MAX_TRAFFIC_CLASS);
	return ICE_SUCCESS;
}

/**
 * ice_sched_add_agg_cfg - create an aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function creates an aggregator node and intermediate nodes if required
 * for the given TC
 */
static enum ice_status
ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	struct ice_sched_node *parent, *agg_node, *tc_node;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u32 first_node_teid;
	u16 num_nodes_added;
	u8 i, aggl;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	/* Does Agg node already exist ? */
	if (agg_node)
		return status;

	aggl = ice_sched_get_agg_layer(hw);

	/* need one node in Agg layer */
	num_nodes[aggl] = 1;

	/* Check whether the intermediate nodes have space to add the
	 * new aggregator. If they are full, then SW needs to allocate a new
	 * intermediate node on those layers
	 */
	for (i = hw->sw_entry_point_layer; i < aggl; i++) {
		parent = ice_sched_get_first_node(pi, tc_node, i);

		/* scan all the siblings */
		while (parent) {
			if (parent->num_children < hw->max_children[i])
				break;
			parent = parent->sibling;
		}

		/* all the nodes are full, reserve one for this layer */
		if (!parent)
			num_nodes[i]++;
	}

	/* add the aggregator node */
	parent = tc_node;
	for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
		if (!parent)
			return ICE_ERR_CFG;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* register aggregator ID with the aggregator node */
			if (parent && i == aggl)
				parent->agg_id = agg_id;
		} else {
			parent = parent->children[0];
		}
	}

	return ICE_SUCCESS;
}

/**
 * ice_sched_cfg_agg - configure aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @agg_type: aggregator type queue, VSI, or aggregator group
 * @tc_bitmap: bits TC bitmap
 *
 * It registers a unique aggregator node into scheduler services. It
 * allows a user to register with a unique ID to track it's resources.
 * The aggregator type determines if this is a queue group, VSI group
 * or aggregator group. It then creates the aggregator node(s) for requested
 * TC(s) or removes an existing aggregator node including its configuration
 * if indicated via tc_bitmap.
Call ice_rm_agg_cfg to release aggregator
 * resources and remove aggregator ID.
 * This function needs to be called with scheduler lock held.
 */
static enum ice_status
ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
		  enum ice_agg_type agg_type, ice_bitmap_t *tc_bitmap)
{
	struct ice_sched_agg_info *agg_info;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u8 tc;

	agg_info = ice_get_agg_info(hw, agg_id);
	if (!agg_info) {
		/* Create new entry for new aggregator ID */
		agg_info = (struct ice_sched_agg_info *)
			ice_malloc(hw, sizeof(*agg_info));
		if (!agg_info)
			return ICE_ERR_NO_MEMORY;

		agg_info->agg_id = agg_id;
		agg_info->agg_type = agg_type;
		agg_info->tc_bitmap[0] = 0;

		/* Initialize the aggregator VSI list head */
		INIT_LIST_HEAD(&agg_info->agg_vsi_list);

		/* Add new entry in aggregator list */
		LIST_ADD(&agg_info->list_entry, &hw->agg_list);
	}
	/* Create aggregator node(s) for requested TC(s) */
	ice_for_each_traffic_class(tc) {
		if (!ice_is_tc_ena(*tc_bitmap, tc)) {
			/* Delete aggregator cfg TC if it exists previously */
			status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
			if (status)
				break;
			continue;
		}

		/* Check if aggregator node for TC already exists */
		if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
			continue;

		/* Create new aggregator node for TC */
		status = ice_sched_add_agg_cfg(pi, agg_id, tc);
		if (status)
			break;

		/* Save aggregator node's TC information */
		ice_set_bit(tc, agg_info->tc_bitmap);
	}

	return status;
}

/**
 * ice_cfg_agg - config aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @agg_type: aggregator type queue, VSI, or aggregator group
 * @tc_bitmap: bits TC bitmap
 *
 * This function configures aggregator node(s).
 */
enum ice_status
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
	    u8 tc_bitmap)
{
	/* widen the 8-bit TC bitmap into an ice_bitmap_t for the helpers */
	ice_bitmap_t bitmap = tc_bitmap;
	enum ice_status status;

	ice_acquire_lock(&pi->sched_lock);
	status = ice_sched_cfg_agg(pi, agg_id, agg_type,
				   (ice_bitmap_t *)&bitmap);
	if (!status)
		status = ice_save_agg_tc_bitmap(pi, agg_id,
						(ice_bitmap_t *)&bitmap);
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_get_agg_vsi_info - get the aggregator ID
 * @agg_info: aggregator info
 * @vsi_handle: software VSI handle
 *
 * The function returns aggregator VSI info based on VSI handle. This function
 * needs to be called with scheduler lock held.
 */
static struct ice_sched_agg_vsi_info *
ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;

	LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
			    ice_sched_agg_vsi_info, list_entry)
		if (agg_vsi_info->vsi_handle == vsi_handle)
			return agg_vsi_info;

	return NULL;
}

/**
 * ice_get_vsi_agg_info - get the aggregator info of VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: Sw VSI handle
 *
 * The function returns aggregator info of VSI represented via vsi_handle. The
 * VSI has in this case a different aggregator than the default one. This
 * function needs to be called with scheduler lock held.
2920 */ 2921 static struct ice_sched_agg_info * 2922 ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle) 2923 { 2924 struct ice_sched_agg_info *agg_info; 2925 2926 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, 2927 list_entry) { 2928 struct ice_sched_agg_vsi_info *agg_vsi_info; 2929 2930 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); 2931 if (agg_vsi_info) 2932 return agg_info; 2933 } 2934 return NULL; 2935 } 2936 2937 /** 2938 * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap 2939 * @pi: port information structure 2940 * @agg_id: aggregator ID 2941 * @vsi_handle: software VSI handle 2942 * @tc_bitmap: TC bitmap of enabled TC(s) 2943 * 2944 * Save VSI to aggregator TC bitmap. This function needs to call with scheduler 2945 * lock held. 2946 */ 2947 static enum ice_status 2948 ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, 2949 ice_bitmap_t *tc_bitmap) 2950 { 2951 struct ice_sched_agg_vsi_info *agg_vsi_info; 2952 struct ice_sched_agg_info *agg_info; 2953 2954 agg_info = ice_get_agg_info(pi->hw, agg_id); 2955 if (!agg_info) 2956 return ICE_ERR_PARAM; 2957 /* check if entry already exist */ 2958 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); 2959 if (!agg_vsi_info) 2960 return ICE_ERR_PARAM; 2961 ice_cp_bitmap(agg_vsi_info->replay_tc_bitmap, tc_bitmap, 2962 ICE_MAX_TRAFFIC_CLASS); 2963 return ICE_SUCCESS; 2964 } 2965 2966 /** 2967 * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator 2968 * @pi: port information structure 2969 * @agg_id: aggregator ID 2970 * @vsi_handle: software VSI handle 2971 * @tc_bitmap: TC bitmap of enabled TC(s) 2972 * 2973 * This function moves VSI to a new or default aggregator node. If VSI is 2974 * already associated to the aggregator node then no operation is performed on 2975 * the tree. This function needs to be called with scheduler lock held. 
2976 */ 2977 static enum ice_status 2978 ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, 2979 u16 vsi_handle, ice_bitmap_t *tc_bitmap) 2980 { 2981 struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL; 2982 struct ice_sched_agg_info *agg_info, *old_agg_info; 2983 enum ice_status status = ICE_SUCCESS; 2984 struct ice_hw *hw = pi->hw; 2985 u8 tc; 2986 2987 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 2988 return ICE_ERR_PARAM; 2989 agg_info = ice_get_agg_info(hw, agg_id); 2990 if (!agg_info) 2991 return ICE_ERR_PARAM; 2992 /* If the vsi is already part of another aggregator then update 2993 * its vsi info list 2994 */ 2995 old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle); 2996 if (old_agg_info && old_agg_info != agg_info) { 2997 struct ice_sched_agg_vsi_info *vtmp; 2998 2999 LIST_FOR_EACH_ENTRY_SAFE(old_agg_vsi_info, vtmp, 3000 &old_agg_info->agg_vsi_list, 3001 ice_sched_agg_vsi_info, list_entry) 3002 if (old_agg_vsi_info->vsi_handle == vsi_handle) 3003 break; 3004 } 3005 3006 /* check if entry already exist */ 3007 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); 3008 if (!agg_vsi_info) { 3009 /* Create new entry for VSI under aggregator list */ 3010 agg_vsi_info = (struct ice_sched_agg_vsi_info *) 3011 ice_malloc(hw, sizeof(*agg_vsi_info)); 3012 if (!agg_vsi_info) 3013 return ICE_ERR_PARAM; 3014 3015 /* add VSI ID into the aggregator list */ 3016 agg_vsi_info->vsi_handle = vsi_handle; 3017 LIST_ADD(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list); 3018 } 3019 /* Move VSI node to new aggregator node for requested TC(s) */ 3020 ice_for_each_traffic_class(tc) { 3021 if (!ice_is_tc_ena(*tc_bitmap, tc)) 3022 continue; 3023 3024 /* Move VSI to new aggregator */ 3025 status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc); 3026 if (status) 3027 break; 3028 3029 ice_set_bit(tc, agg_vsi_info->tc_bitmap); 3030 if (old_agg_vsi_info) 3031 ice_clear_bit(tc, old_agg_vsi_info->tc_bitmap); 3032 } 3033 if (old_agg_vsi_info && 
!old_agg_vsi_info->tc_bitmap[0]) { 3034 LIST_DEL(&old_agg_vsi_info->list_entry); 3035 ice_free(pi->hw, old_agg_vsi_info); 3036 } 3037 return status; 3038 } 3039 3040 /** 3041 * ice_sched_rm_unused_rl_prof - remove unused RL profile 3042 * @hw: pointer to the hardware structure 3043 * 3044 * This function removes unused rate limit profiles from the HW and 3045 * SW DB. The caller needs to hold scheduler lock. 3046 */ 3047 static void ice_sched_rm_unused_rl_prof(struct ice_hw *hw) 3048 { 3049 u16 ln; 3050 3051 for (ln = 0; ln < hw->num_tx_sched_layers; ln++) { 3052 struct ice_aqc_rl_profile_info *rl_prof_elem; 3053 struct ice_aqc_rl_profile_info *rl_prof_tmp; 3054 3055 LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp, 3056 &hw->rl_prof_list[ln], 3057 ice_aqc_rl_profile_info, list_entry) { 3058 if (!ice_sched_del_rl_profile(hw, rl_prof_elem)) 3059 ice_debug(hw, ICE_DBG_SCHED, "Removed rl profile\n"); 3060 } 3061 } 3062 } 3063 3064 /** 3065 * ice_sched_update_elem - update element 3066 * @hw: pointer to the HW struct 3067 * @node: pointer to node 3068 * @info: node info to update 3069 * 3070 * Update the HW DB, and local SW DB of node. Update the scheduling 3071 * parameters of node from argument info data buffer (Info->data buf) and 3072 * returns success or error on config sched element failure. The caller 3073 * needs to hold scheduler lock. 
 */
static enum ice_status
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
		      struct ice_aqc_txsched_elem_data *info)
{
	struct ice_aqc_txsched_elem_data buf;
	enum ice_status status;
	u16 elem_cfgd = 0;
	u16 num_elems = 1;

	/* work on a local copy so reserved fields can be cleared without
	 * touching the caller's buffer
	 */
	buf = *info;
	/* For TC nodes, CIR config is not supported */
	if (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_TC)
		buf.data.valid_sections &= ~ICE_AQC_ELEM_VALID_CIR;
	/* Parent TEID is reserved field in this aq call */
	buf.parent_teid = 0;
	/* Element type is reserved field in this aq call */
	buf.data.elem_type = 0;
	/* Flags is reserved field in this aq call */
	buf.data.flags = 0;

	/* Update HW DB */
	/* Configure element node */
	status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
					&elem_cfgd, NULL);
	if (status || elem_cfgd != num_elems) {
		ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
		return ICE_ERR_CFG;
	}

	/* Config success case */
	/* Now update local SW DB */
	/* Only copy the data portion of info buffer */
	node->info.data = info->data;
	return status;
}

/**
 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @rl_type: rate limit type CIR, EIR, or shared
 * @bw_alloc: BW weight/allocation
 *
 * This function configures node element's BW allocation.
3119 */ 3120 static enum ice_status 3121 ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, 3122 enum ice_rl_type rl_type, u16 bw_alloc) 3123 { 3124 struct ice_aqc_txsched_elem_data buf; 3125 struct ice_aqc_txsched_elem *data; 3126 enum ice_status status; 3127 3128 buf = node->info; 3129 data = &buf.data; 3130 if (rl_type == ICE_MIN_BW) { 3131 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; 3132 data->cir_bw.bw_alloc = CPU_TO_LE16(bw_alloc); 3133 } else if (rl_type == ICE_MAX_BW) { 3134 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; 3135 data->eir_bw.bw_alloc = CPU_TO_LE16(bw_alloc); 3136 } else { 3137 return ICE_ERR_PARAM; 3138 } 3139 3140 /* Configure element */ 3141 status = ice_sched_update_elem(hw, node, &buf); 3142 return status; 3143 } 3144 3145 /** 3146 * ice_move_vsi_to_agg - moves VSI to new or default aggregator 3147 * @pi: port information structure 3148 * @agg_id: aggregator ID 3149 * @vsi_handle: software VSI handle 3150 * @tc_bitmap: TC bitmap of enabled TC(s) 3151 * 3152 * Move or associate VSI to a new or default aggregator node. 3153 */ 3154 enum ice_status 3155 ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, 3156 u8 tc_bitmap) 3157 { 3158 ice_bitmap_t bitmap = tc_bitmap; 3159 enum ice_status status; 3160 3161 ice_acquire_lock(&pi->sched_lock); 3162 status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle, 3163 (ice_bitmap_t *)&bitmap); 3164 if (!status) 3165 status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle, 3166 (ice_bitmap_t *)&bitmap); 3167 ice_release_lock(&pi->sched_lock); 3168 return status; 3169 } 3170 3171 /** 3172 * ice_rm_agg_cfg - remove aggregator configuration 3173 * @pi: port information structure 3174 * @agg_id: aggregator ID 3175 * 3176 * This function removes aggregator reference to VSI and delete aggregator ID 3177 * info. It removes the aggregator configuration completely. 
3178 */ 3179 enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id) 3180 { 3181 struct ice_sched_agg_info *agg_info; 3182 enum ice_status status = ICE_SUCCESS; 3183 u8 tc; 3184 3185 ice_acquire_lock(&pi->sched_lock); 3186 agg_info = ice_get_agg_info(pi->hw, agg_id); 3187 if (!agg_info) { 3188 status = ICE_ERR_DOES_NOT_EXIST; 3189 goto exit_ice_rm_agg_cfg; 3190 } 3191 3192 ice_for_each_traffic_class(tc) { 3193 status = ice_rm_agg_cfg_tc(pi, agg_info, tc, true); 3194 if (status) 3195 goto exit_ice_rm_agg_cfg; 3196 } 3197 3198 if (ice_is_any_bit_set(agg_info->tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) { 3199 status = ICE_ERR_IN_USE; 3200 goto exit_ice_rm_agg_cfg; 3201 } 3202 3203 /* Safe to delete entry now */ 3204 LIST_DEL(&agg_info->list_entry); 3205 ice_free(pi->hw, agg_info); 3206 3207 /* Remove unused RL profile IDs from HW and SW DB */ 3208 ice_sched_rm_unused_rl_prof(pi->hw); 3209 3210 exit_ice_rm_agg_cfg: 3211 ice_release_lock(&pi->sched_lock); 3212 return status; 3213 } 3214 3215 /** 3216 * ice_set_clear_cir_bw_alloc - set or clear CIR BW alloc information 3217 * @bw_t_info: bandwidth type information structure 3218 * @bw_alloc: Bandwidth allocation information 3219 * 3220 * Save or clear CIR BW alloc information (bw_alloc) in the passed param 3221 * bw_t_info. 3222 */ 3223 static void 3224 ice_set_clear_cir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc) 3225 { 3226 bw_t_info->cir_bw.bw_alloc = bw_alloc; 3227 if (bw_t_info->cir_bw.bw_alloc) 3228 ice_set_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap); 3229 else 3230 ice_clear_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap); 3231 } 3232 3233 /** 3234 * ice_set_clear_eir_bw_alloc - set or clear EIR BW alloc information 3235 * @bw_t_info: bandwidth type information structure 3236 * @bw_alloc: Bandwidth allocation information 3237 * 3238 * Save or clear EIR BW alloc information (bw_alloc) in the passed param 3239 * bw_t_info. 
3240 */ 3241 static void 3242 ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc) 3243 { 3244 bw_t_info->eir_bw.bw_alloc = bw_alloc; 3245 if (bw_t_info->eir_bw.bw_alloc) 3246 ice_set_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap); 3247 else 3248 ice_clear_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap); 3249 } 3250 3251 /** 3252 * ice_sched_save_vsi_bw_alloc - save VSI node's BW alloc information 3253 * @pi: port information structure 3254 * @vsi_handle: sw VSI handle 3255 * @tc: traffic class 3256 * @rl_type: rate limit type min or max 3257 * @bw_alloc: Bandwidth allocation information 3258 * 3259 * Save BW alloc information of VSI type node for post replay use. 3260 */ 3261 static enum ice_status 3262 ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 3263 enum ice_rl_type rl_type, u16 bw_alloc) 3264 { 3265 struct ice_vsi_ctx *vsi_ctx; 3266 3267 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 3268 return ICE_ERR_PARAM; 3269 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 3270 if (!vsi_ctx) 3271 return ICE_ERR_PARAM; 3272 switch (rl_type) { 3273 case ICE_MIN_BW: 3274 ice_set_clear_cir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc], 3275 bw_alloc); 3276 break; 3277 case ICE_MAX_BW: 3278 ice_set_clear_eir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc], 3279 bw_alloc); 3280 break; 3281 default: 3282 return ICE_ERR_PARAM; 3283 } 3284 return ICE_SUCCESS; 3285 } 3286 3287 /** 3288 * ice_set_clear_cir_bw - set or clear CIR BW 3289 * @bw_t_info: bandwidth type information structure 3290 * @bw: bandwidth in Kbps - Kilo bits per sec 3291 * 3292 * Save or clear CIR bandwidth (BW) in the passed param bw_t_info. 
3293 */ 3294 static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) 3295 { 3296 if (bw == ICE_SCHED_DFLT_BW) { 3297 ice_clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); 3298 bw_t_info->cir_bw.bw = 0; 3299 } else { 3300 /* Save type of BW information */ 3301 ice_set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); 3302 bw_t_info->cir_bw.bw = bw; 3303 } 3304 } 3305 3306 /** 3307 * ice_set_clear_eir_bw - set or clear EIR BW 3308 * @bw_t_info: bandwidth type information structure 3309 * @bw: bandwidth in Kbps - Kilo bits per sec 3310 * 3311 * Save or clear EIR bandwidth (BW) in the passed param bw_t_info. 3312 */ 3313 static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) 3314 { 3315 if (bw == ICE_SCHED_DFLT_BW) { 3316 ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); 3317 bw_t_info->eir_bw.bw = 0; 3318 } else { 3319 /* save EIR BW information */ 3320 ice_set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); 3321 bw_t_info->eir_bw.bw = bw; 3322 } 3323 } 3324 3325 /** 3326 * ice_set_clear_shared_bw - set or clear shared BW 3327 * @bw_t_info: bandwidth type information structure 3328 * @bw: bandwidth in Kbps - Kilo bits per sec 3329 * 3330 * Save or clear shared bandwidth (BW) in the passed param bw_t_info. 
3331 */ 3332 static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw) 3333 { 3334 if (bw == ICE_SCHED_DFLT_BW) { 3335 ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); 3336 bw_t_info->shared_bw = 0; 3337 } else { 3338 /* save shared BW information */ 3339 ice_set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); 3340 bw_t_info->shared_bw = bw; 3341 } 3342 } 3343 3344 /** 3345 * ice_sched_save_vsi_bw - save VSI node's BW information 3346 * @pi: port information structure 3347 * @vsi_handle: sw VSI handle 3348 * @tc: traffic class 3349 * @rl_type: rate limit type min, max, or shared 3350 * @bw: bandwidth in Kbps - Kilo bits per sec 3351 * 3352 * Save BW information of VSI type node for post replay use. 3353 */ 3354 static enum ice_status 3355 ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 3356 enum ice_rl_type rl_type, u32 bw) 3357 { 3358 struct ice_vsi_ctx *vsi_ctx; 3359 3360 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 3361 return ICE_ERR_PARAM; 3362 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 3363 if (!vsi_ctx) 3364 return ICE_ERR_PARAM; 3365 switch (rl_type) { 3366 case ICE_MIN_BW: 3367 ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); 3368 break; 3369 case ICE_MAX_BW: 3370 ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); 3371 break; 3372 case ICE_SHARED_BW: 3373 ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw); 3374 break; 3375 default: 3376 return ICE_ERR_PARAM; 3377 } 3378 return ICE_SUCCESS; 3379 } 3380 3381 /** 3382 * ice_set_clear_prio - set or clear priority information 3383 * @bw_t_info: bandwidth type information structure 3384 * @prio: priority to save 3385 * 3386 * Save or clear priority (prio) in the passed param bw_t_info. 
3387 */ 3388 static void ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio) 3389 { 3390 bw_t_info->generic = prio; 3391 if (bw_t_info->generic) 3392 ice_set_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap); 3393 else 3394 ice_clear_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap); 3395 } 3396 3397 /** 3398 * ice_sched_save_vsi_prio - save VSI node's priority information 3399 * @pi: port information structure 3400 * @vsi_handle: Software VSI handle 3401 * @tc: traffic class 3402 * @prio: priority to save 3403 * 3404 * Save priority information of VSI type node for post replay use. 3405 */ 3406 static enum ice_status 3407 ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 3408 u8 prio) 3409 { 3410 struct ice_vsi_ctx *vsi_ctx; 3411 3412 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 3413 return ICE_ERR_PARAM; 3414 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 3415 if (!vsi_ctx) 3416 return ICE_ERR_PARAM; 3417 if (tc >= ICE_MAX_TRAFFIC_CLASS) 3418 return ICE_ERR_PARAM; 3419 ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio); 3420 return ICE_SUCCESS; 3421 } 3422 3423 /** 3424 * ice_sched_save_agg_bw_alloc - save aggregator node's BW alloc information 3425 * @pi: port information structure 3426 * @agg_id: node aggregator ID 3427 * @tc: traffic class 3428 * @rl_type: rate limit type min or max 3429 * @bw_alloc: bandwidth alloc information 3430 * 3431 * Save BW alloc information of AGG type node for post replay use. 
3432 */ 3433 static enum ice_status 3434 ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc, 3435 enum ice_rl_type rl_type, u16 bw_alloc) 3436 { 3437 struct ice_sched_agg_info *agg_info; 3438 3439 agg_info = ice_get_agg_info(pi->hw, agg_id); 3440 if (!agg_info) 3441 return ICE_ERR_PARAM; 3442 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) 3443 return ICE_ERR_PARAM; 3444 switch (rl_type) { 3445 case ICE_MIN_BW: 3446 ice_set_clear_cir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc); 3447 break; 3448 case ICE_MAX_BW: 3449 ice_set_clear_eir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc); 3450 break; 3451 default: 3452 return ICE_ERR_PARAM; 3453 } 3454 return ICE_SUCCESS; 3455 } 3456 3457 /** 3458 * ice_sched_save_agg_bw - save aggregator node's BW information 3459 * @pi: port information structure 3460 * @agg_id: node aggregator ID 3461 * @tc: traffic class 3462 * @rl_type: rate limit type min, max, or shared 3463 * @bw: bandwidth in Kbps - Kilo bits per sec 3464 * 3465 * Save BW information of AGG type node for post replay use. 
3466 */ 3467 static enum ice_status 3468 ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, 3469 enum ice_rl_type rl_type, u32 bw) 3470 { 3471 struct ice_sched_agg_info *agg_info; 3472 3473 agg_info = ice_get_agg_info(pi->hw, agg_id); 3474 if (!agg_info) 3475 return ICE_ERR_PARAM; 3476 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) 3477 return ICE_ERR_PARAM; 3478 switch (rl_type) { 3479 case ICE_MIN_BW: 3480 ice_set_clear_cir_bw(&agg_info->bw_t_info[tc], bw); 3481 break; 3482 case ICE_MAX_BW: 3483 ice_set_clear_eir_bw(&agg_info->bw_t_info[tc], bw); 3484 break; 3485 case ICE_SHARED_BW: 3486 ice_set_clear_shared_bw(&agg_info->bw_t_info[tc], bw); 3487 break; 3488 default: 3489 return ICE_ERR_PARAM; 3490 } 3491 return ICE_SUCCESS; 3492 } 3493 3494 /** 3495 * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC 3496 * @pi: port information structure 3497 * @vsi_handle: software VSI handle 3498 * @tc: traffic class 3499 * @rl_type: min or max 3500 * @bw: bandwidth in Kbps 3501 * 3502 * This function configures BW limit of VSI scheduling node based on TC 3503 * information. 3504 */ 3505 enum ice_status 3506 ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 3507 enum ice_rl_type rl_type, u32 bw) 3508 { 3509 enum ice_status status; 3510 3511 status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle, 3512 ICE_AGG_TYPE_VSI, 3513 tc, rl_type, bw); 3514 if (!status) { 3515 ice_acquire_lock(&pi->sched_lock); 3516 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw); 3517 ice_release_lock(&pi->sched_lock); 3518 } 3519 return status; 3520 } 3521 3522 /** 3523 * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC 3524 * @pi: port information structure 3525 * @vsi_handle: software VSI handle 3526 * @tc: traffic class 3527 * @rl_type: min or max 3528 * 3529 * This function configures default BW limit of VSI scheduling node based on TC 3530 * information. 
3531 */ 3532 enum ice_status 3533 ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 3534 enum ice_rl_type rl_type) 3535 { 3536 enum ice_status status; 3537 3538 status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle, 3539 ICE_AGG_TYPE_VSI, 3540 tc, rl_type, 3541 ICE_SCHED_DFLT_BW); 3542 if (!status) { 3543 ice_acquire_lock(&pi->sched_lock); 3544 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, 3545 ICE_SCHED_DFLT_BW); 3546 ice_release_lock(&pi->sched_lock); 3547 } 3548 return status; 3549 } 3550 3551 /** 3552 * ice_cfg_agg_bw_lmt_per_tc - configure aggregator BW limit per TC 3553 * @pi: port information structure 3554 * @agg_id: aggregator ID 3555 * @tc: traffic class 3556 * @rl_type: min or max 3557 * @bw: bandwidth in Kbps 3558 * 3559 * This function applies BW limit to aggregator scheduling node based on TC 3560 * information. 3561 */ 3562 enum ice_status 3563 ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, 3564 enum ice_rl_type rl_type, u32 bw) 3565 { 3566 enum ice_status status; 3567 3568 status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG, 3569 tc, rl_type, bw); 3570 if (!status) { 3571 ice_acquire_lock(&pi->sched_lock); 3572 status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw); 3573 ice_release_lock(&pi->sched_lock); 3574 } 3575 return status; 3576 } 3577 3578 /** 3579 * ice_cfg_agg_bw_dflt_lmt_per_tc - configure aggregator BW default limit per TC 3580 * @pi: port information structure 3581 * @agg_id: aggregator ID 3582 * @tc: traffic class 3583 * @rl_type: min or max 3584 * 3585 * This function applies default BW limit to aggregator scheduling node based 3586 * on TC information. 
3587 */ 3588 enum ice_status 3589 ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, 3590 enum ice_rl_type rl_type) 3591 { 3592 enum ice_status status; 3593 3594 status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG, 3595 tc, rl_type, 3596 ICE_SCHED_DFLT_BW); 3597 if (!status) { 3598 ice_acquire_lock(&pi->sched_lock); 3599 status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, 3600 ICE_SCHED_DFLT_BW); 3601 ice_release_lock(&pi->sched_lock); 3602 } 3603 return status; 3604 } 3605 3606 /** 3607 * ice_cfg_vsi_bw_shared_lmt - configure VSI BW shared limit 3608 * @pi: port information structure 3609 * @vsi_handle: software VSI handle 3610 * @min_bw: minimum bandwidth in Kbps 3611 * @max_bw: maximum bandwidth in Kbps 3612 * @shared_bw: shared bandwidth in Kbps 3613 * 3614 * Configure shared rate limiter(SRL) of all VSI type nodes across all traffic 3615 * classes for VSI matching handle. 3616 */ 3617 enum ice_status 3618 ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw, 3619 u32 max_bw, u32 shared_bw) 3620 { 3621 return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, min_bw, max_bw, 3622 shared_bw); 3623 } 3624 3625 /** 3626 * ice_cfg_vsi_bw_no_shared_lmt - configure VSI BW for no shared limiter 3627 * @pi: port information structure 3628 * @vsi_handle: software VSI handle 3629 * 3630 * This function removes the shared rate limiter(SRL) of all VSI type nodes 3631 * across all traffic classes for VSI matching handle. 
 */
enum ice_status
ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle)
{
	/* Passing the default BW for all three limits clears the SRL */
	return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle,
					       ICE_SCHED_DFLT_BW,
					       ICE_SCHED_DFLT_BW,
					       ICE_SCHED_DFLT_BW);
}

/**
 * ice_cfg_agg_bw_shared_lmt - configure aggregator BW shared limit
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @min_bw: minimum bandwidth in Kbps
 * @max_bw: maximum bandwidth in Kbps
 * @shared_bw: shared bandwidth in Kbps
 *
 * This function configures the shared rate limiter(SRL) of all aggregator type
 * nodes across all traffic classes for aggregator matching agg_id.
 */
enum ice_status
ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
			  u32 max_bw, u32 shared_bw)
{
	return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, min_bw, max_bw,
					       shared_bw);
}

/**
 * ice_cfg_agg_bw_no_shared_lmt - configure aggregator BW for no shared limiter
 * @pi: port information structure
 * @agg_id: aggregator ID
 *
 * This function removes the shared rate limiter(SRL) of all aggregator type
 * nodes across all traffic classes for aggregator matching agg_id.
 */
enum ice_status
ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
{
	/* Passing the default BW for all three limits clears the SRL */
	return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW,
					       ICE_SCHED_DFLT_BW,
					       ICE_SCHED_DFLT_BW);
}

/**
 * ice_cfg_agg_bw_shared_lmt_per_tc - config aggregator BW shared limit per tc
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: traffic class
 * @min_bw: minimum bandwidth in Kbps
 * @max_bw: maximum bandwidth in Kbps
 * @shared_bw: shared bandwidth in Kbps
 *
 * This function configures the shared rate limiter(SRL) of all aggregator type
 * nodes across all traffic classes for aggregator matching agg_id.
 */
enum ice_status
ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
				 u32 min_bw, u32 max_bw, u32 shared_bw)
{
	return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc, min_bw,
						      max_bw, shared_bw);
}

/**
 * ice_cfg_agg_bw_no_shared_lmt_per_tc - remove aggregator BW shared limit per tc
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: traffic class
 *
 * This function removes the shared rate limiter(SRL) of all aggregator type
 * nodes of the given traffic class for aggregator matching agg_id, by
 * passing the scheduler default for all three limits.
 */
enum ice_status
ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc,
						      ICE_SCHED_DFLT_BW,
						      ICE_SCHED_DFLT_BW,
						      ICE_SCHED_DFLT_BW);
}

/**
 * ice_cfg_vsi_q_priority - config VSI queue priority of node
 * @pi: port information structure
 * @num_qs: number of VSI queues
 * @q_ids: queue IDs array
 * @q_prio: queue priority array
 *
 * This function configures the queue node priority (Sibling Priority) of the
 * passed in VSI's queue(s) for a given traffic class (TC).
3724 */ 3725 enum ice_status 3726 ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids, 3727 u8 *q_prio) 3728 { 3729 enum ice_status status = ICE_ERR_PARAM; 3730 u16 i; 3731 3732 ice_acquire_lock(&pi->sched_lock); 3733 3734 for (i = 0; i < num_qs; i++) { 3735 struct ice_sched_node *node; 3736 3737 node = ice_sched_find_node_by_teid(pi->root, q_ids[i]); 3738 if (!node || node->info.data.elem_type != 3739 ICE_AQC_ELEM_TYPE_LEAF) { 3740 status = ICE_ERR_PARAM; 3741 break; 3742 } 3743 /* Configure Priority */ 3744 status = ice_sched_cfg_sibl_node_prio(pi, node, q_prio[i]); 3745 if (status) 3746 break; 3747 } 3748 3749 ice_release_lock(&pi->sched_lock); 3750 return status; 3751 } 3752 3753 /** 3754 * ice_cfg_agg_vsi_priority_per_tc - config aggregator's VSI priority per TC 3755 * @pi: port information structure 3756 * @agg_id: Aggregator ID 3757 * @num_vsis: number of VSI(s) 3758 * @vsi_handle_arr: array of software VSI handles 3759 * @node_prio: pointer to node priority 3760 * @tc: traffic class 3761 * 3762 * This function configures the node priority (Sibling Priority) of the 3763 * passed in VSI's for a given traffic class (TC) of an Aggregator ID. 
 */
enum ice_status
ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
				u16 num_vsis, u16 *vsi_handle_arr,
				u8 *node_prio, u8 tc)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_node *tc_node, *agg_node;
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	struct ice_hw *hw = pi->hw;
	u16 i;

	ice_acquire_lock(&pi->sched_lock);
	/* Locate the aggregator entry matching agg_id in the HW list */
	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present)
		goto exit_agg_priority_per_tc;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		goto exit_agg_priority_per_tc;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		goto exit_agg_priority_per_tc;

	/* The aggregator node cannot host more VSIs than its layer allows */
	if (num_vsis > hw->max_children[agg_node->tx_sched_layer])
		goto exit_agg_priority_per_tc;

	for (i = 0; i < num_vsis; i++) {
		struct ice_sched_node *vsi_node;
		bool vsi_handle_valid = false;
		u16 vsi_handle;

		status = ICE_ERR_PARAM;
		vsi_handle = vsi_handle_arr[i];
		if (!ice_is_vsi_valid(hw, vsi_handle))
			goto exit_agg_priority_per_tc;
		/* Verify child nodes before applying settings */
		LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
				    ice_sched_agg_vsi_info, list_entry)
			if (agg_vsi_info->vsi_handle == vsi_handle) {
				vsi_handle_valid = true;
				break;
			}

		if (!vsi_handle_valid)
			goto exit_agg_priority_per_tc;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			goto exit_agg_priority_per_tc;

		/* Only reprioritize VSI nodes that sit under this aggregator */
		if (ice_sched_find_node_in_subtree(hw, agg_node, vsi_node)) {
			/* Configure Priority */
			status = ice_sched_cfg_sibl_node_prio(pi, vsi_node,
							      node_prio[i]);
			if (status)
				break;
			/* Save the priority for post-reset replay */
			status = ice_sched_save_vsi_prio(pi, vsi_handle, tc,
							 node_prio[i]);
			if (status)
				break;
		}
	}

exit_agg_priority_per_tc:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_bw_alloc - config VSI BW alloc per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @ena_tcmap: enabled TC map
 * @rl_type: Rate limit type CIR/EIR
 * @bw_alloc: Array of BW alloc
 *
 * This function configures the BW allocation of the passed in VSI's
 * node(s) for enabled traffic class.
 */
enum ice_status
ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
		     enum ice_rl_type rl_type, u8 *bw_alloc)
{
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *vsi_node;

		if (!ice_is_tc_ena(ena_tcmap, tc))
			continue;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		status = ice_sched_cfg_node_bw_alloc(pi->hw, vsi_node, rl_type,
						     bw_alloc[tc]);
		if (status)
			break;
		/* Cache the weight for post-reset replay */
		status = ice_sched_save_vsi_bw_alloc(pi, vsi_handle, tc,
						     rl_type, bw_alloc[tc]);
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_agg_bw_alloc - config aggregator BW alloc
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @ena_tcmap: enabled TC map
 * @rl_type: rate limit type CIR/EIR
 * @bw_alloc: array of BW alloc
 *
 * This function configures the BW allocation of passed
in aggregator for
 * enabled traffic class(s).
 */
enum ice_status
ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap,
		     enum ice_rl_type rl_type, u8 *bw_alloc)
{
	struct ice_sched_agg_info *agg_info;
	enum ice_status ret = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	bool found = false;
	u8 tc;

	ice_acquire_lock(&pi->sched_lock);
	/* The aggregator must already be known to the driver */
	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry) {
		if (agg_info->agg_id == agg_id) {
			found = true;
			break;
		}
	}
	if (!found) {
		ret = ICE_ERR_PARAM;
		goto exit_cfg_agg_bw_alloc;
	}

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *agg_node;

		if (!ice_is_tc_ena(ena_tcmap, tc))
			continue;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
		if (!agg_node)
			continue;

		ret = ice_sched_cfg_node_bw_alloc(hw, agg_node, rl_type,
						  bw_alloc[tc]);
		if (ret)
			break;
		/* Cache the weight for post-reset replay */
		ret = ice_sched_save_agg_bw_alloc(pi, agg_id, tc, rl_type,
						  bw_alloc[tc]);
		if (ret)
			break;
	}

exit_cfg_agg_bw_alloc:
	ice_release_lock(&pi->sched_lock);
	return ret;
}

/**
 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
 * @hw: pointer to the HW struct
 * @bw: bandwidth in Kbps
 *
 * This function calculates the wakeup parameter of RL profile.
3962 */ 3963 static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) 3964 { 3965 s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f; 3966 s32 wakeup_f_int; 3967 u16 wakeup = 0; 3968 3969 /* Get the wakeup integer value */ 3970 bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE); 3971 wakeup_int = DIV_S64(hw->psm_clk_freq, bytes_per_sec); 3972 if (wakeup_int > 63) { 3973 wakeup = (u16)((1 << 15) | wakeup_int); 3974 } else { 3975 /* Calculate fraction value up to 4 decimals 3976 * Convert Integer value to a constant multiplier 3977 */ 3978 wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int; 3979 wakeup_a = DIV_S64((s64)ICE_RL_PROF_MULTIPLIER * 3980 hw->psm_clk_freq, bytes_per_sec); 3981 3982 /* Get Fraction value */ 3983 wakeup_f = wakeup_a - wakeup_b; 3984 3985 /* Round up the Fractional value via Ceil(Fractional value) */ 3986 if (wakeup_f > DIV_S64(ICE_RL_PROF_MULTIPLIER, 2)) 3987 wakeup_f += 1; 3988 3989 wakeup_f_int = (s32)DIV_S64(wakeup_f * ICE_RL_PROF_FRACTION, 3990 ICE_RL_PROF_MULTIPLIER); 3991 wakeup |= (u16)(wakeup_int << 9); 3992 wakeup |= (u16)(0x1ff & wakeup_f_int); 3993 } 3994 3995 return wakeup; 3996 } 3997 3998 /** 3999 * ice_sched_bw_to_rl_profile - convert BW to profile parameters 4000 * @hw: pointer to the HW struct 4001 * @bw: bandwidth in Kbps 4002 * @profile: profile parameters to return 4003 * 4004 * This function converts the BW to profile structure format. 
 */
static enum ice_status
ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
			   struct ice_aqc_rl_profile_elem *profile)
{
	enum ice_status status = ICE_ERR_PARAM;
	s64 bytes_per_sec, ts_rate, mv_tmp;
	bool found = false;
	s32 encode = 0;
	s64 mv = 0;
	s32 i;

	/* Bw settings range is from 0.5Mb/sec to 100Gb/sec */
	if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
		return status;

	/* Bytes per second from Kbps */
	bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE);

	/* encode is 6 bits but really useful are 5 bits */
	for (i = 0; i < 64; i++) {
		u64 pow_result = BIT_ULL(i);

		ts_rate = DIV_S64((s64)hw->psm_clk_freq,
				  pow_result * ICE_RL_PROF_TS_MULTIPLIER);
		if (ts_rate <= 0)
			continue;

		/* Multiplier value */
		mv_tmp = DIV_S64(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
				 ts_rate);

		/* Round to the nearest ICE_RL_PROF_MULTIPLIER */
		mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);

		/* First multiplier value greater than the given
		 * accuracy bytes
		 */
		if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
			encode = i;
			found = true;
			break;
		}
	}
	if (found) {
		u16 wm;

		wm = ice_sched_calc_wakeup(hw, bw);
		/* NOTE(review): mv is s64 but CPU_TO_LE16 keeps only 16
		 * bits - assumed bounded by the accuracy search above;
		 * confirm against the RL profile field width.
		 */
		profile->rl_multiply = CPU_TO_LE16(mv);
		profile->wake_up_calc = CPU_TO_LE16(wm);
		profile->rl_encode = CPU_TO_LE16(encode);
		status = ICE_SUCCESS;
	} else {
		status = ICE_ERR_DOES_NOT_EXIST;
	}

	return status;
}

/**
 * ice_sched_add_rl_profile - add RL profile
 * @hw: pointer to the hardware structure
 * @rl_type: type of rate limit BW - min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 * @layer_num: specifies in which layer to create profile
 *
 * This function first checks the existing list for corresponding BW
 * parameter.
If it exists, it returns the associated profile otherwise
 * it creates a new rate limit profile for requested BW, and adds it to
 * the HW DB and local list. It returns the new profile or null on error.
 * The caller needs to hold the scheduler lock.
 */
static struct ice_aqc_rl_profile_info *
ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
			 u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	u16 profiles_added = 0, num_profiles = 1;
	struct ice_aqc_rl_profile_elem *buf;
	enum ice_status status;
	u8 profile_type;

	if (!hw || layer_num >= hw->num_tx_sched_layers)
		return NULL;
	/* Map the generic RL type onto the AQ profile-type flag */
	switch (rl_type) {
	case ICE_MIN_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
		break;
	case ICE_MAX_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
		break;
	case ICE_SHARED_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
		break;
	default:
		return NULL;
	}

	/* Reuse an existing profile with the same type and BW on this layer */
	LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
			    ice_aqc_rl_profile_info, list_entry)
		if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
		    profile_type && rl_prof_elem->bw == bw)
			/* Return existing profile ID info */
			return rl_prof_elem;

	/* Create new profile ID */
	rl_prof_elem = (struct ice_aqc_rl_profile_info *)
		ice_malloc(hw, sizeof(*rl_prof_elem));

	if (!rl_prof_elem)
		return NULL;

	status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
	if (status != ICE_SUCCESS)
		goto exit_add_rl_prof;

	rl_prof_elem->bw = bw;
	/* layer_num is zero relative, and fw expects level from 1 to 9 */
	rl_prof_elem->profile.level = layer_num + 1;
	rl_prof_elem->profile.flags = profile_type;
	rl_prof_elem->profile.max_burst_size = CPU_TO_LE16(hw->max_burst_size);

	/* Create new entry in HW DB */
	buf = &rl_prof_elem->profile;
	status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
				       &profiles_added, NULL);
	if (status || profiles_added != num_profiles)
		goto exit_add_rl_prof;

	/* Good entry - add in the list */
	rl_prof_elem->prof_id_ref = 0;
	LIST_ADD(&rl_prof_elem->list_entry, &hw->rl_prof_list[layer_num]);
	return rl_prof_elem;

exit_add_rl_prof:
	/* On any failure the half-built entry is released */
	ice_free(hw, rl_prof_elem);
	return NULL;
}

/**
 * ice_sched_cfg_node_bw_lmt - configure node sched params
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @rl_type: rate limit type CIR, EIR, or shared
 * @rl_prof_id: rate limit profile ID
 *
 * This function configures node element's BW limit.
 */
static enum ice_status
ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
			  enum ice_rl_type rl_type, u16 rl_prof_id)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;

	/* Work on a local copy; only committed on successful HW update */
	buf = node->info;
	data = &buf.data;
	switch (rl_type) {
	case ICE_MIN_BW:
		data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
		data->cir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
		break;
	case ICE_MAX_BW:
		data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
		data->eir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
		break;
	case ICE_SHARED_BW:
		data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
		data->srl_id = CPU_TO_LE16(rl_prof_id);
		break;
	default:
		/* Unknown rate limit type */
		return ICE_ERR_PARAM;
	}

	/* Configure element */
	return ice_sched_update_elem(hw, node, &buf);
}

/**
 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
 * @node: sched node
 * @rl_type: rate limit type
 *
 * If existing profile matches, it returns the corresponding rate
 * limit profile ID, otherwise it returns an invalid ID as error.
4191 */ 4192 static u16 4193 ice_sched_get_node_rl_prof_id(struct ice_sched_node *node, 4194 enum ice_rl_type rl_type) 4195 { 4196 u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID; 4197 struct ice_aqc_txsched_elem *data; 4198 4199 data = &node->info.data; 4200 switch (rl_type) { 4201 case ICE_MIN_BW: 4202 if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR) 4203 rl_prof_id = LE16_TO_CPU(data->cir_bw.bw_profile_idx); 4204 break; 4205 case ICE_MAX_BW: 4206 if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR) 4207 rl_prof_id = LE16_TO_CPU(data->eir_bw.bw_profile_idx); 4208 break; 4209 case ICE_SHARED_BW: 4210 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) 4211 rl_prof_id = LE16_TO_CPU(data->srl_id); 4212 break; 4213 default: 4214 break; 4215 } 4216 4217 return rl_prof_id; 4218 } 4219 4220 /** 4221 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer 4222 * @pi: port information structure 4223 * @rl_type: type of rate limit BW - min, max, or shared 4224 * @layer_index: layer index 4225 * 4226 * This function returns requested profile creation layer. 4227 */ 4228 static u8 4229 ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type, 4230 u8 layer_index) 4231 { 4232 struct ice_hw *hw = pi->hw; 4233 4234 if (layer_index >= hw->num_tx_sched_layers) 4235 return ICE_SCHED_INVAL_LAYER_NUM; 4236 switch (rl_type) { 4237 case ICE_MIN_BW: 4238 if (hw->layer_info[layer_index].max_cir_rl_profiles) 4239 return layer_index; 4240 break; 4241 case ICE_MAX_BW: 4242 if (hw->layer_info[layer_index].max_eir_rl_profiles) 4243 return layer_index; 4244 break; 4245 case ICE_SHARED_BW: 4246 /* if current layer doesn't support SRL profile creation 4247 * then try a layer up or down. 
4248 */ 4249 if (hw->layer_info[layer_index].max_srl_profiles) 4250 return layer_index; 4251 else if (layer_index < hw->num_tx_sched_layers - 1 && 4252 hw->layer_info[layer_index + 1].max_srl_profiles) 4253 return layer_index + 1; 4254 else if (layer_index > 0 && 4255 hw->layer_info[layer_index - 1].max_srl_profiles) 4256 return layer_index - 1; 4257 break; 4258 default: 4259 break; 4260 } 4261 return ICE_SCHED_INVAL_LAYER_NUM; 4262 } 4263 4264 /** 4265 * ice_sched_get_srl_node - get shared rate limit node 4266 * @node: tree node 4267 * @srl_layer: shared rate limit layer 4268 * 4269 * This function returns SRL node to be used for shared rate limit purpose. 4270 * The caller needs to hold scheduler lock. 4271 */ 4272 static struct ice_sched_node * 4273 ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer) 4274 { 4275 if (srl_layer > node->tx_sched_layer) 4276 return node->children[0]; 4277 else if (srl_layer < node->tx_sched_layer) 4278 /* Node can't be created without a parent. It will always 4279 * have a valid parent except root node. 4280 */ 4281 return node->parent; 4282 else 4283 return node; 4284 } 4285 4286 /** 4287 * ice_sched_rm_rl_profile - remove RL profile ID 4288 * @hw: pointer to the hardware structure 4289 * @layer_num: layer number where profiles are saved 4290 * @profile_type: profile type like EIR, CIR, or SRL 4291 * @profile_id: profile ID to remove 4292 * 4293 * This function removes rate limit profile from layer 'layer_num' of type 4294 * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold 4295 * scheduler lock. 
 */
static enum ice_status
ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
			u16 profile_id)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	enum ice_status status = ICE_SUCCESS;

	if (!hw || layer_num >= hw->num_tx_sched_layers)
		return ICE_ERR_PARAM;
	/* Check the existing list for RL profile */
	LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
			    ice_aqc_rl_profile_info, list_entry)
		if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
		    profile_type &&
		    LE16_TO_CPU(rl_prof_elem->profile.profile_id) ==
		    profile_id) {
			/* Drop one reference before attempting deletion */
			if (rl_prof_elem->prof_id_ref)
				rl_prof_elem->prof_id_ref--;

			/* Remove old profile ID from database */
			status = ice_sched_del_rl_profile(hw, rl_prof_elem);
			if (status && status != ICE_ERR_IN_USE)
				ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
			break;
		}
	/* A profile that is still referenced is not an error for callers */
	if (status == ICE_ERR_IN_USE)
		status = ICE_SUCCESS;
	return status;
}

/**
 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
 * @pi: port information structure
 * @node: pointer to node structure
 * @rl_type: rate limit type min, max, or shared
 * @layer_num: layer number where RL profiles are saved
 *
 * This function configures node element's BW rate limit profile ID of
 * type CIR, EIR, or SRL to default. This function needs to be called
 * with the scheduler lock held.
4337 */ 4338 static enum ice_status 4339 ice_sched_set_node_bw_dflt(struct ice_port_info *pi, 4340 struct ice_sched_node *node, 4341 enum ice_rl_type rl_type, u8 layer_num) 4342 { 4343 enum ice_status status; 4344 struct ice_hw *hw; 4345 u8 profile_type; 4346 u16 rl_prof_id; 4347 u16 old_id; 4348 4349 hw = pi->hw; 4350 switch (rl_type) { 4351 case ICE_MIN_BW: 4352 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; 4353 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; 4354 break; 4355 case ICE_MAX_BW: 4356 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; 4357 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; 4358 break; 4359 case ICE_SHARED_BW: 4360 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; 4361 /* No SRL is configured for default case */ 4362 rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID; 4363 break; 4364 default: 4365 return ICE_ERR_PARAM; 4366 } 4367 /* Save existing RL prof ID for later clean up */ 4368 old_id = ice_sched_get_node_rl_prof_id(node, rl_type); 4369 /* Configure BW scheduling parameters */ 4370 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); 4371 if (status) 4372 return status; 4373 4374 /* Remove stale RL profile ID */ 4375 if (old_id == ICE_SCHED_DFLT_RL_PROF_ID || 4376 old_id == ICE_SCHED_INVAL_PROF_ID) 4377 return ICE_SUCCESS; 4378 4379 return ice_sched_rm_rl_profile(hw, layer_num, profile_type, old_id); 4380 } 4381 4382 /** 4383 * ice_sched_set_node_bw - set node's bandwidth 4384 * @pi: port information structure 4385 * @node: tree node 4386 * @rl_type: rate limit type min, max, or shared 4387 * @bw: bandwidth in Kbps - Kilo bits per sec 4388 * @layer_num: layer number 4389 * 4390 * This function adds new profile corresponding to requested BW, configures 4391 * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile 4392 * ID from local database. The caller needs to hold scheduler lock. 
 */
enum ice_status
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
		      enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_info;
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_hw *hw = pi->hw;
	u16 old_id, rl_prof_id;

	/* Find an existing profile for this BW or create a new one */
	rl_prof_info = ice_sched_add_rl_profile(hw, rl_type, bw, layer_num);
	if (!rl_prof_info)
		return status;

	rl_prof_id = LE16_TO_CPU(rl_prof_info->profile.profile_id);

	/* Save existing RL prof ID for later clean up */
	old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
	/* Configure BW scheduling parameters */
	status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
	if (status)
		return status;

	/* New changes have been applied */
	/* Increment the profile ID reference count */
	rl_prof_info->prof_id_ref++;

	/* Check for old ID removal */
	if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
	    old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
		return ICE_SUCCESS;

	return ice_sched_rm_rl_profile(hw, layer_num,
				       rl_prof_info->profile.flags &
				       ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}

/**
 * ice_sched_set_node_priority - set node's priority
 * @pi: port information structure
 * @node: tree node
 * @priority: number 0-7 representing priority among siblings
 *
 * This function sets priority of a node among its siblings.
4437 */ 4438 enum ice_status 4439 ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node, 4440 u16 priority) 4441 { 4442 struct ice_aqc_txsched_elem_data buf; 4443 struct ice_aqc_txsched_elem *data; 4444 4445 buf = node->info; 4446 data = &buf.data; 4447 4448 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; 4449 data->generic |= ICE_AQC_ELEM_GENERIC_PRIO_M & 4450 (priority << ICE_AQC_ELEM_GENERIC_PRIO_S); 4451 4452 return ice_sched_update_elem(pi->hw, node, &buf); 4453 } 4454 4455 /** 4456 * ice_sched_set_node_weight - set node's weight 4457 * @pi: port information structure 4458 * @node: tree node 4459 * @weight: number 1-200 representing weight for WFQ 4460 * 4461 * This function sets weight of the node for WFQ algorithm. 4462 */ 4463 enum ice_status 4464 ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight) 4465 { 4466 struct ice_aqc_txsched_elem_data buf; 4467 struct ice_aqc_txsched_elem *data; 4468 4469 buf = node->info; 4470 data = &buf.data; 4471 4472 data->valid_sections = ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR | 4473 ICE_AQC_ELEM_VALID_GENERIC; 4474 data->cir_bw.bw_alloc = CPU_TO_LE16(weight); 4475 data->eir_bw.bw_alloc = CPU_TO_LE16(weight); 4476 data->generic |= ICE_AQC_ELEM_GENERIC_SP_M & 4477 (0x0 << ICE_AQC_ELEM_GENERIC_SP_S); 4478 4479 return ice_sched_update_elem(pi->hw, node, &buf); 4480 } 4481 4482 /** 4483 * ice_sched_set_node_bw_lmt - set node's BW limit 4484 * @pi: port information structure 4485 * @node: tree node 4486 * @rl_type: rate limit type min, max, or shared 4487 * @bw: bandwidth in Kbps - Kilo bits per sec 4488 * 4489 * It updates node's BW limit parameters like BW RL profile ID of type CIR, 4490 * EIR, or SRL. The caller needs to hold scheduler lock. 4491 * 4492 * NOTE: Caller provides the correct SRL node in case of shared profile 4493 * settings. 
 */
enum ice_status
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
			  enum ice_rl_type rl_type, u32 bw)
{
	struct ice_hw *hw;
	u8 layer_num;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	/* Remove unused RL profile IDs from HW and SW DB */
	ice_sched_rm_unused_rl_prof(hw);

	/* The RL profile may live on a layer other than the node's own */
	layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
						node->tx_sched_layer);
	if (layer_num >= hw->num_tx_sched_layers)
		return ICE_ERR_PARAM;

	/* Default BW means "remove the limit" rather than "set a limit" */
	if (bw == ICE_SCHED_DFLT_BW)
		return ice_sched_set_node_bw_dflt(pi, node, rl_type, layer_num);
	return ice_sched_set_node_bw(pi, node, rl_type, bw, layer_num);
}

/**
 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
 * @pi: port information structure
 * @node: pointer to node structure
 * @rl_type: rate limit type min, max, or shared
 *
 * This function configures node element's BW rate limit profile ID of
 * type CIR, EIR, or SRL to default. This function needs to be called
 * with the scheduler lock held.
 */
static enum ice_status
ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
			       struct ice_sched_node *node,
			       enum ice_rl_type rl_type)
{
	return ice_sched_set_node_bw_lmt(pi, node, rl_type,
					 ICE_SCHED_DFLT_BW);
}

/**
 * ice_sched_validate_srl_node - Check node for SRL applicability
 * @node: sched node to configure
 * @sel_layer: selected SRL layer
 *
 * This function checks if the SRL can be applied to a selected layer node on
 * behalf of the requested node (first argument). This function needs to be
 * called with scheduler lock held.
 */
static enum ice_status
ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
{
	/* SRL profiles are not available on all layers. Check if the
	 * SRL profile can be applied to a node above or below the
	 * requested node. SRL configuration is possible only if the
	 * selected layer's node has single child.
	 */
	if (sel_layer == node->tx_sched_layer ||
	    ((sel_layer == node->tx_sched_layer + 1) &&
	    node->num_children == 1) ||
	    ((sel_layer == node->tx_sched_layer - 1) &&
	    (node->parent && node->parent->num_children == 1)))
		return ICE_SUCCESS;

	return ICE_ERR_CFG;
}

/**
 * ice_sched_save_q_bw - save queue node's BW information
 * @q_ctx: queue context structure
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save BW information of queue type node for post replay use.
 */
static enum ice_status
ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
{
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
		break;
	case ICE_SHARED_BW:
		ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
		break;
	default:
		return ICE_ERR_PARAM;
	}
	return ICE_SUCCESS;
}

/**
 * ice_sched_set_q_bw_lmt - sets queue BW limit
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @q_handle: software queue handle
 * @rl_type: min, max, or shared
 * @bw: bandwidth in Kbps
 *
 * This function sets BW limit of queue scheduling node.
 */
static enum ice_status
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		       u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_node *node;
	struct ice_q_ctx *q_ctx;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	ice_acquire_lock(&pi->sched_lock);
	q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
	if (!q_ctx)
		goto exit_q_bw_lmt;
	/* Resolve the queue's scheduler node from its TEID */
	node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
	if (!node) {
		ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
		goto exit_q_bw_lmt;
	}

	/* Return error if it is not a leaf node */
	if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
		goto exit_q_bw_lmt;

	/* SRL bandwidth layer selection */
	if (rl_type == ICE_SHARED_BW) {
		u8 sel_layer; /* selected layer */

		sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
							node->tx_sched_layer);
		if (sel_layer >= pi->hw->num_tx_sched_layers) {
			status = ICE_ERR_PARAM;
			goto exit_q_bw_lmt;
		}
		status = ice_sched_validate_srl_node(node, sel_layer);
		if (status)
			goto exit_q_bw_lmt;
	}

	if (bw == ICE_SCHED_DFLT_BW)
		status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
	else
		status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);

	/* Persist the setting for replay after reset */
	if (!status)
		status = ice_sched_save_q_bw(q_ctx, rl_type, bw);

exit_q_bw_lmt:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_q_bw_lmt - configure queue BW limit
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @q_handle: software queue handle
 * @rl_type: min, max, or shared
 * @bw: bandwidth in Kbps
 *
 * This function configures BW limit of queue scheduling node.
4664 */ 4665 enum ice_status 4666 ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4667 u16 q_handle, enum ice_rl_type rl_type, u32 bw) 4668 { 4669 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, 4670 bw); 4671 } 4672 4673 /** 4674 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit 4675 * @pi: port information structure 4676 * @vsi_handle: sw VSI handle 4677 * @tc: traffic class 4678 * @q_handle: software queue handle 4679 * @rl_type: min, max, or shared 4680 * 4681 * This function configures BW default limit of queue scheduling node. 4682 */ 4683 enum ice_status 4684 ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4685 u16 q_handle, enum ice_rl_type rl_type) 4686 { 4687 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, 4688 ICE_SCHED_DFLT_BW); 4689 } 4690 4691 /** 4692 * ice_sched_save_tc_node_bw - save TC node BW limit 4693 * @pi: port information structure 4694 * @tc: TC number 4695 * @rl_type: min or max 4696 * @bw: bandwidth in Kbps 4697 * 4698 * This function saves the modified values of bandwidth settings for later 4699 * replay purpose (restore) after reset. 
4700 */ 4701 static enum ice_status 4702 ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc, 4703 enum ice_rl_type rl_type, u32 bw) 4704 { 4705 if (tc >= ICE_MAX_TRAFFIC_CLASS) 4706 return ICE_ERR_PARAM; 4707 switch (rl_type) { 4708 case ICE_MIN_BW: 4709 ice_set_clear_cir_bw(&pi->tc_node_bw_t_info[tc], bw); 4710 break; 4711 case ICE_MAX_BW: 4712 ice_set_clear_eir_bw(&pi->tc_node_bw_t_info[tc], bw); 4713 break; 4714 case ICE_SHARED_BW: 4715 ice_set_clear_shared_bw(&pi->tc_node_bw_t_info[tc], bw); 4716 break; 4717 default: 4718 return ICE_ERR_PARAM; 4719 } 4720 return ICE_SUCCESS; 4721 } 4722 4723 /** 4724 * ice_sched_set_tc_node_bw_lmt - sets TC node BW limit 4725 * @pi: port information structure 4726 * @tc: TC number 4727 * @rl_type: min or max 4728 * @bw: bandwidth in Kbps 4729 * 4730 * This function configures bandwidth limit of TC node. 4731 */ 4732 static enum ice_status 4733 ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, 4734 enum ice_rl_type rl_type, u32 bw) 4735 { 4736 enum ice_status status = ICE_ERR_PARAM; 4737 struct ice_sched_node *tc_node; 4738 4739 if (tc >= ICE_MAX_TRAFFIC_CLASS) 4740 return status; 4741 ice_acquire_lock(&pi->sched_lock); 4742 tc_node = ice_sched_get_tc_node(pi, tc); 4743 if (!tc_node) 4744 goto exit_set_tc_node_bw; 4745 if (bw == ICE_SCHED_DFLT_BW) 4746 status = ice_sched_set_node_bw_dflt_lmt(pi, tc_node, rl_type); 4747 else 4748 status = ice_sched_set_node_bw_lmt(pi, tc_node, rl_type, bw); 4749 if (!status) 4750 status = ice_sched_save_tc_node_bw(pi, tc, rl_type, bw); 4751 4752 exit_set_tc_node_bw: 4753 ice_release_lock(&pi->sched_lock); 4754 return status; 4755 } 4756 4757 /** 4758 * ice_cfg_tc_node_bw_lmt - configure TC node BW limit 4759 * @pi: port information structure 4760 * @tc: TC number 4761 * @rl_type: min or max 4762 * @bw: bandwidth in Kbps 4763 * 4764 * This function configures BW limit of TC node. 4765 * Note: The minimum guaranteed reservation is done via DCBX. 
4766 */ 4767 enum ice_status 4768 ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, 4769 enum ice_rl_type rl_type, u32 bw) 4770 { 4771 return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, bw); 4772 } 4773 4774 /** 4775 * ice_cfg_tc_node_bw_dflt_lmt - configure TC node BW default limit 4776 * @pi: port information structure 4777 * @tc: TC number 4778 * @rl_type: min or max 4779 * 4780 * This function configures BW default limit of TC node. 4781 */ 4782 enum ice_status 4783 ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc, 4784 enum ice_rl_type rl_type) 4785 { 4786 return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, ICE_SCHED_DFLT_BW); 4787 } 4788 4789 /** 4790 * ice_sched_save_tc_node_bw_alloc - save TC node's BW alloc information 4791 * @pi: port information structure 4792 * @tc: traffic class 4793 * @rl_type: rate limit type min or max 4794 * @bw_alloc: Bandwidth allocation information 4795 * 4796 * Save BW alloc information of VSI type node for post replay use. 4797 */ 4798 static enum ice_status 4799 ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, 4800 enum ice_rl_type rl_type, u16 bw_alloc) 4801 { 4802 if (tc >= ICE_MAX_TRAFFIC_CLASS) 4803 return ICE_ERR_PARAM; 4804 switch (rl_type) { 4805 case ICE_MIN_BW: 4806 ice_set_clear_cir_bw_alloc(&pi->tc_node_bw_t_info[tc], 4807 bw_alloc); 4808 break; 4809 case ICE_MAX_BW: 4810 ice_set_clear_eir_bw_alloc(&pi->tc_node_bw_t_info[tc], 4811 bw_alloc); 4812 break; 4813 default: 4814 return ICE_ERR_PARAM; 4815 } 4816 return ICE_SUCCESS; 4817 } 4818 4819 /** 4820 * ice_sched_set_tc_node_bw_alloc - set TC node BW alloc 4821 * @pi: port information structure 4822 * @tc: TC number 4823 * @rl_type: min or max 4824 * @bw_alloc: bandwidth alloc 4825 * 4826 * This function configures bandwidth alloc of TC node, also saves the 4827 * changed settings for replay purpose, and return success if it succeeds 4828 * in modifying bandwidth alloc setting. 
4829 */ 4830 static enum ice_status 4831 ice_sched_set_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, 4832 enum ice_rl_type rl_type, u8 bw_alloc) 4833 { 4834 enum ice_status status = ICE_ERR_PARAM; 4835 struct ice_sched_node *tc_node; 4836 4837 if (tc >= ICE_MAX_TRAFFIC_CLASS) 4838 return status; 4839 ice_acquire_lock(&pi->sched_lock); 4840 tc_node = ice_sched_get_tc_node(pi, tc); 4841 if (!tc_node) 4842 goto exit_set_tc_node_bw_alloc; 4843 status = ice_sched_cfg_node_bw_alloc(pi->hw, tc_node, rl_type, 4844 bw_alloc); 4845 if (status) 4846 goto exit_set_tc_node_bw_alloc; 4847 status = ice_sched_save_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc); 4848 4849 exit_set_tc_node_bw_alloc: 4850 ice_release_lock(&pi->sched_lock); 4851 return status; 4852 } 4853 4854 /** 4855 * ice_cfg_tc_node_bw_alloc - configure TC node BW alloc 4856 * @pi: port information structure 4857 * @tc: TC number 4858 * @rl_type: min or max 4859 * @bw_alloc: bandwidth alloc 4860 * 4861 * This function configures BW limit of TC node. 4862 * Note: The minimum guaranteed reservation is done via DCBX. 4863 */ 4864 enum ice_status 4865 ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, 4866 enum ice_rl_type rl_type, u8 bw_alloc) 4867 { 4868 return ice_sched_set_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc); 4869 } 4870 4871 /** 4872 * ice_sched_set_agg_bw_dflt_lmt - set aggregator node's BW limit to default 4873 * @pi: port information structure 4874 * @vsi_handle: software VSI handle 4875 * 4876 * This function retrieves the aggregator ID based on VSI ID and TC, 4877 * and sets node's BW limit to default. This function needs to be 4878 * called with the scheduler lock held. 
4879 */ 4880 enum ice_status 4881 ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle) 4882 { 4883 struct ice_vsi_ctx *vsi_ctx; 4884 enum ice_status status = ICE_SUCCESS; 4885 u8 tc; 4886 4887 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4888 return ICE_ERR_PARAM; 4889 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 4890 if (!vsi_ctx) 4891 return ICE_ERR_PARAM; 4892 4893 ice_for_each_traffic_class(tc) { 4894 struct ice_sched_node *node; 4895 4896 node = vsi_ctx->sched.ag_node[tc]; 4897 if (!node) 4898 continue; 4899 4900 /* Set min profile to default */ 4901 status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MIN_BW); 4902 if (status) 4903 break; 4904 4905 /* Set max profile to default */ 4906 status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MAX_BW); 4907 if (status) 4908 break; 4909 4910 /* Remove shared profile, if there is one */ 4911 status = ice_sched_set_node_bw_dflt_lmt(pi, node, 4912 ICE_SHARED_BW); 4913 if (status) 4914 break; 4915 } 4916 4917 return status; 4918 } 4919 4920 /** 4921 * ice_sched_get_node_by_id_type - get node from ID type 4922 * @pi: port information structure 4923 * @id: identifier 4924 * @agg_type: type of aggregator 4925 * @tc: traffic class 4926 * 4927 * This function returns node identified by ID of type aggregator, and 4928 * based on traffic class (TC). This function needs to be called with 4929 * the scheduler lock held. 
4930 */ 4931 static struct ice_sched_node * 4932 ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, 4933 enum ice_agg_type agg_type, u8 tc) 4934 { 4935 struct ice_sched_node *node = NULL; 4936 4937 switch (agg_type) { 4938 case ICE_AGG_TYPE_VSI: { 4939 struct ice_vsi_ctx *vsi_ctx; 4940 u16 vsi_handle = (u16)id; 4941 4942 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4943 break; 4944 /* Get sched_vsi_info */ 4945 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 4946 if (!vsi_ctx) 4947 break; 4948 node = vsi_ctx->sched.vsi_node[tc]; 4949 break; 4950 } 4951 4952 case ICE_AGG_TYPE_AGG: { 4953 struct ice_sched_node *tc_node; 4954 4955 tc_node = ice_sched_get_tc_node(pi, tc); 4956 if (tc_node) 4957 node = ice_sched_get_agg_node(pi, tc_node, id); 4958 break; 4959 } 4960 4961 case ICE_AGG_TYPE_Q: 4962 /* The current implementation allows single queue to modify */ 4963 node = ice_sched_find_node_by_teid(pi->root, id); 4964 break; 4965 4966 case ICE_AGG_TYPE_QG: { 4967 struct ice_sched_node *child_node; 4968 4969 /* The current implementation allows single qg to modify */ 4970 child_node = ice_sched_find_node_by_teid(pi->root, id); 4971 if (!child_node) 4972 break; 4973 node = child_node->parent; 4974 break; 4975 } 4976 4977 default: 4978 break; 4979 } 4980 4981 return node; 4982 } 4983 4984 /** 4985 * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC 4986 * @pi: port information structure 4987 * @id: ID (software VSI handle or AGG ID) 4988 * @agg_type: aggregator type (VSI or AGG type node) 4989 * @tc: traffic class 4990 * @rl_type: min or max 4991 * @bw: bandwidth in Kbps 4992 * 4993 * This function sets BW limit of VSI or Aggregator scheduling node 4994 * based on TC information from passed in argument BW. 
4995 */ 4996 enum ice_status 4997 ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, 4998 enum ice_agg_type agg_type, u8 tc, 4999 enum ice_rl_type rl_type, u32 bw) 5000 { 5001 enum ice_status status = ICE_ERR_PARAM; 5002 struct ice_sched_node *node; 5003 5004 if (!pi) 5005 return status; 5006 5007 if (rl_type == ICE_UNKNOWN_BW) 5008 return status; 5009 5010 ice_acquire_lock(&pi->sched_lock); 5011 node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc); 5012 if (!node) { 5013 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n"); 5014 goto exit_set_node_bw_lmt_per_tc; 5015 } 5016 if (bw == ICE_SCHED_DFLT_BW) 5017 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type); 5018 else 5019 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw); 5020 5021 exit_set_node_bw_lmt_per_tc: 5022 ice_release_lock(&pi->sched_lock); 5023 return status; 5024 } 5025 5026 /** 5027 * ice_sched_validate_vsi_srl_node - validate VSI SRL node 5028 * @pi: port information structure 5029 * @vsi_handle: software VSI handle 5030 * 5031 * This function validates SRL node of the VSI node if available SRL layer is 5032 * different than the VSI node layer on all TC(s).This function needs to be 5033 * called with scheduler lock held. 
 */
static enum ice_status
ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
{
	u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *vsi_node;
		enum ice_rl_type rl_type = ICE_SHARED_BW;
		enum ice_status status;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		/* SRL bandwidth layer selection - computed once from the
		 * first TC with a VSI node, then reused for the rest.
		 */
		if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) {
			u8 node_layer = vsi_node->tx_sched_layer;
			u8 layer_num;

			layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
								node_layer);
			if (layer_num >= pi->hw->num_tx_sched_layers)
				return ICE_ERR_PARAM;
			sel_layer = layer_num;
		}

		status = ice_sched_validate_srl_node(vsi_node, sel_layer);
		if (status)
			return status;
	}
	return ICE_SUCCESS;
}

/**
 * ice_sched_set_save_vsi_srl_node_bw - set VSI shared limit values
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class
 * @srl_node: sched node to configure
 * @rl_type: rate limit type minimum, maximum, or shared
 * @bw: minimum, maximum, or shared bandwidth in Kbps
 *
 * Configure shared rate limiter(SRL) of VSI type nodes across given traffic
 * class, and saves those value for later use for replaying purposes. The
 * caller holds the scheduler lock.
5089 */ 5090 static enum ice_status 5091 ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle, 5092 u8 tc, struct ice_sched_node *srl_node, 5093 enum ice_rl_type rl_type, u32 bw) 5094 { 5095 enum ice_status status; 5096 5097 if (bw == ICE_SCHED_DFLT_BW) { 5098 status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type); 5099 } else { 5100 status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw); 5101 if (status) 5102 return status; 5103 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw); 5104 } 5105 return status; 5106 } 5107 5108 /** 5109 * ice_sched_set_vsi_node_srl_per_tc - set VSI node BW shared limit for tc 5110 * @pi: port information structure 5111 * @vsi_handle: software VSI handle 5112 * @tc: traffic class 5113 * @min_bw: minimum bandwidth in Kbps 5114 * @max_bw: maximum bandwidth in Kbps 5115 * @shared_bw: shared bandwidth in Kbps 5116 * 5117 * Configure shared rate limiter(SRL) of VSI type nodes across requested 5118 * traffic class for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW 5119 * is passed, it removes the corresponding bw from the node. The caller 5120 * holds scheduler lock. 
5121 */ 5122 static enum ice_status 5123 ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle, 5124 u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) 5125 { 5126 struct ice_sched_node *tc_node, *vsi_node, *cfg_node; 5127 enum ice_status status; 5128 u8 layer_num; 5129 5130 tc_node = ice_sched_get_tc_node(pi, tc); 5131 if (!tc_node) 5132 return ICE_ERR_CFG; 5133 5134 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); 5135 if (!vsi_node) 5136 return ICE_ERR_CFG; 5137 5138 layer_num = ice_sched_get_rl_prof_layer(pi, ICE_SHARED_BW, 5139 vsi_node->tx_sched_layer); 5140 if (layer_num >= pi->hw->num_tx_sched_layers) 5141 return ICE_ERR_PARAM; 5142 5143 /* SRL node may be different */ 5144 cfg_node = ice_sched_get_srl_node(vsi_node, layer_num); 5145 if (!cfg_node) 5146 return ICE_ERR_CFG; 5147 5148 status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc, 5149 cfg_node, ICE_MIN_BW, 5150 min_bw); 5151 if (status) 5152 return status; 5153 5154 status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc, 5155 cfg_node, ICE_MAX_BW, 5156 max_bw); 5157 if (status) 5158 return status; 5159 5160 return ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc, cfg_node, 5161 ICE_SHARED_BW, shared_bw); 5162 } 5163 5164 /** 5165 * ice_sched_set_vsi_bw_shared_lmt - set VSI BW shared limit 5166 * @pi: port information structure 5167 * @vsi_handle: software VSI handle 5168 * @min_bw: minimum bandwidth in Kbps 5169 * @max_bw: maximum bandwidth in Kbps 5170 * @shared_bw: shared bandwidth in Kbps 5171 * 5172 * Configure shared rate limiter(SRL) of all VSI type nodes across all traffic 5173 * classes for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW is 5174 * passed, it removes those value(s) from the node. 
 */
enum ice_status
ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
				u32 min_bw, u32 max_bw, u32 shared_bw)
{
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	if (!pi)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);
	/* Validate SRL applicability before touching any TC */
	status = ice_sched_validate_vsi_srl_node(pi, vsi_handle);
	if (status)
		goto exit_set_vsi_bw_shared_lmt;
	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *vsi_node;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		status = ice_sched_set_vsi_node_srl_per_tc(pi, vsi_handle, tc,
							   min_bw, max_bw,
							   shared_bw);
		if (status)
			break;
	}

exit_set_vsi_bw_shared_lmt:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_sched_validate_agg_srl_node - validate AGG SRL node
 * @pi: port information structure
 * @agg_id: aggregator ID
 *
 * This function validates SRL node of the AGG node if available SRL layer is
 * different than the AGG node layer on all TC(s). This function needs to be
 * called with scheduler lock held.
5225 */ 5226 static enum ice_status 5227 ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id) 5228 { 5229 u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM; 5230 struct ice_sched_agg_info *agg_info; 5231 bool agg_id_present = false; 5232 enum ice_status status = ICE_SUCCESS; 5233 u8 tc; 5234 5235 LIST_FOR_EACH_ENTRY(agg_info, &pi->hw->agg_list, ice_sched_agg_info, 5236 list_entry) 5237 if (agg_info->agg_id == agg_id) { 5238 agg_id_present = true; 5239 break; 5240 } 5241 if (!agg_id_present) 5242 return ICE_ERR_PARAM; 5243 /* Return success if no nodes are present across TC */ 5244 ice_for_each_traffic_class(tc) { 5245 struct ice_sched_node *tc_node, *agg_node; 5246 enum ice_rl_type rl_type = ICE_SHARED_BW; 5247 5248 tc_node = ice_sched_get_tc_node(pi, tc); 5249 if (!tc_node) 5250 continue; 5251 5252 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); 5253 if (!agg_node) 5254 continue; 5255 /* SRL bandwidth layer selection */ 5256 if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) { 5257 u8 node_layer = agg_node->tx_sched_layer; 5258 u8 layer_num; 5259 5260 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, 5261 node_layer); 5262 if (layer_num >= pi->hw->num_tx_sched_layers) 5263 return ICE_ERR_PARAM; 5264 sel_layer = layer_num; 5265 } 5266 5267 status = ice_sched_validate_srl_node(agg_node, sel_layer); 5268 if (status) 5269 break; 5270 } 5271 return status; 5272 } 5273 5274 /** 5275 * ice_sched_validate_agg_id - Validate aggregator id 5276 * @pi: port information structure 5277 * @agg_id: aggregator ID 5278 * 5279 * This function validates aggregator id. Caller holds the scheduler lock. 
5280 */ 5281 static enum ice_status 5282 ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id) 5283 { 5284 struct ice_sched_agg_info *agg_info; 5285 struct ice_sched_agg_info *tmp; 5286 bool agg_id_present = false; 5287 enum ice_status status; 5288 5289 status = ice_sched_validate_agg_srl_node(pi, agg_id); 5290 if (status) 5291 return status; 5292 5293 LIST_FOR_EACH_ENTRY_SAFE(agg_info, tmp, &pi->hw->agg_list, 5294 ice_sched_agg_info, list_entry) 5295 if (agg_info->agg_id == agg_id) { 5296 agg_id_present = true; 5297 break; 5298 } 5299 5300 if (!agg_id_present) 5301 return ICE_ERR_PARAM; 5302 5303 return ICE_SUCCESS; 5304 } 5305 5306 /** 5307 * ice_sched_set_save_agg_srl_node_bw - set aggregator shared limit values 5308 * @pi: port information structure 5309 * @agg_id: aggregator ID 5310 * @tc: traffic class 5311 * @srl_node: sched node to configure 5312 * @rl_type: rate limit type minimum, maximum, or shared 5313 * @bw: minimum, maximum, or shared bandwidth in Kbps 5314 * 5315 * Configure shared rate limiter(SRL) of aggregator type nodes across 5316 * requested traffic class, and saves those value for later use for 5317 * replaying purposes. The caller holds the scheduler lock. 
5318 */ 5319 static enum ice_status 5320 ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, 5321 struct ice_sched_node *srl_node, 5322 enum ice_rl_type rl_type, u32 bw) 5323 { 5324 enum ice_status status; 5325 5326 if (bw == ICE_SCHED_DFLT_BW) { 5327 status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type); 5328 } else { 5329 status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw); 5330 if (status) 5331 return status; 5332 status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw); 5333 } 5334 return status; 5335 } 5336 5337 /** 5338 * ice_sched_set_agg_node_srl_per_tc - set aggregator SRL per tc 5339 * @pi: port information structure 5340 * @agg_id: aggregator ID 5341 * @tc: traffic class 5342 * @min_bw: minimum bandwidth in Kbps 5343 * @max_bw: maximum bandwidth in Kbps 5344 * @shared_bw: shared bandwidth in Kbps 5345 * 5346 * This function configures the shared rate limiter(SRL) of aggregator type 5347 * node for a given traffic class for aggregator matching agg_id. When BW 5348 * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. Caller 5349 * holds the scheduler lock. 
5350 */ 5351 static enum ice_status 5352 ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id, 5353 u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) 5354 { 5355 struct ice_sched_node *tc_node, *agg_node, *cfg_node; 5356 enum ice_rl_type rl_type = ICE_SHARED_BW; 5357 enum ice_status status = ICE_ERR_CFG; 5358 u8 layer_num; 5359 5360 tc_node = ice_sched_get_tc_node(pi, tc); 5361 if (!tc_node) 5362 return ICE_ERR_CFG; 5363 5364 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); 5365 if (!agg_node) 5366 return ICE_ERR_CFG; 5367 5368 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, 5369 agg_node->tx_sched_layer); 5370 if (layer_num >= pi->hw->num_tx_sched_layers) 5371 return ICE_ERR_PARAM; 5372 5373 /* SRL node may be different */ 5374 cfg_node = ice_sched_get_srl_node(agg_node, layer_num); 5375 if (!cfg_node) 5376 return ICE_ERR_CFG; 5377 5378 status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node, 5379 ICE_MIN_BW, min_bw); 5380 if (status) 5381 return status; 5382 5383 status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node, 5384 ICE_MAX_BW, max_bw); 5385 if (status) 5386 return status; 5387 5388 status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node, 5389 ICE_SHARED_BW, shared_bw); 5390 return status; 5391 } 5392 5393 /** 5394 * ice_sched_set_agg_bw_shared_lmt - set aggregator BW shared limit 5395 * @pi: port information structure 5396 * @agg_id: aggregator ID 5397 * @min_bw: minimum bandwidth in Kbps 5398 * @max_bw: maximum bandwidth in Kbps 5399 * @shared_bw: shared bandwidth in Kbps 5400 * 5401 * This function configures the shared rate limiter(SRL) of all aggregator type 5402 * nodes across all traffic classes for aggregator matching agg_id. When 5403 * BW value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the 5404 * node(s). 
5405 */ 5406 enum ice_status 5407 ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, 5408 u32 min_bw, u32 max_bw, u32 shared_bw) 5409 { 5410 enum ice_status status; 5411 u8 tc; 5412 5413 if (!pi) 5414 return ICE_ERR_PARAM; 5415 5416 ice_acquire_lock(&pi->sched_lock); 5417 status = ice_sched_validate_agg_id(pi, agg_id); 5418 if (status) 5419 goto exit_agg_bw_shared_lmt; 5420 5421 /* Return success if no nodes are present across TC */ 5422 ice_for_each_traffic_class(tc) { 5423 struct ice_sched_node *tc_node, *agg_node; 5424 5425 tc_node = ice_sched_get_tc_node(pi, tc); 5426 if (!tc_node) 5427 continue; 5428 5429 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); 5430 if (!agg_node) 5431 continue; 5432 5433 status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc, 5434 min_bw, max_bw, 5435 shared_bw); 5436 if (status) 5437 break; 5438 } 5439 5440 exit_agg_bw_shared_lmt: 5441 ice_release_lock(&pi->sched_lock); 5442 return status; 5443 } 5444 5445 /** 5446 * ice_sched_set_agg_bw_shared_lmt_per_tc - set aggregator BW shared lmt per tc 5447 * @pi: port information structure 5448 * @agg_id: aggregator ID 5449 * @tc: traffic class 5450 * @min_bw: minimum bandwidth in Kbps 5451 * @max_bw: maximum bandwidth in Kbps 5452 * @shared_bw: shared bandwidth in Kbps 5453 * 5454 * This function configures the shared rate limiter(SRL) of aggregator type 5455 * node for a given traffic class for aggregator matching agg_id. When BW 5456 * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. 
5457 */ 5458 enum ice_status 5459 ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, 5460 u8 tc, u32 min_bw, u32 max_bw, 5461 u32 shared_bw) 5462 { 5463 enum ice_status status; 5464 5465 if (!pi) 5466 return ICE_ERR_PARAM; 5467 ice_acquire_lock(&pi->sched_lock); 5468 status = ice_sched_validate_agg_id(pi, agg_id); 5469 if (status) 5470 goto exit_agg_bw_shared_lmt_per_tc; 5471 5472 status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc, min_bw, 5473 max_bw, shared_bw); 5474 5475 exit_agg_bw_shared_lmt_per_tc: 5476 ice_release_lock(&pi->sched_lock); 5477 return status; 5478 } 5479 5480 /** 5481 * ice_sched_cfg_sibl_node_prio - configure node sibling priority 5482 * @pi: port information structure 5483 * @node: sched node to configure 5484 * @priority: sibling priority 5485 * 5486 * This function configures node element's sibling priority only. This 5487 * function needs to be called with scheduler lock held. 5488 */ 5489 enum ice_status 5490 ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi, 5491 struct ice_sched_node *node, u8 priority) 5492 { 5493 struct ice_aqc_txsched_elem_data buf; 5494 struct ice_aqc_txsched_elem *data; 5495 struct ice_hw *hw = pi->hw; 5496 enum ice_status status; 5497 5498 if (!hw) 5499 return ICE_ERR_PARAM; 5500 buf = node->info; 5501 data = &buf.data; 5502 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; 5503 priority = (priority << ICE_AQC_ELEM_GENERIC_PRIO_S) & 5504 ICE_AQC_ELEM_GENERIC_PRIO_M; 5505 data->generic &= ~ICE_AQC_ELEM_GENERIC_PRIO_M; 5506 data->generic |= priority; 5507 5508 /* Configure element */ 5509 status = ice_sched_update_elem(hw, node, &buf); 5510 return status; 5511 } 5512 5513 /** 5514 * ice_cfg_rl_burst_size - Set burst size value 5515 * @hw: pointer to the HW struct 5516 * @bytes: burst size in bytes 5517 * 5518 * This function configures/set the burst size to requested new value. The new 5519 * burst size value is used for future rate limit calls. 
It doesn't change the 5520 * existing or previously created RL profiles. 5521 */ 5522 enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) 5523 { 5524 u16 burst_size_to_prog; 5525 5526 if (bytes < ICE_MIN_BURST_SIZE_ALLOWED || 5527 bytes > ICE_MAX_BURST_SIZE_ALLOWED) 5528 return ICE_ERR_PARAM; 5529 if (ice_round_to_num(bytes, 64) <= 5530 ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) { 5531 /* 64 byte granularity case */ 5532 /* Disable MSB granularity bit */ 5533 burst_size_to_prog = ICE_64_BYTE_GRANULARITY; 5534 /* round number to nearest 64 byte granularity */ 5535 bytes = ice_round_to_num(bytes, 64); 5536 /* The value is in 64 byte chunks */ 5537 burst_size_to_prog |= (u16)(bytes / 64); 5538 } else { 5539 /* k bytes granularity case */ 5540 /* Enable MSB granularity bit */ 5541 burst_size_to_prog = ICE_KBYTE_GRANULARITY; 5542 /* round number to nearest 1024 granularity */ 5543 bytes = ice_round_to_num(bytes, 1024); 5544 /* check rounding doesn't go beyond allowed */ 5545 if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY) 5546 bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY; 5547 /* The value is in k bytes */ 5548 burst_size_to_prog |= (u16)(bytes / 1024); 5549 } 5550 hw->max_burst_size = burst_size_to_prog; 5551 return ICE_SUCCESS; 5552 } 5553 5554 /** 5555 * ice_sched_replay_node_prio - re-configure node priority 5556 * @hw: pointer to the HW struct 5557 * @node: sched node to configure 5558 * @priority: priority value 5559 * 5560 * This function configures node element's priority value. It 5561 * needs to be called with scheduler lock held. 
5562 */ 5563 static enum ice_status 5564 ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, 5565 u8 priority) 5566 { 5567 struct ice_aqc_txsched_elem_data buf; 5568 struct ice_aqc_txsched_elem *data; 5569 enum ice_status status; 5570 5571 buf = node->info; 5572 data = &buf.data; 5573 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; 5574 data->generic = priority; 5575 5576 /* Configure element */ 5577 status = ice_sched_update_elem(hw, node, &buf); 5578 return status; 5579 } 5580 5581 /** 5582 * ice_sched_replay_node_bw - replay node(s) BW 5583 * @hw: pointer to the HW struct 5584 * @node: sched node to configure 5585 * @bw_t_info: BW type information 5586 * 5587 * This function restores node's BW from bw_t_info. The caller needs 5588 * to hold the scheduler lock. 5589 */ 5590 static enum ice_status 5591 ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, 5592 struct ice_bw_type_info *bw_t_info) 5593 { 5594 struct ice_port_info *pi = hw->port_info; 5595 enum ice_status status = ICE_ERR_PARAM; 5596 u16 bw_alloc; 5597 5598 if (!node) 5599 return status; 5600 if (!ice_is_any_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT)) 5601 return ICE_SUCCESS; 5602 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_PRIO)) { 5603 status = ice_sched_replay_node_prio(hw, node, 5604 bw_t_info->generic); 5605 if (status) 5606 return status; 5607 } 5608 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR)) { 5609 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, 5610 bw_t_info->cir_bw.bw); 5611 if (status) 5612 return status; 5613 } 5614 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR_WT)) { 5615 bw_alloc = bw_t_info->cir_bw.bw_alloc; 5616 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW, 5617 bw_alloc); 5618 if (status) 5619 return status; 5620 } 5621 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR)) { 5622 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, 5623 bw_t_info->eir_bw.bw); 
5624 if (status) 5625 return status; 5626 } 5627 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR_WT)) { 5628 bw_alloc = bw_t_info->eir_bw.bw_alloc; 5629 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW, 5630 bw_alloc); 5631 if (status) 5632 return status; 5633 } 5634 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_SHARED)) 5635 status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW, 5636 bw_t_info->shared_bw); 5637 return status; 5638 } 5639 5640 /** 5641 * ice_sched_replay_agg_bw - replay aggregator node(s) BW 5642 * @hw: pointer to the HW struct 5643 * @agg_info: aggregator data structure 5644 * 5645 * This function re-creates aggregator type nodes. The caller needs to hold 5646 * the scheduler lock. 5647 */ 5648 static enum ice_status 5649 ice_sched_replay_agg_bw(struct ice_hw *hw, struct ice_sched_agg_info *agg_info) 5650 { 5651 struct ice_sched_node *tc_node, *agg_node; 5652 enum ice_status status = ICE_SUCCESS; 5653 u8 tc; 5654 5655 if (!agg_info) 5656 return ICE_ERR_PARAM; 5657 ice_for_each_traffic_class(tc) { 5658 if (!ice_is_any_bit_set(agg_info->bw_t_info[tc].bw_t_bitmap, 5659 ICE_BW_TYPE_CNT)) 5660 continue; 5661 tc_node = ice_sched_get_tc_node(hw->port_info, tc); 5662 if (!tc_node) { 5663 status = ICE_ERR_PARAM; 5664 break; 5665 } 5666 agg_node = ice_sched_get_agg_node(hw->port_info, tc_node, 5667 agg_info->agg_id); 5668 if (!agg_node) { 5669 status = ICE_ERR_PARAM; 5670 break; 5671 } 5672 status = ice_sched_replay_node_bw(hw, agg_node, 5673 &agg_info->bw_t_info[tc]); 5674 if (status) 5675 break; 5676 } 5677 return status; 5678 } 5679 5680 /** 5681 * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap 5682 * @pi: port info struct 5683 * @tc_bitmap: 8 bits TC bitmap to check 5684 * @ena_tc_bitmap: 8 bits enabled TC bitmap to return 5685 * 5686 * This function returns enabled TC bitmap in variable ena_tc_bitmap. Some TCs 5687 * may be missing, it returns enabled TCs. 
This function needs to be called with 5688 * scheduler lock held. 5689 */ 5690 static void 5691 ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi, ice_bitmap_t *tc_bitmap, 5692 ice_bitmap_t *ena_tc_bitmap) 5693 { 5694 u8 tc; 5695 5696 /* Some TC(s) may be missing after reset, adjust for replay */ 5697 ice_for_each_traffic_class(tc) 5698 if (ice_is_tc_ena(*tc_bitmap, tc) && 5699 (ice_sched_get_tc_node(pi, tc))) 5700 ice_set_bit(tc, ena_tc_bitmap); 5701 } 5702 5703 /** 5704 * ice_sched_replay_agg - recreate aggregator node(s) 5705 * @hw: pointer to the HW struct 5706 * 5707 * This function recreate aggregator type nodes which are not replayed earlier. 5708 * It also replay aggregator BW information. These aggregator nodes are not 5709 * associated with VSI type node yet. 5710 */ 5711 void ice_sched_replay_agg(struct ice_hw *hw) 5712 { 5713 struct ice_port_info *pi = hw->port_info; 5714 struct ice_sched_agg_info *agg_info; 5715 5716 ice_acquire_lock(&pi->sched_lock); 5717 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, 5718 list_entry) 5719 /* replay aggregator (re-create aggregator node) */ 5720 if (!ice_cmp_bitmap(agg_info->tc_bitmap, 5721 agg_info->replay_tc_bitmap, 5722 ICE_MAX_TRAFFIC_CLASS)) { 5723 ice_declare_bitmap(replay_bitmap, 5724 ICE_MAX_TRAFFIC_CLASS); 5725 enum ice_status status; 5726 5727 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5728 ice_sched_get_ena_tc_bitmap(pi, 5729 agg_info->replay_tc_bitmap, 5730 replay_bitmap); 5731 status = ice_sched_cfg_agg(hw->port_info, 5732 agg_info->agg_id, 5733 ICE_AGG_TYPE_AGG, 5734 replay_bitmap); 5735 if (status) { 5736 ice_info(hw, "Replay agg id[%d] failed\n", 5737 agg_info->agg_id); 5738 /* Move on to next one */ 5739 continue; 5740 } 5741 /* Replay aggregator node BW (restore aggregator BW) */ 5742 status = ice_sched_replay_agg_bw(hw, agg_info); 5743 if (status) 5744 ice_info(hw, "Replay agg bw [id=%d] failed\n", 5745 agg_info->agg_id); 5746 } 5747 
ice_release_lock(&pi->sched_lock); 5748 } 5749 5750 /** 5751 * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization 5752 * @hw: pointer to the HW struct 5753 * 5754 * This function initialize aggregator(s) TC bitmap to zero. A required 5755 * preinit step for replaying aggregators. 5756 */ 5757 void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw) 5758 { 5759 struct ice_port_info *pi = hw->port_info; 5760 struct ice_sched_agg_info *agg_info; 5761 5762 ice_acquire_lock(&pi->sched_lock); 5763 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, 5764 list_entry) { 5765 struct ice_sched_agg_vsi_info *agg_vsi_info; 5766 5767 agg_info->tc_bitmap[0] = 0; 5768 LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list, 5769 ice_sched_agg_vsi_info, list_entry) 5770 agg_vsi_info->tc_bitmap[0] = 0; 5771 } 5772 ice_release_lock(&pi->sched_lock); 5773 } 5774 5775 /** 5776 * ice_sched_replay_root_node_bw - replay root node BW 5777 * @pi: port information structure 5778 * 5779 * Replay root node BW settings. 5780 */ 5781 enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi) 5782 { 5783 enum ice_status status = ICE_SUCCESS; 5784 5785 if (!pi->hw) 5786 return ICE_ERR_PARAM; 5787 ice_acquire_lock(&pi->sched_lock); 5788 5789 status = ice_sched_replay_node_bw(pi->hw, pi->root, 5790 &pi->root_node_bw_t_info); 5791 ice_release_lock(&pi->sched_lock); 5792 return status; 5793 } 5794 5795 /** 5796 * ice_sched_replay_tc_node_bw - replay TC node(s) BW 5797 * @pi: port information structure 5798 * 5799 * This function replay TC nodes. 
5800 */ 5801 enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi) 5802 { 5803 enum ice_status status = ICE_SUCCESS; 5804 u8 tc; 5805 5806 if (!pi->hw) 5807 return ICE_ERR_PARAM; 5808 ice_acquire_lock(&pi->sched_lock); 5809 ice_for_each_traffic_class(tc) { 5810 struct ice_sched_node *tc_node; 5811 5812 tc_node = ice_sched_get_tc_node(pi, tc); 5813 if (!tc_node) 5814 continue; /* TC not present */ 5815 status = ice_sched_replay_node_bw(pi->hw, tc_node, 5816 &pi->tc_node_bw_t_info[tc]); 5817 if (status) 5818 break; 5819 } 5820 ice_release_lock(&pi->sched_lock); 5821 return status; 5822 } 5823 5824 /** 5825 * ice_sched_replay_vsi_bw - replay VSI type node(s) BW 5826 * @hw: pointer to the HW struct 5827 * @vsi_handle: software VSI handle 5828 * @tc_bitmap: 8 bits TC bitmap 5829 * 5830 * This function replays VSI type nodes bandwidth. This function needs to be 5831 * called with scheduler lock held. 5832 */ 5833 static enum ice_status 5834 ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle, 5835 ice_bitmap_t *tc_bitmap) 5836 { 5837 struct ice_sched_node *vsi_node, *tc_node; 5838 struct ice_port_info *pi = hw->port_info; 5839 struct ice_bw_type_info *bw_t_info; 5840 struct ice_vsi_ctx *vsi_ctx; 5841 enum ice_status status = ICE_SUCCESS; 5842 u8 tc; 5843 5844 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 5845 if (!vsi_ctx) 5846 return ICE_ERR_PARAM; 5847 ice_for_each_traffic_class(tc) { 5848 if (!ice_is_tc_ena(*tc_bitmap, tc)) 5849 continue; 5850 tc_node = ice_sched_get_tc_node(pi, tc); 5851 if (!tc_node) 5852 continue; 5853 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); 5854 if (!vsi_node) 5855 continue; 5856 bw_t_info = &vsi_ctx->sched.bw_t_info[tc]; 5857 status = ice_sched_replay_node_bw(hw, vsi_node, bw_t_info); 5858 if (status) 5859 break; 5860 } 5861 return status; 5862 } 5863 5864 /** 5865 * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s) 5866 * @hw: pointer to the HW struct 5867 * @vsi_handle: software 
VSI handle 5868 * 5869 * This function replays aggregator node, VSI to aggregator type nodes, and 5870 * their node bandwidth information. This function needs to be called with 5871 * scheduler lock held. 5872 */ 5873 static enum ice_status 5874 ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) 5875 { 5876 ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5877 struct ice_sched_agg_vsi_info *agg_vsi_info; 5878 struct ice_port_info *pi = hw->port_info; 5879 struct ice_sched_agg_info *agg_info; 5880 enum ice_status status; 5881 5882 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5883 if (!ice_is_vsi_valid(hw, vsi_handle)) 5884 return ICE_ERR_PARAM; 5885 agg_info = ice_get_vsi_agg_info(hw, vsi_handle); 5886 if (!agg_info) 5887 return ICE_SUCCESS; /* Not present in list - default Agg case */ 5888 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); 5889 if (!agg_vsi_info) 5890 return ICE_SUCCESS; /* Not present in list - default Agg case */ 5891 ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap, 5892 replay_bitmap); 5893 /* Replay aggregator node associated to vsi_handle */ 5894 status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id, 5895 ICE_AGG_TYPE_AGG, replay_bitmap); 5896 if (status) 5897 return status; 5898 /* Replay aggregator node BW (restore aggregator BW) */ 5899 status = ice_sched_replay_agg_bw(hw, agg_info); 5900 if (status) 5901 return status; 5902 5903 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5904 ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap, 5905 replay_bitmap); 5906 /* Move this VSI (vsi_handle) to above aggregator */ 5907 status = ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle, 5908 replay_bitmap); 5909 if (status) 5910 return status; 5911 /* Replay VSI BW (restore VSI BW) */ 5912 return ice_sched_replay_vsi_bw(hw, vsi_handle, 5913 agg_vsi_info->tc_bitmap); 5914 } 5915 5916 /** 5917 * ice_replay_vsi_agg - replay VSI to aggregator node 5918 * @hw: pointer to the HW 
struct 5919 * @vsi_handle: software VSI handle 5920 * 5921 * This function replays association of VSI to aggregator type nodes, and 5922 * node bandwidth information. 5923 */ 5924 enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) 5925 { 5926 struct ice_port_info *pi = hw->port_info; 5927 enum ice_status status; 5928 5929 ice_acquire_lock(&pi->sched_lock); 5930 status = ice_sched_replay_vsi_agg(hw, vsi_handle); 5931 ice_release_lock(&pi->sched_lock); 5932 return status; 5933 } 5934 5935 /** 5936 * ice_sched_replay_q_bw - replay queue type node BW 5937 * @pi: port information structure 5938 * @q_ctx: queue context structure 5939 * 5940 * This function replays queue type node bandwidth. This function needs to be 5941 * called with scheduler lock held. 5942 */ 5943 enum ice_status 5944 ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) 5945 { 5946 struct ice_sched_node *q_node; 5947 5948 /* Following also checks the presence of node in tree */ 5949 q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); 5950 if (!q_node) 5951 return ICE_ERR_PARAM; 5952 return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); 5953 } 5954