1 /* SPDX-License-Identifier: BSD-3-Clause */ 2 /* Copyright (c) 2023, Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Intel Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 #include "ice_sched.h" 33 34 /** 35 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB 36 * @pi: port information structure 37 * @info: Scheduler element information from firmware 38 * 39 * This function inserts the root node of the scheduling tree topology 40 * to the SW DB. 
41 */ 42 static enum ice_status 43 ice_sched_add_root_node(struct ice_port_info *pi, 44 struct ice_aqc_txsched_elem_data *info) 45 { 46 struct ice_sched_node *root; 47 struct ice_hw *hw; 48 49 if (!pi) 50 return ICE_ERR_PARAM; 51 52 hw = pi->hw; 53 54 root = (struct ice_sched_node *)ice_malloc(hw, sizeof(*root)); 55 if (!root) 56 return ICE_ERR_NO_MEMORY; 57 58 /* coverity[suspicious_sizeof] */ 59 root->children = (struct ice_sched_node **) 60 ice_calloc(hw, hw->max_children[0], sizeof(*root)); 61 if (!root->children) { 62 ice_free(hw, root); 63 return ICE_ERR_NO_MEMORY; 64 } 65 66 ice_memcpy(&root->info, info, sizeof(*info), ICE_DMA_TO_NONDMA); 67 pi->root = root; 68 return ICE_SUCCESS; 69 } 70 71 /** 72 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB 73 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree 74 * @teid: node TEID to search 75 * 76 * This function searches for a node matching the TEID in the scheduling tree 77 * from the SW DB. The search is recursive and is restricted by the number of 78 * layers it has searched through; stopping at the max supported layer. 
 *
 * This function needs to be called when holding the port_info->sched_lock
 */
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
{
	u16 i;

	/* The TEID is same as that of the start_node */
	if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
		return start_node;

	/* The node has no children or is at the max layer or is a leaf;
	 * nothing below it can match, stop descending here.
	 */
	if (!start_node->num_children ||
	    start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
	    start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
		return NULL;

	/* Check if TEID matches to any of the children nodes */
	for (i = 0; i < start_node->num_children; i++)
		if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
			return start_node->children[i];

	/* Search within each child's sub-tree */
	for (i = 0; i < start_node->num_children; i++) {
		struct ice_sched_node *tmp;

		tmp = ice_sched_find_node_by_teid(start_node->children[i],
						  teid);
		if (tmp)
			return tmp;
	}

	/* TEID not present anywhere in this sub-tree */
	return NULL;
}

/**
 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
 * @hw: pointer to the HW struct
 * @cmd_opc: cmd opcode
 * @elems_req: number of elements to request
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_resp: returns total number of elements response (optional, may be NULL)
 * @cd: pointer to command details structure or NULL
 *
 * This function sends a scheduling elements cmd (cmd_opc). On success the
 * number of elements the FW actually processed is written to @elems_resp.
 */
static enum ice_status
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
			    u16 elems_req, void *buf, u16 buf_size,
			    u16 *elems_resp, struct ice_sq_cd *cd)
{
	struct ice_aqc_sched_elem_cmd *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.sched_elem_cmd;
	ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
	cmd->num_elem_req = CPU_TO_LE16(elems_req);
	/* NOTE(review): RD flag is set unconditionally for every sched elem
	 * opcode routed through here — confirm against the AQ spec if adding
	 * a new opcode.
	 */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && elems_resp)
		*elems_resp = LE16_TO_CPU(cmd->num_elem_resp);

	return status;
}

/**
 * ice_aq_query_sched_elems - query scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements returned
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduling elements (0x0404)
 */
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
			 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
			 u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
 * @pi: port information structure
 * @layer: Scheduler layer of the node
 * @info: Scheduler element information from firmware
 *
 * This function inserts a scheduler node to the SW DB.
175 */ 176 enum ice_status 177 ice_sched_add_node(struct ice_port_info *pi, u8 layer, 178 struct ice_aqc_txsched_elem_data *info) 179 { 180 struct ice_aqc_txsched_elem_data elem; 181 struct ice_sched_node *parent; 182 struct ice_sched_node *node; 183 enum ice_status status; 184 struct ice_hw *hw; 185 186 if (!pi) 187 return ICE_ERR_PARAM; 188 189 hw = pi->hw; 190 191 /* A valid parent node should be there */ 192 parent = ice_sched_find_node_by_teid(pi->root, 193 LE32_TO_CPU(info->parent_teid)); 194 if (!parent) { 195 ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n", 196 LE32_TO_CPU(info->parent_teid)); 197 return ICE_ERR_PARAM; 198 } 199 200 /* query the current node information from FW before adding it 201 * to the SW DB 202 */ 203 status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem); 204 if (status) 205 return status; 206 node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node)); 207 if (!node) 208 return ICE_ERR_NO_MEMORY; 209 if (hw->max_children[layer]) { 210 /* coverity[suspicious_sizeof] */ 211 node->children = (struct ice_sched_node **) 212 ice_calloc(hw, hw->max_children[layer], sizeof(*node)); 213 if (!node->children) { 214 ice_free(hw, node); 215 return ICE_ERR_NO_MEMORY; 216 } 217 } 218 219 node->in_use = true; 220 node->parent = parent; 221 node->tx_sched_layer = layer; 222 parent->children[parent->num_children++] = node; 223 node->info = elem; 224 return ICE_SUCCESS; 225 } 226 227 /** 228 * ice_aq_delete_sched_elems - delete scheduler elements 229 * @hw: pointer to the HW struct 230 * @grps_req: number of groups to delete 231 * @buf: pointer to buffer 232 * @buf_size: buffer size in bytes 233 * @grps_del: returns total number of elements deleted 234 * @cd: pointer to command details structure or NULL 235 * 236 * Delete scheduling elements (0x040F) 237 */ 238 static enum ice_status 239 ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, 240 struct ice_aqc_delete_elem *buf, u16 buf_size, 241 u16 
 *grps_del, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_del, cd);
}

/**
 * ice_sched_remove_elems - remove nodes from HW
 * @hw: pointer to the HW struct
 * @parent: pointer to the parent node
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be deleted
 *
 * This function remove nodes from HW. All @num_nodes TEIDs are sent to FW
 * as a single delete group under @parent.
 */
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
		       u16 num_nodes, u32 *node_teids)
{
	struct ice_aqc_delete_elem *buf;
	u16 i, num_groups_removed = 0;
	enum ice_status status;
	u16 buf_size;

	/* header plus one TEID per node (flexible array sizing) */
	buf_size = ice_struct_size(buf, teid, num_nodes);
	buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = CPU_TO_LE32(node_teids[i]);

	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
					   &num_groups_removed, NULL);
	/* a group-count mismatch is only logged; status is returned as-is */
	if (status != ICE_SUCCESS || num_groups_removed != 1)
		ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
			  hw->adminq.sq_last_status);

	ice_free(hw, buf);
	return status;
}

/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @pi: port information structure
 * @parent: pointer the base node of the subtree
 * @layer: layer number
 *
 * This function retrieves the first node of the given layer from the subtree,
 * i.e. the head of the per-TC, per-layer sibling list.
 */
static struct ice_sched_node *
ice_sched_get_first_node(struct ice_port_info *pi,
			 struct ice_sched_node *parent, u8 layer)
{
	return pi->sib_head[parent->tc_num][layer];
}

/**
 * ice_sched_get_tc_node - get pointer to TC node
 * @pi: port information structure
 * @tc: TC number
 *
 * This function returns the TC node pointer, or NULL if no child of the
 * root matches @tc.
 */
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
{
	u8 i;

	if (!pi || !pi->root)
		return NULL;
	for (i = 0; i < pi->root->num_children; i++)
		if (pi->root->children[i]->tc_num == tc)
			return pi->root->children[i];
	return NULL;
}

/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node
	 * The parent array is updated below and that shifts the nodes
	 * in the array. So always pick the first child if num children > 0
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = LE32_TO_CPU(node->info.node_teid);

		/* best-effort HW removal; failure is only logged inside */
		ice_sched_remove_elems(hw, node->parent, 1, &teid);
	}
	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p;

		/* update the parent: shift the remaining children left to
		 * fill the hole left by this node
		 */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}

		/* unlink this node from the singly-linked sibling list */
		p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}

		/* update the sibling head if head is getting removed */
		if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
			pi->sib_head[node->tc_num][node->tx_sched_layer] =
				node->sibling;
	}

	/* leaf nodes have no children */
	if (node->children)
		ice_free(hw, node->children);
	ice_free(hw, node);
}

/**
 * ice_aq_get_dflt_topo - gets default scheduler topology
 * @hw: pointer to the HW struct
 * @lport: logical port number
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_branches: returns total number of queue to port branches
 * @cd: pointer to command details structure or NULL
 *
 * Get default scheduler topology (0x0400)
 */
static enum ice_status
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
		     u8 *num_branches, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_topo *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_topo;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
	cmd->port_num = lport;
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_branches)
		*num_branches = cmd->num_branches;

	return status;
}

/**
 * ice_aq_add_sched_elems - adds scheduling element
 * @hw: pointer to the HW struct
 * @grps_req: the number of groups that are requested to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_added: returns total number of groups added
 * @cd: pointer to command details structure or NULL
 *
 * Add scheduling elements (0x0401)
 */
static enum ice_status
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
		       struct ice_aqc_add_elem *buf, u16 buf_size,
		       u16 *grps_added, struct
 ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_added, cd);
}

/**
 * ice_aq_cfg_sched_elems - configures scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_cfgd: returns total number of elements configured
 * @cd: pointer to command details structure or NULL
 *
 * Configure scheduling elements (0x0403)
 */
static enum ice_status
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
		       struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
		       u16 *elems_cfgd, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_cfgd, cd);
}

/**
 * ice_aq_move_sched_elems - move scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to move
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_movd: returns total number of groups moved
 * @cd: pointer to command details structure or NULL
 *
 * Move scheduling elements (0x0408)
 */
enum ice_status
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
			struct ice_aqc_move_elem *buf, u16 buf_size,
			u16 *grps_movd, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_movd, cd);
}

/**
 * ice_aq_suspend_sched_elems - suspend scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to suspend
 * @buf: pointer to buffer of TEIDs (little-endian) to suspend
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements suspended
 * @cd: pointer to command details structure or NULL
 *
 * Suspend scheduling elements (0x0409)
 */
static enum ice_status
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_resume_sched_elems - resume scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to resume
 * @buf: pointer to buffer of TEIDs (little-endian) to resume
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements resumed
 * @cd: pointer to command details structure or NULL
 *
 * resume scheduling elements (0x040A)
 */
static enum ice_status
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_query_sched_res - query scheduler resource
 * @hw: pointer to the HW struct
 * @buf_size: buffer size in bytes
 * @buf: pointer to buffer
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduler resource allocation (0x0412)
 */
static enum ice_status
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
		       struct ice_aqc_query_txsched_res_resp *buf,
		       struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
 * @hw: pointer to the HW struct
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes HW nodes
 */
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
			       bool suspend)
{
	u16 i, buf_size, num_elem_ret = 0;
	enum ice_status status;
	__le32 *buf;

	buf_size = sizeof(*buf) * num_nodes;
	buf = (__le32 *)ice_malloc(hw, buf_size);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* FW expects TEIDs in little-endian */
	for (i = 0; i < num_nodes; i++)
		buf[i] = CPU_TO_LE32(node_teids[i]);

	if (suspend)
		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
						    buf_size, &num_elem_ret,
						    NULL);
	else
		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
						   buf_size, &num_elem_ret,
						   NULL);
	/* element-count mismatch is only logged; status is returned as-is */
	if (status != ICE_SUCCESS || num_elem_ret != num_nodes)
		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");

	ice_free(hw, buf);
	return status;
}

/**
 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 *
 * Allocates the per-TC LAN queue context array on first use, or grows it
 * (preserving existing entries) when @new_numqs exceeds the current size.
 */
static enum ice_status
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* allocate LAN queue contexts */
	if (!vsi_ctx->lan_q_ctx[tc]) {
		vsi_ctx->lan_q_ctx[tc] = (struct ice_q_ctx *)
			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
		if (!vsi_ctx->lan_q_ctx[tc])
			return ICE_ERR_NO_MEMORY;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
		return ICE_SUCCESS;
	}
	/* num queues are increased, update the queue contexts */
	if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_lan_q_entries[tc];

		q_ctx = (struct ice_q_ctx *)
			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
		if (!q_ctx)
			return ICE_ERR_NO_MEMORY;
		/* copy the old entries into the larger array, then swap */
		ice_memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
			   prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA);
		ice_free(hw, vsi_ctx->lan_q_ctx[tc]);
		vsi_ctx->lan_q_ctx[tc] = q_ctx;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
	}
	return ICE_SUCCESS;
}

/**
 * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 *
 * RDMA counterpart of ice_alloc_lan_q_ctx(); same allocate-or-grow policy.
 */
static enum ice_status
ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* allocate RDMA queue contexts */
	if (!vsi_ctx->rdma_q_ctx[tc]) {
		vsi_ctx->rdma_q_ctx[tc] = (struct ice_q_ctx *)
			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
		if (!vsi_ctx->rdma_q_ctx[tc])
			return ICE_ERR_NO_MEMORY;
		vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
		return ICE_SUCCESS;
	}
	/* num queues are increased, update the queue contexts */
	if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_rdma_q_entries[tc];

		q_ctx = (struct ice_q_ctx *)
			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
		if (!q_ctx)
			return ICE_ERR_NO_MEMORY;
		/* copy the old entries into the larger array, then swap */
		ice_memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
			   prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA);
		ice_free(hw, vsi_ctx->rdma_q_ctx[tc]);
		vsi_ctx->rdma_q_ctx[tc] = q_ctx;
		vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
	}
	return ICE_SUCCESS;
}

/**
 * ice_aq_rl_profile - performs a rate limiting task
 * @hw: pointer to the HW struct
 * @opcode: opcode for add, query, or remove profile(s)
 * @num_profiles: the number of profiles
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_processed: number of processed add or remove profile(s) to return
 * @cd: pointer to command details structure
 *
 * RL profile function to add,
query, or remove profile(s) 675 */ 676 static enum ice_status 677 ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, 678 u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, 679 u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd) 680 { 681 struct ice_aqc_rl_profile *cmd; 682 struct ice_aq_desc desc; 683 enum ice_status status; 684 685 cmd = &desc.params.rl_profile; 686 687 ice_fill_dflt_direct_cmd_desc(&desc, opcode); 688 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 689 cmd->num_profiles = CPU_TO_LE16(num_profiles); 690 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 691 if (!status && num_processed) 692 *num_processed = LE16_TO_CPU(cmd->num_processed); 693 return status; 694 } 695 696 /** 697 * ice_aq_add_rl_profile - adds rate limiting profile(s) 698 * @hw: pointer to the HW struct 699 * @num_profiles: the number of profile(s) to be add 700 * @buf: pointer to buffer 701 * @buf_size: buffer size in bytes 702 * @num_profiles_added: total number of profiles added to return 703 * @cd: pointer to command details structure 704 * 705 * Add RL profile (0x0410) 706 */ 707 static enum ice_status 708 ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, 709 struct ice_aqc_rl_profile_elem *buf, u16 buf_size, 710 u16 *num_profiles_added, struct ice_sq_cd *cd) 711 { 712 return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles, 713 buf, buf_size, num_profiles_added, cd); 714 } 715 716 /** 717 * ice_aq_query_rl_profile - query rate limiting profile(s) 718 * @hw: pointer to the HW struct 719 * @num_profiles: the number of profile(s) to query 720 * @buf: pointer to buffer 721 * @buf_size: buffer size in bytes 722 * @cd: pointer to command details structure 723 * 724 * Query RL profile (0x0411) 725 */ 726 enum ice_status 727 ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles, 728 struct ice_aqc_rl_profile_elem *buf, u16 buf_size, 729 struct ice_sq_cd *cd) 730 { 731 return ice_aq_rl_profile(hw, ice_aqc_opc_query_rl_profiles, 732 
num_profiles, buf, buf_size, NULL, cd); 733 } 734 735 /** 736 * ice_aq_remove_rl_profile - removes RL profile(s) 737 * @hw: pointer to the HW struct 738 * @num_profiles: the number of profile(s) to remove 739 * @buf: pointer to buffer 740 * @buf_size: buffer size in bytes 741 * @num_profiles_removed: total number of profiles removed to return 742 * @cd: pointer to command details structure or NULL 743 * 744 * Remove RL profile (0x0415) 745 */ 746 static enum ice_status 747 ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, 748 struct ice_aqc_rl_profile_elem *buf, u16 buf_size, 749 u16 *num_profiles_removed, struct ice_sq_cd *cd) 750 { 751 return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles, 752 num_profiles, buf, buf_size, 753 num_profiles_removed, cd); 754 } 755 756 /** 757 * ice_sched_del_rl_profile - remove RL profile 758 * @hw: pointer to the HW struct 759 * @rl_info: rate limit profile information 760 * 761 * If the profile ID is not referenced anymore, it removes profile ID with 762 * its associated parameters from HW DB,and locally. The caller needs to 763 * hold scheduler lock. 
 */
static enum ice_status
ice_sched_del_rl_profile(struct ice_hw *hw,
			 struct ice_aqc_rl_profile_info *rl_info)
{
	struct ice_aqc_rl_profile_elem *buf;
	u16 num_profiles_removed;
	enum ice_status status;
	u16 num_profiles = 1;

	/* still referenced by at least one node - refuse to delete */
	if (rl_info->prof_id_ref != 0)
		return ICE_ERR_IN_USE;

	/* Safe to remove profile ID */
	buf = &rl_info->profile;
	status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
					  &num_profiles_removed, NULL);
	if (status || num_profiles_removed != num_profiles)
		return ICE_ERR_CFG;

	/* Delete stale entry now */
	LIST_DEL(&rl_info->list_entry);
	ice_free(hw, rl_info);
	return status;
}

/**
 * ice_sched_clear_rl_prof - clears RL prof entries
 * @pi: port information structure
 *
 * This function removes all RL profile from HW as well as from SW DB.
 */
static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
{
	u16 ln;
	struct ice_hw *hw = pi->hw;

	for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
		struct ice_aqc_rl_profile_info *rl_prof_elem;
		struct ice_aqc_rl_profile_info *rl_prof_tmp;

		LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
					 &hw->rl_prof_list[ln],
					 ice_aqc_rl_profile_info, list_entry) {
			enum ice_status status;

			/* force-clear the refcount so deletion proceeds */
			rl_prof_elem->prof_id_ref = 0;
			status = ice_sched_del_rl_profile(hw, rl_prof_elem);
			if (status) {
				ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
				/* On error, free mem required */
				LIST_DEL(&rl_prof_elem->list_entry);
				ice_free(hw, rl_prof_elem);
			}
		}
	}
}

/**
 * ice_sched_clear_agg - clears the aggregator related information
 * @hw: pointer to the hardware structure
 *
 * This function removes aggregator list and free up aggregator related memory
 * previously allocated.
 */
void ice_sched_clear_agg(struct ice_hw *hw)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &hw->agg_list,
				 ice_sched_agg_info,
				 list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		/* free the per-aggregator VSI list before the aggregator */
		LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list,
					 ice_sched_agg_vsi_info, list_entry) {
			LIST_DEL(&agg_vsi_info->list_entry);
			ice_free(hw, agg_vsi_info);
		}
		LIST_DEL(&agg_info->list_entry);
		ice_free(hw, agg_info);
	}
}

/**
 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 * @pi: port information structure
 *
 * This function removes all the nodes from HW as well as from SW DB.
 */
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
	if (!pi)
		return;
	/* remove RL profiles related lists */
	ice_sched_clear_rl_prof(pi);
	if (pi->root) {
		/* recursively frees the whole tree */
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}
}

/**
 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 * @pi: port information structure
 *
 * Cleanup scheduling elements from SW DB
 */
void ice_sched_clear_port(struct ice_port_info *pi)
{
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return;

	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
	ice_acquire_lock(&pi->sched_lock);
	ice_sched_clear_tx_topo(pi);
	ice_release_lock(&pi->sched_lock);
	ice_destroy_lock(&pi->sched_lock);
}

/**
 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 * @hw: pointer to the HW struct
 *
 * Cleanup scheduling elements from SW DB for all the ports
 */
void ice_sched_cleanup_all(struct ice_hw *hw)
{
	if (!hw)
		return;

	if (hw->layer_info) {
		ice_free(hw, hw->layer_info);
		hw->layer_info = NULL;
	}

	ice_sched_clear_port(hw->port_info);

	hw->num_tx_sched_layers = 0;
	hw->num_tx_sched_phys_layers = 0;
	hw->flattened_layers = 0;
	hw->max_cgds = 0;
}

/**
 * ice_aq_cfg_node_attr - configure nodes' per-cone flattening attributes
 * @hw: pointer to the HW struct
 * @num_nodes: the number of nodes whose attributes to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Configure Node Attributes (0x0417)
 */
enum ice_status
ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
		     struct ice_aqc_node_attr_elem *buf, u16 buf_size,
		     struct ice_sq_cd *cd)
{
	struct ice_aqc_node_attr *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.node_attr;
	ice_fill_dflt_direct_cmd_desc(&desc,
				      ice_aqc_opc_cfg_node_attr);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_entries = CPU_TO_LE16(num_nodes);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping
 * @hw: pointer to the HW struct
 * @num_l2_nodes: the number of L2 nodes whose CGDs to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Configure L2 Node CGD (0x0414)
 */
enum ice_status
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
		       struct ice_aqc_cfg_l2_node_cgd_elem *buf,
		       u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_cfg_l2_node_cgd *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.cfg_l2_node_cgd;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_l2_node_cgd);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_l2_nodes = CPU_TO_LE16(num_l2_nodes);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_sched_add_elems - add nodes to HW and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the TEID of first node
 *
 * This function add nodes to HW as well as to SW DB for a given layer
 */
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
		    u16 *num_nodes_added, u32 *first_node_teid)
{
	struct ice_sched_node *prev, *new_node;
	struct ice_aqc_add_elem *buf;
	u16 i, num_groups_added = 0;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u16 buf_size;
	u32 teid;

	/* header plus one generic element per node (flexible array sizing) */
	buf_size = ice_struct_size(buf, generic, num_nodes);
	buf = (struct ice_aqc_add_elem *)ice_malloc(hw, buf_size);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
	for (i = 0; i < num_nodes; i++) {
		/* every new node is a generic SE with default CIR/EIR
		 * bandwidth profile and default weight
		 */
		buf->generic[i].parent_teid = parent->info.node_teid;
		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
		buf->generic[i].data.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->generic[i].data.generic = 0;
		buf->generic[i].data.cir_bw.bw_profile_idx =
			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.cir_bw.bw_alloc =
			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
		buf->generic[i].data.eir_bw.bw_profile_idx =
			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.eir_bw.bw_alloc =
			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
	}

	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
					&num_groups_added, NULL);
	if (status != ICE_SUCCESS || num_groups_added != 1) {
		ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
			  hw->adminq.sq_last_status);
		ice_free(hw, buf);
		return ICE_ERR_CFG;
	}

	*num_nodes_added = num_nodes;
	/* add nodes to the SW DB */
	for (i = 0; i < num_nodes; i++) {
		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
		if (status != ICE_SUCCESS) {
			ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
				  status);
			break;
		}

		teid = LE32_TO_CPU(buf->generic[i].node_teid);
		new_node = ice_sched_find_node_by_teid(parent, teid);
		if (!new_node) {
			ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid);
			break;
		}

		new_node->sibling = NULL;
		new_node->tc_num = tc_node->tc_num;

		/* add it to previous node sibling pointer */
		/* Note: siblings are not linked across branches */
		prev = ice_sched_get_first_node(pi, tc_node, layer);
		if (prev && prev != new_node) {
			/* walk to the tail of the sibling list and append */
			while (prev->sibling)
				prev = prev->sibling;
			prev->sibling = new_node;
		}

		/* initialize the sibling head */
		if (!pi->sib_head[tc_node->tc_num][layer])
			pi->sib_head[tc_node->tc_num][layer] = new_node;

		if (i == 0)
			*first_node_teid = teid;
	}

	ice_free(hw, buf);
	return status;
}

/**
 * ice_sched_add_nodes_to_hw_layer - Add nodes to hw layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * Add nodes into specific hw layer.
1074 */ 1075 static enum ice_status 1076 ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, 1077 struct ice_sched_node *tc_node, 1078 struct ice_sched_node *parent, u8 layer, 1079 u16 num_nodes, u32 *first_node_teid, 1080 u16 *num_nodes_added) 1081 { 1082 u16 max_child_nodes; 1083 1084 *num_nodes_added = 0; 1085 1086 if (!num_nodes) 1087 return ICE_SUCCESS; 1088 1089 if (!parent || layer < pi->hw->sw_entry_point_layer) 1090 return ICE_ERR_PARAM; 1091 1092 /* max children per node per layer */ 1093 max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; 1094 1095 /* current number of children + required nodes exceed max children */ 1096 if ((parent->num_children + num_nodes) > max_child_nodes) { 1097 /* Fail if the parent is a TC node */ 1098 if (parent == tc_node) 1099 return ICE_ERR_CFG; 1100 return ICE_ERR_MAX_LIMIT; 1101 } 1102 1103 return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, 1104 num_nodes_added, first_node_teid); 1105 } 1106 1107 /** 1108 * ice_sched_add_nodes_to_layer - Add nodes to a given layer 1109 * @pi: port information structure 1110 * @tc_node: pointer to TC node 1111 * @parent: pointer to parent node 1112 * @layer: layer number to add nodes 1113 * @num_nodes: number of nodes to be added 1114 * @first_node_teid: pointer to the first node TEID 1115 * @num_nodes_added: pointer to number of nodes added 1116 * 1117 * This function add nodes to a given layer. 
1118 */ 1119 static enum ice_status 1120 ice_sched_add_nodes_to_layer(struct ice_port_info *pi, 1121 struct ice_sched_node *tc_node, 1122 struct ice_sched_node *parent, u8 layer, 1123 u16 num_nodes, u32 *first_node_teid, 1124 u16 *num_nodes_added) 1125 { 1126 u32 *first_teid_ptr = first_node_teid; 1127 u16 new_num_nodes = num_nodes; 1128 enum ice_status status = ICE_SUCCESS; 1129 1130 *num_nodes_added = 0; 1131 while (*num_nodes_added < num_nodes) { 1132 u16 max_child_nodes, num_added = 0; 1133 u32 temp; 1134 1135 status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent, 1136 layer, new_num_nodes, 1137 first_teid_ptr, 1138 &num_added); 1139 if (status == ICE_SUCCESS) 1140 *num_nodes_added += num_added; 1141 /* added more nodes than requested ? */ 1142 if (*num_nodes_added > num_nodes) { 1143 ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes, 1144 *num_nodes_added); 1145 status = ICE_ERR_CFG; 1146 break; 1147 } 1148 /* break if all the nodes are added successfully */ 1149 if (status == ICE_SUCCESS && (*num_nodes_added == num_nodes)) 1150 break; 1151 /* break if the error is not max limit */ 1152 if (status != ICE_SUCCESS && status != ICE_ERR_MAX_LIMIT) 1153 break; 1154 /* Exceeded the max children */ 1155 max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; 1156 /* utilize all the spaces if the parent is not full */ 1157 if (parent->num_children < max_child_nodes) { 1158 new_num_nodes = max_child_nodes - parent->num_children; 1159 } else { 1160 /* This parent is full, try the next sibling */ 1161 parent = parent->sibling; 1162 /* Don't modify the first node TEID memory if the 1163 * first node was added already in the above call. 1164 * Instead send some temp memory for all other 1165 * recursive calls. 
1166 */ 1167 if (num_added) 1168 first_teid_ptr = &temp; 1169 1170 new_num_nodes = num_nodes - *num_nodes_added; 1171 } 1172 } 1173 return status; 1174 } 1175 1176 /** 1177 * ice_sched_get_qgrp_layer - get the current queue group layer number 1178 * @hw: pointer to the HW struct 1179 * 1180 * This function returns the current queue group layer number 1181 */ 1182 static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw) 1183 { 1184 /* It's always total layers - 1, the array is 0 relative so -2 */ 1185 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET; 1186 } 1187 1188 /** 1189 * ice_sched_get_vsi_layer - get the current VSI layer number 1190 * @hw: pointer to the HW struct 1191 * 1192 * This function returns the current VSI layer number 1193 */ 1194 static u8 ice_sched_get_vsi_layer(struct ice_hw *hw) 1195 { 1196 /* Num Layers VSI layer 1197 * 9 6 1198 * 7 4 1199 * 5 or less sw_entry_point_layer 1200 */ 1201 /* calculate the VSI layer based on number of layers. */ 1202 if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) 1203 return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; 1204 else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) 1205 /* qgroup and VSI layers are same */ 1206 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET; 1207 return hw->sw_entry_point_layer; 1208 } 1209 1210 /** 1211 * ice_sched_get_agg_layer - get the current aggregator layer number 1212 * @hw: pointer to the HW struct 1213 * 1214 * This function returns the current aggregator layer number 1215 */ 1216 static u8 ice_sched_get_agg_layer(struct ice_hw *hw) 1217 { 1218 /* Num Layers aggregator layer 1219 * 9 4 1220 * 7 or less sw_entry_point_layer 1221 */ 1222 /* calculate the aggregator layer based on number of layers. 
*/ 1223 if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) 1224 return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; 1225 return hw->sw_entry_point_layer; 1226 } 1227 1228 /** 1229 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree 1230 * @pi: port information structure 1231 * 1232 * This function removes the leaf node that was created by the FW 1233 * during initialization 1234 */ 1235 static void ice_rm_dflt_leaf_node(struct ice_port_info *pi) 1236 { 1237 struct ice_sched_node *node; 1238 1239 node = pi->root; 1240 while (node) { 1241 if (!node->num_children) 1242 break; 1243 node = node->children[0]; 1244 } 1245 if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) { 1246 u32 teid = LE32_TO_CPU(node->info.node_teid); 1247 enum ice_status status; 1248 1249 /* remove the default leaf node */ 1250 status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid); 1251 if (!status) 1252 ice_free_sched_node(pi, node); 1253 } 1254 } 1255 1256 /** 1257 * ice_sched_rm_dflt_nodes - free the default nodes in the tree 1258 * @pi: port information structure 1259 * 1260 * This function frees all the nodes except root and TC that were created by 1261 * the FW during initialization 1262 */ 1263 static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi) 1264 { 1265 struct ice_sched_node *node; 1266 1267 ice_rm_dflt_leaf_node(pi); 1268 1269 /* remove the default nodes except TC and root nodes */ 1270 node = pi->root; 1271 while (node) { 1272 if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer && 1273 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC && 1274 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) { 1275 ice_free_sched_node(pi, node); 1276 break; 1277 } 1278 1279 if (!node->num_children) 1280 break; 1281 node = node->children[0]; 1282 } 1283 } 1284 1285 /** 1286 * ice_sched_init_port - Initialize scheduler by querying information from FW 1287 * @pi: port info structure for the tree to cleanup 1288 * 1289 * This function is the 
initial call to find the total number of Tx scheduler
 * resources, default topology created by firmware and storing the information
 * in SW DB.
 */
enum ice_status ice_sched_init_port(struct ice_port_info *pi)
{
	struct ice_aqc_get_topo_elem *buf;
	enum ice_status status;
	struct ice_hw *hw;
	u8 num_branches;
	u16 num_elems;
	u8 i, j;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = (struct ice_aqc_get_topo_elem *)ice_malloc(hw,
							 ICE_AQ_MAX_BUF_LEN);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Query default scheduling tree topology */
	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
				      &num_branches, NULL);
	if (status)
		goto err_init_port;

	/* num_branches should be between 1-8 */
	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
			  num_branches);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* get the number of elements on the default/first branch */
	num_elems = LE16_TO_CPU(buf[0].hdr.num_elems);

	/* num_elems should always be between 1-9 */
	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
			  num_elems);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* If the last node is a leaf node then the index of the queue group
	 * layer is two less than the number of elements.
	 */
	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
	    ICE_AQC_ELEM_TYPE_LEAF)
		pi->last_node_teid =
			LE32_TO_CPU(buf[0].generic[num_elems - 2].node_teid);
	else
		pi->last_node_teid =
			LE32_TO_CPU(buf[0].generic[num_elems - 1].node_teid);

	/* Insert the Tx Sched root node */
	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
	if (status)
		goto err_init_port;

	/* Parse the default tree and cache the information */
	for (i = 0; i < num_branches; i++) {
		num_elems = LE16_TO_CPU(buf[i].hdr.num_elems);

		/* Skip root element as already inserted */
		for (j = 1; j < num_elems; j++) {
			/* update the sw entry point */
			/* NOTE(review): this reads buf[0], not buf[i] — the
			 * entry point is taken from the first branch on every
			 * pass. Looks intentional (all branches share the
			 * layer layout) but verify against FW documentation.
			 */
			if (buf[0].generic[j].data.elem_type ==
			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
				hw->sw_entry_point_layer = j;

			status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
			if (status)
				goto err_init_port;
		}
	}

	/* Remove the default nodes. */
	if (pi->root)
		ice_sched_rm_dflt_nodes(pi);

	/* initialize the port for handling the scheduler tree */
	pi->port_state = ICE_SCHED_PORT_STATE_READY;
	ice_init_lock(&pi->sched_lock);
	for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
		INIT_LIST_HEAD(&hw->rl_prof_list[i]);

err_init_port:
	/* on any failure tear down whatever portion of the tree was built */
	if (status && pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}

	ice_free(hw, buf);
	return status;
}

/**
 * ice_sched_get_node - Get the struct ice_sched_node for given TEID
 * @pi: port information structure
 * @teid: Scheduler node TEID
 *
 * This function retrieves the ice_sched_node struct for given TEID from
 * the SW DB and returns it to the caller.
1397 */ 1398 struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid) 1399 { 1400 struct ice_sched_node *node; 1401 1402 if (!pi) 1403 return NULL; 1404 1405 /* Find the node starting from root */ 1406 ice_acquire_lock(&pi->sched_lock); 1407 node = ice_sched_find_node_by_teid(pi->root, teid); 1408 ice_release_lock(&pi->sched_lock); 1409 1410 if (!node) 1411 ice_debug(pi->hw, ICE_DBG_SCHED, "Node not found for teid=0x%x\n", teid); 1412 1413 return node; 1414 } 1415 1416 /** 1417 * ice_sched_query_res_alloc - query the FW for num of logical sched layers 1418 * @hw: pointer to the HW struct 1419 * 1420 * query FW for allocated scheduler resources and store in HW struct 1421 */ 1422 enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) 1423 { 1424 struct ice_aqc_query_txsched_res_resp *buf; 1425 enum ice_status status = ICE_SUCCESS; 1426 __le16 max_sibl; 1427 u8 i; 1428 1429 if (hw->layer_info) 1430 return status; 1431 1432 buf = (struct ice_aqc_query_txsched_res_resp *) 1433 ice_malloc(hw, sizeof(*buf)); 1434 if (!buf) 1435 return ICE_ERR_NO_MEMORY; 1436 1437 status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL); 1438 if (status) 1439 goto sched_query_out; 1440 1441 hw->num_tx_sched_layers = 1442 (u8)LE16_TO_CPU(buf->sched_props.logical_levels); 1443 hw->num_tx_sched_phys_layers = 1444 (u8)LE16_TO_CPU(buf->sched_props.phys_levels); 1445 hw->flattened_layers = buf->sched_props.flattening_bitmap; 1446 hw->max_cgds = buf->sched_props.max_pf_cgds; 1447 1448 /* max sibling group size of current layer refers to the max children 1449 * of the below layer node. 1450 * layer 1 node max children will be layer 2 max sibling group size 1451 * layer 2 node max children will be layer 3 max sibling group size 1452 * and so on. This array will be populated from root (index 0) to 1453 * qgroup layer 7. Leaf node has no children. 
1454 */ 1455 for (i = 0; i < hw->num_tx_sched_layers - 1; i++) { 1456 max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz; 1457 hw->max_children[i] = LE16_TO_CPU(max_sibl); 1458 } 1459 1460 hw->layer_info = (struct ice_aqc_layer_props *) 1461 ice_memdup(hw, buf->layer_props, 1462 (hw->num_tx_sched_layers * 1463 sizeof(*hw->layer_info)), 1464 ICE_NONDMA_TO_NONDMA); 1465 if (!hw->layer_info) { 1466 status = ICE_ERR_NO_MEMORY; 1467 goto sched_query_out; 1468 } 1469 1470 sched_query_out: 1471 ice_free(hw, buf); 1472 return status; 1473 } 1474 1475 /** 1476 * ice_sched_get_psm_clk_freq - determine the PSM clock frequency 1477 * @hw: pointer to the HW struct 1478 * 1479 * Determine the PSM clock frequency and store in HW struct 1480 */ 1481 void ice_sched_get_psm_clk_freq(struct ice_hw *hw) 1482 { 1483 u32 val, clk_src; 1484 1485 val = rd32(hw, GLGEN_CLKSTAT_SRC); 1486 clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >> 1487 GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S; 1488 1489 #define PSM_CLK_SRC_367_MHZ 0x0 1490 #define PSM_CLK_SRC_416_MHZ 0x1 1491 #define PSM_CLK_SRC_446_MHZ 0x2 1492 #define PSM_CLK_SRC_390_MHZ 0x3 1493 1494 switch (clk_src) { 1495 case PSM_CLK_SRC_367_MHZ: 1496 hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ; 1497 break; 1498 case PSM_CLK_SRC_416_MHZ: 1499 hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ; 1500 break; 1501 case PSM_CLK_SRC_446_MHZ: 1502 hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ; 1503 break; 1504 case PSM_CLK_SRC_390_MHZ: 1505 hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ; 1506 break; 1507 default: 1508 ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n", 1509 clk_src); 1510 /* fall back to a safe default */ 1511 hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ; 1512 } 1513 } 1514 1515 /** 1516 * ice_sched_find_node_in_subtree - Find node in part of base node subtree 1517 * @hw: pointer to the HW struct 1518 * @base: pointer to the base node 1519 * @node: pointer to the node to search 1520 * 1521 * This function checks whether a given node is part of 
the base node
 * subtree or not
 */
bool
ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
			       struct ice_sched_node *node)
{
	u8 i;

	for (i = 0; i < base->num_children; i++) {
		struct ice_sched_node *child = base->children[i];

		if (node == child)
			return true;

		/* children sit below their parent; once a child is deeper
		 * than the target's layer, the target cannot be in this
		 * subtree
		 */
		if (child->tx_sched_layer > node->tx_sched_layer)
			return false;

		/* this recursion is intentional, and wouldn't
		 * go more than 8 calls
		 */
		if (ice_sched_find_node_in_subtree(hw, child, node))
			return true;
	}
	return false;
}

/**
 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
 * @pi: port information structure
 * @vsi_node: pointer to the VSI node whose subtree limits the scan
 * @qgrp_node: first queue group node identified for scanning
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node by scanning
 * qgrp_node and its siblings for the queue group with the fewest number
 * of queues currently assigned.
 */
static struct ice_sched_node *
ice_sched_get_free_qgrp(struct ice_port_info *pi,
			struct ice_sched_node *vsi_node,
			struct ice_sched_node *qgrp_node, u8 owner)
{
	struct ice_sched_node *min_qgrp;
	u8 min_children;

	if (!qgrp_node)
		return qgrp_node;
	/* the starting candidate is already empty - cannot do better */
	min_children = qgrp_node->num_children;
	if (!min_children)
		return qgrp_node;
	min_qgrp = qgrp_node;
	/* scan all queue groups until find a node which has less than the
	 * minimum number of children. This way all queue group nodes get
	 * equal number of shares and active. The bandwidth will be equally
	 * distributed across all queues.
	 */
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < min_children &&
			    qgrp_node->owner == owner) {
				/* replace the new min queue group node */
				min_qgrp = qgrp_node;
				min_children = min_qgrp->num_children;
				/* break if it has no children, */
				if (!min_children)
					break;
			}
		qgrp_node = qgrp_node->sibling;
	}
	return min_qgrp;
}

/**
 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: branch number
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node
 */
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			   u8 owner)
{
	struct ice_sched_node *vsi_node, *qgrp_node;
	struct ice_vsi_ctx *vsi_ctx;
	u8 qgrp_layer, vsi_layer;
	u16 max_children;

	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
	vsi_layer = ice_sched_get_vsi_layer(pi->hw);
	max_children = pi->hw->max_children[qgrp_layer];

	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return NULL;
	vsi_node = vsi_ctx->sched.vsi_node[tc];
	/* validate invalid VSI ID */
	if (!vsi_node)
		return NULL;

	/* If the queue group and vsi layer are same then queues
	 * are all attached directly to VSI
	 */
	if (qgrp_layer == vsi_layer)
		return vsi_node;

	/* get the first queue group node from VSI sub-tree */
	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < max_children &&
qgrp_node->owner == owner) 1638 break; 1639 qgrp_node = qgrp_node->sibling; 1640 } 1641 1642 /* Select the best queue group */ 1643 return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner); 1644 } 1645 1646 /** 1647 * ice_sched_get_vsi_node - Get a VSI node based on VSI ID 1648 * @pi: pointer to the port information structure 1649 * @tc_node: pointer to the TC node 1650 * @vsi_handle: software VSI handle 1651 * 1652 * This function retrieves a VSI node for a given VSI ID from a given 1653 * TC branch 1654 */ 1655 struct ice_sched_node * 1656 ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, 1657 u16 vsi_handle) 1658 { 1659 struct ice_sched_node *node; 1660 u8 vsi_layer; 1661 1662 vsi_layer = ice_sched_get_vsi_layer(pi->hw); 1663 node = ice_sched_get_first_node(pi, tc_node, vsi_layer); 1664 1665 /* Check whether it already exists */ 1666 while (node) { 1667 if (node->vsi_handle == vsi_handle) 1668 return node; 1669 node = node->sibling; 1670 } 1671 1672 return node; 1673 } 1674 1675 /** 1676 * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID 1677 * @pi: pointer to the port information structure 1678 * @tc_node: pointer to the TC node 1679 * @agg_id: aggregator ID 1680 * 1681 * This function retrieves an aggregator node for a given aggregator ID from 1682 * a given TC branch 1683 */ 1684 static struct ice_sched_node * 1685 ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, 1686 u32 agg_id) 1687 { 1688 struct ice_sched_node *node; 1689 struct ice_hw *hw = pi->hw; 1690 u8 agg_layer; 1691 1692 if (!hw) 1693 return NULL; 1694 agg_layer = ice_sched_get_agg_layer(hw); 1695 node = ice_sched_get_first_node(pi, tc_node, agg_layer); 1696 1697 /* Check whether it already exists */ 1698 while (node) { 1699 if (node->agg_id == agg_id) 1700 return node; 1701 node = node->sibling; 1702 } 1703 1704 return node; 1705 } 1706 1707 /** 1708 * ice_sched_check_node - Compare node parameters between SW 
DB and HW DB
 * @hw: pointer to the HW struct
 * @node: pointer to the ice_sched_node struct
 *
 * This function queries and compares the HW element with SW DB node parameters
 */
static bool ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node)
{
	struct ice_aqc_txsched_elem_data buf;
	enum ice_status status;
	u32 node_teid;

	node_teid = LE32_TO_CPU(node->info.node_teid);
	status = ice_sched_query_elem(hw, node_teid, &buf);
	if (status != ICE_SUCCESS)
		return false;

	/* byte-wise comparison of the whole element data */
	if (memcmp(&buf, &node->info, sizeof(buf))) {
		ice_debug(hw, ICE_DBG_SCHED, "Node mismatch for teid=0x%x\n",
			  node_teid);
		return false;
	}

	return true;
}

/**
 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
 * @hw: pointer to the HW struct
 * @num_qs: number of queues
 * @num_nodes: num nodes array, indexed by layer; filled from the queue
 *             group layer up to (but not including) the VSI layer
 *
 * This function calculates the number of VSI child nodes based on the
 * number of queues.
 */
static void
ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
{
	u16 num = num_qs;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);

	/* calculate num nodes from queue group to VSI layer */
	for (i = qgl; i > vsil; i--) {
		/* round to the next integer if there is a remainder */
		num = DIVIDE_AND_ROUND_UP(num, hw->max_children[i]);

		/* need at least one node */
		num_nodes[i] = num ? num : 1;
	}
}

/**
 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to the TC node
 * @num_nodes: pointer to the num nodes that needs to be added per layer
 * @owner: node owner (LAN or RDMA)
 *
 * This function adds the VSI child nodes to tree.
It gets called for
 * LAN and RDMA separately.
 */
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
			      struct ice_sched_node *tc_node, u16 *num_nodes,
			      u8 owner)
{
	struct ice_sched_node *parent, *node;
	struct ice_hw *hw = pi->hw;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);
	parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	/* add the requested nodes layer by layer, from just below the VSI
	 * layer down to the queue group layer
	 */
	for (i = vsil + 1; i <= qgl; i++) {
		enum ice_status status;

		if (!parent)
			return ICE_ERR_CFG;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* tag the new node and all its siblings with the
			 * requesting owner (LAN or RDMA)
			 */
			node = parent;
			while (node) {
				node->owner = owner;
				node = node->sibling;
			}
		} else {
			parent = parent->children[0];
		}
	}

	return ICE_SUCCESS;
}

/**
 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
 * @pi: pointer to the port info structure
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function calculates the number of supported nodes needed to add this
 * VSI into Tx tree including the VSI, parent and intermediate nodes in below
 * layers
 */
static void
ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
				 struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *node;
	u8 vsil;
	int i;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	/* walk from the VSI layer up towards the SW entry point layer */
	for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
		/* Add intermediate nodes if TC has no children and
		 * need at least one node for VSI
		 */
		if (!tc_node->num_children || i == vsil) {
			num_nodes[i]++;
		} else {
			/* If intermediate nodes are reached max children
			 * then add a new one.
			 */
			node = ice_sched_get_first_node(pi, tc_node, (u8)i);
			/* scan all the siblings */
			while (node) {
				if (node->num_children <
				    pi->hw->max_children[i])
					break;
				node = node->sibling;
			}

			/* tree has one intermediate node to add this new VSI.
			 * So no need to calculate supported nodes for below
			 * layers.
			 */
			if (node)
				break;
			/* all the nodes are full, allocate a new one */
			num_nodes[i]++;
		}
}

/**
 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function adds the VSI supported nodes into Tx tree including the
 * VSI, its parent and intermediate nodes in below layers
 */
static enum ice_status
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
				struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *parent = tc_node;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, vsil;

	if (!pi)
		return ICE_ERR_PARAM;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	/* add nodes from the SW entry point layer down to the VSI layer */
	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
		enum ice_status status;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      i, num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added)
			parent =
ice_sched_find_node_by_teid(tc_node,
							    first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;

		/* the node at the VSI layer carries the software VSI handle */
		if (i == vsil)
			parent->vsi_handle = vsi_handle;
	}

	return ICE_SUCCESS;
}

/**
 * ice_sched_add_vsi_to_topo - add a new VSI into tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 *
 * This function adds a new VSI into scheduler tree
 */
static enum ice_status
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *tc_node;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;

	/* calculate number of supported nodes needed for this VSI */
	ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);

	/* add VSI supported nodes to TC subtree */
	return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
					       num_nodes);
}

/**
 * ice_sched_update_vsi_child_nodes - update VSI child nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @new_numqs: new number of max queues
 * @owner: owner of this subtree (LAN or RDMA)
 *
 * This function updates the VSI child nodes based on the number of queues
 */
static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
				 u8 tc, u16 new_numqs, u8 owner)
{
	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *vsi_node;
	struct ice_sched_node *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u16 prev_numqs;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return ICE_ERR_CFG;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;

	if (owner == ICE_SCHED_NODE_OWNER_LAN)
		prev_numqs = vsi_ctx->sched.max_lanq[tc];
	else
		prev_numqs = vsi_ctx->sched.max_rdmaq[tc];
	/* num queues are not changed or less than the previous number */
	if (new_numqs <= prev_numqs)
		return status;
	if (owner == ICE_SCHED_NODE_OWNER_LAN) {
		status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
		if (status)
			return status;
	} else {
		status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);
		if (status)
			return status;
	}

	if (new_numqs)
		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
	/* Keep the max number of queue configuration all the time. Update the
	 * tree only if number of queues > previous number of queues. This may
	 * leave some extra nodes in the tree if number of queues < previous
	 * number but that wouldn't harm anything. Removing those extra nodes
	 * may complicate the code if those nodes are part of SRL or
	 * individually rate limited.
	 */
	status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
					       new_num_nodes, owner);
	if (status)
		return status;
	/* remember the new high-water mark for this owner */
	if (owner == ICE_SCHED_NODE_OWNER_LAN)
		vsi_ctx->sched.max_lanq[tc] = new_numqs;
	else
		vsi_ctx->sched.max_rdmaq[tc] = new_numqs;

	return ICE_SUCCESS;
}

/**
 * ice_sched_cfg_vsi - configure the new/existing VSI
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @maxqs: max number of queues
 * @owner: LAN or RDMA
 * @enable: TC enabled or disabled
 *
 * This function adds/updates VSI nodes based on the number of queues. If TC is
 * enabled and VSI is in suspended state then resume the VSI back.
If TC is
 * disabled then suspend the VSI if it is not already.
 */
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
		  u8 owner, bool enable)
{
	struct ice_sched_node *vsi_node, *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;

	ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;
	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);

	/* suspend the VSI if TC is not enabled */
	if (!enable) {
		if (vsi_node && vsi_node->in_use) {
			u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);

			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
								true);
			if (!status)
				vsi_node->in_use = false;
		}
		return status;
	}

	/* TC is enabled, if it is a new VSI then add it to the tree */
	if (!vsi_node) {
		status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
		if (status)
			return status;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			return ICE_ERR_CFG;

		/* cache the new node and mark it active */
		vsi_ctx->sched.vsi_node[tc] = vsi_node;
		vsi_node->in_use = true;
		/* invalidate the max queues whenever VSI gets added first time
		 * into the scheduler tree (boot or after reset). We need to
		 * recreate the child nodes all the time in these cases.
		 */
		vsi_ctx->sched.max_lanq[tc] = 0;
		vsi_ctx->sched.max_rdmaq[tc] = 0;
	}

	/* update the VSI child nodes */
	status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
						  owner);
	if (status)
		return status;

	/* TC is enabled, resume the VSI if it is in the suspend state */
	if (!vsi_node->in_use) {
		u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);

		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
		if (!status)
			vsi_node->in_use = true;
	}

	return status;
}

/**
 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function removes single aggregator VSI info entry from
 * aggregator list.
 */
static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	/* _SAFE iteration: the matching entry is deleted inside the loop */
	LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &pi->hw->agg_list,
				 ice_sched_agg_info,
				 list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list,
					 ice_sched_agg_vsi_info, list_entry)
			if (agg_vsi_info->vsi_handle == vsi_handle) {
				LIST_DEL(&agg_vsi_info->list_entry);
				ice_free(pi->hw, agg_vsi_info);
				/* a VSI appears at most once - done */
				return;
			}
	}
}

/**
 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
 * @node: pointer to the sub-tree node
 *
 * This function checks for a leaf node presence in a given sub-tree node.
 */
static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
{
	u8 i;

	/* depth-first recursive walk; depth is bounded by the number of
	 * scheduler tree layers
	 */
	for (i = 0; i < node->num_children; i++)
		if (ice_sched_is_leaf_node_present(node->children[i]))
			return true;
	/* check for a leaf node */
	return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
}

/**
 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @owner: LAN or RDMA
 *
 * This function removes the VSI and its LAN or RDMA children nodes from the
 * scheduler tree. It fails with ICE_ERR_IN_USE if any TC still has leaf
 * (queue) nodes attached to the VSI sub-tree.
 */
static enum ice_status
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_vsi_ctx *vsi_ctx;
	u8 i;

	ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return status;
	ice_acquire_lock(&pi->sched_lock);
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		goto exit_sched_rm_vsi_cfg;

	ice_for_each_traffic_class(i) {
		struct ice_sched_node *vsi_node, *tc_node;
		u8 j = 0;

		tc_node = ice_sched_get_tc_node(pi, i);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		if (ice_sched_is_leaf_node_present(vsi_node)) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
			status = ICE_ERR_IN_USE;
			goto exit_sched_rm_vsi_cfg;
		}
		/* free only the children owned by the caller (LAN or RDMA) */
		while (j < vsi_node->num_children) {
			if (vsi_node->children[j]->owner == owner) {
				ice_free_sched_node(pi, vsi_node->children[j]);

				/* reset the counter again since the num
				 * children will be updated after node removal
				 */
				j = 0;
			} else {
				j++;
			}
		}
		/* remove the VSI if it has no children */
		if (!vsi_node->num_children) {
			ice_free_sched_node(pi, vsi_node);
			vsi_ctx->sched.vsi_node[i] = NULL;

			/* clean up aggregator related VSI info if any */
			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
		}
		if (owner == ICE_SCHED_NODE_OWNER_LAN)
			vsi_ctx->sched.max_lanq[i] = 0;
		else
			vsi_ctx->sched.max_rdmaq[i] = 0;
	}
	status = ICE_SUCCESS;

exit_sched_rm_vsi_cfg:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function clears the VSI and its LAN children nodes from scheduler tree
 * for all TCs.
 */
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function clears the VSI and its RDMA children nodes from scheduler tree
 * for all TCs.
 */
enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
}

/**
 * ice_sched_is_tree_balanced - Check tree nodes are identical or not
 * @hw: pointer to the HW struct
 * @node: pointer to the ice_sched_node struct
 *
 * This function compares all the nodes for a given tree against HW DB nodes
 * This function needs to be called with the port_info->sched_lock held
 */
bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node)
{
	u8 i;

	/* start from the leaf node */
	for (i = 0; i < node->num_children; i++)
		/* Fail if node doesn't match with the SW DB
		 * this recursion is intentional, and wouldn't
		 * go more than 9 calls
		 */
		if (!ice_sched_is_tree_balanced(hw, node->children[i]))
			return false;

	return ice_sched_check_node(hw, node);
}

/**
 * ice_aq_query_node_to_root - retrieve the tree topology for a given node TEID
 * @hw: pointer to the HW struct
 * @node_teid: node TEID
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @cd: pointer to command details structure or NULL
 *
 * This function retrieves the tree topology from the firmware for a given
 * node TEID to the root node.
 */
enum ice_status
ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
			  struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
			  struct ice_sq_cd *cd)
{
	struct ice_aqc_query_node_to_root *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.query_node_to_root;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_node_to_root);
	cmd->teid = CPU_TO_LE32(node_teid);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_get_agg_info - get the aggregator ID
 * @hw: pointer to the hardware structure
 * @agg_id: aggregator ID
 *
 * This function validates aggregator ID. The function returns info if
 * aggregator ID is present in list otherwise it returns null.
 */
static struct ice_sched_agg_info *
ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
{
	struct ice_sched_agg_info *agg_info;

	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id)
			return agg_info;

	return NULL;
}

/**
 * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
 * @hw: pointer to the HW struct
 * @node: pointer to a child node
 * @num_nodes: num nodes count array
 *
 * This function walks through the aggregator subtree to find a free parent
 * node
 */
static struct ice_sched_node *
ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
			      u16 *num_nodes)
{
	u8 l = node->tx_sched_layer;
	u8 vsil, i;

	vsil = ice_sched_get_vsi_layer(hw);

	/* Is it VSI parent layer ? */
	if (l == vsil - 1)
		return (node->num_children < hw->max_children[l]) ? node : NULL;

	/* We have intermediate nodes. Let's walk through the subtree. If the
	 * intermediate node has space to add a new node then clear the count
	 */
	if (node->num_children < hw->max_children[l])
		num_nodes[l] = 0;
	/* The below recursive call is intentional and wouldn't go more than
	 * 2 or 3 iterations.
	 */

	for (i = 0; i < node->num_children; i++) {
		struct ice_sched_node *parent;

		parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
						       num_nodes);
		if (parent)
			return parent;
	}

	return NULL;
}

/**
 * ice_sched_update_parent - update the new parent in SW DB
 * @new_parent: pointer to a new parent node
 * @node: pointer to a child node
 *
 * This function removes the child from the old parent and adds it to a new
 * parent
 */
static void
ice_sched_update_parent(struct ice_sched_node *new_parent,
			struct ice_sched_node *node)
{
	struct ice_sched_node *old_parent;
	u8 i, j;

	old_parent = node->parent;

	/* update the old parent children: shift the remaining entries left
	 * to keep the children array compact
	 */
	for (i = 0; i < old_parent->num_children; i++)
		if (old_parent->children[i] == node) {
			for (j = i + 1; j < old_parent->num_children; j++)
				old_parent->children[j - 1] =
					old_parent->children[j];
			old_parent->num_children--;
			break;
		}

	/* now move the node to a new parent */
	new_parent->children[new_parent->num_children++] = node;
	node->parent = new_parent;
	node->info.parent_teid = new_parent->info.node_teid;
}

/**
 * ice_sched_move_nodes - move child nodes to a given parent
 * @pi: port information structure
 * @parent: pointer to parent node
 * @num_items: number of child nodes to be moved
 * @list: pointer to child node teids
 *
 * This function move the child nodes to a given parent.
2400 */ 2401 static enum ice_status 2402 ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, 2403 u16 num_items, u32 *list) 2404 { 2405 struct ice_aqc_move_elem *buf; 2406 struct ice_sched_node *node; 2407 enum ice_status status = ICE_SUCCESS; 2408 u16 i, grps_movd = 0; 2409 struct ice_hw *hw; 2410 u16 buf_len; 2411 2412 hw = pi->hw; 2413 2414 if (!parent || !num_items) 2415 return ICE_ERR_PARAM; 2416 2417 /* Does parent have enough space */ 2418 if (parent->num_children + num_items > 2419 hw->max_children[parent->tx_sched_layer]) 2420 return ICE_ERR_AQ_FULL; 2421 2422 buf_len = ice_struct_size(buf, teid, 1); 2423 buf = (struct ice_aqc_move_elem *)ice_malloc(hw, buf_len); 2424 if (!buf) 2425 return ICE_ERR_NO_MEMORY; 2426 2427 for (i = 0; i < num_items; i++) { 2428 node = ice_sched_find_node_by_teid(pi->root, list[i]); 2429 if (!node) { 2430 status = ICE_ERR_PARAM; 2431 goto move_err_exit; 2432 } 2433 2434 buf->hdr.src_parent_teid = node->info.parent_teid; 2435 buf->hdr.dest_parent_teid = parent->info.node_teid; 2436 buf->teid[0] = node->info.node_teid; 2437 buf->hdr.num_elems = CPU_TO_LE16(1); 2438 status = ice_aq_move_sched_elems(hw, 1, buf, buf_len, 2439 &grps_movd, NULL); 2440 if (status && grps_movd != 1) { 2441 status = ICE_ERR_CFG; 2442 goto move_err_exit; 2443 } 2444 2445 /* update the SW DB */ 2446 ice_sched_update_parent(parent, node); 2447 } 2448 2449 move_err_exit: 2450 ice_free(hw, buf); 2451 return status; 2452 } 2453 2454 /** 2455 * ice_sched_move_vsi_to_agg - move VSI to aggregator node 2456 * @pi: port information structure 2457 * @vsi_handle: software VSI handle 2458 * @agg_id: aggregator ID 2459 * @tc: TC number 2460 * 2461 * This function moves a VSI to an aggregator node or its subtree. 2462 * Intermediate nodes may be created if required. 
 */
static enum ice_status
ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
			  u8 tc)
{
	struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	u32 first_node_teid, vsi_teid;
	enum ice_status status;
	u16 num_nodes_added;
	u8 aggl, vsil, i;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		return ICE_ERR_DOES_NOT_EXIST;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Is this VSI already part of given aggregator? */
	if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
		return ICE_SUCCESS;

	aggl = ice_sched_get_agg_layer(pi->hw);
	vsil = ice_sched_get_vsi_layer(pi->hw);

	/* set intermediate node count to 1 between aggregator and VSI layers */
	for (i = aggl + 1; i < vsil; i++)
		num_nodes[i] = 1;

	/* Check if the aggregator subtree has any free node to add the VSI */
	for (i = 0; i < agg_node->num_children; i++) {
		parent = ice_sched_get_free_vsi_parent(pi->hw,
						       agg_node->children[i],
						       num_nodes);
		if (parent)
			goto move_nodes;
	}

	/* add new nodes */
	parent = agg_node;
	for (i = aggl + 1; i < vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;
	}

move_nodes:
	vsi_teid = LE32_TO_CPU(vsi_node->info.node_teid);
	return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
}

/**
 * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
 * @pi: port information structure
 * @agg_info: aggregator info
 * @tc: traffic class number
 * @rm_vsi_info: true or false
 *
 * This function move all the VSI(s) to the default aggregator and delete
 * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The
 * caller holds the scheduler lock.
 */
static enum ice_status
ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
			     struct ice_sched_agg_info *agg_info, u8 tc,
			     bool rm_vsi_info)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_agg_vsi_info *tmp;
	enum ice_status status = ICE_SUCCESS;

	LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
				 ice_sched_agg_vsi_info, list_entry) {
		u16 vsi_handle = agg_vsi_info->vsi_handle;

		/* Move VSI to default aggregator */
		if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
			continue;

		status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
						   ICE_DFLT_AGG_ID, tc);
		if (status)
			break;

		ice_clear_bit(tc, agg_vsi_info->tc_bitmap);
		/* free the entry only when the VSI no longer belongs to this
		 * aggregator on any TC and the caller asked for removal
		 */
		if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
			LIST_DEL(&agg_vsi_info->list_entry);
			ice_free(pi->hw, agg_vsi_info);
		}
	}

	return status;
}

/**
 * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
 * @pi: port information structure
 * @node: node pointer
 *
 * This function checks whether the aggregator is attached with any VSI or not.
 */
static bool
ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
{
	u8 vsil, i;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	if (node->tx_sched_layer < vsil - 1) {
		/* intermediate layer: recurse into the children */
		for (i = 0; i < node->num_children; i++)
			if (ice_sched_is_agg_inuse(pi, node->children[i]))
				return true;
		return false;
	} else {
		/* VSI parent layer: any child means a VSI is attached */
		return node->num_children ? true : false;
	}
}

/**
 * ice_sched_rm_agg_cfg - remove the aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function removes the aggregator node and intermediate nodes if any
 * from the given TC
 */
static enum ice_status
ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	struct ice_sched_node *tc_node, *agg_node;
	struct ice_hw *hw = pi->hw;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Can't remove the aggregator node if it has children */
	if (ice_sched_is_agg_inuse(pi, agg_node))
		return ICE_ERR_IN_USE;

	/* need to remove the whole subtree if aggregator node is the
	 * only child.
	 */
	while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
		struct ice_sched_node *parent = agg_node->parent;

		if (!parent)
			return ICE_ERR_CFG;

		if (parent->num_children > 1)
			break;

		agg_node = parent;
	}

	/* ice_free_sched_node removes the node and its whole subtree */
	ice_free_sched_node(pi, agg_node);
	return ICE_SUCCESS;
}

/**
 * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
 * @pi: port information structure
 * @agg_info: aggregator ID
 * @tc: TC number
 * @rm_vsi_info: bool value true or false
 *
 * This function removes aggregator reference to VSI of given TC.
It removes 2655 * the aggregator configuration completely for requested TC. The caller needs 2656 * to hold the scheduler lock. 2657 */ 2658 static enum ice_status 2659 ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, 2660 u8 tc, bool rm_vsi_info) 2661 { 2662 enum ice_status status = ICE_SUCCESS; 2663 2664 /* If nothing to remove - return success */ 2665 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) 2666 goto exit_rm_agg_cfg_tc; 2667 2668 status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info); 2669 if (status) 2670 goto exit_rm_agg_cfg_tc; 2671 2672 /* Delete aggregator node(s) */ 2673 status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc); 2674 if (status) 2675 goto exit_rm_agg_cfg_tc; 2676 2677 ice_clear_bit(tc, agg_info->tc_bitmap); 2678 exit_rm_agg_cfg_tc: 2679 return status; 2680 } 2681 2682 /** 2683 * ice_save_agg_tc_bitmap - save aggregator TC bitmap 2684 * @pi: port information structure 2685 * @agg_id: aggregator ID 2686 * @tc_bitmap: 8 bits TC bitmap 2687 * 2688 * Save aggregator TC bitmap. This function needs to be called with scheduler 2689 * lock held. 
 */
static enum ice_status
ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
		       ice_bitmap_t *tc_bitmap)
{
	struct ice_sched_agg_info *agg_info;

	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info)
		return ICE_ERR_PARAM;
	/* replay_tc_bitmap is used to recreate the config after a reset */
	ice_cp_bitmap(agg_info->replay_tc_bitmap, tc_bitmap,
		      ICE_MAX_TRAFFIC_CLASS);
	return ICE_SUCCESS;
}

/**
 * ice_sched_add_agg_cfg - create an aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function creates an aggregator node and intermediate nodes if required
 * for the given TC
 */
static enum ice_status
ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	struct ice_sched_node *parent, *agg_node, *tc_node;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u32 first_node_teid;
	u16 num_nodes_added;
	u8 i, aggl;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	/* Does Agg node already exist ? */
	if (agg_node)
		return status;

	aggl = ice_sched_get_agg_layer(hw);

	/* need one node in Agg layer */
	num_nodes[aggl] = 1;

	/* Check whether the intermediate nodes have space to add the
	 * new aggregator. If they are full, then SW needs to allocate a new
	 * intermediate node on those layers
	 */
	for (i = hw->sw_entry_point_layer; i < aggl; i++) {
		parent = ice_sched_get_first_node(pi, tc_node, i);

		/* scan all the siblings */
		while (parent) {
			if (parent->num_children < hw->max_children[i])
				break;
			parent = parent->sibling;
		}

		/* all the nodes are full, reserve one for this layer */
		if (!parent)
			num_nodes[i]++;
	}

	/* add the aggregator node */
	parent = tc_node;
	for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
		if (!parent)
			return ICE_ERR_CFG;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* register aggregator ID with the aggregator node */
			if (parent && i == aggl)
				parent->agg_id = agg_id;
		} else {
			parent = parent->children[0];
		}
	}

	return ICE_SUCCESS;
}

/**
 * ice_sched_cfg_agg - configure aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @agg_type: aggregator type queue, VSI, or aggregator group
 * @tc_bitmap: bits TC bitmap
 *
 * It registers a unique aggregator node into scheduler services. It
 * allows a user to register with a unique ID to track it's resources.
 * The aggregator type determines if this is a queue group, VSI group
 * or aggregator group. It then creates the aggregator node(s) for requested
 * TC(s) or removes an existing aggregator node including its configuration
 * if indicated via tc_bitmap.
Call ice_rm_agg_cfg to release aggregator
 * resources and remove aggregator ID.
 * This function needs to be called with scheduler lock held.
 */
static enum ice_status
ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
		  enum ice_agg_type agg_type, ice_bitmap_t *tc_bitmap)
{
	struct ice_sched_agg_info *agg_info;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u8 tc;

	agg_info = ice_get_agg_info(hw, agg_id);
	if (!agg_info) {
		/* Create new entry for new aggregator ID */
		agg_info = (struct ice_sched_agg_info *)
			ice_malloc(hw, sizeof(*agg_info));
		if (!agg_info)
			return ICE_ERR_NO_MEMORY;

		agg_info->agg_id = agg_id;
		agg_info->agg_type = agg_type;
		agg_info->tc_bitmap[0] = 0;

		/* Initialize the aggregator VSI list head */
		INIT_LIST_HEAD(&agg_info->agg_vsi_list);

		/* Add new entry in aggregator list; the entry stays in the
		 * list even if a later per-TC step below fails
		 */
		LIST_ADD(&agg_info->list_entry, &hw->agg_list);
	}
	/* Create aggregator node(s) for requested TC(s) */
	ice_for_each_traffic_class(tc) {
		if (!ice_is_tc_ena(*tc_bitmap, tc)) {
			/* Delete aggregator cfg TC if it exists previously */
			status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
			if (status)
				break;
			continue;
		}

		/* Check if aggregator node for TC already exists */
		if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
			continue;

		/* Create new aggregator node for TC */
		status = ice_sched_add_agg_cfg(pi, agg_id, tc);
		if (status)
			break;

		/* Save aggregator node's TC information */
		ice_set_bit(tc, agg_info->tc_bitmap);
	}

	return status;
}

/**
 * ice_cfg_agg - config aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @agg_type: aggregator type queue, VSI, or aggregator group
 * @tc_bitmap: bits TC bitmap
 *
 * This function configures
aggregator node(s). 2865 */ 2866 enum ice_status 2867 ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, 2868 u8 tc_bitmap) 2869 { 2870 ice_bitmap_t bitmap = tc_bitmap; 2871 enum ice_status status; 2872 2873 ice_acquire_lock(&pi->sched_lock); 2874 status = ice_sched_cfg_agg(pi, agg_id, agg_type, 2875 (ice_bitmap_t *)&bitmap); 2876 if (!status) 2877 status = ice_save_agg_tc_bitmap(pi, agg_id, 2878 (ice_bitmap_t *)&bitmap); 2879 ice_release_lock(&pi->sched_lock); 2880 return status; 2881 } 2882 2883 /** 2884 * ice_get_agg_vsi_info - get the aggregator ID 2885 * @agg_info: aggregator info 2886 * @vsi_handle: software VSI handle 2887 * 2888 * The function returns aggregator VSI info based on VSI handle. This function 2889 * needs to be called with scheduler lock held. 2890 */ 2891 static struct ice_sched_agg_vsi_info * 2892 ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle) 2893 { 2894 struct ice_sched_agg_vsi_info *agg_vsi_info; 2895 2896 LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list, 2897 ice_sched_agg_vsi_info, list_entry) 2898 if (agg_vsi_info->vsi_handle == vsi_handle) 2899 return agg_vsi_info; 2900 2901 return NULL; 2902 } 2903 2904 /** 2905 * ice_get_vsi_agg_info - get the aggregator info of VSI 2906 * @hw: pointer to the hardware structure 2907 * @vsi_handle: Sw VSI handle 2908 * 2909 * The function returns aggregator info of VSI represented via vsi_handle. The 2910 * VSI has in this case a different aggregator than the default one. This 2911 * function needs to be called with scheduler lock held. 
2912 */ 2913 static struct ice_sched_agg_info * 2914 ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle) 2915 { 2916 struct ice_sched_agg_info *agg_info; 2917 2918 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, 2919 list_entry) { 2920 struct ice_sched_agg_vsi_info *agg_vsi_info; 2921 2922 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); 2923 if (agg_vsi_info) 2924 return agg_info; 2925 } 2926 return NULL; 2927 } 2928 2929 /** 2930 * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap 2931 * @pi: port information structure 2932 * @agg_id: aggregator ID 2933 * @vsi_handle: software VSI handle 2934 * @tc_bitmap: TC bitmap of enabled TC(s) 2935 * 2936 * Save VSI to aggregator TC bitmap. This function needs to call with scheduler 2937 * lock held. 2938 */ 2939 static enum ice_status 2940 ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, 2941 ice_bitmap_t *tc_bitmap) 2942 { 2943 struct ice_sched_agg_vsi_info *agg_vsi_info; 2944 struct ice_sched_agg_info *agg_info; 2945 2946 agg_info = ice_get_agg_info(pi->hw, agg_id); 2947 if (!agg_info) 2948 return ICE_ERR_PARAM; 2949 /* check if entry already exist */ 2950 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); 2951 if (!agg_vsi_info) 2952 return ICE_ERR_PARAM; 2953 ice_cp_bitmap(agg_vsi_info->replay_tc_bitmap, tc_bitmap, 2954 ICE_MAX_TRAFFIC_CLASS); 2955 return ICE_SUCCESS; 2956 } 2957 2958 /** 2959 * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator 2960 * @pi: port information structure 2961 * @agg_id: aggregator ID 2962 * @vsi_handle: software VSI handle 2963 * @tc_bitmap: TC bitmap of enabled TC(s) 2964 * 2965 * This function moves VSI to a new or default aggregator node. If VSI is 2966 * already associated to the aggregator node then no operation is performed on 2967 * the tree. This function needs to be called with scheduler lock held. 
 */
static enum ice_status
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
			   u16 vsi_handle, ice_bitmap_t *tc_bitmap)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
	struct ice_sched_agg_info *agg_info, *old_agg_info;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	agg_info = ice_get_agg_info(hw, agg_id);
	if (!agg_info)
		return ICE_ERR_PARAM;
	/* If the vsi is already part of another aggregator then update
	 * its vsi info list
	 */
	old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
	if (old_agg_info && old_agg_info != agg_info) {
		struct ice_sched_agg_vsi_info *vtmp;

		LIST_FOR_EACH_ENTRY_SAFE(old_agg_vsi_info, vtmp,
					 &old_agg_info->agg_vsi_list,
					 ice_sched_agg_vsi_info, list_entry)
			if (old_agg_vsi_info->vsi_handle == vsi_handle)
				break;
	}

	/* check if entry already exist */
	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
	if (!agg_vsi_info) {
		/* Create new entry for VSI under aggregator list */
		agg_vsi_info = (struct ice_sched_agg_vsi_info *)
			ice_malloc(hw, sizeof(*agg_vsi_info));
		/* NOTE(review): allocation failure reports ICE_ERR_PARAM
		 * rather than ICE_ERR_NO_MEMORY — confirm against the other
		 * allocation sites in this file
		 */
		if (!agg_vsi_info)
			return ICE_ERR_PARAM;

		/* add VSI ID into the aggregator list */
		agg_vsi_info->vsi_handle = vsi_handle;
		LIST_ADD(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list);
	}
	/* Move VSI node to new aggregator node for requested TC(s) */
	ice_for_each_traffic_class(tc) {
		if (!ice_is_tc_ena(*tc_bitmap, tc))
			continue;

		/* Move VSI to new aggregator */
		status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc);
		if (status)
			break;

		ice_set_bit(tc, agg_vsi_info->tc_bitmap);
		if (old_agg_vsi_info)
			ice_clear_bit(tc, old_agg_vsi_info->tc_bitmap);
	}
	/* drop the old membership entry once it covers no TC anymore */
	if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) {
		LIST_DEL(&old_agg_vsi_info->list_entry);
		ice_free(pi->hw, old_agg_vsi_info);
	}
	return status;
}

/**
 * ice_sched_rm_unused_rl_prof - remove unused RL profile
 * @hw: pointer to the hardware structure
 *
 * This function removes unused rate limit profiles from the HW and
 * SW DB. The caller needs to hold scheduler lock.
 */
static void ice_sched_rm_unused_rl_prof(struct ice_hw *hw)
{
	u16 ln;

	for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
		struct ice_aqc_rl_profile_info *rl_prof_elem;
		struct ice_aqc_rl_profile_info *rl_prof_tmp;

		LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
					 &hw->rl_prof_list[ln],
					 ice_aqc_rl_profile_info, list_entry) {
			/* ice_sched_del_rl_profile only deletes profiles with
			 * no remaining users; failures are ignored on purpose
			 */
			if (!ice_sched_del_rl_profile(hw, rl_prof_elem))
				ice_debug(hw, ICE_DBG_SCHED, "Removed rl profile\n");
		}
	}
}

/**
 * ice_sched_update_elem - update element
 * @hw: pointer to the HW struct
 * @node: pointer to node
 * @info: node info to update
 *
 * Update the HW DB, and local SW DB of node. Update the scheduling
 * parameters of node from argument info data buffer (Info->data buf) and
 * returns success or error on config sched element failure. The caller
 * needs to hold scheduler lock.
 */
static enum ice_status
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
		      struct ice_aqc_txsched_elem_data *info)
{
	struct ice_aqc_txsched_elem_data buf;
	enum ice_status status;
	u16 elem_cfgd = 0;
	u16 num_elems = 1;

	/* work on a local copy so reserved fields can be cleared without
	 * touching the caller's buffer
	 */
	buf = *info;
	/* For TC nodes, CIR config is not supported */
	if (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_TC)
		buf.data.valid_sections &= ~ICE_AQC_ELEM_VALID_CIR;
	/* Parent TEID is reserved field in this aq call */
	buf.parent_teid = 0;
	/* Element type is reserved field in this aq call */
	buf.data.elem_type = 0;
	/* Flags is reserved field in this aq call */
	buf.data.flags = 0;

	/* Update HW DB */
	/* Configure element node */
	status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
					&elem_cfgd, NULL);
	if (status || elem_cfgd != num_elems) {
		ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
		return ICE_ERR_CFG;
	}

	/* Config success case */
	/* Now update local SW DB */
	/* Only copy the data portion of info buffer */
	node->info.data = info->data;
	return status;
}

/**
 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @rl_type: rate limit type CIR, EIR, or shared
 * @bw_alloc: BW weight/allocation
 *
 * This function configures node element's BW allocation.
3111 */ 3112 static enum ice_status 3113 ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, 3114 enum ice_rl_type rl_type, u16 bw_alloc) 3115 { 3116 struct ice_aqc_txsched_elem_data buf; 3117 struct ice_aqc_txsched_elem *data; 3118 enum ice_status status; 3119 3120 buf = node->info; 3121 data = &buf.data; 3122 if (rl_type == ICE_MIN_BW) { 3123 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; 3124 data->cir_bw.bw_alloc = CPU_TO_LE16(bw_alloc); 3125 } else if (rl_type == ICE_MAX_BW) { 3126 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; 3127 data->eir_bw.bw_alloc = CPU_TO_LE16(bw_alloc); 3128 } else { 3129 return ICE_ERR_PARAM; 3130 } 3131 3132 /* Configure element */ 3133 status = ice_sched_update_elem(hw, node, &buf); 3134 return status; 3135 } 3136 3137 /** 3138 * ice_move_vsi_to_agg - moves VSI to new or default aggregator 3139 * @pi: port information structure 3140 * @agg_id: aggregator ID 3141 * @vsi_handle: software VSI handle 3142 * @tc_bitmap: TC bitmap of enabled TC(s) 3143 * 3144 * Move or associate VSI to a new or default aggregator node. 3145 */ 3146 enum ice_status 3147 ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, 3148 u8 tc_bitmap) 3149 { 3150 ice_bitmap_t bitmap = tc_bitmap; 3151 enum ice_status status; 3152 3153 ice_acquire_lock(&pi->sched_lock); 3154 status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle, 3155 (ice_bitmap_t *)&bitmap); 3156 if (!status) 3157 status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle, 3158 (ice_bitmap_t *)&bitmap); 3159 ice_release_lock(&pi->sched_lock); 3160 return status; 3161 } 3162 3163 /** 3164 * ice_rm_agg_cfg - remove aggregator configuration 3165 * @pi: port information structure 3166 * @agg_id: aggregator ID 3167 * 3168 * This function removes aggregator reference to VSI and delete aggregator ID 3169 * info. It removes the aggregator configuration completely. 
*/
enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id)
{
	struct ice_sched_agg_info *agg_info;
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	ice_acquire_lock(&pi->sched_lock);
	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit_ice_rm_agg_cfg;
	}

	/* Detach the aggregator from every traffic class first */
	ice_for_each_traffic_class(tc) {
		status = ice_rm_agg_cfg_tc(pi, agg_info, tc, true);
		if (status)
			goto exit_ice_rm_agg_cfg;
	}

	/* Any TC bit still set means the aggregator is still referenced */
	if (ice_is_any_bit_set(agg_info->tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) {
		status = ICE_ERR_IN_USE;
		goto exit_ice_rm_agg_cfg;
	}

	/* Safe to delete entry now */
	LIST_DEL(&agg_info->list_entry);
	ice_free(pi->hw, agg_info);

	/* Remove unused RL profile IDs from HW and SW DB */
	ice_sched_rm_unused_rl_prof(pi->hw);

exit_ice_rm_agg_cfg:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_set_clear_cir_bw_alloc - set or clear CIR BW alloc information
 * @bw_t_info: bandwidth type information structure
 * @bw_alloc: Bandwidth allocation information
 *
 * Save or clear CIR BW alloc information (bw_alloc) in the passed param
 * bw_t_info.
 */
static void
ice_set_clear_cir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
{
	bw_t_info->cir_bw.bw_alloc = bw_alloc;
	/* A nonzero weight marks the CIR weight entry as valid */
	if (bw_t_info->cir_bw.bw_alloc)
		ice_set_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap);
	else
		ice_clear_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap);
}

/**
 * ice_set_clear_eir_bw_alloc - set or clear EIR BW alloc information
 * @bw_t_info: bandwidth type information structure
 * @bw_alloc: Bandwidth allocation information
 *
 * Save or clear EIR BW alloc information (bw_alloc) in the passed param
 * bw_t_info.
*/
static void
ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
{
	bw_t_info->eir_bw.bw_alloc = bw_alloc;
	/* A nonzero weight marks the EIR weight entry as valid */
	if (bw_t_info->eir_bw.bw_alloc)
		ice_set_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap);
	else
		ice_clear_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap);
}

/**
 * ice_sched_save_vsi_bw_alloc - save VSI node's BW alloc information
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @rl_type: rate limit type min or max
 * @bw_alloc: Bandwidth allocation information
 *
 * Save BW alloc information of VSI type node for post replay use.
 */
static enum ice_status
ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			    enum ice_rl_type rl_type, u16 bw_alloc)
{
	struct ice_vsi_ctx *vsi_ctx;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
					   bw_alloc);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
					   bw_alloc);
		break;
	default:
		/* Only CIR and EIR weights are saved here */
		return ICE_ERR_PARAM;
	}
	return ICE_SUCCESS;
}

/**
 * ice_set_clear_cir_bw - set or clear CIR BW
 * @bw_t_info: bandwidth type information structure
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
3285 */ 3286 static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) 3287 { 3288 if (bw == ICE_SCHED_DFLT_BW) { 3289 ice_clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); 3290 bw_t_info->cir_bw.bw = 0; 3291 } else { 3292 /* Save type of BW information */ 3293 ice_set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); 3294 bw_t_info->cir_bw.bw = bw; 3295 } 3296 } 3297 3298 /** 3299 * ice_set_clear_eir_bw - set or clear EIR BW 3300 * @bw_t_info: bandwidth type information structure 3301 * @bw: bandwidth in Kbps - Kilo bits per sec 3302 * 3303 * Save or clear EIR bandwidth (BW) in the passed param bw_t_info. 3304 */ 3305 static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) 3306 { 3307 if (bw == ICE_SCHED_DFLT_BW) { 3308 ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); 3309 bw_t_info->eir_bw.bw = 0; 3310 } else { 3311 /* save EIR BW information */ 3312 ice_set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); 3313 bw_t_info->eir_bw.bw = bw; 3314 } 3315 } 3316 3317 /** 3318 * ice_set_clear_shared_bw - set or clear shared BW 3319 * @bw_t_info: bandwidth type information structure 3320 * @bw: bandwidth in Kbps - Kilo bits per sec 3321 * 3322 * Save or clear shared bandwidth (BW) in the passed param bw_t_info. 
3323 */ 3324 static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw) 3325 { 3326 if (bw == ICE_SCHED_DFLT_BW) { 3327 ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); 3328 bw_t_info->shared_bw = 0; 3329 } else { 3330 /* save shared BW information */ 3331 ice_set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); 3332 bw_t_info->shared_bw = bw; 3333 } 3334 } 3335 3336 /** 3337 * ice_sched_save_vsi_bw - save VSI node's BW information 3338 * @pi: port information structure 3339 * @vsi_handle: sw VSI handle 3340 * @tc: traffic class 3341 * @rl_type: rate limit type min, max, or shared 3342 * @bw: bandwidth in Kbps - Kilo bits per sec 3343 * 3344 * Save BW information of VSI type node for post replay use. 3345 */ 3346 static enum ice_status 3347 ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 3348 enum ice_rl_type rl_type, u32 bw) 3349 { 3350 struct ice_vsi_ctx *vsi_ctx; 3351 3352 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 3353 return ICE_ERR_PARAM; 3354 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 3355 if (!vsi_ctx) 3356 return ICE_ERR_PARAM; 3357 switch (rl_type) { 3358 case ICE_MIN_BW: 3359 ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); 3360 break; 3361 case ICE_MAX_BW: 3362 ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); 3363 break; 3364 case ICE_SHARED_BW: 3365 ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw); 3366 break; 3367 default: 3368 return ICE_ERR_PARAM; 3369 } 3370 return ICE_SUCCESS; 3371 } 3372 3373 /** 3374 * ice_set_clear_prio - set or clear priority information 3375 * @bw_t_info: bandwidth type information structure 3376 * @prio: priority to save 3377 * 3378 * Save or clear priority (prio) in the passed param bw_t_info. 
*/
static void ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio)
{
	bw_t_info->generic = prio;
	/* A nonzero priority marks the entry as valid in the bitmap */
	if (bw_t_info->generic)
		ice_set_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
	else
		ice_clear_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
}

/**
 * ice_sched_save_vsi_prio - save VSI node's priority information
 * @pi: port information structure
 * @vsi_handle: Software VSI handle
 * @tc: traffic class
 * @prio: priority to save
 *
 * Save priority information of VSI type node for post replay use.
 */
static enum ice_status
ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			u8 prio)
{
	struct ice_vsi_ctx *vsi_ctx;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* Bound check before indexing the per-TC array */
	if (tc >= ICE_MAX_TRAFFIC_CLASS)
		return ICE_ERR_PARAM;
	ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio);
	return ICE_SUCCESS;
}

/**
 * ice_sched_save_agg_bw_alloc - save aggregator node's BW alloc information
 * @pi: port information structure
 * @agg_id: node aggregator ID
 * @tc: traffic class
 * @rl_type: rate limit type min or max
 * @bw_alloc: bandwidth alloc information
 *
 * Save BW alloc information of AGG type node for post replay use.
3424 */ 3425 static enum ice_status 3426 ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc, 3427 enum ice_rl_type rl_type, u16 bw_alloc) 3428 { 3429 struct ice_sched_agg_info *agg_info; 3430 3431 agg_info = ice_get_agg_info(pi->hw, agg_id); 3432 if (!agg_info) 3433 return ICE_ERR_PARAM; 3434 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) 3435 return ICE_ERR_PARAM; 3436 switch (rl_type) { 3437 case ICE_MIN_BW: 3438 ice_set_clear_cir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc); 3439 break; 3440 case ICE_MAX_BW: 3441 ice_set_clear_eir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc); 3442 break; 3443 default: 3444 return ICE_ERR_PARAM; 3445 } 3446 return ICE_SUCCESS; 3447 } 3448 3449 /** 3450 * ice_sched_save_agg_bw - save aggregator node's BW information 3451 * @pi: port information structure 3452 * @agg_id: node aggregator ID 3453 * @tc: traffic class 3454 * @rl_type: rate limit type min, max, or shared 3455 * @bw: bandwidth in Kbps - Kilo bits per sec 3456 * 3457 * Save BW information of AGG type node for post replay use. 
*/
static enum ice_status
ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
		      enum ice_rl_type rl_type, u32 bw)
{
	struct ice_sched_agg_info *agg_info;

	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info)
		return ICE_ERR_PARAM;
	/* TC must be enabled for this aggregator */
	if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
		return ICE_ERR_PARAM;
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw(&agg_info->bw_t_info[tc], bw);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw(&agg_info->bw_t_info[tc], bw);
		break;
	case ICE_SHARED_BW:
		ice_set_clear_shared_bw(&agg_info->bw_t_info[tc], bw);
		break;
	default:
		return ICE_ERR_PARAM;
	}
	return ICE_SUCCESS;
}

/**
 * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function configures BW limit of VSI scheduling node based on TC
 * information.
 */
enum ice_status
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			  enum ice_rl_type rl_type, u32 bw)
{
	enum ice_status status;

	status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
						  ICE_AGG_TYPE_VSI,
						  tc, rl_type, bw);
	if (!status) {
		/* HW update succeeded - save the BW for post-replay use */
		ice_acquire_lock(&pi->sched_lock);
		status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
		ice_release_lock(&pi->sched_lock);
	}
	return status;
}

/**
 * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class
 * @rl_type: min or max
 *
 * This function configures default BW limit of VSI scheduling node based on TC
 * information.
3523 */ 3524 enum ice_status 3525 ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 3526 enum ice_rl_type rl_type) 3527 { 3528 enum ice_status status; 3529 3530 status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle, 3531 ICE_AGG_TYPE_VSI, 3532 tc, rl_type, 3533 ICE_SCHED_DFLT_BW); 3534 if (!status) { 3535 ice_acquire_lock(&pi->sched_lock); 3536 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, 3537 ICE_SCHED_DFLT_BW); 3538 ice_release_lock(&pi->sched_lock); 3539 } 3540 return status; 3541 } 3542 3543 /** 3544 * ice_cfg_agg_bw_lmt_per_tc - configure aggregator BW limit per TC 3545 * @pi: port information structure 3546 * @agg_id: aggregator ID 3547 * @tc: traffic class 3548 * @rl_type: min or max 3549 * @bw: bandwidth in Kbps 3550 * 3551 * This function applies BW limit to aggregator scheduling node based on TC 3552 * information. 3553 */ 3554 enum ice_status 3555 ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, 3556 enum ice_rl_type rl_type, u32 bw) 3557 { 3558 enum ice_status status; 3559 3560 status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG, 3561 tc, rl_type, bw); 3562 if (!status) { 3563 ice_acquire_lock(&pi->sched_lock); 3564 status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw); 3565 ice_release_lock(&pi->sched_lock); 3566 } 3567 return status; 3568 } 3569 3570 /** 3571 * ice_cfg_agg_bw_dflt_lmt_per_tc - configure aggregator BW default limit per TC 3572 * @pi: port information structure 3573 * @agg_id: aggregator ID 3574 * @tc: traffic class 3575 * @rl_type: min or max 3576 * 3577 * This function applies default BW limit to aggregator scheduling node based 3578 * on TC information. 
*/
enum ice_status
ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
			       enum ice_rl_type rl_type)
{
	enum ice_status status;

	status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
						  tc, rl_type,
						  ICE_SCHED_DFLT_BW);
	if (!status) {
		/* HW update succeeded - record the default BW for replay */
		ice_acquire_lock(&pi->sched_lock);
		status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type,
					       ICE_SCHED_DFLT_BW);
		ice_release_lock(&pi->sched_lock);
	}
	return status;
}

/**
 * ice_cfg_vsi_bw_shared_lmt - configure VSI BW shared limit
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @min_bw: minimum bandwidth in Kbps
 * @max_bw: maximum bandwidth in Kbps
 * @shared_bw: shared bandwidth in Kbps
 *
 * Configure shared rate limiter(SRL) of all VSI type nodes across all traffic
 * classes for VSI matching handle.
 */
enum ice_status
ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw,
			  u32 max_bw, u32 shared_bw)
{
	return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, min_bw, max_bw,
					       shared_bw);
}

/**
 * ice_cfg_vsi_bw_no_shared_lmt - configure VSI BW for no shared limiter
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function removes the shared rate limiter(SRL) of all VSI type nodes
 * across all traffic classes for VSI matching handle.
3624 */ 3625 enum ice_status 3626 ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle) 3627 { 3628 return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, 3629 ICE_SCHED_DFLT_BW, 3630 ICE_SCHED_DFLT_BW, 3631 ICE_SCHED_DFLT_BW); 3632 } 3633 3634 /** 3635 * ice_cfg_agg_bw_shared_lmt - configure aggregator BW shared limit 3636 * @pi: port information structure 3637 * @agg_id: aggregator ID 3638 * @min_bw: minimum bandwidth in Kbps 3639 * @max_bw: maximum bandwidth in Kbps 3640 * @shared_bw: shared bandwidth in Kbps 3641 * 3642 * This function configures the shared rate limiter(SRL) of all aggregator type 3643 * nodes across all traffic classes for aggregator matching agg_id. 3644 */ 3645 enum ice_status 3646 ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw, 3647 u32 max_bw, u32 shared_bw) 3648 { 3649 return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, min_bw, max_bw, 3650 shared_bw); 3651 } 3652 3653 /** 3654 * ice_cfg_agg_bw_no_shared_lmt - configure aggregator BW for no shared limiter 3655 * @pi: port information structure 3656 * @agg_id: aggregator ID 3657 * 3658 * This function removes the shared rate limiter(SRL) of all aggregator type 3659 * nodes across all traffic classes for aggregator matching agg_id. 3660 */ 3661 enum ice_status 3662 ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id) 3663 { 3664 return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW, 3665 ICE_SCHED_DFLT_BW, 3666 ICE_SCHED_DFLT_BW); 3667 } 3668 3669 /** 3670 * ice_cfg_agg_bw_shared_lmt_per_tc - config aggregator BW shared limit per tc 3671 * @pi: port information structure 3672 * @agg_id: aggregator ID 3673 * @tc: traffic class 3674 * @min_bw: minimum bandwidth in Kbps 3675 * @max_bw: maximum bandwidth in Kbps 3676 * @shared_bw: shared bandwidth in Kbps 3677 * 3678 * This function configures the shared rate limiter(SRL) of all aggregator type 3679 * nodes across all traffic classes for aggregator matching agg_id. 
3680 */ 3681 enum ice_status 3682 ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, 3683 u32 min_bw, u32 max_bw, u32 shared_bw) 3684 { 3685 return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc, min_bw, 3686 max_bw, shared_bw); 3687 } 3688 3689 /** 3690 * ice_cfg_agg_bw_no_shared_lmt_per_tc - cfg aggregator BW shared limit per tc 3691 * @pi: port information structure 3692 * @agg_id: aggregator ID 3693 * @tc: traffic class 3694 * 3695 * This function configures the shared rate limiter(SRL) of all aggregator type 3696 * nodes across all traffic classes for aggregator matching agg_id. 3697 */ 3698 enum ice_status 3699 ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc) 3700 { 3701 return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc, 3702 ICE_SCHED_DFLT_BW, 3703 ICE_SCHED_DFLT_BW, 3704 ICE_SCHED_DFLT_BW); 3705 } 3706 3707 /** 3708 * ice_cfg_vsi_q_priority - config VSI queue priority of node 3709 * @pi: port information structure 3710 * @num_qs: number of VSI queues 3711 * @q_ids: queue IDs array 3712 * @q_prio: queue priority array 3713 * 3714 * This function configures the queue node priority (Sibling Priority) of the 3715 * passed in VSI's queue(s) for a given traffic class (TC). 
*/
enum ice_status
ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
		       u8 *q_prio)
{
	enum ice_status status = ICE_ERR_PARAM;
	u16 i;

	ice_acquire_lock(&pi->sched_lock);

	for (i = 0; i < num_qs; i++) {
		struct ice_sched_node *node;

		/* q_ids[] entries are looked up as node TEIDs */
		node = ice_sched_find_node_by_teid(pi->root, q_ids[i]);
		if (!node || node->info.data.elem_type !=
		    ICE_AQC_ELEM_TYPE_LEAF) {
			status = ICE_ERR_PARAM;
			break;
		}
		/* Configure Priority */
		status = ice_sched_cfg_sibl_node_prio(pi, node, q_prio[i]);
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_agg_vsi_priority_per_tc - config aggregator's VSI priority per TC
 * @pi: port information structure
 * @agg_id: Aggregator ID
 * @num_vsis: number of VSI(s)
 * @vsi_handle_arr: array of software VSI handles
 * @node_prio: pointer to node priority
 * @tc: traffic class
 *
 * This function configures the node priority (Sibling Priority) of the
 * passed in VSI's for a given traffic class (TC) of an Aggregator ID.
*/
enum ice_status
ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
				u16 num_vsis, u16 *vsi_handle_arr,
				u8 *node_prio, u8 tc)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_node *tc_node, *agg_node;
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	struct ice_hw *hw = pi->hw;
	u16 i;

	ice_acquire_lock(&pi->sched_lock);
	/* Find the SW aggregator entry for agg_id */
	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present)
		goto exit_agg_priority_per_tc;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		goto exit_agg_priority_per_tc;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		goto exit_agg_priority_per_tc;

	/* Cannot have more VSIs than the aggregator layer allows children */
	if (num_vsis > hw->max_children[agg_node->tx_sched_layer])
		goto exit_agg_priority_per_tc;

	for (i = 0; i < num_vsis; i++) {
		struct ice_sched_node *vsi_node;
		bool vsi_handle_valid = false;
		u16 vsi_handle;

		status = ICE_ERR_PARAM;
		vsi_handle = vsi_handle_arr[i];
		if (!ice_is_vsi_valid(hw, vsi_handle))
			goto exit_agg_priority_per_tc;
		/* Verify child nodes before applying settings */
		LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
				    ice_sched_agg_vsi_info, list_entry)
			if (agg_vsi_info->vsi_handle == vsi_handle) {
				vsi_handle_valid = true;
				break;
			}

		if (!vsi_handle_valid)
			goto exit_agg_priority_per_tc;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			goto exit_agg_priority_per_tc;

		if (ice_sched_find_node_in_subtree(hw, agg_node, vsi_node)) {
			/* Configure Priority */
			status = ice_sched_cfg_sibl_node_prio(pi, vsi_node,
							      node_prio[i]);
			if (status)
				break;
			/* Save the priority for post-replay use */
			status = ice_sched_save_vsi_prio(pi, vsi_handle, tc,
							 node_prio[i]);
			if (status)
				break;
		}
	}

exit_agg_priority_per_tc:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_bw_alloc - config VSI BW alloc per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @ena_tcmap: enabled TC map
 * @rl_type: Rate limit type CIR/EIR
 * @bw_alloc: Array of BW alloc
 *
 * This function configures the BW allocation of the passed in VSI's
 * node(s) for enabled traffic class.
 */
enum ice_status
ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
		     enum ice_rl_type rl_type, u8 *bw_alloc)
{
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *vsi_node;

		if (!ice_is_tc_ena(ena_tcmap, tc))
			continue;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		status = ice_sched_cfg_node_bw_alloc(pi->hw, vsi_node, rl_type,
						     bw_alloc[tc]);
		if (status)
			break;
		/* Save the alloc for post-replay use */
		status = ice_sched_save_vsi_bw_alloc(pi, vsi_handle, tc,
						     rl_type, bw_alloc[tc]);
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_agg_bw_alloc - config aggregator BW alloc
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @ena_tcmap: enabled TC map
 * @rl_type: rate limit type CIR/EIR
 * @bw_alloc: array of BW alloc
 *
 * This function configures the BW allocation of passed in aggregator for
 * enabled traffic class(s).
 */
enum ice_status
ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap,
		     enum ice_rl_type rl_type, u8 *bw_alloc)
{
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u8 tc;

	ice_acquire_lock(&pi->sched_lock);
	/* Find the SW aggregator entry for agg_id */
	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present) {
		status = ICE_ERR_PARAM;
		goto exit_cfg_agg_bw_alloc;
	}

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *agg_node;

		if (!ice_is_tc_ena(ena_tcmap, tc))
			continue;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
		if (!agg_node)
			continue;

		status = ice_sched_cfg_node_bw_alloc(hw, agg_node, rl_type,
						     bw_alloc[tc]);
		if (status)
			break;
		/* Save the alloc for post-replay use */
		status = ice_sched_save_agg_bw_alloc(pi, agg_id, tc, rl_type,
						     bw_alloc[tc]);
		if (status)
			break;
	}

exit_cfg_agg_bw_alloc:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
 * @hw: pointer to the HW struct
 * @bw: bandwidth in Kbps
 *
 * This function calculates the wakeup parameter of RL profile.
*/
static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
{
	s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
	s32 wakeup_f_int;
	u16 wakeup = 0;

	/* Get the wakeup integer value */
	bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE);
	wakeup_int = DIV_S64(hw->psm_clk_freq, bytes_per_sec);
	if (wakeup_int > 63) {
		/* Large interval: bit 15 flags integer-only encoding */
		wakeup = (u16)((1 << 15) | wakeup_int);
	} else {
		/* Calculate fraction value up to 4 decimals
		 * Convert Integer value to a constant multiplier
		 */
		wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
		wakeup_a = DIV_S64((s64)ICE_RL_PROF_MULTIPLIER *
				   hw->psm_clk_freq, bytes_per_sec);

		/* Get Fraction value */
		wakeup_f = wakeup_a - wakeup_b;

		/* Round up the Fractional value via Ceil(Fractional value) */
		if (wakeup_f > DIV_S64(ICE_RL_PROF_MULTIPLIER, 2))
			wakeup_f += 1;

		wakeup_f_int = (s32)DIV_S64(wakeup_f * ICE_RL_PROF_FRACTION,
					    ICE_RL_PROF_MULTIPLIER);
		/* Pack: integer part in bits 15:9, 9-bit fraction below */
		wakeup |= (u16)(wakeup_int << 9);
		wakeup |= (u16)(0x1ff & wakeup_f_int);
	}

	return wakeup;
}

/**
 * ice_sched_bw_to_rl_profile - convert BW to profile parameters
 * @hw: pointer to the HW struct
 * @bw: bandwidth in Kbps
 * @profile: profile parameters to return
 *
 * This function converts the BW to profile structure format.
*/
static enum ice_status
ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
			   struct ice_aqc_rl_profile_elem *profile)
{
	enum ice_status status = ICE_ERR_PARAM;
	s64 bytes_per_sec, ts_rate, mv_tmp;
	bool found = false;
	s32 encode = 0;
	s64 mv = 0;
	s32 i;

	/* Bw settings range is from 0.5Mb/sec to 100Gb/sec */
	if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
		return status;

	/* Bytes per second from Kbps */
	bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE);

	/* encode is 6 bits but really useful are 5 bits */
	for (i = 0; i < 64; i++) {
		u64 pow_result = BIT_ULL(i);

		ts_rate = DIV_S64((s64)hw->psm_clk_freq,
				  pow_result * ICE_RL_PROF_TS_MULTIPLIER);
		if (ts_rate <= 0)
			continue;

		/* Multiplier value */
		mv_tmp = DIV_S64(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
				 ts_rate);

		/* Round to the nearest ICE_RL_PROF_MULTIPLIER */
		mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);

		/* First multiplier value greater than the given
		 * accuracy bytes
		 */
		if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
			encode = i;
			found = true;
			break;
		}
	}
	if (found) {
		u16 wm;

		wm = ice_sched_calc_wakeup(hw, bw);
		profile->rl_multiply = CPU_TO_LE16(mv);
		profile->wake_up_calc = CPU_TO_LE16(wm);
		profile->rl_encode = CPU_TO_LE16(encode);
		status = ICE_SUCCESS;
	} else {
		status = ICE_ERR_DOES_NOT_EXIST;
	}

	return status;
}

/**
 * ice_sched_add_rl_profile - add RL profile
 * @hw: pointer to the hardware structure
 * @rl_type: type of rate limit BW - min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 * @layer_num: specifies in which layer to create profile
 *
 * This function first checks the existing list for corresponding BW
 * parameter. If it exists, it returns the associated profile otherwise
 * it creates a new rate limit profile for requested BW, and adds it to
 * the HW DB and local list. It returns the new profile or null on error.
 * The caller needs to hold the scheduler lock.
 */
static struct ice_aqc_rl_profile_info *
ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
			 u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	u16 profiles_added = 0, num_profiles = 1;
	struct ice_aqc_rl_profile_elem *buf;
	enum ice_status status;
	u8 profile_type;

	if (!hw || layer_num >= hw->num_tx_sched_layers)
		return NULL;
	switch (rl_type) {
	case ICE_MIN_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
		break;
	case ICE_MAX_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
		break;
	case ICE_SHARED_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
		break;
	default:
		return NULL;
	}

	/* Reuse an existing profile of the same type and BW, if any */
	LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
			    ice_aqc_rl_profile_info, list_entry)
		if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
		    profile_type && rl_prof_elem->bw == bw)
			/* Return existing profile ID info */
			return rl_prof_elem;

	/* Create new profile ID */
	rl_prof_elem = (struct ice_aqc_rl_profile_info *)
		ice_malloc(hw, sizeof(*rl_prof_elem));

	if (!rl_prof_elem)
		return NULL;

	status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
	if (status != ICE_SUCCESS)
		goto exit_add_rl_prof;

	rl_prof_elem->bw = bw;
	/* layer_num is zero relative, and fw expects level from 1 to 9 */
	rl_prof_elem->profile.level = layer_num + 1;
	rl_prof_elem->profile.flags = profile_type;
	rl_prof_elem->profile.max_burst_size = CPU_TO_LE16(hw->max_burst_size);

	/* Create new entry in HW DB */
	buf = &rl_prof_elem->profile;
	status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
				       &profiles_added, NULL);
	if (status || profiles_added != num_profiles)
		goto exit_add_rl_prof;

	/* Good entry - add in the list */
	rl_prof_elem->prof_id_ref = 0;
	LIST_ADD(&rl_prof_elem->list_entry, &hw->rl_prof_list[layer_num]);
	return rl_prof_elem;

exit_add_rl_prof:
	/* Free the element on any failure so it cannot leak */
	ice_free(hw, rl_prof_elem);
	return NULL;
}

/**
 * ice_sched_cfg_node_bw_lmt - configure node sched params
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @rl_type: rate limit type CIR, EIR, or shared
 * @rl_prof_id: rate limit profile ID
 *
 * This function configures node element's BW limit.
 */
static enum ice_status
ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
			  enum ice_rl_type rl_type, u16 rl_prof_id)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;

	buf = node->info;
	data = &buf.data;
	switch (rl_type) {
	case ICE_MIN_BW:
		data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
		data->cir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
		break;
	case ICE_MAX_BW:
		data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
		data->eir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
		break;
	case ICE_SHARED_BW:
		data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
		data->srl_id = CPU_TO_LE16(rl_prof_id);
		break;
	default:
		/* Unknown rate limit type */
		return ICE_ERR_PARAM;
	}

	/* Configure element */
	return ice_sched_update_elem(hw, node, &buf);
}

/**
 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
 * @node: sched node
 * @rl_type: rate limit type
 *
 * If existing profile matches, it returns the corresponding rate
 * limit profile ID, otherwise it returns an invalid ID as error.
4183 */ 4184 static u16 4185 ice_sched_get_node_rl_prof_id(struct ice_sched_node *node, 4186 enum ice_rl_type rl_type) 4187 { 4188 u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID; 4189 struct ice_aqc_txsched_elem *data; 4190 4191 data = &node->info.data; 4192 switch (rl_type) { 4193 case ICE_MIN_BW: 4194 if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR) 4195 rl_prof_id = LE16_TO_CPU(data->cir_bw.bw_profile_idx); 4196 break; 4197 case ICE_MAX_BW: 4198 if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR) 4199 rl_prof_id = LE16_TO_CPU(data->eir_bw.bw_profile_idx); 4200 break; 4201 case ICE_SHARED_BW: 4202 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) 4203 rl_prof_id = LE16_TO_CPU(data->srl_id); 4204 break; 4205 default: 4206 break; 4207 } 4208 4209 return rl_prof_id; 4210 } 4211 4212 /** 4213 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer 4214 * @pi: port information structure 4215 * @rl_type: type of rate limit BW - min, max, or shared 4216 * @layer_index: layer index 4217 * 4218 * This function returns requested profile creation layer. 4219 */ 4220 static u8 4221 ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type, 4222 u8 layer_index) 4223 { 4224 struct ice_hw *hw = pi->hw; 4225 4226 if (layer_index >= hw->num_tx_sched_layers) 4227 return ICE_SCHED_INVAL_LAYER_NUM; 4228 switch (rl_type) { 4229 case ICE_MIN_BW: 4230 if (hw->layer_info[layer_index].max_cir_rl_profiles) 4231 return layer_index; 4232 break; 4233 case ICE_MAX_BW: 4234 if (hw->layer_info[layer_index].max_eir_rl_profiles) 4235 return layer_index; 4236 break; 4237 case ICE_SHARED_BW: 4238 /* if current layer doesn't support SRL profile creation 4239 * then try a layer up or down. 
4240 */ 4241 if (hw->layer_info[layer_index].max_srl_profiles) 4242 return layer_index; 4243 else if (layer_index < hw->num_tx_sched_layers - 1 && 4244 hw->layer_info[layer_index + 1].max_srl_profiles) 4245 return layer_index + 1; 4246 else if (layer_index > 0 && 4247 hw->layer_info[layer_index - 1].max_srl_profiles) 4248 return layer_index - 1; 4249 break; 4250 default: 4251 break; 4252 } 4253 return ICE_SCHED_INVAL_LAYER_NUM; 4254 } 4255 4256 /** 4257 * ice_sched_get_srl_node - get shared rate limit node 4258 * @node: tree node 4259 * @srl_layer: shared rate limit layer 4260 * 4261 * This function returns SRL node to be used for shared rate limit purpose. 4262 * The caller needs to hold scheduler lock. 4263 */ 4264 static struct ice_sched_node * 4265 ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer) 4266 { 4267 if (srl_layer > node->tx_sched_layer) 4268 return node->children[0]; 4269 else if (srl_layer < node->tx_sched_layer) 4270 /* Node can't be created without a parent. It will always 4271 * have a valid parent except root node. 4272 */ 4273 return node->parent; 4274 else 4275 return node; 4276 } 4277 4278 /** 4279 * ice_sched_rm_rl_profile - remove RL profile ID 4280 * @hw: pointer to the hardware structure 4281 * @layer_num: layer number where profiles are saved 4282 * @profile_type: profile type like EIR, CIR, or SRL 4283 * @profile_id: profile ID to remove 4284 * 4285 * This function removes rate limit profile from layer 'layer_num' of type 4286 * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold 4287 * scheduler lock. 
4288 */ 4289 static enum ice_status 4290 ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type, 4291 u16 profile_id) 4292 { 4293 struct ice_aqc_rl_profile_info *rl_prof_elem; 4294 enum ice_status status = ICE_SUCCESS; 4295 4296 if (!hw || layer_num >= hw->num_tx_sched_layers) 4297 return ICE_ERR_PARAM; 4298 /* Check the existing list for RL profile */ 4299 LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num], 4300 ice_aqc_rl_profile_info, list_entry) 4301 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == 4302 profile_type && 4303 LE16_TO_CPU(rl_prof_elem->profile.profile_id) == 4304 profile_id) { 4305 if (rl_prof_elem->prof_id_ref) 4306 rl_prof_elem->prof_id_ref--; 4307 4308 /* Remove old profile ID from database */ 4309 status = ice_sched_del_rl_profile(hw, rl_prof_elem); 4310 if (status && status != ICE_ERR_IN_USE) 4311 ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); 4312 break; 4313 } 4314 if (status == ICE_ERR_IN_USE) 4315 status = ICE_SUCCESS; 4316 return status; 4317 } 4318 4319 /** 4320 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default 4321 * @pi: port information structure 4322 * @node: pointer to node structure 4323 * @rl_type: rate limit type min, max, or shared 4324 * @layer_num: layer number where RL profiles are saved 4325 * 4326 * This function configures node element's BW rate limit profile ID of 4327 * type CIR, EIR, or SRL to default. This function needs to be called 4328 * with the scheduler lock held. 
4329 */ 4330 static enum ice_status 4331 ice_sched_set_node_bw_dflt(struct ice_port_info *pi, 4332 struct ice_sched_node *node, 4333 enum ice_rl_type rl_type, u8 layer_num) 4334 { 4335 enum ice_status status; 4336 struct ice_hw *hw; 4337 u8 profile_type; 4338 u16 rl_prof_id; 4339 u16 old_id; 4340 4341 hw = pi->hw; 4342 switch (rl_type) { 4343 case ICE_MIN_BW: 4344 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; 4345 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; 4346 break; 4347 case ICE_MAX_BW: 4348 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; 4349 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; 4350 break; 4351 case ICE_SHARED_BW: 4352 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; 4353 /* No SRL is configured for default case */ 4354 rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID; 4355 break; 4356 default: 4357 return ICE_ERR_PARAM; 4358 } 4359 /* Save existing RL prof ID for later clean up */ 4360 old_id = ice_sched_get_node_rl_prof_id(node, rl_type); 4361 /* Configure BW scheduling parameters */ 4362 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); 4363 if (status) 4364 return status; 4365 4366 /* Remove stale RL profile ID */ 4367 if (old_id == ICE_SCHED_DFLT_RL_PROF_ID || 4368 old_id == ICE_SCHED_INVAL_PROF_ID) 4369 return ICE_SUCCESS; 4370 4371 return ice_sched_rm_rl_profile(hw, layer_num, profile_type, old_id); 4372 } 4373 4374 /** 4375 * ice_sched_set_node_bw - set node's bandwidth 4376 * @pi: port information structure 4377 * @node: tree node 4378 * @rl_type: rate limit type min, max, or shared 4379 * @bw: bandwidth in Kbps - Kilo bits per sec 4380 * @layer_num: layer number 4381 * 4382 * This function adds new profile corresponding to requested BW, configures 4383 * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile 4384 * ID from local database. The caller needs to hold scheduler lock. 
4385 */ 4386 static enum ice_status 4387 ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, 4388 enum ice_rl_type rl_type, u32 bw, u8 layer_num) 4389 { 4390 struct ice_aqc_rl_profile_info *rl_prof_info; 4391 enum ice_status status = ICE_ERR_PARAM; 4392 struct ice_hw *hw = pi->hw; 4393 u16 old_id, rl_prof_id; 4394 4395 rl_prof_info = ice_sched_add_rl_profile(hw, rl_type, bw, layer_num); 4396 if (!rl_prof_info) 4397 return status; 4398 4399 rl_prof_id = LE16_TO_CPU(rl_prof_info->profile.profile_id); 4400 4401 /* Save existing RL prof ID for later clean up */ 4402 old_id = ice_sched_get_node_rl_prof_id(node, rl_type); 4403 /* Configure BW scheduling parameters */ 4404 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); 4405 if (status) 4406 return status; 4407 4408 /* New changes has been applied */ 4409 /* Increment the profile ID reference count */ 4410 rl_prof_info->prof_id_ref++; 4411 4412 /* Check for old ID removal */ 4413 if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) || 4414 old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id) 4415 return ICE_SUCCESS; 4416 4417 return ice_sched_rm_rl_profile(hw, layer_num, 4418 rl_prof_info->profile.flags & 4419 ICE_AQC_RL_PROFILE_TYPE_M, old_id); 4420 } 4421 4422 /** 4423 * ice_sched_set_node_bw_lmt - set node's BW limit 4424 * @pi: port information structure 4425 * @node: tree node 4426 * @rl_type: rate limit type min, max, or shared 4427 * @bw: bandwidth in Kbps - Kilo bits per sec 4428 * 4429 * It updates node's BW limit parameters like BW RL profile ID of type CIR, 4430 * EIR, or SRL. The caller needs to hold scheduler lock. 4431 * 4432 * NOTE: Caller provides the correct SRL node in case of shared profile 4433 * settings. 
4434 */ 4435 static enum ice_status 4436 ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, 4437 enum ice_rl_type rl_type, u32 bw) 4438 { 4439 struct ice_hw *hw; 4440 u8 layer_num; 4441 4442 if (!pi) 4443 return ICE_ERR_PARAM; 4444 hw = pi->hw; 4445 /* Remove unused RL profile IDs from HW and SW DB */ 4446 ice_sched_rm_unused_rl_prof(hw); 4447 4448 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, 4449 node->tx_sched_layer); 4450 if (layer_num >= hw->num_tx_sched_layers) 4451 return ICE_ERR_PARAM; 4452 4453 if (bw == ICE_SCHED_DFLT_BW) 4454 return ice_sched_set_node_bw_dflt(pi, node, rl_type, layer_num); 4455 return ice_sched_set_node_bw(pi, node, rl_type, bw, layer_num); 4456 } 4457 4458 /** 4459 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default 4460 * @pi: port information structure 4461 * @node: pointer to node structure 4462 * @rl_type: rate limit type min, max, or shared 4463 * 4464 * This function configures node element's BW rate limit profile ID of 4465 * type CIR, EIR, or SRL to default. This function needs to be called 4466 * with the scheduler lock held. 4467 */ 4468 static enum ice_status 4469 ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, 4470 struct ice_sched_node *node, 4471 enum ice_rl_type rl_type) 4472 { 4473 return ice_sched_set_node_bw_lmt(pi, node, rl_type, 4474 ICE_SCHED_DFLT_BW); 4475 } 4476 4477 /** 4478 * ice_sched_validate_srl_node - Check node for SRL applicability 4479 * @node: sched node to configure 4480 * @sel_layer: selected SRL layer 4481 * 4482 * This function checks if the SRL can be applied to a selceted layer node on 4483 * behalf of the requested node (first argument). This function needs to be 4484 * called with scheduler lock held. 4485 */ 4486 static enum ice_status 4487 ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) 4488 { 4489 /* SRL profiles are not available on all layers. 
Check if the 4490 * SRL profile can be applied to a node above or below the 4491 * requested node. SRL configuration is possible only if the 4492 * selected layer's node has single child. 4493 */ 4494 if (sel_layer == node->tx_sched_layer || 4495 ((sel_layer == node->tx_sched_layer + 1) && 4496 node->num_children == 1) || 4497 ((sel_layer == node->tx_sched_layer - 1) && 4498 (node->parent && node->parent->num_children == 1))) 4499 return ICE_SUCCESS; 4500 4501 return ICE_ERR_CFG; 4502 } 4503 4504 /** 4505 * ice_sched_save_q_bw - save queue node's BW information 4506 * @q_ctx: queue context structure 4507 * @rl_type: rate limit type min, max, or shared 4508 * @bw: bandwidth in Kbps - Kilo bits per sec 4509 * 4510 * Save BW information of queue type node for post replay use. 4511 */ 4512 static enum ice_status 4513 ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) 4514 { 4515 switch (rl_type) { 4516 case ICE_MIN_BW: 4517 ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw); 4518 break; 4519 case ICE_MAX_BW: 4520 ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw); 4521 break; 4522 case ICE_SHARED_BW: 4523 ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw); 4524 break; 4525 default: 4526 return ICE_ERR_PARAM; 4527 } 4528 return ICE_SUCCESS; 4529 } 4530 4531 /** 4532 * ice_sched_set_q_bw_lmt - sets queue BW limit 4533 * @pi: port information structure 4534 * @vsi_handle: sw VSI handle 4535 * @tc: traffic class 4536 * @q_handle: software queue handle 4537 * @rl_type: min, max, or shared 4538 * @bw: bandwidth in Kbps 4539 * 4540 * This function sets BW limit of queue scheduling node. 
 */
static enum ice_status
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		       u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_node *node;
	struct ice_q_ctx *q_ctx;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	ice_acquire_lock(&pi->sched_lock);
	q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
	if (!q_ctx)
		goto exit_q_bw_lmt;
	/* Locate the queue's scheduler node by the TEID saved in its
	 * context.
	 */
	node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
	if (!node) {
		ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
		goto exit_q_bw_lmt;
	}

	/* Return error if it is not a leaf node */
	if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
		goto exit_q_bw_lmt;

	/* SRL bandwidth layer selection */
	if (rl_type == ICE_SHARED_BW) {
		u8 sel_layer; /* selected layer */

		sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
							node->tx_sched_layer);
		if (sel_layer >= pi->hw->num_tx_sched_layers) {
			status = ICE_ERR_PARAM;
			goto exit_q_bw_lmt;
		}
		/* SRL may only be applied when the chosen layer's node has a
		 * single child relative to the queue node.
		 */
		status = ice_sched_validate_srl_node(node, sel_layer);
		if (status)
			goto exit_q_bw_lmt;
	}

	if (bw == ICE_SCHED_DFLT_BW)
		status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
	else
		status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);

	/* Save the setting so it can be replayed (restored) after reset */
	if (!status)
		status = ice_sched_save_q_bw(q_ctx, rl_type, bw);

exit_q_bw_lmt:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_q_bw_lmt - configure queue BW limit
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @q_handle: software queue handle
 * @rl_type: min, max, or shared
 * @bw: bandwidth in Kbps
 *
 * This function configures BW limit of queue scheduling node.
4604 */ 4605 enum ice_status 4606 ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4607 u16 q_handle, enum ice_rl_type rl_type, u32 bw) 4608 { 4609 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, 4610 bw); 4611 } 4612 4613 /** 4614 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit 4615 * @pi: port information structure 4616 * @vsi_handle: sw VSI handle 4617 * @tc: traffic class 4618 * @q_handle: software queue handle 4619 * @rl_type: min, max, or shared 4620 * 4621 * This function configures BW default limit of queue scheduling node. 4622 */ 4623 enum ice_status 4624 ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4625 u16 q_handle, enum ice_rl_type rl_type) 4626 { 4627 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, 4628 ICE_SCHED_DFLT_BW); 4629 } 4630 4631 /** 4632 * ice_sched_save_tc_node_bw - save TC node BW limit 4633 * @pi: port information structure 4634 * @tc: TC number 4635 * @rl_type: min or max 4636 * @bw: bandwidth in Kbps 4637 * 4638 * This function saves the modified values of bandwidth settings for later 4639 * replay purpose (restore) after reset. 
4640 */ 4641 static enum ice_status 4642 ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc, 4643 enum ice_rl_type rl_type, u32 bw) 4644 { 4645 if (tc >= ICE_MAX_TRAFFIC_CLASS) 4646 return ICE_ERR_PARAM; 4647 switch (rl_type) { 4648 case ICE_MIN_BW: 4649 ice_set_clear_cir_bw(&pi->tc_node_bw_t_info[tc], bw); 4650 break; 4651 case ICE_MAX_BW: 4652 ice_set_clear_eir_bw(&pi->tc_node_bw_t_info[tc], bw); 4653 break; 4654 case ICE_SHARED_BW: 4655 ice_set_clear_shared_bw(&pi->tc_node_bw_t_info[tc], bw); 4656 break; 4657 default: 4658 return ICE_ERR_PARAM; 4659 } 4660 return ICE_SUCCESS; 4661 } 4662 4663 /** 4664 * ice_sched_set_tc_node_bw_lmt - sets TC node BW limit 4665 * @pi: port information structure 4666 * @tc: TC number 4667 * @rl_type: min or max 4668 * @bw: bandwidth in Kbps 4669 * 4670 * This function configures bandwidth limit of TC node. 4671 */ 4672 static enum ice_status 4673 ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, 4674 enum ice_rl_type rl_type, u32 bw) 4675 { 4676 enum ice_status status = ICE_ERR_PARAM; 4677 struct ice_sched_node *tc_node; 4678 4679 if (tc >= ICE_MAX_TRAFFIC_CLASS) 4680 return status; 4681 ice_acquire_lock(&pi->sched_lock); 4682 tc_node = ice_sched_get_tc_node(pi, tc); 4683 if (!tc_node) 4684 goto exit_set_tc_node_bw; 4685 if (bw == ICE_SCHED_DFLT_BW) 4686 status = ice_sched_set_node_bw_dflt_lmt(pi, tc_node, rl_type); 4687 else 4688 status = ice_sched_set_node_bw_lmt(pi, tc_node, rl_type, bw); 4689 if (!status) 4690 status = ice_sched_save_tc_node_bw(pi, tc, rl_type, bw); 4691 4692 exit_set_tc_node_bw: 4693 ice_release_lock(&pi->sched_lock); 4694 return status; 4695 } 4696 4697 /** 4698 * ice_cfg_tc_node_bw_lmt - configure TC node BW limit 4699 * @pi: port information structure 4700 * @tc: TC number 4701 * @rl_type: min or max 4702 * @bw: bandwidth in Kbps 4703 * 4704 * This function configures BW limit of TC node. 4705 * Note: The minimum guaranteed reservation is done via DCBX. 
4706 */ 4707 enum ice_status 4708 ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, 4709 enum ice_rl_type rl_type, u32 bw) 4710 { 4711 return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, bw); 4712 } 4713 4714 /** 4715 * ice_cfg_tc_node_bw_dflt_lmt - configure TC node BW default limit 4716 * @pi: port information structure 4717 * @tc: TC number 4718 * @rl_type: min or max 4719 * 4720 * This function configures BW default limit of TC node. 4721 */ 4722 enum ice_status 4723 ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc, 4724 enum ice_rl_type rl_type) 4725 { 4726 return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, ICE_SCHED_DFLT_BW); 4727 } 4728 4729 /** 4730 * ice_sched_save_tc_node_bw_alloc - save TC node's BW alloc information 4731 * @pi: port information structure 4732 * @tc: traffic class 4733 * @rl_type: rate limit type min or max 4734 * @bw_alloc: Bandwidth allocation information 4735 * 4736 * Save BW alloc information of VSI type node for post replay use. 4737 */ 4738 static enum ice_status 4739 ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, 4740 enum ice_rl_type rl_type, u16 bw_alloc) 4741 { 4742 if (tc >= ICE_MAX_TRAFFIC_CLASS) 4743 return ICE_ERR_PARAM; 4744 switch (rl_type) { 4745 case ICE_MIN_BW: 4746 ice_set_clear_cir_bw_alloc(&pi->tc_node_bw_t_info[tc], 4747 bw_alloc); 4748 break; 4749 case ICE_MAX_BW: 4750 ice_set_clear_eir_bw_alloc(&pi->tc_node_bw_t_info[tc], 4751 bw_alloc); 4752 break; 4753 default: 4754 return ICE_ERR_PARAM; 4755 } 4756 return ICE_SUCCESS; 4757 } 4758 4759 /** 4760 * ice_sched_set_tc_node_bw_alloc - set TC node BW alloc 4761 * @pi: port information structure 4762 * @tc: TC number 4763 * @rl_type: min or max 4764 * @bw_alloc: bandwidth alloc 4765 * 4766 * This function configures bandwidth alloc of TC node, also saves the 4767 * changed settings for replay purpose, and return success if it succeeds 4768 * in modifying bandwidth alloc setting. 
4769 */ 4770 static enum ice_status 4771 ice_sched_set_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, 4772 enum ice_rl_type rl_type, u8 bw_alloc) 4773 { 4774 enum ice_status status = ICE_ERR_PARAM; 4775 struct ice_sched_node *tc_node; 4776 4777 if (tc >= ICE_MAX_TRAFFIC_CLASS) 4778 return status; 4779 ice_acquire_lock(&pi->sched_lock); 4780 tc_node = ice_sched_get_tc_node(pi, tc); 4781 if (!tc_node) 4782 goto exit_set_tc_node_bw_alloc; 4783 status = ice_sched_cfg_node_bw_alloc(pi->hw, tc_node, rl_type, 4784 bw_alloc); 4785 if (status) 4786 goto exit_set_tc_node_bw_alloc; 4787 status = ice_sched_save_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc); 4788 4789 exit_set_tc_node_bw_alloc: 4790 ice_release_lock(&pi->sched_lock); 4791 return status; 4792 } 4793 4794 /** 4795 * ice_cfg_tc_node_bw_alloc - configure TC node BW alloc 4796 * @pi: port information structure 4797 * @tc: TC number 4798 * @rl_type: min or max 4799 * @bw_alloc: bandwidth alloc 4800 * 4801 * This function configures BW limit of TC node. 4802 * Note: The minimum guaranteed reservation is done via DCBX. 4803 */ 4804 enum ice_status 4805 ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, 4806 enum ice_rl_type rl_type, u8 bw_alloc) 4807 { 4808 return ice_sched_set_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc); 4809 } 4810 4811 /** 4812 * ice_sched_set_agg_bw_dflt_lmt - set aggregator node's BW limit to default 4813 * @pi: port information structure 4814 * @vsi_handle: software VSI handle 4815 * 4816 * This function retrieves the aggregator ID based on VSI ID and TC, 4817 * and sets node's BW limit to default. This function needs to be 4818 * called with the scheduler lock held. 
4819 */ 4820 enum ice_status 4821 ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle) 4822 { 4823 struct ice_vsi_ctx *vsi_ctx; 4824 enum ice_status status = ICE_SUCCESS; 4825 u8 tc; 4826 4827 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4828 return ICE_ERR_PARAM; 4829 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 4830 if (!vsi_ctx) 4831 return ICE_ERR_PARAM; 4832 4833 ice_for_each_traffic_class(tc) { 4834 struct ice_sched_node *node; 4835 4836 node = vsi_ctx->sched.ag_node[tc]; 4837 if (!node) 4838 continue; 4839 4840 /* Set min profile to default */ 4841 status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MIN_BW); 4842 if (status) 4843 break; 4844 4845 /* Set max profile to default */ 4846 status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MAX_BW); 4847 if (status) 4848 break; 4849 4850 /* Remove shared profile, if there is one */ 4851 status = ice_sched_set_node_bw_dflt_lmt(pi, node, 4852 ICE_SHARED_BW); 4853 if (status) 4854 break; 4855 } 4856 4857 return status; 4858 } 4859 4860 /** 4861 * ice_sched_get_node_by_id_type - get node from ID type 4862 * @pi: port information structure 4863 * @id: identifier 4864 * @agg_type: type of aggregator 4865 * @tc: traffic class 4866 * 4867 * This function returns node identified by ID of type aggregator, and 4868 * based on traffic class (TC). This function needs to be called with 4869 * the scheduler lock held. 
4870 */ 4871 static struct ice_sched_node * 4872 ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, 4873 enum ice_agg_type agg_type, u8 tc) 4874 { 4875 struct ice_sched_node *node = NULL; 4876 4877 switch (agg_type) { 4878 case ICE_AGG_TYPE_VSI: { 4879 struct ice_vsi_ctx *vsi_ctx; 4880 u16 vsi_handle = (u16)id; 4881 4882 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4883 break; 4884 /* Get sched_vsi_info */ 4885 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 4886 if (!vsi_ctx) 4887 break; 4888 node = vsi_ctx->sched.vsi_node[tc]; 4889 break; 4890 } 4891 4892 case ICE_AGG_TYPE_AGG: { 4893 struct ice_sched_node *tc_node; 4894 4895 tc_node = ice_sched_get_tc_node(pi, tc); 4896 if (tc_node) 4897 node = ice_sched_get_agg_node(pi, tc_node, id); 4898 break; 4899 } 4900 4901 case ICE_AGG_TYPE_Q: 4902 /* The current implementation allows single queue to modify */ 4903 node = ice_sched_find_node_by_teid(pi->root, id); 4904 break; 4905 4906 case ICE_AGG_TYPE_QG: { 4907 struct ice_sched_node *child_node; 4908 4909 /* The current implementation allows single qg to modify */ 4910 child_node = ice_sched_find_node_by_teid(pi->root, id); 4911 if (!child_node) 4912 break; 4913 node = child_node->parent; 4914 break; 4915 } 4916 4917 default: 4918 break; 4919 } 4920 4921 return node; 4922 } 4923 4924 /** 4925 * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC 4926 * @pi: port information structure 4927 * @id: ID (software VSI handle or AGG ID) 4928 * @agg_type: aggregator type (VSI or AGG type node) 4929 * @tc: traffic class 4930 * @rl_type: min or max 4931 * @bw: bandwidth in Kbps 4932 * 4933 * This function sets BW limit of VSI or Aggregator scheduling node 4934 * based on TC information from passed in argument BW. 
4935 */ 4936 enum ice_status 4937 ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, 4938 enum ice_agg_type agg_type, u8 tc, 4939 enum ice_rl_type rl_type, u32 bw) 4940 { 4941 enum ice_status status = ICE_ERR_PARAM; 4942 struct ice_sched_node *node; 4943 4944 if (!pi) 4945 return status; 4946 4947 if (rl_type == ICE_UNKNOWN_BW) 4948 return status; 4949 4950 ice_acquire_lock(&pi->sched_lock); 4951 node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc); 4952 if (!node) { 4953 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n"); 4954 goto exit_set_node_bw_lmt_per_tc; 4955 } 4956 if (bw == ICE_SCHED_DFLT_BW) 4957 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type); 4958 else 4959 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw); 4960 4961 exit_set_node_bw_lmt_per_tc: 4962 ice_release_lock(&pi->sched_lock); 4963 return status; 4964 } 4965 4966 /** 4967 * ice_sched_validate_vsi_srl_node - validate VSI SRL node 4968 * @pi: port information structure 4969 * @vsi_handle: software VSI handle 4970 * 4971 * This function validates SRL node of the VSI node if available SRL layer is 4972 * different than the VSI node layer on all TC(s).This function needs to be 4973 * called with scheduler lock held. 
4974 */ 4975 static enum ice_status 4976 ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle) 4977 { 4978 u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM; 4979 u8 tc; 4980 4981 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4982 return ICE_ERR_PARAM; 4983 4984 /* Return success if no nodes are present across TC */ 4985 ice_for_each_traffic_class(tc) { 4986 struct ice_sched_node *tc_node, *vsi_node; 4987 enum ice_rl_type rl_type = ICE_SHARED_BW; 4988 enum ice_status status; 4989 4990 tc_node = ice_sched_get_tc_node(pi, tc); 4991 if (!tc_node) 4992 continue; 4993 4994 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); 4995 if (!vsi_node) 4996 continue; 4997 4998 /* SRL bandwidth layer selection */ 4999 if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) { 5000 u8 node_layer = vsi_node->tx_sched_layer; 5001 u8 layer_num; 5002 5003 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, 5004 node_layer); 5005 if (layer_num >= pi->hw->num_tx_sched_layers) 5006 return ICE_ERR_PARAM; 5007 sel_layer = layer_num; 5008 } 5009 5010 status = ice_sched_validate_srl_node(vsi_node, sel_layer); 5011 if (status) 5012 return status; 5013 } 5014 return ICE_SUCCESS; 5015 } 5016 5017 /** 5018 * ice_sched_set_save_vsi_srl_node_bw - set VSI shared limit values 5019 * @pi: port information structure 5020 * @vsi_handle: software VSI handle 5021 * @tc: traffic class 5022 * @srl_node: sched node to configure 5023 * @rl_type: rate limit type minimum, maximum, or shared 5024 * @bw: minimum, maximum, or shared bandwidth in Kbps 5025 * 5026 * Configure shared rate limiter(SRL) of VSI type nodes across given traffic 5027 * class, and saves those value for later use for replaying purposes. The 5028 * caller holds the scheduler lock. 
5029 */ 5030 static enum ice_status 5031 ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle, 5032 u8 tc, struct ice_sched_node *srl_node, 5033 enum ice_rl_type rl_type, u32 bw) 5034 { 5035 enum ice_status status; 5036 5037 if (bw == ICE_SCHED_DFLT_BW) { 5038 status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type); 5039 } else { 5040 status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw); 5041 if (status) 5042 return status; 5043 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw); 5044 } 5045 return status; 5046 } 5047 5048 /** 5049 * ice_sched_set_vsi_node_srl_per_tc - set VSI node BW shared limit for tc 5050 * @pi: port information structure 5051 * @vsi_handle: software VSI handle 5052 * @tc: traffic class 5053 * @min_bw: minimum bandwidth in Kbps 5054 * @max_bw: maximum bandwidth in Kbps 5055 * @shared_bw: shared bandwidth in Kbps 5056 * 5057 * Configure shared rate limiter(SRL) of VSI type nodes across requested 5058 * traffic class for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW 5059 * is passed, it removes the corresponding bw from the node. The caller 5060 * holds scheduler lock. 
5061 */ 5062 static enum ice_status 5063 ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle, 5064 u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) 5065 { 5066 struct ice_sched_node *tc_node, *vsi_node, *cfg_node; 5067 enum ice_status status; 5068 u8 layer_num; 5069 5070 tc_node = ice_sched_get_tc_node(pi, tc); 5071 if (!tc_node) 5072 return ICE_ERR_CFG; 5073 5074 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); 5075 if (!vsi_node) 5076 return ICE_ERR_CFG; 5077 5078 layer_num = ice_sched_get_rl_prof_layer(pi, ICE_SHARED_BW, 5079 vsi_node->tx_sched_layer); 5080 if (layer_num >= pi->hw->num_tx_sched_layers) 5081 return ICE_ERR_PARAM; 5082 5083 /* SRL node may be different */ 5084 cfg_node = ice_sched_get_srl_node(vsi_node, layer_num); 5085 if (!cfg_node) 5086 return ICE_ERR_CFG; 5087 5088 status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc, 5089 cfg_node, ICE_MIN_BW, 5090 min_bw); 5091 if (status) 5092 return status; 5093 5094 status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc, 5095 cfg_node, ICE_MAX_BW, 5096 max_bw); 5097 if (status) 5098 return status; 5099 5100 return ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc, cfg_node, 5101 ICE_SHARED_BW, shared_bw); 5102 } 5103 5104 /** 5105 * ice_sched_set_vsi_bw_shared_lmt - set VSI BW shared limit 5106 * @pi: port information structure 5107 * @vsi_handle: software VSI handle 5108 * @min_bw: minimum bandwidth in Kbps 5109 * @max_bw: maximum bandwidth in Kbps 5110 * @shared_bw: shared bandwidth in Kbps 5111 * 5112 * Configure shared rate limiter(SRL) of all VSI type nodes across all traffic 5113 * classes for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW is 5114 * passed, it removes those value(s) from the node. 
5115 */ 5116 enum ice_status 5117 ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, 5118 u32 min_bw, u32 max_bw, u32 shared_bw) 5119 { 5120 enum ice_status status = ICE_SUCCESS; 5121 u8 tc; 5122 5123 if (!pi) 5124 return ICE_ERR_PARAM; 5125 5126 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 5127 return ICE_ERR_PARAM; 5128 5129 ice_acquire_lock(&pi->sched_lock); 5130 status = ice_sched_validate_vsi_srl_node(pi, vsi_handle); 5131 if (status) 5132 goto exit_set_vsi_bw_shared_lmt; 5133 /* Return success if no nodes are present across TC */ 5134 ice_for_each_traffic_class(tc) { 5135 struct ice_sched_node *tc_node, *vsi_node; 5136 5137 tc_node = ice_sched_get_tc_node(pi, tc); 5138 if (!tc_node) 5139 continue; 5140 5141 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); 5142 if (!vsi_node) 5143 continue; 5144 5145 status = ice_sched_set_vsi_node_srl_per_tc(pi, vsi_handle, tc, 5146 min_bw, max_bw, 5147 shared_bw); 5148 if (status) 5149 break; 5150 } 5151 5152 exit_set_vsi_bw_shared_lmt: 5153 ice_release_lock(&pi->sched_lock); 5154 return status; 5155 } 5156 5157 /** 5158 * ice_sched_validate_agg_srl_node - validate AGG SRL node 5159 * @pi: port information structure 5160 * @agg_id: aggregator ID 5161 * 5162 * This function validates SRL node of the AGG node if available SRL layer is 5163 * different than the AGG node layer on all TC(s).This function needs to be 5164 * called with scheduler lock held. 
5165 */ 5166 static enum ice_status 5167 ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id) 5168 { 5169 u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM; 5170 struct ice_sched_agg_info *agg_info; 5171 bool agg_id_present = false; 5172 enum ice_status status = ICE_SUCCESS; 5173 u8 tc; 5174 5175 LIST_FOR_EACH_ENTRY(agg_info, &pi->hw->agg_list, ice_sched_agg_info, 5176 list_entry) 5177 if (agg_info->agg_id == agg_id) { 5178 agg_id_present = true; 5179 break; 5180 } 5181 if (!agg_id_present) 5182 return ICE_ERR_PARAM; 5183 /* Return success if no nodes are present across TC */ 5184 ice_for_each_traffic_class(tc) { 5185 struct ice_sched_node *tc_node, *agg_node; 5186 enum ice_rl_type rl_type = ICE_SHARED_BW; 5187 5188 tc_node = ice_sched_get_tc_node(pi, tc); 5189 if (!tc_node) 5190 continue; 5191 5192 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); 5193 if (!agg_node) 5194 continue; 5195 /* SRL bandwidth layer selection */ 5196 if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) { 5197 u8 node_layer = agg_node->tx_sched_layer; 5198 u8 layer_num; 5199 5200 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, 5201 node_layer); 5202 if (layer_num >= pi->hw->num_tx_sched_layers) 5203 return ICE_ERR_PARAM; 5204 sel_layer = layer_num; 5205 } 5206 5207 status = ice_sched_validate_srl_node(agg_node, sel_layer); 5208 if (status) 5209 break; 5210 } 5211 return status; 5212 } 5213 5214 /** 5215 * ice_sched_validate_agg_id - Validate aggregator id 5216 * @pi: port information structure 5217 * @agg_id: aggregator ID 5218 * 5219 * This function validates aggregator id. Caller holds the scheduler lock. 
5220 */ 5221 static enum ice_status 5222 ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id) 5223 { 5224 struct ice_sched_agg_info *agg_info; 5225 struct ice_sched_agg_info *tmp; 5226 bool agg_id_present = false; 5227 enum ice_status status; 5228 5229 status = ice_sched_validate_agg_srl_node(pi, agg_id); 5230 if (status) 5231 return status; 5232 5233 LIST_FOR_EACH_ENTRY_SAFE(agg_info, tmp, &pi->hw->agg_list, 5234 ice_sched_agg_info, list_entry) 5235 if (agg_info->agg_id == agg_id) { 5236 agg_id_present = true; 5237 break; 5238 } 5239 5240 if (!agg_id_present) 5241 return ICE_ERR_PARAM; 5242 5243 return ICE_SUCCESS; 5244 } 5245 5246 /** 5247 * ice_sched_set_save_agg_srl_node_bw - set aggregator shared limit values 5248 * @pi: port information structure 5249 * @agg_id: aggregator ID 5250 * @tc: traffic class 5251 * @srl_node: sched node to configure 5252 * @rl_type: rate limit type minimum, maximum, or shared 5253 * @bw: minimum, maximum, or shared bandwidth in Kbps 5254 * 5255 * Configure shared rate limiter(SRL) of aggregator type nodes across 5256 * requested traffic class, and saves those value for later use for 5257 * replaying purposes. The caller holds the scheduler lock. 
5258 */ 5259 static enum ice_status 5260 ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, 5261 struct ice_sched_node *srl_node, 5262 enum ice_rl_type rl_type, u32 bw) 5263 { 5264 enum ice_status status; 5265 5266 if (bw == ICE_SCHED_DFLT_BW) { 5267 status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type); 5268 } else { 5269 status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw); 5270 if (status) 5271 return status; 5272 status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw); 5273 } 5274 return status; 5275 } 5276 5277 /** 5278 * ice_sched_set_agg_node_srl_per_tc - set aggregator SRL per tc 5279 * @pi: port information structure 5280 * @agg_id: aggregator ID 5281 * @tc: traffic class 5282 * @min_bw: minimum bandwidth in Kbps 5283 * @max_bw: maximum bandwidth in Kbps 5284 * @shared_bw: shared bandwidth in Kbps 5285 * 5286 * This function configures the shared rate limiter(SRL) of aggregator type 5287 * node for a given traffic class for aggregator matching agg_id. When BW 5288 * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. Caller 5289 * holds the scheduler lock. 
5290 */ 5291 static enum ice_status 5292 ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id, 5293 u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) 5294 { 5295 struct ice_sched_node *tc_node, *agg_node, *cfg_node; 5296 enum ice_rl_type rl_type = ICE_SHARED_BW; 5297 enum ice_status status = ICE_ERR_CFG; 5298 u8 layer_num; 5299 5300 tc_node = ice_sched_get_tc_node(pi, tc); 5301 if (!tc_node) 5302 return ICE_ERR_CFG; 5303 5304 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); 5305 if (!agg_node) 5306 return ICE_ERR_CFG; 5307 5308 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, 5309 agg_node->tx_sched_layer); 5310 if (layer_num >= pi->hw->num_tx_sched_layers) 5311 return ICE_ERR_PARAM; 5312 5313 /* SRL node may be different */ 5314 cfg_node = ice_sched_get_srl_node(agg_node, layer_num); 5315 if (!cfg_node) 5316 return ICE_ERR_CFG; 5317 5318 status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node, 5319 ICE_MIN_BW, min_bw); 5320 if (status) 5321 return status; 5322 5323 status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node, 5324 ICE_MAX_BW, max_bw); 5325 if (status) 5326 return status; 5327 5328 status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node, 5329 ICE_SHARED_BW, shared_bw); 5330 return status; 5331 } 5332 5333 /** 5334 * ice_sched_set_agg_bw_shared_lmt - set aggregator BW shared limit 5335 * @pi: port information structure 5336 * @agg_id: aggregator ID 5337 * @min_bw: minimum bandwidth in Kbps 5338 * @max_bw: maximum bandwidth in Kbps 5339 * @shared_bw: shared bandwidth in Kbps 5340 * 5341 * This function configures the shared rate limiter(SRL) of all aggregator type 5342 * nodes across all traffic classes for aggregator matching agg_id. When 5343 * BW value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the 5344 * node(s). 
5345 */ 5346 enum ice_status 5347 ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, 5348 u32 min_bw, u32 max_bw, u32 shared_bw) 5349 { 5350 enum ice_status status; 5351 u8 tc; 5352 5353 if (!pi) 5354 return ICE_ERR_PARAM; 5355 5356 ice_acquire_lock(&pi->sched_lock); 5357 status = ice_sched_validate_agg_id(pi, agg_id); 5358 if (status) 5359 goto exit_agg_bw_shared_lmt; 5360 5361 /* Return success if no nodes are present across TC */ 5362 ice_for_each_traffic_class(tc) { 5363 struct ice_sched_node *tc_node, *agg_node; 5364 5365 tc_node = ice_sched_get_tc_node(pi, tc); 5366 if (!tc_node) 5367 continue; 5368 5369 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); 5370 if (!agg_node) 5371 continue; 5372 5373 status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc, 5374 min_bw, max_bw, 5375 shared_bw); 5376 if (status) 5377 break; 5378 } 5379 5380 exit_agg_bw_shared_lmt: 5381 ice_release_lock(&pi->sched_lock); 5382 return status; 5383 } 5384 5385 /** 5386 * ice_sched_set_agg_bw_shared_lmt_per_tc - set aggregator BW shared lmt per tc 5387 * @pi: port information structure 5388 * @agg_id: aggregator ID 5389 * @tc: traffic class 5390 * @min_bw: minimum bandwidth in Kbps 5391 * @max_bw: maximum bandwidth in Kbps 5392 * @shared_bw: shared bandwidth in Kbps 5393 * 5394 * This function configures the shared rate limiter(SRL) of aggregator type 5395 * node for a given traffic class for aggregator matching agg_id. When BW 5396 * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. 
5397 */ 5398 enum ice_status 5399 ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, 5400 u8 tc, u32 min_bw, u32 max_bw, 5401 u32 shared_bw) 5402 { 5403 enum ice_status status; 5404 5405 if (!pi) 5406 return ICE_ERR_PARAM; 5407 ice_acquire_lock(&pi->sched_lock); 5408 status = ice_sched_validate_agg_id(pi, agg_id); 5409 if (status) 5410 goto exit_agg_bw_shared_lmt_per_tc; 5411 5412 status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc, min_bw, 5413 max_bw, shared_bw); 5414 5415 exit_agg_bw_shared_lmt_per_tc: 5416 ice_release_lock(&pi->sched_lock); 5417 return status; 5418 } 5419 5420 /** 5421 * ice_sched_cfg_sibl_node_prio - configure node sibling priority 5422 * @pi: port information structure 5423 * @node: sched node to configure 5424 * @priority: sibling priority 5425 * 5426 * This function configures node element's sibling priority only. This 5427 * function needs to be called with scheduler lock held. 5428 */ 5429 enum ice_status 5430 ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi, 5431 struct ice_sched_node *node, u8 priority) 5432 { 5433 struct ice_aqc_txsched_elem_data buf; 5434 struct ice_aqc_txsched_elem *data; 5435 struct ice_hw *hw = pi->hw; 5436 enum ice_status status; 5437 5438 if (!hw) 5439 return ICE_ERR_PARAM; 5440 buf = node->info; 5441 data = &buf.data; 5442 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; 5443 priority = (priority << ICE_AQC_ELEM_GENERIC_PRIO_S) & 5444 ICE_AQC_ELEM_GENERIC_PRIO_M; 5445 data->generic &= ~ICE_AQC_ELEM_GENERIC_PRIO_M; 5446 data->generic |= priority; 5447 5448 /* Configure element */ 5449 status = ice_sched_update_elem(hw, node, &buf); 5450 return status; 5451 } 5452 5453 /** 5454 * ice_cfg_rl_burst_size - Set burst size value 5455 * @hw: pointer to the HW struct 5456 * @bytes: burst size in bytes 5457 * 5458 * This function configures/set the burst size to requested new value. The new 5459 * burst size value is used for future rate limit calls. 
It doesn't change the 5460 * existing or previously created RL profiles. 5461 */ 5462 enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) 5463 { 5464 u16 burst_size_to_prog; 5465 5466 if (bytes < ICE_MIN_BURST_SIZE_ALLOWED || 5467 bytes > ICE_MAX_BURST_SIZE_ALLOWED) 5468 return ICE_ERR_PARAM; 5469 if (ice_round_to_num(bytes, 64) <= 5470 ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) { 5471 /* 64 byte granularity case */ 5472 /* Disable MSB granularity bit */ 5473 burst_size_to_prog = ICE_64_BYTE_GRANULARITY; 5474 /* round number to nearest 64 byte granularity */ 5475 bytes = ice_round_to_num(bytes, 64); 5476 /* The value is in 64 byte chunks */ 5477 burst_size_to_prog |= (u16)(bytes / 64); 5478 } else { 5479 /* k bytes granularity case */ 5480 /* Enable MSB granularity bit */ 5481 burst_size_to_prog = ICE_KBYTE_GRANULARITY; 5482 /* round number to nearest 1024 granularity */ 5483 bytes = ice_round_to_num(bytes, 1024); 5484 /* check rounding doesn't go beyond allowed */ 5485 if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY) 5486 bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY; 5487 /* The value is in k bytes */ 5488 burst_size_to_prog |= (u16)(bytes / 1024); 5489 } 5490 hw->max_burst_size = burst_size_to_prog; 5491 return ICE_SUCCESS; 5492 } 5493 5494 /** 5495 * ice_sched_replay_node_prio - re-configure node priority 5496 * @hw: pointer to the HW struct 5497 * @node: sched node to configure 5498 * @priority: priority value 5499 * 5500 * This function configures node element's priority value. It 5501 * needs to be called with scheduler lock held. 
5502 */ 5503 static enum ice_status 5504 ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, 5505 u8 priority) 5506 { 5507 struct ice_aqc_txsched_elem_data buf; 5508 struct ice_aqc_txsched_elem *data; 5509 enum ice_status status; 5510 5511 buf = node->info; 5512 data = &buf.data; 5513 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; 5514 data->generic = priority; 5515 5516 /* Configure element */ 5517 status = ice_sched_update_elem(hw, node, &buf); 5518 return status; 5519 } 5520 5521 /** 5522 * ice_sched_replay_node_bw - replay node(s) BW 5523 * @hw: pointer to the HW struct 5524 * @node: sched node to configure 5525 * @bw_t_info: BW type information 5526 * 5527 * This function restores node's BW from bw_t_info. The caller needs 5528 * to hold the scheduler lock. 5529 */ 5530 static enum ice_status 5531 ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, 5532 struct ice_bw_type_info *bw_t_info) 5533 { 5534 struct ice_port_info *pi = hw->port_info; 5535 enum ice_status status = ICE_ERR_PARAM; 5536 u16 bw_alloc; 5537 5538 if (!node) 5539 return status; 5540 if (!ice_is_any_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT)) 5541 return ICE_SUCCESS; 5542 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_PRIO)) { 5543 status = ice_sched_replay_node_prio(hw, node, 5544 bw_t_info->generic); 5545 if (status) 5546 return status; 5547 } 5548 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR)) { 5549 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, 5550 bw_t_info->cir_bw.bw); 5551 if (status) 5552 return status; 5553 } 5554 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR_WT)) { 5555 bw_alloc = bw_t_info->cir_bw.bw_alloc; 5556 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW, 5557 bw_alloc); 5558 if (status) 5559 return status; 5560 } 5561 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR)) { 5562 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, 5563 bw_t_info->eir_bw.bw); 
5564 if (status) 5565 return status; 5566 } 5567 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR_WT)) { 5568 bw_alloc = bw_t_info->eir_bw.bw_alloc; 5569 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW, 5570 bw_alloc); 5571 if (status) 5572 return status; 5573 } 5574 if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_SHARED)) 5575 status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW, 5576 bw_t_info->shared_bw); 5577 return status; 5578 } 5579 5580 /** 5581 * ice_sched_replay_agg_bw - replay aggregator node(s) BW 5582 * @hw: pointer to the HW struct 5583 * @agg_info: aggregator data structure 5584 * 5585 * This function re-creates aggregator type nodes. The caller needs to hold 5586 * the scheduler lock. 5587 */ 5588 static enum ice_status 5589 ice_sched_replay_agg_bw(struct ice_hw *hw, struct ice_sched_agg_info *agg_info) 5590 { 5591 struct ice_sched_node *tc_node, *agg_node; 5592 enum ice_status status = ICE_SUCCESS; 5593 u8 tc; 5594 5595 if (!agg_info) 5596 return ICE_ERR_PARAM; 5597 ice_for_each_traffic_class(tc) { 5598 if (!ice_is_any_bit_set(agg_info->bw_t_info[tc].bw_t_bitmap, 5599 ICE_BW_TYPE_CNT)) 5600 continue; 5601 tc_node = ice_sched_get_tc_node(hw->port_info, tc); 5602 if (!tc_node) { 5603 status = ICE_ERR_PARAM; 5604 break; 5605 } 5606 agg_node = ice_sched_get_agg_node(hw->port_info, tc_node, 5607 agg_info->agg_id); 5608 if (!agg_node) { 5609 status = ICE_ERR_PARAM; 5610 break; 5611 } 5612 status = ice_sched_replay_node_bw(hw, agg_node, 5613 &agg_info->bw_t_info[tc]); 5614 if (status) 5615 break; 5616 } 5617 return status; 5618 } 5619 5620 /** 5621 * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap 5622 * @pi: port info struct 5623 * @tc_bitmap: 8 bits TC bitmap to check 5624 * @ena_tc_bitmap: 8 bits enabled TC bitmap to return 5625 * 5626 * This function returns enabled TC bitmap in variable ena_tc_bitmap. Some TCs 5627 * may be missing, it returns enabled TCs. 
This function needs to be called with 5628 * scheduler lock held. 5629 */ 5630 static void 5631 ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi, ice_bitmap_t *tc_bitmap, 5632 ice_bitmap_t *ena_tc_bitmap) 5633 { 5634 u8 tc; 5635 5636 /* Some TC(s) may be missing after reset, adjust for replay */ 5637 ice_for_each_traffic_class(tc) 5638 if (ice_is_tc_ena(*tc_bitmap, tc) && 5639 (ice_sched_get_tc_node(pi, tc))) 5640 ice_set_bit(tc, ena_tc_bitmap); 5641 } 5642 5643 /** 5644 * ice_sched_replay_agg - recreate aggregator node(s) 5645 * @hw: pointer to the HW struct 5646 * 5647 * This function recreate aggregator type nodes which are not replayed earlier. 5648 * It also replay aggregator BW information. These aggregator nodes are not 5649 * associated with VSI type node yet. 5650 */ 5651 void ice_sched_replay_agg(struct ice_hw *hw) 5652 { 5653 struct ice_port_info *pi = hw->port_info; 5654 struct ice_sched_agg_info *agg_info; 5655 5656 ice_acquire_lock(&pi->sched_lock); 5657 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, 5658 list_entry) 5659 /* replay aggregator (re-create aggregator node) */ 5660 if (!ice_cmp_bitmap(agg_info->tc_bitmap, 5661 agg_info->replay_tc_bitmap, 5662 ICE_MAX_TRAFFIC_CLASS)) { 5663 ice_declare_bitmap(replay_bitmap, 5664 ICE_MAX_TRAFFIC_CLASS); 5665 enum ice_status status; 5666 5667 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5668 ice_sched_get_ena_tc_bitmap(pi, 5669 agg_info->replay_tc_bitmap, 5670 replay_bitmap); 5671 status = ice_sched_cfg_agg(hw->port_info, 5672 agg_info->agg_id, 5673 ICE_AGG_TYPE_AGG, 5674 replay_bitmap); 5675 if (status) { 5676 ice_info(hw, "Replay agg id[%d] failed\n", 5677 agg_info->agg_id); 5678 /* Move on to next one */ 5679 continue; 5680 } 5681 /* Replay aggregator node BW (restore aggregator BW) */ 5682 status = ice_sched_replay_agg_bw(hw, agg_info); 5683 if (status) 5684 ice_info(hw, "Replay agg bw [id=%d] failed\n", 5685 agg_info->agg_id); 5686 } 5687 
ice_release_lock(&pi->sched_lock); 5688 } 5689 5690 /** 5691 * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization 5692 * @hw: pointer to the HW struct 5693 * 5694 * This function initialize aggregator(s) TC bitmap to zero. A required 5695 * preinit step for replaying aggregators. 5696 */ 5697 void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw) 5698 { 5699 struct ice_port_info *pi = hw->port_info; 5700 struct ice_sched_agg_info *agg_info; 5701 5702 ice_acquire_lock(&pi->sched_lock); 5703 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, 5704 list_entry) { 5705 struct ice_sched_agg_vsi_info *agg_vsi_info; 5706 5707 agg_info->tc_bitmap[0] = 0; 5708 LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list, 5709 ice_sched_agg_vsi_info, list_entry) 5710 agg_vsi_info->tc_bitmap[0] = 0; 5711 } 5712 ice_release_lock(&pi->sched_lock); 5713 } 5714 5715 /** 5716 * ice_sched_replay_root_node_bw - replay root node BW 5717 * @pi: port information structure 5718 * 5719 * Replay root node BW settings. 5720 */ 5721 enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi) 5722 { 5723 enum ice_status status = ICE_SUCCESS; 5724 5725 if (!pi->hw) 5726 return ICE_ERR_PARAM; 5727 ice_acquire_lock(&pi->sched_lock); 5728 5729 status = ice_sched_replay_node_bw(pi->hw, pi->root, 5730 &pi->root_node_bw_t_info); 5731 ice_release_lock(&pi->sched_lock); 5732 return status; 5733 } 5734 5735 /** 5736 * ice_sched_replay_tc_node_bw - replay TC node(s) BW 5737 * @pi: port information structure 5738 * 5739 * This function replay TC nodes. 
5740 */ 5741 enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi) 5742 { 5743 enum ice_status status = ICE_SUCCESS; 5744 u8 tc; 5745 5746 if (!pi->hw) 5747 return ICE_ERR_PARAM; 5748 ice_acquire_lock(&pi->sched_lock); 5749 ice_for_each_traffic_class(tc) { 5750 struct ice_sched_node *tc_node; 5751 5752 tc_node = ice_sched_get_tc_node(pi, tc); 5753 if (!tc_node) 5754 continue; /* TC not present */ 5755 status = ice_sched_replay_node_bw(pi->hw, tc_node, 5756 &pi->tc_node_bw_t_info[tc]); 5757 if (status) 5758 break; 5759 } 5760 ice_release_lock(&pi->sched_lock); 5761 return status; 5762 } 5763 5764 /** 5765 * ice_sched_replay_vsi_bw - replay VSI type node(s) BW 5766 * @hw: pointer to the HW struct 5767 * @vsi_handle: software VSI handle 5768 * @tc_bitmap: 8 bits TC bitmap 5769 * 5770 * This function replays VSI type nodes bandwidth. This function needs to be 5771 * called with scheduler lock held. 5772 */ 5773 static enum ice_status 5774 ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle, 5775 ice_bitmap_t *tc_bitmap) 5776 { 5777 struct ice_sched_node *vsi_node, *tc_node; 5778 struct ice_port_info *pi = hw->port_info; 5779 struct ice_bw_type_info *bw_t_info; 5780 struct ice_vsi_ctx *vsi_ctx; 5781 enum ice_status status = ICE_SUCCESS; 5782 u8 tc; 5783 5784 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 5785 if (!vsi_ctx) 5786 return ICE_ERR_PARAM; 5787 ice_for_each_traffic_class(tc) { 5788 if (!ice_is_tc_ena(*tc_bitmap, tc)) 5789 continue; 5790 tc_node = ice_sched_get_tc_node(pi, tc); 5791 if (!tc_node) 5792 continue; 5793 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); 5794 if (!vsi_node) 5795 continue; 5796 bw_t_info = &vsi_ctx->sched.bw_t_info[tc]; 5797 status = ice_sched_replay_node_bw(hw, vsi_node, bw_t_info); 5798 if (status) 5799 break; 5800 } 5801 return status; 5802 } 5803 5804 /** 5805 * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s) 5806 * @hw: pointer to the HW struct 5807 * @vsi_handle: software 
VSI handle 5808 * 5809 * This function replays aggregator node, VSI to aggregator type nodes, and 5810 * their node bandwidth information. This function needs to be called with 5811 * scheduler lock held. 5812 */ 5813 static enum ice_status 5814 ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) 5815 { 5816 ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5817 struct ice_sched_agg_vsi_info *agg_vsi_info; 5818 struct ice_port_info *pi = hw->port_info; 5819 struct ice_sched_agg_info *agg_info; 5820 enum ice_status status; 5821 5822 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5823 if (!ice_is_vsi_valid(hw, vsi_handle)) 5824 return ICE_ERR_PARAM; 5825 agg_info = ice_get_vsi_agg_info(hw, vsi_handle); 5826 if (!agg_info) 5827 return ICE_SUCCESS; /* Not present in list - default Agg case */ 5828 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); 5829 if (!agg_vsi_info) 5830 return ICE_SUCCESS; /* Not present in list - default Agg case */ 5831 ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap, 5832 replay_bitmap); 5833 /* Replay aggregator node associated to vsi_handle */ 5834 status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id, 5835 ICE_AGG_TYPE_AGG, replay_bitmap); 5836 if (status) 5837 return status; 5838 /* Replay aggregator node BW (restore aggregator BW) */ 5839 status = ice_sched_replay_agg_bw(hw, agg_info); 5840 if (status) 5841 return status; 5842 5843 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 5844 ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap, 5845 replay_bitmap); 5846 /* Move this VSI (vsi_handle) to above aggregator */ 5847 status = ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle, 5848 replay_bitmap); 5849 if (status) 5850 return status; 5851 /* Replay VSI BW (restore VSI BW) */ 5852 return ice_sched_replay_vsi_bw(hw, vsi_handle, 5853 agg_vsi_info->tc_bitmap); 5854 } 5855 5856 /** 5857 * ice_replay_vsi_agg - replay VSI to aggregator node 5858 * @hw: pointer to the HW 
struct 5859 * @vsi_handle: software VSI handle 5860 * 5861 * This function replays association of VSI to aggregator type nodes, and 5862 * node bandwidth information. 5863 */ 5864 enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) 5865 { 5866 struct ice_port_info *pi = hw->port_info; 5867 enum ice_status status; 5868 5869 ice_acquire_lock(&pi->sched_lock); 5870 status = ice_sched_replay_vsi_agg(hw, vsi_handle); 5871 ice_release_lock(&pi->sched_lock); 5872 return status; 5873 } 5874 5875 /** 5876 * ice_sched_replay_q_bw - replay queue type node BW 5877 * @pi: port information structure 5878 * @q_ctx: queue context structure 5879 * 5880 * This function replays queue type node bandwidth. This function needs to be 5881 * called with scheduler lock held. 5882 */ 5883 enum ice_status 5884 ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) 5885 { 5886 struct ice_sched_node *q_node; 5887 5888 /* Following also checks the presence of node in tree */ 5889 q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); 5890 if (!q_node) 5891 return ICE_ERR_PARAM; 5892 return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); 5893 } 5894