1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Intel Corporation. */ 3 4 #include <net/devlink.h> 5 #include "ice_sched.h" 6 7 /** 8 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB 9 * @pi: port information structure 10 * @info: Scheduler element information from firmware 11 * 12 * This function inserts the root node of the scheduling tree topology 13 * to the SW DB. 14 */ 15 static int 16 ice_sched_add_root_node(struct ice_port_info *pi, 17 struct ice_aqc_txsched_elem_data *info) 18 { 19 struct ice_sched_node *root; 20 struct ice_hw *hw; 21 22 if (!pi) 23 return -EINVAL; 24 25 hw = pi->hw; 26 27 root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL); 28 if (!root) 29 return -ENOMEM; 30 31 root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0], 32 sizeof(*root->children), GFP_KERNEL); 33 if (!root->children) { 34 devm_kfree(ice_hw_to_dev(hw), root); 35 return -ENOMEM; 36 } 37 38 memcpy(&root->info, info, sizeof(*info)); 39 pi->root = root; 40 return 0; 41 } 42 43 /** 44 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB 45 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree 46 * @teid: node TEID to search 47 * 48 * This function searches for a node matching the TEID in the scheduling tree 49 * from the SW DB. The search is recursive and is restricted by the number of 50 * layers it has searched through; stopping at the max supported layer. 51 * 52 * This function needs to be called when holding the port_info->sched_lock 53 */ 54 struct ice_sched_node * 55 ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) 56 { 57 u16 i; 58 59 /* The TEID is same as that of the start_node */ 60 if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid) 61 return start_node; 62 63 /* The node has no children or is at the max layer */ 64 if (!start_node->num_children || 65 start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM || 66 start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) 67 return NULL; 68 69 /* Check if TEID matches to any of the children nodes */ 70 for (i = 0; i < start_node->num_children; i++) 71 if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid) 72 return start_node->children[i]; 73 74 /* Search within each child's sub-tree */ 75 for (i = 0; i < start_node->num_children; i++) { 76 struct ice_sched_node *tmp; 77 78 tmp = ice_sched_find_node_by_teid(start_node->children[i], 79 teid); 80 if (tmp) 81 return tmp; 82 } 83 84 return NULL; 85 } 86 87 /** 88 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd 89 * @hw: pointer to the HW struct 90 * @cmd_opc: cmd opcode 91 * @elems_req: number of elements to request 92 * @buf: pointer to buffer 93 * @buf_size: buffer size in bytes 94 * @elems_resp: returns total number of elements response 95 * @cd: pointer to command details structure or NULL 96 * 97 * This function sends a scheduling elements cmd (cmd_opc) 98 */ 99 static int 100 ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, 101 u16 elems_req, void *buf, u16 buf_size, 102 u16 *elems_resp, struct ice_sq_cd *cd) 103 { 104 struct ice_aqc_sched_elem_cmd *cmd; 105 struct ice_aq_desc desc; 106 int status; 107 108 cmd = &desc.params.sched_elem_cmd; 109 ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc); 110 cmd->num_elem_req = cpu_to_le16(elems_req); 111 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 112 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 113 if (!status && elems_resp) 114 *elems_resp = le16_to_cpu(cmd->num_elem_resp); 115 116 return status; 117 } 118 119 /** 120 * ice_aq_query_sched_elems - query scheduler elements 121 * @hw: pointer to the HW struct 122 * @elems_req: number of elements to query 123 * @buf: pointer to buffer 124 * @buf_size: buffer size in bytes 125 * @elems_ret: returns total number of elements returned 126 * @cd: pointer to command details structure or NULL 127 * 128 * Query scheduling elements (0x0404) 129 */ 130 int 131 ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, 132 struct ice_aqc_txsched_elem_data *buf, u16 buf_size, 133 u16 *elems_ret, struct ice_sq_cd *cd) 134 { 135 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems, 136 elems_req, (void *)buf, buf_size, 137 elems_ret, cd); 138 } 139 140 /** 141 * ice_sched_add_node - Insert the Tx scheduler node in SW DB 142 * @pi: port information structure 143 * @layer: Scheduler layer of the node 144 * @info: Scheduler element information from firmware 145 * @prealloc_node: preallocated ice_sched_node struct for SW DB 146 * 147 * This function inserts a scheduler node to the SW DB. 148 */ 149 int 150 ice_sched_add_node(struct ice_port_info *pi, u8 layer, 151 struct ice_aqc_txsched_elem_data *info, 152 struct ice_sched_node *prealloc_node) 153 { 154 struct ice_aqc_txsched_elem_data elem; 155 struct ice_sched_node *parent; 156 struct ice_sched_node *node; 157 struct ice_hw *hw; 158 int status; 159 160 if (!pi) 161 return -EINVAL; 162 163 hw = pi->hw; 164 165 /* A valid parent node should be there */ 166 parent = ice_sched_find_node_by_teid(pi->root, 167 le32_to_cpu(info->parent_teid)); 168 if (!parent) { 169 ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n", 170 le32_to_cpu(info->parent_teid)); 171 return -EINVAL; 172 } 173 174 /* query the current node information from FW before adding it 175 * to the SW DB 176 */ 177 status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem); 178 if (status) 179 return status; 180 181 if (prealloc_node) 182 node = prealloc_node; 183 else 184 node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL); 185 if (!node) 186 return -ENOMEM; 187 if (hw->max_children[layer]) { 188 node->children = devm_kcalloc(ice_hw_to_dev(hw), 189 hw->max_children[layer], 190 sizeof(*node->children), GFP_KERNEL); 191 if (!node->children) { 192 devm_kfree(ice_hw_to_dev(hw), node); 193 return -ENOMEM; 194 } 195 } 196 197 node->in_use = true; 198 node->parent = parent; 199 node->tx_sched_layer = layer; 200 parent->children[parent->num_children++] = node; 201 node->info = elem; 202 return 0; 203 } 204 205 /** 206 * ice_aq_delete_sched_elems - delete scheduler elements 207 * @hw: pointer to the HW struct 208 * @grps_req: number of groups to delete 209 * @buf: pointer to buffer 210 * @buf_size: buffer size in bytes 211 * @grps_del: returns total number of elements deleted 212 * @cd: pointer to command details structure or NULL 213 * 214 * Delete scheduling elements (0x040F) 215 */ 216 static int 217 ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, 218 struct ice_aqc_delete_elem *buf, u16 buf_size, 219 u16 *grps_del, struct ice_sq_cd *cd) 220 { 221 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems, 222 grps_req, (void *)buf, buf_size, 223 grps_del, cd); 224 } 225 226 /** 227 * ice_sched_remove_elems - remove nodes from HW 228 * @hw: pointer to the HW struct 229 * @parent: pointer to the parent node 230 * @node_teid: node teid to be deleted 231 * 232 * This function remove nodes from HW 233 */ 234 static int 235 ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, 236 u32 node_teid) 237 { 238 DEFINE_RAW_FLEX(struct ice_aqc_delete_elem, buf, teid, 1); 239 u16 buf_size = __struct_size(buf); 240 u16 num_groups_removed = 0; 241 int status; 242 243 buf->hdr.parent_teid = parent->info.node_teid; 244 buf->hdr.num_elems = cpu_to_le16(1); 245 buf->teid[0] = cpu_to_le32(node_teid); 246 247 status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size, 248 &num_groups_removed, NULL); 249 if (status || num_groups_removed != 1) 250 ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n", 251 hw->adminq.sq_last_status); 252 253 return status; 254 } 255 256 /** 257 * ice_sched_get_first_node - get the first node of the given layer 258 * @pi: port information structure 259 * @parent: pointer the base node of the subtree 260 * @layer: layer number 261 * 262 * This function retrieves the first node of the given layer from the subtree 263 */ 264 static struct ice_sched_node * 265 ice_sched_get_first_node(struct ice_port_info *pi, 266 struct ice_sched_node *parent, u8 layer) 267 { 268 return pi->sib_head[parent->tc_num][layer]; 269 } 270 271 /** 272 * ice_sched_get_tc_node - get pointer to TC node 273 * @pi: port information structure 274 * @tc: TC number 275 * 276 * This function returns the TC node pointer 277 */ 278 struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc) 279 { 280 u8 i; 281 282 if (!pi || !pi->root) 283 return NULL; 284 for (i = 0; i < pi->root->num_children; i++) 285 if (pi->root->children[i]->tc_num == tc) 286 return pi->root->children[i]; 287 return NULL; 288 } 289 290 /** 291 * ice_free_sched_node - Free a Tx scheduler node from SW DB 292 * @pi: port information structure 293 * @node: pointer to the ice_sched_node struct 294 * 295 * This function frees up a node from SW DB as well as from HW 296 * 297 * This function needs to be called with the port_info->sched_lock held 298 */ 299 void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) 300 { 301 struct ice_sched_node *parent; 302 struct ice_hw *hw = pi->hw; 303 u8 i, j; 304 305 /* Free the children before freeing up the parent node 306 * The parent array is updated below and that shifts the nodes 307 * in the array. So always pick the first child if num children > 0 308 */ 309 while (node->num_children) 310 ice_free_sched_node(pi, node->children[0]); 311 312 /* Leaf, TC and root nodes can't be deleted by SW */ 313 if (node->tx_sched_layer >= hw->sw_entry_point_layer && 314 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC && 315 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT && 316 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) { 317 u32 teid = le32_to_cpu(node->info.node_teid); 318 319 ice_sched_remove_elems(hw, node->parent, teid); 320 } 321 parent = node->parent; 322 /* root has no parent */ 323 if (parent) { 324 struct ice_sched_node *p; 325 326 /* update the parent */ 327 for (i = 0; i < parent->num_children; i++) 328 if (parent->children[i] == node) { 329 for (j = i + 1; j < parent->num_children; j++) 330 parent->children[j - 1] = 331 parent->children[j]; 332 parent->num_children--; 333 break; 334 } 335 336 p = ice_sched_get_first_node(pi, node, node->tx_sched_layer); 337 while (p) { 338 if (p->sibling == node) { 339 p->sibling = node->sibling; 340 break; 341 } 342 p = p->sibling; 343 } 344 345 /* update the sibling head if head is getting removed */ 346 if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node) 347 pi->sib_head[node->tc_num][node->tx_sched_layer] = 348 node->sibling; 349 } 350 351 devm_kfree(ice_hw_to_dev(hw), node->children); 352 kfree(node->name); 353 xa_erase(&pi->sched_node_ids, node->id); 354 devm_kfree(ice_hw_to_dev(hw), node); 355 } 356 357 /** 358 * ice_aq_get_dflt_topo - gets default scheduler topology 359 * @hw: pointer to the HW struct 360 * @lport: logical port number 361 * @buf: pointer to buffer 362 * @buf_size: buffer size in bytes 363 * @num_branches: returns total number of queue to port branches 364 * @cd: pointer to command details structure or NULL 365 * 366 * Get default scheduler topology (0x400) 367 */ 368 static int 369 ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, 370 struct ice_aqc_get_topo_elem *buf, u16 buf_size, 371 u8 *num_branches, struct ice_sq_cd *cd) 372 { 373 struct ice_aqc_get_topo *cmd; 374 struct ice_aq_desc desc; 375 int status; 376 377 cmd = &desc.params.get_topo; 378 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo); 379 cmd->port_num = lport; 380 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 381 if (!status && num_branches) 382 *num_branches = cmd->num_branches; 383 384 return status; 385 } 386 387 /** 388 * ice_aq_add_sched_elems - adds scheduling element 389 * @hw: pointer to the HW struct 390 * @grps_req: the number of groups that are requested to be added 391 * @buf: pointer to buffer 392 * @buf_size: buffer size in bytes 393 * @grps_added: returns total number of groups added 394 * @cd: pointer to command details structure or NULL 395 * 396 * Add scheduling elements (0x0401) 397 */ 398 static int 399 ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, 400 struct ice_aqc_add_elem *buf, u16 buf_size, 401 u16 *grps_added, struct ice_sq_cd *cd) 402 { 403 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems, 404 grps_req, (void *)buf, buf_size, 405 grps_added, cd); 406 } 407 408 /** 409 * ice_aq_cfg_sched_elems - configures scheduler elements 410 * @hw: pointer to the HW struct 411 * @elems_req: number of elements to configure 412 * @buf: pointer to buffer 413 * @buf_size: buffer size in bytes 414 * @elems_cfgd: returns total number of elements configured 415 * @cd: pointer to command details structure or NULL 416 * 417 * Configure scheduling elements (0x0403) 418 */ 419 static int 420 ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, 421 struct ice_aqc_txsched_elem_data *buf, u16 buf_size, 422 u16 *elems_cfgd, struct ice_sq_cd *cd) 423 { 424 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems, 425 elems_req, (void *)buf, buf_size, 426 elems_cfgd, cd); 427 } 428 429 /** 430 * ice_aq_move_sched_elems - move scheduler element (just 1 group) 431 * @hw: pointer to the HW struct 432 * @buf: pointer to buffer 433 * @buf_size: buffer size in bytes 434 * @grps_movd: returns total number of groups moved 435 * 436 * Move scheduling elements (0x0408) 437 */ 438 int 439 ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf, 440 u16 buf_size, u16 *grps_movd) 441 { 442 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems, 443 1, buf, buf_size, grps_movd, NULL); 444 } 445 446 /** 447 * ice_aq_suspend_sched_elems - suspend scheduler elements 448 * @hw: pointer to the HW struct 449 * @elems_req: number of elements to suspend 450 * @buf: pointer to buffer 451 * @buf_size: buffer size in bytes 452 * @elems_ret: returns total number of elements suspended 453 * @cd: pointer to command details structure or NULL 454 * 455 * Suspend scheduling elements (0x0409) 456 */ 457 static int 458 ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, 459 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) 460 { 461 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems, 462 elems_req, (void *)buf, buf_size, 463 elems_ret, cd); 464 } 465 466 /** 467 * ice_aq_resume_sched_elems - resume scheduler elements 468 * @hw: pointer to the HW struct 469 * @elems_req: number of elements to resume 470 * @buf: pointer to buffer 471 * @buf_size: buffer size in bytes 472 * @elems_ret: returns total number of elements resumed 473 * @cd: pointer to command details structure or NULL 474 * 475 * resume scheduling elements (0x040A) 476 */ 477 static int 478 ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, 479 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) 480 { 481 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems, 482 elems_req, (void *)buf, buf_size, 483 elems_ret, cd); 484 } 485 486 /** 487 * ice_aq_query_sched_res - query scheduler resource 488 * @hw: pointer to the HW struct 489 * @buf_size: buffer size in bytes 490 * @buf: pointer to buffer 491 * @cd: pointer to command details structure or NULL 492 * 493 * Query scheduler resource allocation (0x0412) 494 */ 495 static int 496 ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, 497 struct ice_aqc_query_txsched_res_resp *buf, 498 struct ice_sq_cd *cd) 499 { 500 struct ice_aq_desc desc; 501 502 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res); 503 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 504 } 505 506 /** 507 * ice_sched_suspend_resume_elems - suspend or resume HW nodes 508 * @hw: pointer to the HW struct 509 * @num_nodes: number of nodes 510 * @node_teids: array of node teids to be suspended or resumed 511 * @suspend: true means suspend / false means resume 512 * 513 * This function suspends or resumes HW nodes 514 */ 515 int 516 ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, 517 bool suspend) 518 { 519 u16 i, buf_size, num_elem_ret = 0; 520 __le32 *buf; 521 int status; 522 523 buf_size = sizeof(*buf) * num_nodes; 524 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); 525 if (!buf) 526 return -ENOMEM; 527 528 for (i = 0; i < num_nodes; i++) 529 buf[i] = cpu_to_le32(node_teids[i]); 530 531 if (suspend) 532 status = ice_aq_suspend_sched_elems(hw, num_nodes, buf, 533 buf_size, &num_elem_ret, 534 NULL); 535 else 536 status = ice_aq_resume_sched_elems(hw, num_nodes, buf, 537 buf_size, &num_elem_ret, 538 NULL); 539 if (status || num_elem_ret != num_nodes) 540 ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n"); 541 542 devm_kfree(ice_hw_to_dev(hw), buf); 543 return status; 544 } 545 546 /** 547 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC 548 * @hw: pointer to the HW struct 549 * @vsi_handle: VSI handle 550 * @tc: TC number 551 * @new_numqs: number of queues 552 */ 553 static int 554 ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) 555 { 556 struct ice_vsi_ctx *vsi_ctx; 557 struct ice_q_ctx *q_ctx; 558 u16 idx; 559 560 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); 561 if (!vsi_ctx) 562 return -EINVAL; 563 /* allocate LAN queue contexts */ 564 if (!vsi_ctx->lan_q_ctx[tc]) { 565 q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, 566 sizeof(*q_ctx), GFP_KERNEL); 567 if (!q_ctx) 568 return -ENOMEM; 569 570 for (idx = 0; idx < new_numqs; idx++) { 571 q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE; 572 q_ctx[idx].q_teid = ICE_INVAL_TEID; 573 } 574 575 vsi_ctx->lan_q_ctx[tc] = q_ctx; 576 vsi_ctx->num_lan_q_entries[tc] = new_numqs; 577 return 0; 578 } 579 /* num queues are increased, update the queue contexts */ 580 if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) { 581 u16 prev_num = vsi_ctx->num_lan_q_entries[tc]; 582 583 q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, 584 sizeof(*q_ctx), GFP_KERNEL); 585 if (!q_ctx) 586 return -ENOMEM; 587 588 memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc], 589 prev_num * sizeof(*q_ctx)); 590 devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]); 591 592 for (idx = prev_num; idx < new_numqs; idx++) { 593 q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE; 594 q_ctx[idx].q_teid = ICE_INVAL_TEID; 595 } 596 597 vsi_ctx->lan_q_ctx[tc] = q_ctx; 598 vsi_ctx->num_lan_q_entries[tc] = new_numqs; 599 } 600 return 0; 601 } 602 603 /** 604 * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC 605 * @hw: pointer to the HW struct 606 * @vsi_handle: VSI handle 607 * @tc: TC number 608 * @new_numqs: number of queues 609 */ 610 static int 611 ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) 612 { 613 struct ice_vsi_ctx *vsi_ctx; 614 struct ice_q_ctx *q_ctx; 615 616 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); 617 if (!vsi_ctx) 618 return -EINVAL; 619 /* allocate RDMA queue contexts */ 620 if (!vsi_ctx->rdma_q_ctx[tc]) { 621 vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), 622 new_numqs, 623 sizeof(*q_ctx), 624 GFP_KERNEL); 625 if (!vsi_ctx->rdma_q_ctx[tc]) 626 return -ENOMEM; 627 vsi_ctx->num_rdma_q_entries[tc] = new_numqs; 628 return 0; 629 } 630 /* num queues are increased, update the queue contexts */ 631 if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) { 632 u16 prev_num = vsi_ctx->num_rdma_q_entries[tc]; 633 634 q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, 635 sizeof(*q_ctx), GFP_KERNEL); 636 if (!q_ctx) 637 return -ENOMEM; 638 memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc], 639 prev_num * sizeof(*q_ctx)); 640 devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]); 641 vsi_ctx->rdma_q_ctx[tc] = q_ctx; 642 vsi_ctx->num_rdma_q_entries[tc] = new_numqs; 643 } 644 return 0; 645 } 646 647 /** 648 * ice_aq_rl_profile - performs a rate limiting task 649 * @hw: pointer to the HW struct 650 * @opcode: opcode for add, query, or remove profile(s) 651 * @num_profiles: the number of profiles 652 * @buf: pointer to buffer 653 * @buf_size: buffer size in bytes 654 * @num_processed: number of processed add or remove profile(s) to return 655 * @cd: pointer to command details structure 656 * 657 * RL profile function to add, query, or remove profile(s) 658 */ 659 static int 660 ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, 661 u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, 662 u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd) 663 { 664 struct ice_aqc_rl_profile *cmd; 665 struct ice_aq_desc desc; 666 int status; 667 668 cmd = &desc.params.rl_profile; 669 670 ice_fill_dflt_direct_cmd_desc(&desc, opcode); 671 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 672 cmd->num_profiles = cpu_to_le16(num_profiles); 673 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 674 if (!status && num_processed) 675 *num_processed = le16_to_cpu(cmd->num_processed); 676 return status; 677 } 678 679 /** 680 * ice_aq_add_rl_profile - adds rate limiting profile(s) 681 * @hw: pointer to the HW struct 682 * @num_profiles: the number of profile(s) to be add 683 * @buf: pointer to buffer 684 * @buf_size: buffer size in bytes 685 * @num_profiles_added: total number of profiles added to return 686 * @cd: pointer to command details structure 687 * 688 * Add RL profile (0x0410) 689 */ 690 static int 691 ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, 692 struct ice_aqc_rl_profile_elem *buf, u16 buf_size, 693 u16 *num_profiles_added, struct ice_sq_cd *cd) 694 { 695 return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles, 696 buf, buf_size, num_profiles_added, cd); 697 } 698 699 /** 700 * ice_aq_remove_rl_profile - removes RL profile(s) 701 * @hw: pointer to the HW struct 702 * @num_profiles: the number of profile(s) to remove 703 * @buf: pointer to buffer 704 * @buf_size: buffer size in bytes 705 * @num_profiles_removed: total number of profiles removed to return 706 * @cd: pointer to command details structure or NULL 707 * 708 * Remove RL profile (0x0415) 709 */ 710 static int 711 ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, 712 struct ice_aqc_rl_profile_elem *buf, u16 buf_size, 713 u16 *num_profiles_removed, struct ice_sq_cd *cd) 714 { 715 return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles, 716 num_profiles, buf, buf_size, 717 num_profiles_removed, cd); 718 } 719 720 /** 721 * ice_sched_del_rl_profile - remove RL profile 722 * @hw: pointer to the HW struct 723 * @rl_info: rate limit profile information 724 * 725 * If the profile ID is not referenced anymore, it removes profile ID with 726 * its associated parameters from HW DB,and locally. The caller needs to 727 * hold scheduler lock. 728 */ 729 static int 730 ice_sched_del_rl_profile(struct ice_hw *hw, 731 struct ice_aqc_rl_profile_info *rl_info) 732 { 733 struct ice_aqc_rl_profile_elem *buf; 734 u16 num_profiles_removed; 735 u16 num_profiles = 1; 736 int status; 737 738 if (rl_info->prof_id_ref != 0) 739 return -EBUSY; 740 741 /* Safe to remove profile ID */ 742 buf = &rl_info->profile; 743 status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf), 744 &num_profiles_removed, NULL); 745 if (status || num_profiles_removed != num_profiles) 746 return -EIO; 747 748 /* Delete stale entry now */ 749 list_del(&rl_info->list_entry); 750 devm_kfree(ice_hw_to_dev(hw), rl_info); 751 return status; 752 } 753 754 /** 755 * ice_sched_clear_rl_prof - clears RL prof entries 756 * @pi: port information structure 757 * 758 * This function removes all RL profile from HW as well as from SW DB. 759 */ 760 static void ice_sched_clear_rl_prof(struct ice_port_info *pi) 761 { 762 u16 ln; 763 764 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) { 765 struct ice_aqc_rl_profile_info *rl_prof_elem; 766 struct ice_aqc_rl_profile_info *rl_prof_tmp; 767 768 list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp, 769 &pi->rl_prof_list[ln], list_entry) { 770 struct ice_hw *hw = pi->hw; 771 int status; 772 773 rl_prof_elem->prof_id_ref = 0; 774 status = ice_sched_del_rl_profile(hw, rl_prof_elem); 775 if (status) { 776 ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); 777 /* On error, free mem required */ 778 list_del(&rl_prof_elem->list_entry); 779 devm_kfree(ice_hw_to_dev(hw), rl_prof_elem); 780 } 781 } 782 } 783 } 784 785 /** 786 * ice_sched_clear_agg - clears the aggregator related information 787 * @hw: pointer to the hardware structure 788 * 789 * This function removes aggregator list and free up aggregator related memory 790 * previously allocated. 791 */ 792 void ice_sched_clear_agg(struct ice_hw *hw) 793 { 794 struct ice_sched_agg_info *agg_info; 795 struct ice_sched_agg_info *atmp; 796 797 list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) { 798 struct ice_sched_agg_vsi_info *agg_vsi_info; 799 struct ice_sched_agg_vsi_info *vtmp; 800 801 list_for_each_entry_safe(agg_vsi_info, vtmp, 802 &agg_info->agg_vsi_list, list_entry) { 803 list_del(&agg_vsi_info->list_entry); 804 devm_kfree(ice_hw_to_dev(hw), agg_vsi_info); 805 } 806 list_del(&agg_info->list_entry); 807 devm_kfree(ice_hw_to_dev(hw), agg_info); 808 } 809 } 810 811 /** 812 * ice_sched_clear_tx_topo - clears the scheduler tree nodes 813 * @pi: port information structure 814 * 815 * This function removes all the nodes from HW as well as from SW DB. 816 */ 817 static void ice_sched_clear_tx_topo(struct ice_port_info *pi) 818 { 819 if (!pi) 820 return; 821 /* remove RL profiles related lists */ 822 ice_sched_clear_rl_prof(pi); 823 if (pi->root) { 824 ice_free_sched_node(pi, pi->root); 825 pi->root = NULL; 826 } 827 } 828 829 /** 830 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port 831 * @pi: port information structure 832 * 833 * Cleanup scheduling elements from SW DB 834 */ 835 void ice_sched_clear_port(struct ice_port_info *pi) 836 { 837 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 838 return; 839 840 pi->port_state = ICE_SCHED_PORT_STATE_INIT; 841 mutex_lock(&pi->sched_lock); 842 ice_sched_clear_tx_topo(pi); 843 mutex_unlock(&pi->sched_lock); 844 mutex_destroy(&pi->sched_lock); 845 } 846 847 /** 848 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports 849 * @hw: pointer to the HW struct 850 * 851 * Cleanup scheduling elements from SW DB for all the ports 852 */ 853 void ice_sched_cleanup_all(struct ice_hw *hw) 854 { 855 if (!hw) 856 return; 857 858 devm_kfree(ice_hw_to_dev(hw), hw->layer_info); 859 hw->layer_info = NULL; 860 861 ice_sched_clear_port(hw->port_info); 862 863 hw->num_tx_sched_layers = 0; 864 hw->num_tx_sched_phys_layers = 0; 865 hw->flattened_layers = 0; 866 hw->max_cgds = 0; 867 } 868 869 /** 870 * ice_sched_add_elems - add nodes to HW and SW DB 871 * @pi: port information structure 872 * @tc_node: pointer to the branch node 873 * @parent: pointer to the parent node 874 * @layer: layer number to add nodes 875 * @num_nodes: number of nodes 876 * @num_nodes_added: pointer to num nodes added 877 * @first_node_teid: if new nodes are added then return the TEID of first node 878 * @prealloc_nodes: preallocated nodes struct for software DB 879 * 880 * This function add nodes to HW as well as to SW DB for a given layer 881 */ 882 int 883 ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, 884 struct ice_sched_node *parent, u8 layer, u16 num_nodes, 885 u16 *num_nodes_added, u32 *first_node_teid, 886 struct ice_sched_node **prealloc_nodes) 887 { 888 struct ice_sched_node *prev, *new_node; 889 struct ice_aqc_add_elem *buf; 890 u16 i, num_groups_added = 0; 891 struct ice_hw *hw = pi->hw; 892 size_t buf_size; 893 int status = 0; 894 u32 teid; 895 896 buf_size = struct_size(buf, generic, num_nodes); 897 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); 898 if (!buf) 899 return -ENOMEM; 900 901 buf->hdr.parent_teid = parent->info.node_teid; 902 buf->hdr.num_elems = cpu_to_le16(num_nodes); 903 for (i = 0; i < num_nodes; i++) { 904 buf->generic[i].parent_teid = parent->info.node_teid; 905 buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC; 906 buf->generic[i].data.valid_sections = 907 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 908 ICE_AQC_ELEM_VALID_EIR; 909 buf->generic[i].data.generic = 0; 910 buf->generic[i].data.cir_bw.bw_profile_idx = 911 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 912 buf->generic[i].data.cir_bw.bw_alloc = 913 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 914 buf->generic[i].data.eir_bw.bw_profile_idx = 915 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 916 buf->generic[i].data.eir_bw.bw_alloc = 917 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 918 } 919 920 status = ice_aq_add_sched_elems(hw, 1, buf, buf_size, 921 &num_groups_added, NULL); 922 if (status || num_groups_added != 1) { 923 ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n", 924 hw->adminq.sq_last_status); 925 devm_kfree(ice_hw_to_dev(hw), buf); 926 return -EIO; 927 } 928 929 *num_nodes_added = num_nodes; 930 /* add nodes to the SW DB */ 931 for (i = 0; i < num_nodes; i++) { 932 if (prealloc_nodes) 933 status = ice_sched_add_node(pi, layer, &buf->generic[i], prealloc_nodes[i]); 934 else 935 status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL); 936 937 if (status) { 938 ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n", 939 status); 940 break; 941 } 942 943 teid = le32_to_cpu(buf->generic[i].node_teid); 944 new_node = ice_sched_find_node_by_teid(parent, teid); 945 if (!new_node) { 946 ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid); 947 break; 948 } 949 950 new_node->sibling = NULL; 951 new_node->tc_num = tc_node->tc_num; 952 new_node->tx_weight = ICE_SCHED_DFLT_BW_WT; 953 new_node->tx_share = ICE_SCHED_DFLT_BW; 954 new_node->tx_max = ICE_SCHED_DFLT_BW; 955 new_node->name = kzalloc(SCHED_NODE_NAME_MAX_LEN, GFP_KERNEL); 956 if (!new_node->name) 957 return -ENOMEM; 958 959 status = xa_alloc(&pi->sched_node_ids, &new_node->id, NULL, XA_LIMIT(0, UINT_MAX), 960 GFP_KERNEL); 961 if (status) { 962 ice_debug(hw, ICE_DBG_SCHED, "xa_alloc failed for sched node status =%d\n", 963 status); 964 break; 965 } 966 967 snprintf(new_node->name, SCHED_NODE_NAME_MAX_LEN, "node_%u", new_node->id); 968 969 /* add it to previous node sibling pointer */ 970 /* Note: siblings are not linked across branches */ 971 prev = ice_sched_get_first_node(pi, tc_node, layer); 972 if (prev && prev != new_node) { 973 while (prev->sibling) 974 prev = prev->sibling; 975 prev->sibling = new_node; 976 } 977 978 /* initialize the sibling head */ 979 if (!pi->sib_head[tc_node->tc_num][layer]) 980 pi->sib_head[tc_node->tc_num][layer] = new_node; 981 982 if (i == 0) 983 *first_node_teid = teid; 984 } 985 986 devm_kfree(ice_hw_to_dev(hw), buf); 987 return status; 988 } 989 990 /** 991 * ice_sched_add_nodes_to_hw_layer - Add nodes to HW layer 992 * @pi: port information structure 993 * @tc_node: pointer to TC node 994 * @parent: pointer to parent node 995 * @layer: layer number to add nodes 996 * @num_nodes: number of nodes to be added 997 * @first_node_teid: pointer to the first node TEID 998 * @num_nodes_added: pointer to number of nodes added 999 * 1000 * Add nodes into specific HW layer. 1001 */ 1002 static int 1003 ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, 1004 struct ice_sched_node *tc_node, 1005 struct ice_sched_node *parent, u8 layer, 1006 u16 num_nodes, u32 *first_node_teid, 1007 u16 *num_nodes_added) 1008 { 1009 u16 max_child_nodes; 1010 1011 *num_nodes_added = 0; 1012 1013 if (!num_nodes) 1014 return 0; 1015 1016 if (!parent || layer < pi->hw->sw_entry_point_layer) 1017 return -EINVAL; 1018 1019 /* max children per node per layer */ 1020 max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; 1021 1022 /* current number of children + required nodes exceed max children */ 1023 if ((parent->num_children + num_nodes) > max_child_nodes) { 1024 /* Fail if the parent is a TC node */ 1025 if (parent == tc_node) 1026 return -EIO; 1027 return -ENOSPC; 1028 } 1029 1030 return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, 1031 num_nodes_added, first_node_teid, NULL); 1032 } 1033 1034 /** 1035 * ice_sched_add_nodes_to_layer - Add nodes to a given layer 1036 * @pi: port information structure 1037 * @tc_node: pointer to TC node 1038 * @parent: pointer to parent node 1039 * @layer: layer number to add nodes 1040 * @num_nodes: number of nodes to be added 1041 * @first_node_teid: pointer to the first node TEID 1042 * @num_nodes_added: pointer to number of nodes added 1043 * 1044 * This function add nodes to a given layer. 1045 */ 1046 int 1047 ice_sched_add_nodes_to_layer(struct ice_port_info *pi, 1048 struct ice_sched_node *tc_node, 1049 struct ice_sched_node *parent, u8 layer, 1050 u16 num_nodes, u32 *first_node_teid, 1051 u16 *num_nodes_added) 1052 { 1053 u32 *first_teid_ptr = first_node_teid; 1054 u16 new_num_nodes = num_nodes; 1055 int status = 0; 1056 1057 *num_nodes_added = 0; 1058 while (*num_nodes_added < num_nodes) { 1059 u16 max_child_nodes, num_added = 0; 1060 u32 temp; 1061 1062 status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent, 1063 layer, new_num_nodes, 1064 first_teid_ptr, 1065 &num_added); 1066 if (!status) 1067 *num_nodes_added += num_added; 1068 /* added more nodes than requested ? */ 1069 if (*num_nodes_added > num_nodes) { 1070 ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes, 1071 *num_nodes_added); 1072 status = -EIO; 1073 break; 1074 } 1075 /* break if all the nodes are added successfully */ 1076 if (!status && (*num_nodes_added == num_nodes)) 1077 break; 1078 /* break if the error is not max limit */ 1079 if (status && status != -ENOSPC) 1080 break; 1081 /* Exceeded the max children */ 1082 max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; 1083 /* utilize all the spaces if the parent is not full */ 1084 if (parent->num_children < max_child_nodes) { 1085 new_num_nodes = max_child_nodes - parent->num_children; 1086 } else { 1087 /* This parent is full, try the next sibling */ 1088 parent = parent->sibling; 1089 /* Don't modify the first node TEID memory if the 1090 * first node was added already in the above call. 1091 * Instead send some temp memory for all other 1092 * recursive calls. 1093 */ 1094 if (num_added) 1095 first_teid_ptr = &temp; 1096 1097 new_num_nodes = num_nodes - *num_nodes_added; 1098 } 1099 } 1100 return status; 1101 } 1102 1103 /** 1104 * ice_sched_get_qgrp_layer - get the current queue group layer number 1105 * @hw: pointer to the HW struct 1106 * 1107 * This function returns the current queue group layer number 1108 */ 1109 static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw) 1110 { 1111 /* It's always total layers - 1, the array is 0 relative so -2 */ 1112 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET; 1113 } 1114 1115 /** 1116 * ice_sched_get_vsi_layer - get the current VSI layer number 1117 * @hw: pointer to the HW struct 1118 * 1119 * This function returns the current VSI layer number 1120 */ 1121 u8 ice_sched_get_vsi_layer(struct ice_hw *hw) 1122 { 1123 /* Num Layers VSI layer 1124 * 9 6 1125 * 7 4 1126 * 5 or less sw_entry_point_layer 1127 */ 1128 /* calculate the VSI layer based on number of layers. */ 1129 if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) 1130 return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; 1131 else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) 1132 /* qgroup and VSI layers are same */ 1133 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET; 1134 return hw->sw_entry_point_layer; 1135 } 1136 1137 /** 1138 * ice_sched_get_agg_layer - get the current aggregator layer number 1139 * @hw: pointer to the HW struct 1140 * 1141 * This function returns the current aggregator layer number 1142 */ 1143 u8 ice_sched_get_agg_layer(struct ice_hw *hw) 1144 { 1145 /* Num Layers aggregator layer 1146 * 9 4 1147 * 7 or less sw_entry_point_layer 1148 */ 1149 /* calculate the aggregator layer based on number of layers. */ 1150 if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) 1151 return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; 1152 else 1153 return hw->sw_entry_point_layer; 1154 } 1155 1156 /** 1157 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree 1158 * @pi: port information structure 1159 * 1160 * This function removes the leaf node that was created by the FW 1161 * during initialization 1162 */ 1163 static void ice_rm_dflt_leaf_node(struct ice_port_info *pi) 1164 { 1165 struct ice_sched_node *node; 1166 1167 node = pi->root; 1168 while (node) { 1169 if (!node->num_children) 1170 break; 1171 node = node->children[0]; 1172 } 1173 if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) { 1174 u32 teid = le32_to_cpu(node->info.node_teid); 1175 int status; 1176 1177 /* remove the default leaf node */ 1178 status = ice_sched_remove_elems(pi->hw, node->parent, teid); 1179 if (!status) 1180 ice_free_sched_node(pi, node); 1181 } 1182 } 1183 1184 /** 1185 * ice_sched_rm_dflt_nodes - free the default nodes in the tree 1186 * @pi: port information structure 1187 * 1188 * This function frees all the nodes except root and TC that were created by 1189 * the FW during initialization 1190 */ 1191 static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi) 1192 { 1193 struct ice_sched_node *node; 1194 1195 ice_rm_dflt_leaf_node(pi); 1196 1197 /* remove the default nodes except TC and root nodes */ 1198 node = pi->root; 1199 while (node) { 1200 if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer && 1201 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC && 1202 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) { 1203 ice_free_sched_node(pi, node); 1204 break; 1205 } 1206 1207 if (!node->num_children) 1208 break; 1209 node = node->children[0]; 1210 } 1211 } 1212 1213 /** 1214 * ice_sched_init_port - Initialize scheduler by querying information from FW 1215 * @pi: port info structure for the tree to cleanup 1216 * 1217 * This function is the initial call to find the total number of Tx scheduler 1218 * resources, default topology created by firmware and storing the information 1219 * in SW DB. 1220 */ 1221 int ice_sched_init_port(struct ice_port_info *pi) 1222 { 1223 struct ice_aqc_get_topo_elem *buf; 1224 struct ice_hw *hw; 1225 u8 num_branches; 1226 u16 num_elems; 1227 int status; 1228 u8 i, j; 1229 1230 if (!pi) 1231 return -EINVAL; 1232 hw = pi->hw; 1233 1234 /* Query the Default Topology from FW */ 1235 buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 1236 if (!buf) 1237 return -ENOMEM; 1238 1239 /* Query default scheduling tree topology */ 1240 status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN, 1241 &num_branches, NULL); 1242 if (status) 1243 goto err_init_port; 1244 1245 /* num_branches should be between 1-8 */ 1246 if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) { 1247 ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n", 1248 num_branches); 1249 status = -EINVAL; 1250 goto err_init_port; 1251 } 1252 1253 /* get the number of elements on the default/first branch */ 1254 num_elems = le16_to_cpu(buf[0].hdr.num_elems); 1255 1256 /* num_elems should always be between 1-9 */ 1257 if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) { 1258 ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n", 1259 num_elems); 1260 status = -EINVAL; 1261 goto err_init_port; 1262 } 1263 1264 /* If the last node is a leaf node then the index of the queue group 1265 * layer is two less than the number of elements. 1266 */ 1267 if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type == 1268 ICE_AQC_ELEM_TYPE_LEAF) 1269 pi->last_node_teid = 1270 le32_to_cpu(buf[0].generic[num_elems - 2].node_teid); 1271 else 1272 pi->last_node_teid = 1273 le32_to_cpu(buf[0].generic[num_elems - 1].node_teid); 1274 1275 /* Insert the Tx Sched root node */ 1276 status = ice_sched_add_root_node(pi, &buf[0].generic[0]); 1277 if (status) 1278 goto err_init_port; 1279 1280 /* Parse the default tree and cache the information */ 1281 for (i = 0; i < num_branches; i++) { 1282 num_elems = le16_to_cpu(buf[i].hdr.num_elems); 1283 1284 /* Skip root element as already inserted */ 1285 for (j = 1; j < num_elems; j++) { 1286 /* update the sw entry point */ 1287 if (buf[0].generic[j].data.elem_type == 1288 ICE_AQC_ELEM_TYPE_ENTRY_POINT) 1289 hw->sw_entry_point_layer = j; 1290 1291 status = ice_sched_add_node(pi, j, &buf[i].generic[j], NULL); 1292 if (status) 1293 goto err_init_port; 1294 } 1295 } 1296 1297 /* Remove the default nodes. */ 1298 if (pi->root) 1299 ice_sched_rm_dflt_nodes(pi); 1300 1301 /* initialize the port for handling the scheduler tree */ 1302 pi->port_state = ICE_SCHED_PORT_STATE_READY; 1303 mutex_init(&pi->sched_lock); 1304 for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) 1305 INIT_LIST_HEAD(&pi->rl_prof_list[i]); 1306 1307 err_init_port: 1308 if (status && pi->root) { 1309 ice_free_sched_node(pi, pi->root); 1310 pi->root = NULL; 1311 } 1312 1313 kfree(buf); 1314 return status; 1315 } 1316 1317 /** 1318 * ice_sched_query_res_alloc - query the FW for num of logical sched layers 1319 * @hw: pointer to the HW struct 1320 * 1321 * query FW for allocated scheduler resources and store in HW struct 1322 */ 1323 int ice_sched_query_res_alloc(struct ice_hw *hw) 1324 { 1325 struct ice_aqc_query_txsched_res_resp *buf; 1326 __le16 max_sibl; 1327 int status = 0; 1328 u16 i; 1329 1330 if (hw->layer_info) 1331 return status; 1332 1333 buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL); 1334 if (!buf) 1335 return -ENOMEM; 1336 1337 status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL); 1338 if (status) 1339 goto sched_query_out; 1340 1341 hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels); 1342 hw->num_tx_sched_phys_layers = 1343 le16_to_cpu(buf->sched_props.phys_levels); 1344 hw->flattened_layers = buf->sched_props.flattening_bitmap; 1345 hw->max_cgds = buf->sched_props.max_pf_cgds; 1346 1347 /* max sibling group size of current layer refers to the max children 1348 * of the below layer node. 1349 * layer 1 node max children will be layer 2 max sibling group size 1350 * layer 2 node max children will be layer 3 max sibling group size 1351 * and so on. This array will be populated from root (index 0) to 1352 * qgroup layer 7. Leaf node has no children. 1353 */ 1354 for (i = 0; i < hw->num_tx_sched_layers - 1; i++) { 1355 max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz; 1356 hw->max_children[i] = le16_to_cpu(max_sibl); 1357 } 1358 1359 hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, 1360 (hw->num_tx_sched_layers * 1361 sizeof(*hw->layer_info)), 1362 GFP_KERNEL); 1363 if (!hw->layer_info) { 1364 status = -ENOMEM; 1365 goto sched_query_out; 1366 } 1367 1368 sched_query_out: 1369 devm_kfree(ice_hw_to_dev(hw), buf); 1370 return status; 1371 } 1372 1373 /** 1374 * ice_sched_get_psm_clk_freq - determine the PSM clock frequency 1375 * @hw: pointer to the HW struct 1376 * 1377 * Determine the PSM clock frequency and store in HW struct 1378 */ 1379 void ice_sched_get_psm_clk_freq(struct ice_hw *hw) 1380 { 1381 u32 val, clk_src; 1382 1383 val = rd32(hw, GLGEN_CLKSTAT_SRC); 1384 clk_src = FIELD_GET(GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M, val); 1385 1386 #define PSM_CLK_SRC_367_MHZ 0x0 1387 #define PSM_CLK_SRC_416_MHZ 0x1 1388 #define PSM_CLK_SRC_446_MHZ 0x2 1389 #define PSM_CLK_SRC_390_MHZ 0x3 1390 1391 switch (clk_src) { 1392 case PSM_CLK_SRC_367_MHZ: 1393 hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ; 1394 break; 1395 case PSM_CLK_SRC_416_MHZ: 1396 hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ; 1397 break; 1398 case PSM_CLK_SRC_446_MHZ: 1399 hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ; 1400 break; 1401 case PSM_CLK_SRC_390_MHZ: 1402 hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ; 1403 break; 1404 default: 1405 ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n", 1406 clk_src); 1407 /* fall back to a safe default */ 1408 hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ; 1409 } 1410 } 1411 1412 /** 1413 * ice_sched_find_node_in_subtree - Find node in part of base node subtree 1414 * @hw: pointer to the HW struct 1415 * @base: pointer to the base node 1416 * @node: pointer to the node to search 1417 * 1418 * This function checks whether a given node is part of the base node 1419 * subtree or not 1420 */ 1421 static bool 1422 ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base, 1423 struct ice_sched_node *node) 1424 { 1425 u8 i; 1426 1427 for (i = 0; i < base->num_children; i++) { 1428 struct ice_sched_node *child = base->children[i]; 1429 1430 if (node == child) 1431 return true; 1432 1433 if (child->tx_sched_layer > node->tx_sched_layer) 1434 return false; 1435 1436 /* this recursion is intentional, and wouldn't 1437 * go more than 8 calls 1438 */ 1439 if (ice_sched_find_node_in_subtree(hw, child, node)) 1440 return true; 1441 } 1442 return false; 1443 } 1444 1445 /** 1446 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node 1447 * @pi: port information structure 1448 * @vsi_node: software VSI handle 1449 * @qgrp_node: first queue group node identified for scanning 1450 * @owner: LAN or RDMA 1451 * 1452 * This function retrieves a free LAN or RDMA queue group node by scanning 1453 * qgrp_node and its siblings for the queue group with the fewest number 1454 * of queues currently assigned. 1455 */ 1456 static struct ice_sched_node * 1457 ice_sched_get_free_qgrp(struct ice_port_info *pi, 1458 struct ice_sched_node *vsi_node, 1459 struct ice_sched_node *qgrp_node, u8 owner) 1460 { 1461 struct ice_sched_node *min_qgrp; 1462 u8 min_children; 1463 1464 if (!qgrp_node) 1465 return qgrp_node; 1466 min_children = qgrp_node->num_children; 1467 if (!min_children) 1468 return qgrp_node; 1469 min_qgrp = qgrp_node; 1470 /* scan all queue groups until find a node which has less than the 1471 * minimum number of children. This way all queue group nodes get 1472 * equal number of shares and active. The bandwidth will be equally 1473 * distributed across all queues. 1474 */ 1475 while (qgrp_node) { 1476 /* make sure the qgroup node is part of the VSI subtree */ 1477 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) 1478 if (qgrp_node->num_children < min_children && 1479 qgrp_node->owner == owner) { 1480 /* replace the new min queue group node */ 1481 min_qgrp = qgrp_node; 1482 min_children = min_qgrp->num_children; 1483 /* break if it has no children, */ 1484 if (!min_children) 1485 break; 1486 } 1487 qgrp_node = qgrp_node->sibling; 1488 } 1489 return min_qgrp; 1490 } 1491 1492 /** 1493 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node 1494 * @pi: port information structure 1495 * @vsi_handle: software VSI handle 1496 * @tc: branch number 1497 * @owner: LAN or RDMA 1498 * 1499 * This function retrieves a free LAN or RDMA queue group node 1500 */ 1501 struct ice_sched_node * 1502 ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 1503 u8 owner) 1504 { 1505 struct ice_sched_node *vsi_node, *qgrp_node; 1506 struct ice_vsi_ctx *vsi_ctx; 1507 u8 qgrp_layer, vsi_layer; 1508 u16 max_children; 1509 1510 qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); 1511 vsi_layer = ice_sched_get_vsi_layer(pi->hw); 1512 max_children = pi->hw->max_children[qgrp_layer]; 1513 1514 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 1515 if (!vsi_ctx) 1516 return NULL; 1517 vsi_node = vsi_ctx->sched.vsi_node[tc]; 1518 /* validate invalid VSI ID */ 1519 if (!vsi_node) 1520 return NULL; 1521 1522 /* If the queue group and VSI layer are same then queues 1523 * are all attached directly to VSI 1524 */ 1525 if (qgrp_layer == vsi_layer) 1526 return vsi_node; 1527 1528 /* get the first queue group node from VSI sub-tree */ 1529 qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); 1530 while (qgrp_node) { 1531 /* make sure the qgroup node is part of the VSI subtree */ 1532 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) 1533 if (qgrp_node->num_children < max_children && 1534 qgrp_node->owner == owner) 1535 break; 1536 qgrp_node = qgrp_node->sibling; 1537 } 1538 1539 /* Select the best queue group */ 1540 return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner); 1541 } 1542 1543 /** 1544 * ice_sched_get_vsi_node - Get a VSI node based on VSI ID 1545 * @pi: pointer to the port information structure 1546 * @tc_node: pointer to the TC node 1547 * @vsi_handle: software VSI handle 1548 * 1549 * This function retrieves a VSI node for a given VSI ID from a given 1550 * TC branch 1551 */ 1552 static struct ice_sched_node * 1553 ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, 1554 u16 vsi_handle) 1555 { 1556 struct ice_sched_node *node; 1557 u8 vsi_layer; 1558 1559 vsi_layer = ice_sched_get_vsi_layer(pi->hw); 1560 node = ice_sched_get_first_node(pi, tc_node, vsi_layer); 1561 1562 /* Check whether it already exists */ 1563 while (node) { 1564 if (node->vsi_handle == vsi_handle) 1565 return node; 1566 node = node->sibling; 1567 } 1568 1569 return node; 1570 } 1571 1572 /** 1573 * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID 1574 * @pi: pointer to the port information structure 1575 * @tc_node: pointer to the TC node 1576 * @agg_id: aggregator ID 1577 * 1578 * This function retrieves an aggregator node for a given aggregator ID from 1579 * a given TC branch 1580 */ 1581 struct ice_sched_node * 1582 ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, 1583 u32 agg_id) 1584 { 1585 struct ice_sched_node *node; 1586 struct ice_hw *hw = pi->hw; 1587 u8 agg_layer; 1588 1589 if (!hw) 1590 return NULL; 1591 agg_layer = ice_sched_get_agg_layer(hw); 1592 node = ice_sched_get_first_node(pi, tc_node, agg_layer); 1593 1594 /* Check whether it already exists */ 1595 while (node) { 1596 if (node->agg_id == agg_id) 1597 return node; 1598 node = node->sibling; 1599 } 1600 1601 return node; 1602 } 1603 1604 /** 1605 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes 1606 * @hw: pointer to the HW struct 1607 * @num_qs: number of queues 1608 * @num_nodes: num nodes array 1609 * 1610 * This function calculates the number of VSI child nodes based on the 1611 * number of queues. 1612 */ 1613 static void 1614 ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) 1615 { 1616 u16 num = num_qs; 1617 u8 i, qgl, vsil; 1618 1619 qgl = ice_sched_get_qgrp_layer(hw); 1620 vsil = ice_sched_get_vsi_layer(hw); 1621 1622 /* calculate num nodes from queue group to VSI layer */ 1623 for (i = qgl; i > vsil; i--) { 1624 /* round to the next integer if there is a remainder */ 1625 num = DIV_ROUND_UP(num, hw->max_children[i]); 1626 1627 /* need at least one node */ 1628 num_nodes[i] = num ? num : 1; 1629 } 1630 } 1631 1632 /** 1633 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree 1634 * @pi: port information structure 1635 * @vsi_handle: software VSI handle 1636 * @tc_node: pointer to the TC node 1637 * @num_nodes: pointer to the num nodes that needs to be added per layer 1638 * @owner: node owner (LAN or RDMA) 1639 * 1640 * This function adds the VSI child nodes to tree. It gets called for 1641 * LAN and RDMA separately. 1642 */ 1643 static int 1644 ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, 1645 struct ice_sched_node *tc_node, u16 *num_nodes, 1646 u8 owner) 1647 { 1648 struct ice_sched_node *parent, *node; 1649 struct ice_hw *hw = pi->hw; 1650 u32 first_node_teid; 1651 u16 num_added = 0; 1652 u8 i, qgl, vsil; 1653 1654 qgl = ice_sched_get_qgrp_layer(hw); 1655 vsil = ice_sched_get_vsi_layer(hw); 1656 parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); 1657 for (i = vsil + 1; i <= qgl; i++) { 1658 int status; 1659 1660 if (!parent) 1661 return -EIO; 1662 1663 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, 1664 num_nodes[i], 1665 &first_node_teid, 1666 &num_added); 1667 if (status || num_nodes[i] != num_added) 1668 return -EIO; 1669 1670 /* The newly added node can be a new parent for the next 1671 * layer nodes 1672 */ 1673 if (num_added) { 1674 parent = ice_sched_find_node_by_teid(tc_node, 1675 first_node_teid); 1676 node = parent; 1677 while (node) { 1678 node->owner = owner; 1679 node = node->sibling; 1680 } 1681 } else { 1682 parent = parent->children[0]; 1683 } 1684 } 1685 1686 return 0; 1687 } 1688 1689 /** 1690 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes 1691 * @pi: pointer to the port info structure 1692 * @tc_node: pointer to TC node 1693 * @num_nodes: pointer to num nodes array 1694 * 1695 * This function calculates the number of supported nodes needed to add this 1696 * VSI into Tx tree including the VSI, parent and intermediate nodes in below 1697 * layers 1698 */ 1699 static void 1700 ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi, 1701 struct ice_sched_node *tc_node, u16 *num_nodes) 1702 { 1703 struct ice_sched_node *node; 1704 u8 vsil; 1705 int i; 1706 1707 vsil = ice_sched_get_vsi_layer(pi->hw); 1708 for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--) 1709 /* Add intermediate nodes if TC has no children and 1710 * need at least one node for VSI 1711 */ 1712 if (!tc_node->num_children || i == vsil) { 1713 num_nodes[i]++; 1714 } else { 1715 /* If intermediate nodes are reached max children 1716 * then add a new one. 1717 */ 1718 node = ice_sched_get_first_node(pi, tc_node, (u8)i); 1719 /* scan all the siblings */ 1720 while (node) { 1721 if (node->num_children < pi->hw->max_children[i]) 1722 break; 1723 node = node->sibling; 1724 } 1725 1726 /* tree has one intermediate node to add this new VSI. 1727 * So no need to calculate supported nodes for below 1728 * layers. 1729 */ 1730 if (node) 1731 break; 1732 /* all the nodes are full, allocate a new one */ 1733 num_nodes[i]++; 1734 } 1735 } 1736 1737 /** 1738 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree 1739 * @pi: port information structure 1740 * @vsi_handle: software VSI handle 1741 * @tc_node: pointer to TC node 1742 * @num_nodes: pointer to num nodes array 1743 * 1744 * This function adds the VSI supported nodes into Tx tree including the 1745 * VSI, its parent and intermediate nodes in below layers 1746 */ 1747 static int 1748 ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, 1749 struct ice_sched_node *tc_node, u16 *num_nodes) 1750 { 1751 struct ice_sched_node *parent = tc_node; 1752 u32 first_node_teid; 1753 u16 num_added = 0; 1754 u8 i, vsil; 1755 1756 if (!pi) 1757 return -EINVAL; 1758 1759 vsil = ice_sched_get_vsi_layer(pi->hw); 1760 for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { 1761 int status; 1762 1763 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, 1764 i, num_nodes[i], 1765 &first_node_teid, 1766 &num_added); 1767 if (status || num_nodes[i] != num_added) 1768 return -EIO; 1769 1770 /* The newly added node can be a new parent for the next 1771 * layer nodes 1772 */ 1773 if (num_added) 1774 parent = ice_sched_find_node_by_teid(tc_node, 1775 first_node_teid); 1776 else 1777 parent = parent->children[0]; 1778 1779 if (!parent) 1780 return -EIO; 1781 1782 if (i == vsil) 1783 parent->vsi_handle = vsi_handle; 1784 } 1785 1786 return 0; 1787 } 1788 1789 /** 1790 * ice_sched_add_vsi_to_topo - add a new VSI into tree 1791 * @pi: port information structure 1792 * @vsi_handle: software VSI handle 1793 * @tc: TC number 1794 * 1795 * This function adds a new VSI into scheduler tree 1796 */ 1797 static int 1798 ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) 1799 { 1800 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; 1801 struct ice_sched_node *tc_node; 1802 1803 tc_node = ice_sched_get_tc_node(pi, tc); 1804 if (!tc_node) 1805 return -EINVAL; 1806 1807 /* calculate number of supported nodes needed for this VSI */ 1808 ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes); 1809 1810 /* add VSI supported nodes to TC subtree */ 1811 return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node, 1812 num_nodes); 1813 } 1814 1815 /** 1816 * ice_sched_update_vsi_child_nodes - update VSI child nodes 1817 * @pi: port information structure 1818 * @vsi_handle: software VSI handle 1819 * @tc: TC number 1820 * @new_numqs: new number of max queues 1821 * @owner: owner of this subtree 1822 * 1823 * This function updates the VSI child nodes based on the number of queues 1824 */ 1825 static int 1826 ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, 1827 u8 tc, u16 new_numqs, u8 owner) 1828 { 1829 u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; 1830 struct ice_sched_node *vsi_node; 1831 struct ice_sched_node *tc_node; 1832 struct ice_vsi_ctx *vsi_ctx; 1833 struct ice_hw *hw = pi->hw; 1834 u16 prev_numqs; 1835 int status = 0; 1836 1837 tc_node = ice_sched_get_tc_node(pi, tc); 1838 if (!tc_node) 1839 return -EIO; 1840 1841 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); 1842 if (!vsi_node) 1843 return -EIO; 1844 1845 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); 1846 if (!vsi_ctx) 1847 return -EINVAL; 1848 1849 if (owner == ICE_SCHED_NODE_OWNER_LAN) 1850 prev_numqs = vsi_ctx->sched.max_lanq[tc]; 1851 else 1852 prev_numqs = vsi_ctx->sched.max_rdmaq[tc]; 1853 /* num queues are not changed or less than the previous number */ 1854 if (new_numqs <= prev_numqs) 1855 return status; 1856 if (owner == ICE_SCHED_NODE_OWNER_LAN) { 1857 status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs); 1858 if (status) 1859 return status; 1860 } else { 1861 status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs); 1862 if (status) 1863 return status; 1864 } 1865 1866 if (new_numqs) 1867 ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes); 1868 /* Keep the max number of queue configuration all the time. Update the 1869 * tree only if number of queues > previous number of queues. This may 1870 * leave some extra nodes in the tree if number of queues < previous 1871 * number but that wouldn't harm anything. Removing those extra nodes 1872 * may complicate the code if those nodes are part of SRL or 1873 * individually rate limited. 1874 */ 1875 status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node, 1876 new_num_nodes, owner); 1877 if (status) 1878 return status; 1879 if (owner == ICE_SCHED_NODE_OWNER_LAN) 1880 vsi_ctx->sched.max_lanq[tc] = new_numqs; 1881 else 1882 vsi_ctx->sched.max_rdmaq[tc] = new_numqs; 1883 1884 return 0; 1885 } 1886 1887 /** 1888 * ice_sched_cfg_vsi - configure the new/existing VSI 1889 * @pi: port information structure 1890 * @vsi_handle: software VSI handle 1891 * @tc: TC number 1892 * @maxqs: max number of queues 1893 * @owner: LAN or RDMA 1894 * @enable: TC enabled or disabled 1895 * 1896 * This function adds/updates VSI nodes based on the number of queues. If TC is 1897 * enabled and VSI is in suspended state then resume the VSI back. If TC is 1898 * disabled then suspend the VSI if it is not already. 1899 */ 1900 int 1901 ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, 1902 u8 owner, bool enable) 1903 { 1904 struct ice_sched_node *vsi_node, *tc_node; 1905 struct ice_vsi_ctx *vsi_ctx; 1906 struct ice_hw *hw = pi->hw; 1907 int status = 0; 1908 1909 ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle); 1910 tc_node = ice_sched_get_tc_node(pi, tc); 1911 if (!tc_node) 1912 return -EINVAL; 1913 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); 1914 if (!vsi_ctx) 1915 return -EINVAL; 1916 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); 1917 1918 /* suspend the VSI if TC is not enabled */ 1919 if (!enable) { 1920 if (vsi_node && vsi_node->in_use) { 1921 u32 teid = le32_to_cpu(vsi_node->info.node_teid); 1922 1923 status = ice_sched_suspend_resume_elems(hw, 1, &teid, 1924 true); 1925 if (!status) 1926 vsi_node->in_use = false; 1927 } 1928 return status; 1929 } 1930 1931 /* TC is enabled, if it is a new VSI then add it to the tree */ 1932 if (!vsi_node) { 1933 status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc); 1934 if (status) 1935 return status; 1936 1937 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); 1938 if (!vsi_node) 1939 return -EIO; 1940 1941 vsi_ctx->sched.vsi_node[tc] = vsi_node; 1942 vsi_node->in_use = true; 1943 /* invalidate the max queues whenever VSI gets added first time 1944 * into the scheduler tree (boot or after reset). We need to 1945 * recreate the child nodes all the time in these cases. 1946 */ 1947 vsi_ctx->sched.max_lanq[tc] = 0; 1948 vsi_ctx->sched.max_rdmaq[tc] = 0; 1949 } 1950 1951 /* update the VSI child nodes */ 1952 status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs, 1953 owner); 1954 if (status) 1955 return status; 1956 1957 /* TC is enabled, resume the VSI if it is in the suspend state */ 1958 if (!vsi_node->in_use) { 1959 u32 teid = le32_to_cpu(vsi_node->info.node_teid); 1960 1961 status = ice_sched_suspend_resume_elems(hw, 1, &teid, false); 1962 if (!status) 1963 vsi_node->in_use = true; 1964 } 1965 1966 return status; 1967 } 1968 1969 /** 1970 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry 1971 * @pi: port information structure 1972 * @vsi_handle: software VSI handle 1973 * 1974 * This function removes single aggregator VSI info entry from 1975 * aggregator list. 1976 */ 1977 static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle) 1978 { 1979 struct ice_sched_agg_info *agg_info; 1980 struct ice_sched_agg_info *atmp; 1981 1982 list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list, 1983 list_entry) { 1984 struct ice_sched_agg_vsi_info *agg_vsi_info; 1985 struct ice_sched_agg_vsi_info *vtmp; 1986 1987 list_for_each_entry_safe(agg_vsi_info, vtmp, 1988 &agg_info->agg_vsi_list, list_entry) 1989 if (agg_vsi_info->vsi_handle == vsi_handle) { 1990 list_del(&agg_vsi_info->list_entry); 1991 devm_kfree(ice_hw_to_dev(pi->hw), 1992 agg_vsi_info); 1993 return; 1994 } 1995 } 1996 } 1997 1998 /** 1999 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree 2000 * @node: pointer to the sub-tree node 2001 * 2002 * This function checks for a leaf node presence in a given sub-tree node. 2003 */ 2004 static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node) 2005 { 2006 u8 i; 2007 2008 for (i = 0; i < node->num_children; i++) 2009 if (ice_sched_is_leaf_node_present(node->children[i])) 2010 return true; 2011 /* check for a leaf node */ 2012 return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF); 2013 } 2014 2015 /** 2016 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes 2017 * @pi: port information structure 2018 * @vsi_handle: software VSI handle 2019 * @owner: LAN or RDMA 2020 * 2021 * This function removes the VSI and its LAN or RDMA children nodes from the 2022 * scheduler tree. 2023 */ 2024 static int 2025 ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) 2026 { 2027 struct ice_vsi_ctx *vsi_ctx; 2028 int status = -EINVAL; 2029 u8 i; 2030 2031 ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle); 2032 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 2033 return status; 2034 mutex_lock(&pi->sched_lock); 2035 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 2036 if (!vsi_ctx) 2037 goto exit_sched_rm_vsi_cfg; 2038 2039 ice_for_each_traffic_class(i) { 2040 struct ice_sched_node *vsi_node, *tc_node; 2041 u8 j = 0; 2042 2043 tc_node = ice_sched_get_tc_node(pi, i); 2044 if (!tc_node) 2045 continue; 2046 2047 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); 2048 if (!vsi_node) 2049 continue; 2050 2051 if (ice_sched_is_leaf_node_present(vsi_node)) { 2052 ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i); 2053 status = -EBUSY; 2054 goto exit_sched_rm_vsi_cfg; 2055 } 2056 while (j < vsi_node->num_children) { 2057 if (vsi_node->children[j]->owner == owner) { 2058 ice_free_sched_node(pi, vsi_node->children[j]); 2059 2060 /* reset the counter again since the num 2061 * children will be updated after node removal 2062 */ 2063 j = 0; 2064 } else { 2065 j++; 2066 } 2067 } 2068 /* remove the VSI if it has no children */ 2069 if (!vsi_node->num_children) { 2070 ice_free_sched_node(pi, vsi_node); 2071 vsi_ctx->sched.vsi_node[i] = NULL; 2072 2073 /* clean up aggregator related VSI info if any */ 2074 ice_sched_rm_agg_vsi_info(pi, vsi_handle); 2075 } 2076 if (owner == ICE_SCHED_NODE_OWNER_LAN) 2077 vsi_ctx->sched.max_lanq[i] = 0; 2078 else 2079 vsi_ctx->sched.max_rdmaq[i] = 0; 2080 } 2081 status = 0; 2082 2083 exit_sched_rm_vsi_cfg: 2084 mutex_unlock(&pi->sched_lock); 2085 return status; 2086 } 2087 2088 /** 2089 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes 2090 * @pi: port information structure 2091 * @vsi_handle: software VSI handle 2092 * 2093 * This function clears the VSI and its LAN children nodes from scheduler tree 2094 * for all TCs. 2095 */ 2096 int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) 2097 { 2098 return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN); 2099 } 2100 2101 /** 2102 * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes 2103 * @pi: port information structure 2104 * @vsi_handle: software VSI handle 2105 * 2106 * This function clears the VSI and its RDMA children nodes from scheduler tree 2107 * for all TCs. 2108 */ 2109 int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle) 2110 { 2111 return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA); 2112 } 2113 2114 /** 2115 * ice_get_agg_info - get the aggregator ID 2116 * @hw: pointer to the hardware structure 2117 * @agg_id: aggregator ID 2118 * 2119 * This function validates aggregator ID. The function returns info if 2120 * aggregator ID is present in list otherwise it returns null. 2121 */ 2122 static struct ice_sched_agg_info * 2123 ice_get_agg_info(struct ice_hw *hw, u32 agg_id) 2124 { 2125 struct ice_sched_agg_info *agg_info; 2126 2127 list_for_each_entry(agg_info, &hw->agg_list, list_entry) 2128 if (agg_info->agg_id == agg_id) 2129 return agg_info; 2130 2131 return NULL; 2132 } 2133 2134 /** 2135 * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree 2136 * @hw: pointer to the HW struct 2137 * @node: pointer to a child node 2138 * @num_nodes: num nodes count array 2139 * 2140 * This function walks through the aggregator subtree to find a free parent 2141 * node 2142 */ 2143 struct ice_sched_node * 2144 ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node, 2145 u16 *num_nodes) 2146 { 2147 u8 l = node->tx_sched_layer; 2148 u8 vsil, i; 2149 2150 vsil = ice_sched_get_vsi_layer(hw); 2151 2152 /* Is it VSI parent layer ? */ 2153 if (l == vsil - 1) 2154 return (node->num_children < hw->max_children[l]) ? node : NULL; 2155 2156 /* We have intermediate nodes. Let's walk through the subtree. If the 2157 * intermediate node has space to add a new node then clear the count 2158 */ 2159 if (node->num_children < hw->max_children[l]) 2160 num_nodes[l] = 0; 2161 /* The below recursive call is intentional and wouldn't go more than 2162 * 2 or 3 iterations. 2163 */ 2164 2165 for (i = 0; i < node->num_children; i++) { 2166 struct ice_sched_node *parent; 2167 2168 parent = ice_sched_get_free_vsi_parent(hw, node->children[i], 2169 num_nodes); 2170 if (parent) 2171 return parent; 2172 } 2173 2174 return NULL; 2175 } 2176 2177 /** 2178 * ice_sched_update_parent - update the new parent in SW DB 2179 * @new_parent: pointer to a new parent node 2180 * @node: pointer to a child node 2181 * 2182 * This function removes the child from the old parent and adds it to a new 2183 * parent 2184 */ 2185 void 2186 ice_sched_update_parent(struct ice_sched_node *new_parent, 2187 struct ice_sched_node *node) 2188 { 2189 struct ice_sched_node *old_parent; 2190 u8 i, j; 2191 2192 old_parent = node->parent; 2193 2194 /* update the old parent children */ 2195 for (i = 0; i < old_parent->num_children; i++) 2196 if (old_parent->children[i] == node) { 2197 for (j = i + 1; j < old_parent->num_children; j++) 2198 old_parent->children[j - 1] = 2199 old_parent->children[j]; 2200 old_parent->num_children--; 2201 break; 2202 } 2203 2204 /* now move the node to a new parent */ 2205 new_parent->children[new_parent->num_children++] = node; 2206 node->parent = new_parent; 2207 node->info.parent_teid = new_parent->info.node_teid; 2208 } 2209 2210 /** 2211 * ice_sched_move_nodes - move child nodes to a given parent 2212 * @pi: port information structure 2213 * @parent: pointer to parent node 2214 * @num_items: number of child nodes to be moved 2215 * @list: pointer to child node teids 2216 * 2217 * This function move the child nodes to a given parent. 2218 */ 2219 int 2220 ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, 2221 u16 num_items, u32 *list) 2222 { 2223 DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1); 2224 u16 buf_len = __struct_size(buf); 2225 struct ice_sched_node *node; 2226 u16 i, grps_movd = 0; 2227 struct ice_hw *hw; 2228 int status = 0; 2229 2230 hw = pi->hw; 2231 2232 if (!parent || !num_items) 2233 return -EINVAL; 2234 2235 /* Does parent have enough space */ 2236 if (parent->num_children + num_items > 2237 hw->max_children[parent->tx_sched_layer]) 2238 return -ENOSPC; 2239 2240 for (i = 0; i < num_items; i++) { 2241 node = ice_sched_find_node_by_teid(pi->root, list[i]); 2242 if (!node) { 2243 status = -EINVAL; 2244 break; 2245 } 2246 2247 buf->hdr.src_parent_teid = node->info.parent_teid; 2248 buf->hdr.dest_parent_teid = parent->info.node_teid; 2249 buf->teid[0] = node->info.node_teid; 2250 buf->hdr.num_elems = cpu_to_le16(1); 2251 status = ice_aq_move_sched_elems(hw, buf, buf_len, &grps_movd); 2252 if (status && grps_movd != 1) { 2253 status = -EIO; 2254 break; 2255 } 2256 2257 /* update the SW DB */ 2258 ice_sched_update_parent(parent, node); 2259 } 2260 2261 return status; 2262 } 2263 2264 /** 2265 * ice_sched_move_vsi_to_agg - move VSI to aggregator node 2266 * @pi: port information structure 2267 * @vsi_handle: software VSI handle 2268 * @agg_id: aggregator ID 2269 * @tc: TC number 2270 * 2271 * This function moves a VSI to an aggregator node or its subtree. 2272 * Intermediate nodes may be created if required. 2273 */ 2274 static int 2275 ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, 2276 u8 tc) 2277 { 2278 struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent; 2279 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; 2280 u32 first_node_teid, vsi_teid; 2281 u16 num_nodes_added; 2282 u8 aggl, vsil, i; 2283 int status; 2284 2285 tc_node = ice_sched_get_tc_node(pi, tc); 2286 if (!tc_node) 2287 return -EIO; 2288 2289 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); 2290 if (!agg_node) 2291 return -ENOENT; 2292 2293 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); 2294 if (!vsi_node) 2295 return -ENOENT; 2296 2297 /* Is this VSI already part of given aggregator? */ 2298 if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node)) 2299 return 0; 2300 2301 aggl = ice_sched_get_agg_layer(pi->hw); 2302 vsil = ice_sched_get_vsi_layer(pi->hw); 2303 2304 /* set intermediate node count to 1 between aggregator and VSI layers */ 2305 for (i = aggl + 1; i < vsil; i++) 2306 num_nodes[i] = 1; 2307 2308 /* Check if the aggregator subtree has any free node to add the VSI */ 2309 for (i = 0; i < agg_node->num_children; i++) { 2310 parent = ice_sched_get_free_vsi_parent(pi->hw, 2311 agg_node->children[i], 2312 num_nodes); 2313 if (parent) 2314 goto move_nodes; 2315 } 2316 2317 /* add new nodes */ 2318 parent = agg_node; 2319 for (i = aggl + 1; i < vsil; i++) { 2320 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, 2321 num_nodes[i], 2322 &first_node_teid, 2323 &num_nodes_added); 2324 if (status || num_nodes[i] != num_nodes_added) 2325 return -EIO; 2326 2327 /* The newly added node can be a new parent for the next 2328 * layer nodes 2329 */ 2330 if (num_nodes_added) 2331 parent = ice_sched_find_node_by_teid(tc_node, 2332 first_node_teid); 2333 else 2334 parent = parent->children[0]; 2335 2336 if (!parent) 2337 return -EIO; 2338 } 2339 2340 move_nodes: 2341 vsi_teid = le32_to_cpu(vsi_node->info.node_teid); 2342 return ice_sched_move_nodes(pi, parent, 1, &vsi_teid); 2343 } 2344 2345 /** 2346 * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator 2347 * @pi: port information structure 2348 * @agg_info: aggregator info 2349 * @tc: traffic class number 2350 * @rm_vsi_info: true or false 2351 * 2352 * This function move all the VSI(s) to the default aggregator and delete 2353 * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The 2354 * caller holds the scheduler lock. 2355 */ 2356 static int 2357 ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi, 2358 struct ice_sched_agg_info *agg_info, u8 tc, 2359 bool rm_vsi_info) 2360 { 2361 struct ice_sched_agg_vsi_info *agg_vsi_info; 2362 struct ice_sched_agg_vsi_info *tmp; 2363 int status = 0; 2364 2365 list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list, 2366 list_entry) { 2367 u16 vsi_handle = agg_vsi_info->vsi_handle; 2368 2369 /* Move VSI to default aggregator */ 2370 if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc)) 2371 continue; 2372 2373 status = ice_sched_move_vsi_to_agg(pi, vsi_handle, 2374 ICE_DFLT_AGG_ID, tc); 2375 if (status) 2376 break; 2377 2378 clear_bit(tc, agg_vsi_info->tc_bitmap); 2379 if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) { 2380 list_del(&agg_vsi_info->list_entry); 2381 devm_kfree(ice_hw_to_dev(pi->hw), agg_vsi_info); 2382 } 2383 } 2384 2385 return status; 2386 } 2387 2388 /** 2389 * ice_sched_is_agg_inuse - check whether the aggregator is in use or not 2390 * @pi: port information structure 2391 * @node: node pointer 2392 * 2393 * This function checks whether the aggregator is attached with any VSI or not. 2394 */ 2395 static bool 2396 ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node) 2397 { 2398 u8 vsil, i; 2399 2400 vsil = ice_sched_get_vsi_layer(pi->hw); 2401 if (node->tx_sched_layer < vsil - 1) { 2402 for (i = 0; i < node->num_children; i++) 2403 if (ice_sched_is_agg_inuse(pi, node->children[i])) 2404 return true; 2405 return false; 2406 } else { 2407 return node->num_children ? true : false; 2408 } 2409 } 2410 2411 /** 2412 * ice_sched_rm_agg_cfg - remove the aggregator node 2413 * @pi: port information structure 2414 * @agg_id: aggregator ID 2415 * @tc: TC number 2416 * 2417 * This function removes the aggregator node and intermediate nodes if any 2418 * from the given TC 2419 */ 2420 static int 2421 ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) 2422 { 2423 struct ice_sched_node *tc_node, *agg_node; 2424 struct ice_hw *hw = pi->hw; 2425 2426 tc_node = ice_sched_get_tc_node(pi, tc); 2427 if (!tc_node) 2428 return -EIO; 2429 2430 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); 2431 if (!agg_node) 2432 return -ENOENT; 2433 2434 /* Can't remove the aggregator node if it has children */ 2435 if (ice_sched_is_agg_inuse(pi, agg_node)) 2436 return -EBUSY; 2437 2438 /* need to remove the whole subtree if aggregator node is the 2439 * only child. 2440 */ 2441 while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) { 2442 struct ice_sched_node *parent = agg_node->parent; 2443 2444 if (!parent) 2445 return -EIO; 2446 2447 if (parent->num_children > 1) 2448 break; 2449 2450 agg_node = parent; 2451 } 2452 2453 ice_free_sched_node(pi, agg_node); 2454 return 0; 2455 } 2456 2457 /** 2458 * ice_rm_agg_cfg_tc - remove aggregator configuration for TC 2459 * @pi: port information structure 2460 * @agg_info: aggregator ID 2461 * @tc: TC number 2462 * @rm_vsi_info: bool value true or false 2463 * 2464 * This function removes aggregator reference to VSI of given TC. It removes 2465 * the aggregator configuration completely for requested TC. The caller needs 2466 * to hold the scheduler lock. 2467 */ 2468 static int 2469 ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, 2470 u8 tc, bool rm_vsi_info) 2471 { 2472 int status = 0; 2473 2474 /* If nothing to remove - return success */ 2475 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) 2476 goto exit_rm_agg_cfg_tc; 2477 2478 status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info); 2479 if (status) 2480 goto exit_rm_agg_cfg_tc; 2481 2482 /* Delete aggregator node(s) */ 2483 status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc); 2484 if (status) 2485 goto exit_rm_agg_cfg_tc; 2486 2487 clear_bit(tc, agg_info->tc_bitmap); 2488 exit_rm_agg_cfg_tc: 2489 return status; 2490 } 2491 2492 /** 2493 * ice_save_agg_tc_bitmap - save aggregator TC bitmap 2494 * @pi: port information structure 2495 * @agg_id: aggregator ID 2496 * @tc_bitmap: 8 bits TC bitmap 2497 * 2498 * Save aggregator TC bitmap. This function needs to be called with scheduler 2499 * lock held. 2500 */ 2501 static int 2502 ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, 2503 unsigned long *tc_bitmap) 2504 { 2505 struct ice_sched_agg_info *agg_info; 2506 2507 agg_info = ice_get_agg_info(pi->hw, agg_id); 2508 if (!agg_info) 2509 return -EINVAL; 2510 bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap, 2511 ICE_MAX_TRAFFIC_CLASS); 2512 return 0; 2513 } 2514 2515 /** 2516 * ice_sched_add_agg_cfg - create an aggregator node 2517 * @pi: port information structure 2518 * @agg_id: aggregator ID 2519 * @tc: TC number 2520 * 2521 * This function creates an aggregator node and intermediate nodes if required 2522 * for the given TC 2523 */ 2524 static int 2525 ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) 2526 { 2527 struct ice_sched_node *parent, *agg_node, *tc_node; 2528 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; 2529 struct ice_hw *hw = pi->hw; 2530 u32 first_node_teid; 2531 u16 num_nodes_added; 2532 int status = 0; 2533 u8 i, aggl; 2534 2535 tc_node = ice_sched_get_tc_node(pi, tc); 2536 if (!tc_node) 2537 return -EIO; 2538 2539 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); 2540 /* Does Agg node already exist ? */ 2541 if (agg_node) 2542 return status; 2543 2544 aggl = ice_sched_get_agg_layer(hw); 2545 2546 /* need one node in Agg layer */ 2547 num_nodes[aggl] = 1; 2548 2549 /* Check whether the intermediate nodes have space to add the 2550 * new aggregator. If they are full, then SW needs to allocate a new 2551 * intermediate node on those layers 2552 */ 2553 for (i = hw->sw_entry_point_layer; i < aggl; i++) { 2554 parent = ice_sched_get_first_node(pi, tc_node, i); 2555 2556 /* scan all the siblings */ 2557 while (parent) { 2558 if (parent->num_children < hw->max_children[i]) 2559 break; 2560 parent = parent->sibling; 2561 } 2562 2563 /* all the nodes are full, reserve one for this layer */ 2564 if (!parent) 2565 num_nodes[i]++; 2566 } 2567 2568 /* add the aggregator node */ 2569 parent = tc_node; 2570 for (i = hw->sw_entry_point_layer; i <= aggl; i++) { 2571 if (!parent) 2572 return -EIO; 2573 2574 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, 2575 num_nodes[i], 2576 &first_node_teid, 2577 &num_nodes_added); 2578 if (status || num_nodes[i] != num_nodes_added) 2579 return -EIO; 2580 2581 /* The newly added node can be a new parent for the next 2582 * layer nodes 2583 */ 2584 if (num_nodes_added) { 2585 parent = ice_sched_find_node_by_teid(tc_node, 2586 first_node_teid); 2587 /* register aggregator ID with the aggregator node */ 2588 if (parent && i == aggl) 2589 parent->agg_id = agg_id; 2590 } else { 2591 parent = parent->children[0]; 2592 } 2593 } 2594 2595 return 0; 2596 } 2597 2598 /** 2599 * ice_sched_cfg_agg - configure aggregator node 2600 * @pi: port information structure 2601 * @agg_id: aggregator ID 2602 * @agg_type: aggregator type queue, VSI, or aggregator group 2603 * @tc_bitmap: bits TC bitmap 2604 * 2605 * It registers a unique aggregator node into scheduler services. It 2606 * allows a user to register with a unique ID to track it's resources. 2607 * The aggregator type determines if this is a queue group, VSI group 2608 * or aggregator group. It then creates the aggregator node(s) for requested 2609 * TC(s) or removes an existing aggregator node including its configuration 2610 * if indicated via tc_bitmap. Call ice_rm_agg_cfg to release aggregator 2611 * resources and remove aggregator ID. 2612 * This function needs to be called with scheduler lock held. 2613 */ 2614 static int 2615 ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, 2616 enum ice_agg_type agg_type, unsigned long *tc_bitmap) 2617 { 2618 struct ice_sched_agg_info *agg_info; 2619 struct ice_hw *hw = pi->hw; 2620 int status = 0; 2621 u8 tc; 2622 2623 agg_info = ice_get_agg_info(hw, agg_id); 2624 if (!agg_info) { 2625 /* Create new entry for new aggregator ID */ 2626 agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info), 2627 GFP_KERNEL); 2628 if (!agg_info) 2629 return -ENOMEM; 2630 2631 agg_info->agg_id = agg_id; 2632 agg_info->agg_type = agg_type; 2633 agg_info->tc_bitmap[0] = 0; 2634 2635 /* Initialize the aggregator VSI list head */ 2636 INIT_LIST_HEAD(&agg_info->agg_vsi_list); 2637 2638 /* Add new entry in aggregator list */ 2639 list_add(&agg_info->list_entry, &hw->agg_list); 2640 } 2641 /* Create aggregator node(s) for requested TC(s) */ 2642 ice_for_each_traffic_class(tc) { 2643 if (!ice_is_tc_ena(*tc_bitmap, tc)) { 2644 /* Delete aggregator cfg TC if it exists previously */ 2645 status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false); 2646 if (status) 2647 break; 2648 continue; 2649 } 2650 2651 /* Check if aggregator node for TC already exists */ 2652 if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) 2653 continue; 2654 2655 /* Create new aggregator node for TC */ 2656 status = ice_sched_add_agg_cfg(pi, agg_id, tc); 2657 if (status) 2658 break; 2659 2660 /* Save aggregator node's TC information */ 2661 set_bit(tc, agg_info->tc_bitmap); 2662 } 2663 2664 return status; 2665 } 2666 2667 /** 2668 * ice_cfg_agg - config aggregator node 2669 * @pi: port information structure 2670 * @agg_id: aggregator ID 2671 * @agg_type: aggregator type queue, VSI, or aggregator group 2672 * @tc_bitmap: bits TC bitmap 2673 * 2674 * This function configures aggregator node(s). 2675 */ 2676 int 2677 ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, 2678 u8 tc_bitmap) 2679 { 2680 unsigned long bitmap = tc_bitmap; 2681 int status; 2682 2683 mutex_lock(&pi->sched_lock); 2684 status = ice_sched_cfg_agg(pi, agg_id, agg_type, &bitmap); 2685 if (!status) 2686 status = ice_save_agg_tc_bitmap(pi, agg_id, &bitmap); 2687 mutex_unlock(&pi->sched_lock); 2688 return status; 2689 } 2690 2691 /** 2692 * ice_get_agg_vsi_info - get the aggregator ID 2693 * @agg_info: aggregator info 2694 * @vsi_handle: software VSI handle 2695 * 2696 * The function returns aggregator VSI info based on VSI handle. This function 2697 * needs to be called with scheduler lock held. 2698 */ 2699 static struct ice_sched_agg_vsi_info * 2700 ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle) 2701 { 2702 struct ice_sched_agg_vsi_info *agg_vsi_info; 2703 2704 list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list, list_entry) 2705 if (agg_vsi_info->vsi_handle == vsi_handle) 2706 return agg_vsi_info; 2707 2708 return NULL; 2709 } 2710 2711 /** 2712 * ice_get_vsi_agg_info - get the aggregator info of VSI 2713 * @hw: pointer to the hardware structure 2714 * @vsi_handle: Sw VSI handle 2715 * 2716 * The function returns aggregator info of VSI represented via vsi_handle. The 2717 * VSI has in this case a different aggregator than the default one. This 2718 * function needs to be called with scheduler lock held. 2719 */ 2720 static struct ice_sched_agg_info * 2721 ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle) 2722 { 2723 struct ice_sched_agg_info *agg_info; 2724 2725 list_for_each_entry(agg_info, &hw->agg_list, list_entry) { 2726 struct ice_sched_agg_vsi_info *agg_vsi_info; 2727 2728 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); 2729 if (agg_vsi_info) 2730 return agg_info; 2731 } 2732 return NULL; 2733 } 2734 2735 /** 2736 * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap 2737 * @pi: port information structure 2738 * @agg_id: aggregator ID 2739 * @vsi_handle: software VSI handle 2740 * @tc_bitmap: TC bitmap of enabled TC(s) 2741 * 2742 * Save VSI to aggregator TC bitmap. This function needs to call with scheduler 2743 * lock held. 2744 */ 2745 static int 2746 ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, 2747 unsigned long *tc_bitmap) 2748 { 2749 struct ice_sched_agg_vsi_info *agg_vsi_info; 2750 struct ice_sched_agg_info *agg_info; 2751 2752 agg_info = ice_get_agg_info(pi->hw, agg_id); 2753 if (!agg_info) 2754 return -EINVAL; 2755 /* check if entry already exist */ 2756 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); 2757 if (!agg_vsi_info) 2758 return -EINVAL; 2759 bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap, 2760 ICE_MAX_TRAFFIC_CLASS); 2761 return 0; 2762 } 2763 2764 /** 2765 * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator 2766 * @pi: port information structure 2767 * @agg_id: aggregator ID 2768 * @vsi_handle: software VSI handle 2769 * @tc_bitmap: TC bitmap of enabled TC(s) 2770 * 2771 * This function moves VSI to a new or default aggregator node. If VSI is 2772 * already associated to the aggregator node then no operation is performed on 2773 * the tree. This function needs to be called with scheduler lock held. 2774 */ 2775 static int 2776 ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, 2777 u16 vsi_handle, unsigned long *tc_bitmap) 2778 { 2779 struct ice_sched_agg_vsi_info *agg_vsi_info, *iter, *old_agg_vsi_info = NULL; 2780 struct ice_sched_agg_info *agg_info, *old_agg_info; 2781 struct ice_hw *hw = pi->hw; 2782 int status = 0; 2783 u8 tc; 2784 2785 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 2786 return -EINVAL; 2787 agg_info = ice_get_agg_info(hw, agg_id); 2788 if (!agg_info) 2789 return -EINVAL; 2790 /* If the VSI is already part of another aggregator then update 2791 * its VSI info list 2792 */ 2793 old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle); 2794 if (old_agg_info && old_agg_info != agg_info) { 2795 struct ice_sched_agg_vsi_info *vtmp; 2796 2797 list_for_each_entry_safe(iter, vtmp, 2798 &old_agg_info->agg_vsi_list, 2799 list_entry) 2800 if (iter->vsi_handle == vsi_handle) { 2801 old_agg_vsi_info = iter; 2802 break; 2803 } 2804 } 2805 2806 /* check if entry already exist */ 2807 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); 2808 if (!agg_vsi_info) { 2809 /* Create new entry for VSI under aggregator list */ 2810 agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw), 2811 sizeof(*agg_vsi_info), GFP_KERNEL); 2812 if (!agg_vsi_info) 2813 return -EINVAL; 2814 2815 /* add VSI ID into the aggregator list */ 2816 agg_vsi_info->vsi_handle = vsi_handle; 2817 list_add(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list); 2818 } 2819 /* Move VSI node to new aggregator node for requested TC(s) */ 2820 ice_for_each_traffic_class(tc) { 2821 if (!ice_is_tc_ena(*tc_bitmap, tc)) 2822 continue; 2823 2824 /* Move VSI to new aggregator */ 2825 status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc); 2826 if (status) 2827 break; 2828 2829 set_bit(tc, agg_vsi_info->tc_bitmap); 2830 if (old_agg_vsi_info) 2831 clear_bit(tc, old_agg_vsi_info->tc_bitmap); 2832 } 2833 if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) { 2834 list_del(&old_agg_vsi_info->list_entry); 2835 devm_kfree(ice_hw_to_dev(pi->hw), old_agg_vsi_info); 2836 } 2837 return status; 2838 } 2839 2840 /** 2841 * ice_sched_rm_unused_rl_prof - remove unused RL profile 2842 * @pi: port information structure 2843 * 2844 * This function removes unused rate limit profiles from the HW and 2845 * SW DB. The caller needs to hold scheduler lock. 2846 */ 2847 static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi) 2848 { 2849 u16 ln; 2850 2851 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) { 2852 struct ice_aqc_rl_profile_info *rl_prof_elem; 2853 struct ice_aqc_rl_profile_info *rl_prof_tmp; 2854 2855 list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp, 2856 &pi->rl_prof_list[ln], list_entry) { 2857 if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem)) 2858 ice_debug(pi->hw, ICE_DBG_SCHED, "Removed rl profile\n"); 2859 } 2860 } 2861 } 2862 2863 /** 2864 * ice_sched_update_elem - update element 2865 * @hw: pointer to the HW struct 2866 * @node: pointer to node 2867 * @info: node info to update 2868 * 2869 * Update the HW DB, and local SW DB of node. Update the scheduling 2870 * parameters of node from argument info data buffer (Info->data buf) and 2871 * returns success or error on config sched element failure. The caller 2872 * needs to hold scheduler lock. 2873 */ 2874 static int 2875 ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, 2876 struct ice_aqc_txsched_elem_data *info) 2877 { 2878 struct ice_aqc_txsched_elem_data buf; 2879 u16 elem_cfgd = 0; 2880 u16 num_elems = 1; 2881 int status; 2882 2883 buf = *info; 2884 /* Parent TEID is reserved field in this aq call */ 2885 buf.parent_teid = 0; 2886 /* Element type is reserved field in this aq call */ 2887 buf.data.elem_type = 0; 2888 /* Flags is reserved field in this aq call */ 2889 buf.data.flags = 0; 2890 2891 /* Update HW DB */ 2892 /* Configure element node */ 2893 status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf), 2894 &elem_cfgd, NULL); 2895 if (status || elem_cfgd != num_elems) { 2896 ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n"); 2897 return -EIO; 2898 } 2899 2900 /* Config success case */ 2901 /* Now update local SW DB */ 2902 /* Only copy the data portion of info buffer */ 2903 node->info.data = info->data; 2904 return status; 2905 } 2906 2907 /** 2908 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params 2909 * @hw: pointer to the HW struct 2910 * @node: sched node to configure 2911 * @rl_type: rate limit type CIR, EIR, or shared 2912 * @bw_alloc: BW weight/allocation 2913 * 2914 * This function configures node element's BW allocation. 2915 */ 2916 static int 2917 ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, 2918 enum ice_rl_type rl_type, u16 bw_alloc) 2919 { 2920 struct ice_aqc_txsched_elem_data buf; 2921 struct ice_aqc_txsched_elem *data; 2922 2923 buf = node->info; 2924 data = &buf.data; 2925 if (rl_type == ICE_MIN_BW) { 2926 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; 2927 data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc); 2928 } else if (rl_type == ICE_MAX_BW) { 2929 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; 2930 data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc); 2931 } else { 2932 return -EINVAL; 2933 } 2934 2935 /* Configure element */ 2936 return ice_sched_update_elem(hw, node, &buf); 2937 } 2938 2939 /** 2940 * ice_move_vsi_to_agg - moves VSI to new or default aggregator 2941 * @pi: port information structure 2942 * @agg_id: aggregator ID 2943 * @vsi_handle: software VSI handle 2944 * @tc_bitmap: TC bitmap of enabled TC(s) 2945 * 2946 * Move or associate VSI to a new or default aggregator node. 2947 */ 2948 int 2949 ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, 2950 u8 tc_bitmap) 2951 { 2952 unsigned long bitmap = tc_bitmap; 2953 int status; 2954 2955 mutex_lock(&pi->sched_lock); 2956 status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle, 2957 (unsigned long *)&bitmap); 2958 if (!status) 2959 status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle, 2960 (unsigned long *)&bitmap); 2961 mutex_unlock(&pi->sched_lock); 2962 return status; 2963 } 2964 2965 /** 2966 * ice_set_clear_cir_bw - set or clear CIR BW 2967 * @bw_t_info: bandwidth type information structure 2968 * @bw: bandwidth in Kbps - Kilo bits per sec 2969 * 2970 * Save or clear CIR bandwidth (BW) in the passed param bw_t_info. 2971 */ 2972 static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) 2973 { 2974 if (bw == ICE_SCHED_DFLT_BW) { 2975 clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); 2976 bw_t_info->cir_bw.bw = 0; 2977 } else { 2978 /* Save type of BW information */ 2979 set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); 2980 bw_t_info->cir_bw.bw = bw; 2981 } 2982 } 2983 2984 /** 2985 * ice_set_clear_eir_bw - set or clear EIR BW 2986 * @bw_t_info: bandwidth type information structure 2987 * @bw: bandwidth in Kbps - Kilo bits per sec 2988 * 2989 * Save or clear EIR bandwidth (BW) in the passed param bw_t_info. 2990 */ 2991 static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) 2992 { 2993 if (bw == ICE_SCHED_DFLT_BW) { 2994 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); 2995 bw_t_info->eir_bw.bw = 0; 2996 } else { 2997 /* EIR BW and Shared BW profiles are mutually exclusive and 2998 * hence only one of them may be set for any given element. 2999 * First clear earlier saved shared BW information. 3000 */ 3001 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); 3002 bw_t_info->shared_bw = 0; 3003 /* save EIR BW information */ 3004 set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); 3005 bw_t_info->eir_bw.bw = bw; 3006 } 3007 } 3008 3009 /** 3010 * ice_set_clear_shared_bw - set or clear shared BW 3011 * @bw_t_info: bandwidth type information structure 3012 * @bw: bandwidth in Kbps - Kilo bits per sec 3013 * 3014 * Save or clear shared bandwidth (BW) in the passed param bw_t_info. 3015 */ 3016 static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw) 3017 { 3018 if (bw == ICE_SCHED_DFLT_BW) { 3019 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); 3020 bw_t_info->shared_bw = 0; 3021 } else { 3022 /* EIR BW and Shared BW profiles are mutually exclusive and 3023 * hence only one of them may be set for any given element. 3024 * First clear earlier saved EIR BW information. 3025 */ 3026 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); 3027 bw_t_info->eir_bw.bw = 0; 3028 /* save shared BW information */ 3029 set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); 3030 bw_t_info->shared_bw = bw; 3031 } 3032 } 3033 3034 /** 3035 * ice_sched_save_vsi_bw - save VSI node's BW information 3036 * @pi: port information structure 3037 * @vsi_handle: sw VSI handle 3038 * @tc: traffic class 3039 * @rl_type: rate limit type min, max, or shared 3040 * @bw: bandwidth in Kbps - Kilo bits per sec 3041 * 3042 * Save BW information of VSI type node for post replay use. 3043 */ 3044 static int 3045 ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 3046 enum ice_rl_type rl_type, u32 bw) 3047 { 3048 struct ice_vsi_ctx *vsi_ctx; 3049 3050 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 3051 return -EINVAL; 3052 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 3053 if (!vsi_ctx) 3054 return -EINVAL; 3055 switch (rl_type) { 3056 case ICE_MIN_BW: 3057 ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); 3058 break; 3059 case ICE_MAX_BW: 3060 ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); 3061 break; 3062 case ICE_SHARED_BW: 3063 ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw); 3064 break; 3065 default: 3066 return -EINVAL; 3067 } 3068 return 0; 3069 } 3070 3071 /** 3072 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter 3073 * @hw: pointer to the HW struct 3074 * @bw: bandwidth in Kbps 3075 * 3076 * This function calculates the wakeup parameter of RL profile. 3077 */ 3078 static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) 3079 { 3080 s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f; 3081 s32 wakeup_f_int; 3082 u16 wakeup = 0; 3083 3084 /* Get the wakeup integer value */ 3085 bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE); 3086 wakeup_int = div64_long(hw->psm_clk_freq, bytes_per_sec); 3087 if (wakeup_int > 63) { 3088 wakeup = (u16)((1 << 15) | wakeup_int); 3089 } else { 3090 /* Calculate fraction value up to 4 decimals 3091 * Convert Integer value to a constant multiplier 3092 */ 3093 wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int; 3094 wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER * 3095 hw->psm_clk_freq, bytes_per_sec); 3096 3097 /* Get Fraction value */ 3098 wakeup_f = wakeup_a - wakeup_b; 3099 3100 /* Round up the Fractional value via Ceil(Fractional value) */ 3101 if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2)) 3102 wakeup_f += 1; 3103 3104 wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION, 3105 ICE_RL_PROF_MULTIPLIER); 3106 wakeup |= (u16)(wakeup_int << 9); 3107 wakeup |= (u16)(0x1ff & wakeup_f_int); 3108 } 3109 3110 return wakeup; 3111 } 3112 3113 /** 3114 * ice_sched_bw_to_rl_profile - convert BW to profile parameters 3115 * @hw: pointer to the HW struct 3116 * @bw: bandwidth in Kbps 3117 * @profile: profile parameters to return 3118 * 3119 * This function converts the BW to profile structure format. 3120 */ 3121 static int 3122 ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, 3123 struct ice_aqc_rl_profile_elem *profile) 3124 { 3125 s64 bytes_per_sec, ts_rate, mv_tmp; 3126 int status = -EINVAL; 3127 bool found = false; 3128 s32 encode = 0; 3129 s64 mv = 0; 3130 s32 i; 3131 3132 /* Bw settings range is from 0.5Mb/sec to 100Gb/sec */ 3133 if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW) 3134 return status; 3135 3136 /* Bytes per second from Kbps */ 3137 bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE); 3138 3139 /* encode is 6 bits but really useful are 5 bits */ 3140 for (i = 0; i < 64; i++) { 3141 u64 pow_result = BIT_ULL(i); 3142 3143 ts_rate = div64_long((s64)hw->psm_clk_freq, 3144 pow_result * ICE_RL_PROF_TS_MULTIPLIER); 3145 if (ts_rate <= 0) 3146 continue; 3147 3148 /* Multiplier value */ 3149 mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER, 3150 ts_rate); 3151 3152 /* Round to the nearest ICE_RL_PROF_MULTIPLIER */ 3153 mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER); 3154 3155 /* First multiplier value greater than the given 3156 * accuracy bytes 3157 */ 3158 if (mv > ICE_RL_PROF_ACCURACY_BYTES) { 3159 encode = i; 3160 found = true; 3161 break; 3162 } 3163 } 3164 if (found) { 3165 u16 wm; 3166 3167 wm = ice_sched_calc_wakeup(hw, bw); 3168 profile->rl_multiply = cpu_to_le16(mv); 3169 profile->wake_up_calc = cpu_to_le16(wm); 3170 profile->rl_encode = cpu_to_le16(encode); 3171 status = 0; 3172 } else { 3173 status = -ENOENT; 3174 } 3175 3176 return status; 3177 } 3178 3179 /** 3180 * ice_sched_add_rl_profile - add RL profile 3181 * @pi: port information structure 3182 * @rl_type: type of rate limit BW - min, max, or shared 3183 * @bw: bandwidth in Kbps - Kilo bits per sec 3184 * @layer_num: specifies in which layer to create profile 3185 * 3186 * This function first checks the existing list for corresponding BW 3187 * parameter. If it exists, it returns the associated profile otherwise 3188 * it creates a new rate limit profile for requested BW, and adds it to 3189 * the HW DB and local list. It returns the new profile or null on error. 3190 * The caller needs to hold the scheduler lock. 3191 */ 3192 static struct ice_aqc_rl_profile_info * 3193 ice_sched_add_rl_profile(struct ice_port_info *pi, 3194 enum ice_rl_type rl_type, u32 bw, u8 layer_num) 3195 { 3196 struct ice_aqc_rl_profile_info *rl_prof_elem; 3197 u16 profiles_added = 0, num_profiles = 1; 3198 struct ice_aqc_rl_profile_elem *buf; 3199 struct ice_hw *hw; 3200 u8 profile_type; 3201 int status; 3202 3203 if (!pi || layer_num >= pi->hw->num_tx_sched_layers) 3204 return NULL; 3205 switch (rl_type) { 3206 case ICE_MIN_BW: 3207 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; 3208 break; 3209 case ICE_MAX_BW: 3210 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; 3211 break; 3212 case ICE_SHARED_BW: 3213 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; 3214 break; 3215 default: 3216 return NULL; 3217 } 3218 3219 hw = pi->hw; 3220 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], 3221 list_entry) 3222 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == 3223 profile_type && rl_prof_elem->bw == bw) 3224 /* Return existing profile ID info */ 3225 return rl_prof_elem; 3226 3227 /* Create new profile ID */ 3228 rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem), 3229 GFP_KERNEL); 3230 3231 if (!rl_prof_elem) 3232 return NULL; 3233 3234 status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile); 3235 if (status) 3236 goto exit_add_rl_prof; 3237 3238 rl_prof_elem->bw = bw; 3239 /* layer_num is zero relative, and fw expects level from 1 to 9 */ 3240 rl_prof_elem->profile.level = layer_num + 1; 3241 rl_prof_elem->profile.flags = profile_type; 3242 rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size); 3243 3244 /* Create new entry in HW DB */ 3245 buf = &rl_prof_elem->profile; 3246 status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf), 3247 &profiles_added, NULL); 3248 if (status || profiles_added != num_profiles) 3249 goto exit_add_rl_prof; 3250 3251 /* Good entry - add in the list */ 3252 rl_prof_elem->prof_id_ref = 0; 3253 list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]); 3254 return rl_prof_elem; 3255 3256 exit_add_rl_prof: 3257 devm_kfree(ice_hw_to_dev(hw), rl_prof_elem); 3258 return NULL; 3259 } 3260 3261 /** 3262 * ice_sched_cfg_node_bw_lmt - configure node sched params 3263 * @hw: pointer to the HW struct 3264 * @node: sched node to configure 3265 * @rl_type: rate limit type CIR, EIR, or shared 3266 * @rl_prof_id: rate limit profile ID 3267 * 3268 * This function configures node element's BW limit. 3269 */ 3270 static int 3271 ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, 3272 enum ice_rl_type rl_type, u16 rl_prof_id) 3273 { 3274 struct ice_aqc_txsched_elem_data buf; 3275 struct ice_aqc_txsched_elem *data; 3276 3277 buf = node->info; 3278 data = &buf.data; 3279 switch (rl_type) { 3280 case ICE_MIN_BW: 3281 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; 3282 data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); 3283 break; 3284 case ICE_MAX_BW: 3285 /* EIR BW and Shared BW profiles are mutually exclusive and 3286 * hence only one of them may be set for any given element 3287 */ 3288 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) 3289 return -EIO; 3290 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; 3291 data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); 3292 break; 3293 case ICE_SHARED_BW: 3294 /* Check for removing shared BW */ 3295 if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) { 3296 /* remove shared profile */ 3297 data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED; 3298 data->srl_id = 0; /* clear SRL field */ 3299 3300 /* enable back EIR to default profile */ 3301 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; 3302 data->eir_bw.bw_profile_idx = 3303 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 3304 break; 3305 } 3306 /* EIR BW and Shared BW profiles are mutually exclusive and 3307 * hence only one of them may be set for any given element 3308 */ 3309 if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) && 3310 (le16_to_cpu(data->eir_bw.bw_profile_idx) != 3311 ICE_SCHED_DFLT_RL_PROF_ID)) 3312 return -EIO; 3313 /* EIR BW is set to default, disable it */ 3314 data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR; 3315 /* Okay to enable shared BW now */ 3316 data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED; 3317 data->srl_id = cpu_to_le16(rl_prof_id); 3318 break; 3319 default: 3320 /* Unknown rate limit type */ 3321 return -EINVAL; 3322 } 3323 3324 /* Configure element */ 3325 return ice_sched_update_elem(hw, node, &buf); 3326 } 3327 3328 /** 3329 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID 3330 * @node: sched node 3331 * @rl_type: rate limit type 3332 * 3333 * If existing profile matches, it returns the corresponding rate 3334 * limit profile ID, otherwise it returns an invalid ID as error. 3335 */ 3336 static u16 3337 ice_sched_get_node_rl_prof_id(struct ice_sched_node *node, 3338 enum ice_rl_type rl_type) 3339 { 3340 u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID; 3341 struct ice_aqc_txsched_elem *data; 3342 3343 data = &node->info.data; 3344 switch (rl_type) { 3345 case ICE_MIN_BW: 3346 if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR) 3347 rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx); 3348 break; 3349 case ICE_MAX_BW: 3350 if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR) 3351 rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx); 3352 break; 3353 case ICE_SHARED_BW: 3354 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) 3355 rl_prof_id = le16_to_cpu(data->srl_id); 3356 break; 3357 default: 3358 break; 3359 } 3360 3361 return rl_prof_id; 3362 } 3363 3364 /** 3365 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer 3366 * @pi: port information structure 3367 * @rl_type: type of rate limit BW - min, max, or shared 3368 * @layer_index: layer index 3369 * 3370 * This function returns requested profile creation layer. 3371 */ 3372 static u8 3373 ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type, 3374 u8 layer_index) 3375 { 3376 struct ice_hw *hw = pi->hw; 3377 3378 if (layer_index >= hw->num_tx_sched_layers) 3379 return ICE_SCHED_INVAL_LAYER_NUM; 3380 switch (rl_type) { 3381 case ICE_MIN_BW: 3382 if (hw->layer_info[layer_index].max_cir_rl_profiles) 3383 return layer_index; 3384 break; 3385 case ICE_MAX_BW: 3386 if (hw->layer_info[layer_index].max_eir_rl_profiles) 3387 return layer_index; 3388 break; 3389 case ICE_SHARED_BW: 3390 /* if current layer doesn't support SRL profile creation 3391 * then try a layer up or down. 3392 */ 3393 if (hw->layer_info[layer_index].max_srl_profiles) 3394 return layer_index; 3395 else if (layer_index < hw->num_tx_sched_layers - 1 && 3396 hw->layer_info[layer_index + 1].max_srl_profiles) 3397 return layer_index + 1; 3398 else if (layer_index > 0 && 3399 hw->layer_info[layer_index - 1].max_srl_profiles) 3400 return layer_index - 1; 3401 break; 3402 default: 3403 break; 3404 } 3405 return ICE_SCHED_INVAL_LAYER_NUM; 3406 } 3407 3408 /** 3409 * ice_sched_get_srl_node - get shared rate limit node 3410 * @node: tree node 3411 * @srl_layer: shared rate limit layer 3412 * 3413 * This function returns SRL node to be used for shared rate limit purpose. 3414 * The caller needs to hold scheduler lock. 3415 */ 3416 static struct ice_sched_node * 3417 ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer) 3418 { 3419 if (srl_layer > node->tx_sched_layer) 3420 return node->children[0]; 3421 else if (srl_layer < node->tx_sched_layer) 3422 /* Node can't be created without a parent. It will always 3423 * have a valid parent except root node. 3424 */ 3425 return node->parent; 3426 else 3427 return node; 3428 } 3429 3430 /** 3431 * ice_sched_rm_rl_profile - remove RL profile ID 3432 * @pi: port information structure 3433 * @layer_num: layer number where profiles are saved 3434 * @profile_type: profile type like EIR, CIR, or SRL 3435 * @profile_id: profile ID to remove 3436 * 3437 * This function removes rate limit profile from layer 'layer_num' of type 3438 * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold 3439 * scheduler lock. 3440 */ 3441 static int 3442 ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, 3443 u16 profile_id) 3444 { 3445 struct ice_aqc_rl_profile_info *rl_prof_elem; 3446 int status = 0; 3447 3448 if (layer_num >= pi->hw->num_tx_sched_layers) 3449 return -EINVAL; 3450 /* Check the existing list for RL profile */ 3451 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], 3452 list_entry) 3453 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == 3454 profile_type && 3455 le16_to_cpu(rl_prof_elem->profile.profile_id) == 3456 profile_id) { 3457 if (rl_prof_elem->prof_id_ref) 3458 rl_prof_elem->prof_id_ref--; 3459 3460 /* Remove old profile ID from database */ 3461 status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem); 3462 if (status && status != -EBUSY) 3463 ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); 3464 break; 3465 } 3466 if (status == -EBUSY) 3467 status = 0; 3468 return status; 3469 } 3470 3471 /** 3472 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default 3473 * @pi: port information structure 3474 * @node: pointer to node structure 3475 * @rl_type: rate limit type min, max, or shared 3476 * @layer_num: layer number where RL profiles are saved 3477 * 3478 * This function configures node element's BW rate limit profile ID of 3479 * type CIR, EIR, or SRL to default. This function needs to be called 3480 * with the scheduler lock held. 3481 */ 3482 static int 3483 ice_sched_set_node_bw_dflt(struct ice_port_info *pi, 3484 struct ice_sched_node *node, 3485 enum ice_rl_type rl_type, u8 layer_num) 3486 { 3487 struct ice_hw *hw; 3488 u8 profile_type; 3489 u16 rl_prof_id; 3490 u16 old_id; 3491 int status; 3492 3493 hw = pi->hw; 3494 switch (rl_type) { 3495 case ICE_MIN_BW: 3496 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; 3497 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; 3498 break; 3499 case ICE_MAX_BW: 3500 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; 3501 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; 3502 break; 3503 case ICE_SHARED_BW: 3504 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; 3505 /* No SRL is configured for default case */ 3506 rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID; 3507 break; 3508 default: 3509 return -EINVAL; 3510 } 3511 /* Save existing RL prof ID for later clean up */ 3512 old_id = ice_sched_get_node_rl_prof_id(node, rl_type); 3513 /* Configure BW scheduling parameters */ 3514 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); 3515 if (status) 3516 return status; 3517 3518 /* Remove stale RL profile ID */ 3519 if (old_id == ICE_SCHED_DFLT_RL_PROF_ID || 3520 old_id == ICE_SCHED_INVAL_PROF_ID) 3521 return 0; 3522 3523 return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id); 3524 } 3525 3526 /** 3527 * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness 3528 * @pi: port information structure 3529 * @node: pointer to node structure 3530 * @layer_num: layer number where rate limit profiles are saved 3531 * @rl_type: rate limit type min, max, or shared 3532 * @bw: bandwidth value 3533 * 3534 * This function prepares node element's bandwidth to SRL or EIR exclusively. 3535 * EIR BW and Shared BW profiles are mutually exclusive and hence only one of 3536 * them may be set for any given element. This function needs to be called 3537 * with the scheduler lock held. 3538 */ 3539 static int 3540 ice_sched_set_eir_srl_excl(struct ice_port_info *pi, 3541 struct ice_sched_node *node, 3542 u8 layer_num, enum ice_rl_type rl_type, u32 bw) 3543 { 3544 if (rl_type == ICE_SHARED_BW) { 3545 /* SRL node passed in this case, it may be different node */ 3546 if (bw == ICE_SCHED_DFLT_BW) 3547 /* SRL being removed, ice_sched_cfg_node_bw_lmt() 3548 * enables EIR to default. EIR is not set in this 3549 * case, so no additional action is required. 3550 */ 3551 return 0; 3552 3553 /* SRL being configured, set EIR to default here. 3554 * ice_sched_cfg_node_bw_lmt() disables EIR when it 3555 * configures SRL 3556 */ 3557 return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW, 3558 layer_num); 3559 } else if (rl_type == ICE_MAX_BW && 3560 node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) { 3561 /* Remove Shared profile. Set default shared BW call 3562 * removes shared profile for a node. 3563 */ 3564 return ice_sched_set_node_bw_dflt(pi, node, 3565 ICE_SHARED_BW, 3566 layer_num); 3567 } 3568 return 0; 3569 } 3570 3571 /** 3572 * ice_sched_set_node_bw - set node's bandwidth 3573 * @pi: port information structure 3574 * @node: tree node 3575 * @rl_type: rate limit type min, max, or shared 3576 * @bw: bandwidth in Kbps - Kilo bits per sec 3577 * @layer_num: layer number 3578 * 3579 * This function adds new profile corresponding to requested BW, configures 3580 * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile 3581 * ID from local database. The caller needs to hold scheduler lock. 3582 */ 3583 int 3584 ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, 3585 enum ice_rl_type rl_type, u32 bw, u8 layer_num) 3586 { 3587 struct ice_aqc_rl_profile_info *rl_prof_info; 3588 struct ice_hw *hw = pi->hw; 3589 u16 old_id, rl_prof_id; 3590 int status = -EINVAL; 3591 3592 rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num); 3593 if (!rl_prof_info) 3594 return status; 3595 3596 rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id); 3597 3598 /* Save existing RL prof ID for later clean up */ 3599 old_id = ice_sched_get_node_rl_prof_id(node, rl_type); 3600 /* Configure BW scheduling parameters */ 3601 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); 3602 if (status) 3603 return status; 3604 3605 /* New changes has been applied */ 3606 /* Increment the profile ID reference count */ 3607 rl_prof_info->prof_id_ref++; 3608 3609 /* Check for old ID removal */ 3610 if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) || 3611 old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id) 3612 return 0; 3613 3614 return ice_sched_rm_rl_profile(pi, layer_num, 3615 rl_prof_info->profile.flags & 3616 ICE_AQC_RL_PROFILE_TYPE_M, old_id); 3617 } 3618 3619 /** 3620 * ice_sched_set_node_priority - set node's priority 3621 * @pi: port information structure 3622 * @node: tree node 3623 * @priority: number 0-7 representing priority among siblings 3624 * 3625 * This function sets priority of a node among it's siblings. 3626 */ 3627 int 3628 ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node, 3629 u16 priority) 3630 { 3631 struct ice_aqc_txsched_elem_data buf; 3632 struct ice_aqc_txsched_elem *data; 3633 3634 buf = node->info; 3635 data = &buf.data; 3636 3637 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; 3638 data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_PRIO_M, priority); 3639 3640 return ice_sched_update_elem(pi->hw, node, &buf); 3641 } 3642 3643 /** 3644 * ice_sched_set_node_weight - set node's weight 3645 * @pi: port information structure 3646 * @node: tree node 3647 * @weight: number 1-200 representing weight for WFQ 3648 * 3649 * This function sets weight of the node for WFQ algorithm. 3650 */ 3651 int 3652 ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight) 3653 { 3654 struct ice_aqc_txsched_elem_data buf; 3655 struct ice_aqc_txsched_elem *data; 3656 3657 buf = node->info; 3658 data = &buf.data; 3659 3660 data->valid_sections = ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR | 3661 ICE_AQC_ELEM_VALID_GENERIC; 3662 data->cir_bw.bw_alloc = cpu_to_le16(weight); 3663 data->eir_bw.bw_alloc = cpu_to_le16(weight); 3664 3665 data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_SP_M, 0x0); 3666 3667 return ice_sched_update_elem(pi->hw, node, &buf); 3668 } 3669 3670 /** 3671 * ice_sched_set_node_bw_lmt - set node's BW limit 3672 * @pi: port information structure 3673 * @node: tree node 3674 * @rl_type: rate limit type min, max, or shared 3675 * @bw: bandwidth in Kbps - Kilo bits per sec 3676 * 3677 * It updates node's BW limit parameters like BW RL profile ID of type CIR, 3678 * EIR, or SRL. The caller needs to hold scheduler lock. 3679 */ 3680 int 3681 ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, 3682 enum ice_rl_type rl_type, u32 bw) 3683 { 3684 struct ice_sched_node *cfg_node = node; 3685 int status; 3686 3687 struct ice_hw *hw; 3688 u8 layer_num; 3689 3690 if (!pi) 3691 return -EINVAL; 3692 hw = pi->hw; 3693 /* Remove unused RL profile IDs from HW and SW DB */ 3694 ice_sched_rm_unused_rl_prof(pi); 3695 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, 3696 node->tx_sched_layer); 3697 if (layer_num >= hw->num_tx_sched_layers) 3698 return -EINVAL; 3699 3700 if (rl_type == ICE_SHARED_BW) { 3701 /* SRL node may be different */ 3702 cfg_node = ice_sched_get_srl_node(node, layer_num); 3703 if (!cfg_node) 3704 return -EIO; 3705 } 3706 /* EIR BW and Shared BW profiles are mutually exclusive and 3707 * hence only one of them may be set for any given element 3708 */ 3709 status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type, 3710 bw); 3711 if (status) 3712 return status; 3713 if (bw == ICE_SCHED_DFLT_BW) 3714 return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type, 3715 layer_num); 3716 return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num); 3717 } 3718 3719 /** 3720 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default 3721 * @pi: port information structure 3722 * @node: pointer to node structure 3723 * @rl_type: rate limit type min, max, or shared 3724 * 3725 * This function configures node element's BW rate limit profile ID of 3726 * type CIR, EIR, or SRL to default. This function needs to be called 3727 * with the scheduler lock held. 3728 */ 3729 static int 3730 ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, 3731 struct ice_sched_node *node, 3732 enum ice_rl_type rl_type) 3733 { 3734 return ice_sched_set_node_bw_lmt(pi, node, rl_type, 3735 ICE_SCHED_DFLT_BW); 3736 } 3737 3738 /** 3739 * ice_sched_validate_srl_node - Check node for SRL applicability 3740 * @node: sched node to configure 3741 * @sel_layer: selected SRL layer 3742 * 3743 * This function checks if the SRL can be applied to a selected layer node on 3744 * behalf of the requested node (first argument). This function needs to be 3745 * called with scheduler lock held. 3746 */ 3747 static int 3748 ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) 3749 { 3750 /* SRL profiles are not available on all layers. Check if the 3751 * SRL profile can be applied to a node above or below the 3752 * requested node. SRL configuration is possible only if the 3753 * selected layer's node has single child. 3754 */ 3755 if (sel_layer == node->tx_sched_layer || 3756 ((sel_layer == node->tx_sched_layer + 1) && 3757 node->num_children == 1) || 3758 ((sel_layer == node->tx_sched_layer - 1) && 3759 (node->parent && node->parent->num_children == 1))) 3760 return 0; 3761 3762 return -EIO; 3763 } 3764 3765 /** 3766 * ice_sched_save_q_bw - save queue node's BW information 3767 * @q_ctx: queue context structure 3768 * @rl_type: rate limit type min, max, or shared 3769 * @bw: bandwidth in Kbps - Kilo bits per sec 3770 * 3771 * Save BW information of queue type node for post replay use. 3772 */ 3773 static int 3774 ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) 3775 { 3776 switch (rl_type) { 3777 case ICE_MIN_BW: 3778 ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw); 3779 break; 3780 case ICE_MAX_BW: 3781 ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw); 3782 break; 3783 case ICE_SHARED_BW: 3784 ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw); 3785 break; 3786 default: 3787 return -EINVAL; 3788 } 3789 return 0; 3790 } 3791 3792 /** 3793 * ice_sched_set_q_bw_lmt - sets queue BW limit 3794 * @pi: port information structure 3795 * @vsi_handle: sw VSI handle 3796 * @tc: traffic class 3797 * @q_handle: software queue handle 3798 * @rl_type: min, max, or shared 3799 * @bw: bandwidth in Kbps 3800 * 3801 * This function sets BW limit of queue scheduling node. 3802 */ 3803 static int 3804 ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 3805 u16 q_handle, enum ice_rl_type rl_type, u32 bw) 3806 { 3807 struct ice_sched_node *node; 3808 struct ice_q_ctx *q_ctx; 3809 int status = -EINVAL; 3810 3811 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 3812 return -EINVAL; 3813 mutex_lock(&pi->sched_lock); 3814 q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle); 3815 if (!q_ctx) 3816 goto exit_q_bw_lmt; 3817 node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); 3818 if (!node) { 3819 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n"); 3820 goto exit_q_bw_lmt; 3821 } 3822 3823 /* Return error if it is not a leaf node */ 3824 if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) 3825 goto exit_q_bw_lmt; 3826 3827 /* SRL bandwidth layer selection */ 3828 if (rl_type == ICE_SHARED_BW) { 3829 u8 sel_layer; /* selected layer */ 3830 3831 sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type, 3832 node->tx_sched_layer); 3833 if (sel_layer >= pi->hw->num_tx_sched_layers) { 3834 status = -EINVAL; 3835 goto exit_q_bw_lmt; 3836 } 3837 status = ice_sched_validate_srl_node(node, sel_layer); 3838 if (status) 3839 goto exit_q_bw_lmt; 3840 } 3841 3842 if (bw == ICE_SCHED_DFLT_BW) 3843 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type); 3844 else 3845 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw); 3846 3847 if (!status) 3848 status = ice_sched_save_q_bw(q_ctx, rl_type, bw); 3849 3850 exit_q_bw_lmt: 3851 mutex_unlock(&pi->sched_lock); 3852 return status; 3853 } 3854 3855 /** 3856 * ice_cfg_q_bw_lmt - configure queue BW limit 3857 * @pi: port information structure 3858 * @vsi_handle: sw VSI handle 3859 * @tc: traffic class 3860 * @q_handle: software queue handle 3861 * @rl_type: min, max, or shared 3862 * @bw: bandwidth in Kbps 3863 * 3864 * This function configures BW limit of queue scheduling node. 3865 */ 3866 int 3867 ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 3868 u16 q_handle, enum ice_rl_type rl_type, u32 bw) 3869 { 3870 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, 3871 bw); 3872 } 3873 3874 /** 3875 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit 3876 * @pi: port information structure 3877 * @vsi_handle: sw VSI handle 3878 * @tc: traffic class 3879 * @q_handle: software queue handle 3880 * @rl_type: min, max, or shared 3881 * 3882 * This function configures BW default limit of queue scheduling node. 3883 */ 3884 int 3885 ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 3886 u16 q_handle, enum ice_rl_type rl_type) 3887 { 3888 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, 3889 ICE_SCHED_DFLT_BW); 3890 } 3891 3892 /** 3893 * ice_sched_get_node_by_id_type - get node from ID type 3894 * @pi: port information structure 3895 * @id: identifier 3896 * @agg_type: type of aggregator 3897 * @tc: traffic class 3898 * 3899 * This function returns node identified by ID of type aggregator, and 3900 * based on traffic class (TC). This function needs to be called with 3901 * the scheduler lock held. 3902 */ 3903 static struct ice_sched_node * 3904 ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, 3905 enum ice_agg_type agg_type, u8 tc) 3906 { 3907 struct ice_sched_node *node = NULL; 3908 3909 switch (agg_type) { 3910 case ICE_AGG_TYPE_VSI: { 3911 struct ice_vsi_ctx *vsi_ctx; 3912 u16 vsi_handle = (u16)id; 3913 3914 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 3915 break; 3916 /* Get sched_vsi_info */ 3917 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 3918 if (!vsi_ctx) 3919 break; 3920 node = vsi_ctx->sched.vsi_node[tc]; 3921 break; 3922 } 3923 3924 case ICE_AGG_TYPE_AGG: { 3925 struct ice_sched_node *tc_node; 3926 3927 tc_node = ice_sched_get_tc_node(pi, tc); 3928 if (tc_node) 3929 node = ice_sched_get_agg_node(pi, tc_node, id); 3930 break; 3931 } 3932 3933 default: 3934 break; 3935 } 3936 3937 return node; 3938 } 3939 3940 /** 3941 * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC 3942 * @pi: port information structure 3943 * @id: ID (software VSI handle or AGG ID) 3944 * @agg_type: aggregator type (VSI or AGG type node) 3945 * @tc: traffic class 3946 * @rl_type: min or max 3947 * @bw: bandwidth in Kbps 3948 * 3949 * This function sets BW limit of VSI or Aggregator scheduling node 3950 * based on TC information from passed in argument BW. 3951 */ 3952 static int 3953 ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, 3954 enum ice_agg_type agg_type, u8 tc, 3955 enum ice_rl_type rl_type, u32 bw) 3956 { 3957 struct ice_sched_node *node; 3958 int status = -EINVAL; 3959 3960 if (!pi) 3961 return status; 3962 3963 if (rl_type == ICE_UNKNOWN_BW) 3964 return status; 3965 3966 mutex_lock(&pi->sched_lock); 3967 node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc); 3968 if (!node) { 3969 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n"); 3970 goto exit_set_node_bw_lmt_per_tc; 3971 } 3972 if (bw == ICE_SCHED_DFLT_BW) 3973 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type); 3974 else 3975 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw); 3976 3977 exit_set_node_bw_lmt_per_tc: 3978 mutex_unlock(&pi->sched_lock); 3979 return status; 3980 } 3981 3982 /** 3983 * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC 3984 * @pi: port information structure 3985 * @vsi_handle: software VSI handle 3986 * @tc: traffic class 3987 * @rl_type: min or max 3988 * @bw: bandwidth in Kbps 3989 * 3990 * This function configures BW limit of VSI scheduling node based on TC 3991 * information. 3992 */ 3993 int 3994 ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 3995 enum ice_rl_type rl_type, u32 bw) 3996 { 3997 int status; 3998 3999 status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle, 4000 ICE_AGG_TYPE_VSI, 4001 tc, rl_type, bw); 4002 if (!status) { 4003 mutex_lock(&pi->sched_lock); 4004 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw); 4005 mutex_unlock(&pi->sched_lock); 4006 } 4007 return status; 4008 } 4009 4010 /** 4011 * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC 4012 * @pi: port information structure 4013 * @vsi_handle: software VSI handle 4014 * @tc: traffic class 4015 * @rl_type: min or max 4016 * 4017 * This function configures default BW limit of VSI scheduling node based on TC 4018 * information. 4019 */ 4020 int 4021 ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4022 enum ice_rl_type rl_type) 4023 { 4024 int status; 4025 4026 status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle, 4027 ICE_AGG_TYPE_VSI, 4028 tc, rl_type, 4029 ICE_SCHED_DFLT_BW); 4030 if (!status) { 4031 mutex_lock(&pi->sched_lock); 4032 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, 4033 ICE_SCHED_DFLT_BW); 4034 mutex_unlock(&pi->sched_lock); 4035 } 4036 return status; 4037 } 4038 4039 /** 4040 * ice_cfg_rl_burst_size - Set burst size value 4041 * @hw: pointer to the HW struct 4042 * @bytes: burst size in bytes 4043 * 4044 * This function configures/set the burst size to requested new value. The new 4045 * burst size value is used for future rate limit calls. It doesn't change the 4046 * existing or previously created RL profiles. 4047 */ 4048 int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) 4049 { 4050 u16 burst_size_to_prog; 4051 4052 if (bytes < ICE_MIN_BURST_SIZE_ALLOWED || 4053 bytes > ICE_MAX_BURST_SIZE_ALLOWED) 4054 return -EINVAL; 4055 if (ice_round_to_num(bytes, 64) <= 4056 ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) { 4057 /* 64 byte granularity case */ 4058 /* Disable MSB granularity bit */ 4059 burst_size_to_prog = ICE_64_BYTE_GRANULARITY; 4060 /* round number to nearest 64 byte granularity */ 4061 bytes = ice_round_to_num(bytes, 64); 4062 /* The value is in 64 byte chunks */ 4063 burst_size_to_prog |= (u16)(bytes / 64); 4064 } else { 4065 /* k bytes granularity case */ 4066 /* Enable MSB granularity bit */ 4067 burst_size_to_prog = ICE_KBYTE_GRANULARITY; 4068 /* round number to nearest 1024 granularity */ 4069 bytes = ice_round_to_num(bytes, 1024); 4070 /* check rounding doesn't go beyond allowed */ 4071 if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY) 4072 bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY; 4073 /* The value is in k bytes */ 4074 burst_size_to_prog |= (u16)(bytes / 1024); 4075 } 4076 hw->max_burst_size = burst_size_to_prog; 4077 return 0; 4078 } 4079 4080 /** 4081 * ice_sched_replay_node_prio - re-configure node priority 4082 * @hw: pointer to the HW struct 4083 * @node: sched node to configure 4084 * @priority: priority value 4085 * 4086 * This function configures node element's priority value. It 4087 * needs to be called with scheduler lock held. 4088 */ 4089 static int 4090 ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, 4091 u8 priority) 4092 { 4093 struct ice_aqc_txsched_elem_data buf; 4094 struct ice_aqc_txsched_elem *data; 4095 int status; 4096 4097 buf = node->info; 4098 data = &buf.data; 4099 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; 4100 data->generic = priority; 4101 4102 /* Configure element */ 4103 status = ice_sched_update_elem(hw, node, &buf); 4104 return status; 4105 } 4106 4107 /** 4108 * ice_sched_replay_node_bw - replay node(s) BW 4109 * @hw: pointer to the HW struct 4110 * @node: sched node to configure 4111 * @bw_t_info: BW type information 4112 * 4113 * This function restores node's BW from bw_t_info. The caller needs 4114 * to hold the scheduler lock. 4115 */ 4116 static int 4117 ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, 4118 struct ice_bw_type_info *bw_t_info) 4119 { 4120 struct ice_port_info *pi = hw->port_info; 4121 int status = -EINVAL; 4122 u16 bw_alloc; 4123 4124 if (!node) 4125 return status; 4126 if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT)) 4127 return 0; 4128 if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) { 4129 status = ice_sched_replay_node_prio(hw, node, 4130 bw_t_info->generic); 4131 if (status) 4132 return status; 4133 } 4134 if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) { 4135 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, 4136 bw_t_info->cir_bw.bw); 4137 if (status) 4138 return status; 4139 } 4140 if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) { 4141 bw_alloc = bw_t_info->cir_bw.bw_alloc; 4142 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW, 4143 bw_alloc); 4144 if (status) 4145 return status; 4146 } 4147 if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) { 4148 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, 4149 bw_t_info->eir_bw.bw); 4150 if (status) 4151 return status; 4152 } 4153 if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) { 4154 bw_alloc = bw_t_info->eir_bw.bw_alloc; 4155 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW, 4156 bw_alloc); 4157 if (status) 4158 return status; 4159 } 4160 if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap)) 4161 status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW, 4162 bw_t_info->shared_bw); 4163 return status; 4164 } 4165 4166 /** 4167 * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap 4168 * @pi: port info struct 4169 * @tc_bitmap: 8 bits TC bitmap to check 4170 * @ena_tc_bitmap: 8 bits enabled TC bitmap to return 4171 * 4172 * This function returns enabled TC bitmap in variable ena_tc_bitmap. Some TCs 4173 * may be missing, it returns enabled TCs. This function needs to be called with 4174 * scheduler lock held. 4175 */ 4176 static void 4177 ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi, 4178 unsigned long *tc_bitmap, 4179 unsigned long *ena_tc_bitmap) 4180 { 4181 u8 tc; 4182 4183 /* Some TC(s) may be missing after reset, adjust for replay */ 4184 ice_for_each_traffic_class(tc) 4185 if (ice_is_tc_ena(*tc_bitmap, tc) && 4186 (ice_sched_get_tc_node(pi, tc))) 4187 set_bit(tc, ena_tc_bitmap); 4188 } 4189 4190 /** 4191 * ice_sched_replay_agg - recreate aggregator node(s) 4192 * @hw: pointer to the HW struct 4193 * 4194 * This function recreate aggregator type nodes which are not replayed earlier. 4195 * It also replay aggregator BW information. These aggregator nodes are not 4196 * associated with VSI type node yet. 4197 */ 4198 void ice_sched_replay_agg(struct ice_hw *hw) 4199 { 4200 struct ice_port_info *pi = hw->port_info; 4201 struct ice_sched_agg_info *agg_info; 4202 4203 mutex_lock(&pi->sched_lock); 4204 list_for_each_entry(agg_info, &hw->agg_list, list_entry) 4205 /* replay aggregator (re-create aggregator node) */ 4206 if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap, 4207 ICE_MAX_TRAFFIC_CLASS)) { 4208 DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 4209 int status; 4210 4211 bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 4212 ice_sched_get_ena_tc_bitmap(pi, 4213 agg_info->replay_tc_bitmap, 4214 replay_bitmap); 4215 status = ice_sched_cfg_agg(hw->port_info, 4216 agg_info->agg_id, 4217 ICE_AGG_TYPE_AGG, 4218 replay_bitmap); 4219 if (status) { 4220 dev_info(ice_hw_to_dev(hw), 4221 "Replay agg id[%d] failed\n", 4222 agg_info->agg_id); 4223 /* Move on to next one */ 4224 continue; 4225 } 4226 } 4227 mutex_unlock(&pi->sched_lock); 4228 } 4229 4230 /** 4231 * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization 4232 * @hw: pointer to the HW struct 4233 * 4234 * This function initialize aggregator(s) TC bitmap to zero. A required 4235 * preinit step for replaying aggregators. 4236 */ 4237 void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw) 4238 { 4239 struct ice_port_info *pi = hw->port_info; 4240 struct ice_sched_agg_info *agg_info; 4241 4242 mutex_lock(&pi->sched_lock); 4243 list_for_each_entry(agg_info, &hw->agg_list, list_entry) { 4244 struct ice_sched_agg_vsi_info *agg_vsi_info; 4245 4246 agg_info->tc_bitmap[0] = 0; 4247 list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list, 4248 list_entry) 4249 agg_vsi_info->tc_bitmap[0] = 0; 4250 } 4251 mutex_unlock(&pi->sched_lock); 4252 } 4253 4254 /** 4255 * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s) 4256 * @hw: pointer to the HW struct 4257 * @vsi_handle: software VSI handle 4258 * 4259 * This function replays aggregator node, VSI to aggregator type nodes, and 4260 * their node bandwidth information. This function needs to be called with 4261 * scheduler lock held. 4262 */ 4263 static int ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) 4264 { 4265 DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 4266 struct ice_sched_agg_vsi_info *agg_vsi_info; 4267 struct ice_port_info *pi = hw->port_info; 4268 struct ice_sched_agg_info *agg_info; 4269 int status; 4270 4271 bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 4272 if (!ice_is_vsi_valid(hw, vsi_handle)) 4273 return -EINVAL; 4274 agg_info = ice_get_vsi_agg_info(hw, vsi_handle); 4275 if (!agg_info) 4276 return 0; /* Not present in list - default Agg case */ 4277 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); 4278 if (!agg_vsi_info) 4279 return 0; /* Not present in list - default Agg case */ 4280 ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap, 4281 replay_bitmap); 4282 /* Replay aggregator node associated to vsi_handle */ 4283 status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id, 4284 ICE_AGG_TYPE_AGG, replay_bitmap); 4285 if (status) 4286 return status; 4287 4288 bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); 4289 ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap, 4290 replay_bitmap); 4291 /* Move this VSI (vsi_handle) to above aggregator */ 4292 return ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle, 4293 replay_bitmap); 4294 } 4295 4296 /** 4297 * ice_replay_vsi_agg - replay VSI to aggregator node 4298 * @hw: pointer to the HW struct 4299 * @vsi_handle: software VSI handle 4300 * 4301 * This function replays association of VSI to aggregator type nodes, and 4302 * node bandwidth information. 4303 */ 4304 int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) 4305 { 4306 struct ice_port_info *pi = hw->port_info; 4307 int status; 4308 4309 mutex_lock(&pi->sched_lock); 4310 status = ice_sched_replay_vsi_agg(hw, vsi_handle); 4311 mutex_unlock(&pi->sched_lock); 4312 return status; 4313 } 4314 4315 /** 4316 * ice_sched_replay_q_bw - replay queue type node BW 4317 * @pi: port information structure 4318 * @q_ctx: queue context structure 4319 * 4320 * This function replays queue type node bandwidth. This function needs to be 4321 * called with scheduler lock held. 4322 */ 4323 int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) 4324 { 4325 struct ice_sched_node *q_node; 4326 4327 /* Following also checks the presence of node in tree */ 4328 q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); 4329 if (!q_node) 4330 return -EINVAL; 4331 return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); 4332 } 4333