// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */

/* Link Aggregation code */

#include "ice.h"
#include "ice_lib.h"
#include "ice_lag.h"

#define ICE_LAG_RES_SHARED	BIT(14)
#define ICE_LAG_RES_VALID	BIT(15)

/* Dummy packet header used by the control-packet filter: zeroed MAC
 * addresses followed by the IEEE Slow Protocols EtherType (0x8809), the
 * EtherType used by LACP frames.
 */
#define LACP_TRAIN_PKT_LEN	16
static const u8 lacp_train_pkt[LACP_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
						       0, 0, 0, 0, 0, 0,
						       0x88, 0x09, 0, 0 };

#define ICE_RECIPE_LEN		64
static const u8 ice_dflt_vsi_rcp[ICE_RECIPE_LEN] = {
	0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x85, 0, 0x01, 0, 0, 0, 0xff, 0xff, 0x08, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0x30 };
static const u8 ice_lport_rcp[ICE_RECIPE_LEN] = {
	0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x85, 0, 0x16, 0, 0, 0, 0xff, 0xff, 0x07, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0x30 };

/**
 * ice_lag_set_primary - set PF LAG state as Primary
 * @lag: LAG info struct
 */
static void ice_lag_set_primary(struct ice_lag *lag)
{
	struct ice_pf *pf = lag->pf;

	if (!pf)
		return;

	if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_BACKUP) {
		dev_warn(ice_pf_to_dev(pf), "%s: Attempt to be Primary, but incompatible state.\n",
			 netdev_name(lag->netdev));
		return;
	}

	lag->role = ICE_LAG_PRIMARY;
}

/**
 * ice_lag_set_backup - set PF LAG state to Backup
 * @lag: LAG info struct
 */
static void ice_lag_set_backup(struct ice_lag *lag)
{
	struct ice_pf *pf = lag->pf;

	if (!pf)
		return;

	if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_PRIMARY) {
		dev_dbg(ice_pf_to_dev(pf), "%s: Attempt to be Backup, but incompatible state\n",
			netdev_name(lag->netdev));
		return;
	}

	lag->role = ICE_LAG_BACKUP;
}

/**
 * netif_is_same_ice - determine if netdev is on the same ice NIC as local PF
 * @pf: local PF struct
 * @netdev: netdev we are evaluating
 */
static bool netif_is_same_ice(struct ice_pf *pf, struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_pf *test_pf;
	struct ice_vsi *vsi;

	if (!netif_is_ice(netdev))
		return false;

	np = netdev_priv(netdev);
	if (!np)
		return false;

	vsi = np->vsi;
	if (!vsi)
		return false;

	test_pf = vsi->back;
	if (!test_pf)
		return false;

	if (pf->pdev->bus != test_pf->pdev->bus ||
	    pf->pdev->slot != test_pf->pdev->slot)
		return false;

	return true;
}

/**
 * ice_netdev_to_lag - return pointer to associated lag struct from netdev
 * @netdev: pointer to net_device struct to query
 */
static struct ice_lag *ice_netdev_to_lag(struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_vsi *vsi;

	if (!netif_is_ice(netdev))
		return NULL;

	np = netdev_priv(netdev);
	if (!np)
		return NULL;

	vsi = np->vsi;
	if (!vsi)
		return NULL;

	return vsi->back->lag;
}

/**
 * ice_lag_find_hw_by_lport - return a hw struct from bond member's lport
 * @lag: lag struct
 * @lport: lport value to search for
 */
static struct ice_hw *
ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport)
{
	struct ice_lag_netdev_list *entry;
	struct net_device *tmp_netdev;
	struct ice_netdev_priv *np;
	struct ice_hw *hw;

	list_for_each_entry(entry, lag->netdev_head, node) {
		tmp_netdev = entry->netdev;
		if (!tmp_netdev || !netif_is_ice(tmp_netdev))
			continue;

		np = netdev_priv(tmp_netdev);
		if (!np || !np->vsi)
			continue;

		hw = &np->vsi->back->hw;
		if (hw->port_info->lport == lport)
			return hw;
	}

	return NULL;
}

/**
 * ice_pkg_has_lport_extract - check if lport extraction supported
 * @hw: HW struct
 */
static bool ice_pkg_has_lport_extract(struct ice_hw *hw)
{
	int i;

	for (i = 0; i < hw->blk[ICE_BLK_SW].es.count; i++) {
		u16 offset;
		u8 fv_prot;

		ice_find_prot_off(hw, ICE_BLK_SW, ICE_SW_DEFAULT_PROFILE, i,
				  &fv_prot, &offset);
		if (fv_prot == ICE_FV_PROT_MDID &&
		    offset == ICE_LP_EXT_BUF_OFFSET)
			return true;
	}
	return false;
}

/**
 * ice_lag_find_primary - returns pointer to primary interface's lag struct
 * @lag: local interface's lag struct
 */
static struct ice_lag *ice_lag_find_primary(struct ice_lag *lag)
{
	struct ice_lag *primary_lag = NULL;
	struct list_head *tmp;

	list_for_each(tmp, lag->netdev_head) {
		struct ice_lag_netdev_list *entry;
		struct ice_lag *tmp_lag;

		entry = list_entry(tmp, struct ice_lag_netdev_list, node);
		tmp_lag = ice_netdev_to_lag(entry->netdev);
		if (tmp_lag && tmp_lag->primary) {
			primary_lag = tmp_lag;
			break;
		}
	}

	return primary_lag;
}

/**
 * ice_lag_cfg_fltr - Add/Remove rule for LAG
 * @lag: lag struct for local interface
 * @act: rule action
 * @recipe_id: recipe id for the new rule
 * @rule_idx: pointer to rule index
 * @add: boolean on whether we are adding filters
 */
static int
ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
		 bool add)
{
	struct ice_sw_rule_lkup_rx_tx *s_rule;
	u16 s_rule_sz, vsi_num;
	struct ice_hw *hw;
	u8 *eth_hdr;
	u32 opc;
	int err;

	hw = &lag->pf->hw;
	vsi_num = ice_get_hw_vsi_num(hw, 0);

	s_rule_sz = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
	s_rule = kzalloc(s_rule_sz, GFP_KERNEL);
	if (!s_rule) {
		dev_err(ice_pf_to_dev(lag->pf), "error allocating rule for LAG\n");
		return -ENOMEM;
	}

	if (add) {
		eth_hdr = s_rule->hdr_data;
		ice_fill_eth_hdr(eth_hdr);

		act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi_num);

		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->recipe_id = cpu_to_le16(recipe_id);
		s_rule->src = cpu_to_le16(hw->port_info->lport);
		s_rule->act = cpu_to_le32(act);
		s_rule->hdr_len = cpu_to_le16(DUMMY_ETH_HDR_LEN);
		opc = ice_aqc_opc_add_sw_rules;
	} else {
		s_rule->index = cpu_to_le16(*rule_idx);
		opc = ice_aqc_opc_remove_sw_rules;
	}

	err = ice_aq_sw_rules(&lag->pf->hw, s_rule, s_rule_sz, 1, opc, NULL);
	if (err)
		goto dflt_fltr_free;

	if (add)
		*rule_idx = le16_to_cpu(s_rule->index);
	else
		*rule_idx = 0;

dflt_fltr_free:
	kfree(s_rule);
	return err;
}

/**
 * ice_lag_cfg_dflt_fltr - Add/Remove default VSI rule for LAG
 * @lag: lag struct for local interface
 * @add: boolean on whether to add filter
 */
static int
ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add)
{
	u32 act = ICE_SINGLE_ACT_VSI_FORWARDING |
		ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE;

	return ice_lag_cfg_fltr(lag, act, lag->pf_recipe,
				&lag->pf_rule_id, add);
}

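/* Failover filtering, as used below: the active port in the bond carries a
 * default-VSI forwarding rule (ice_lag_cfg_dflt_fltr) so received traffic is
 * steered to the primary PF's VSI, while an inactive port carries an
 * lport-keyed drop rule (ice_lag_cfg_drop_fltr). ice_lag_cfg_pf_fltrs()
 * swaps these rules when bonding_info events report a state change.
 */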
/**
 * ice_lag_cfg_drop_fltr - Add/Remove lport drop rule
 * @lag: lag struct for local interface
 * @add: boolean on whether to add filter
 */
static int
ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add)
{
	u32 act = ICE_SINGLE_ACT_VSI_FORWARDING |
		  ICE_SINGLE_ACT_VALID_BIT |
		  ICE_SINGLE_ACT_DROP;

	return ice_lag_cfg_fltr(lag, act, lag->lport_recipe,
				&lag->lport_rule_idx, add);
}

/**
 * ice_lag_cfg_pf_fltrs - set filters up for new active port
 * @lag: local interface's lag struct
 * @ptr: opaque data containing notifier event
 */
static void
ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
{
	struct netdev_notifier_bonding_info *info;
	struct netdev_bonding_info *bonding_info;
	struct net_device *event_netdev;
	struct device *dev;

	event_netdev = netdev_notifier_info_to_dev(ptr);
	/* not for this netdev */
	if (event_netdev != lag->netdev)
		return;

	info = (struct netdev_notifier_bonding_info *)ptr;
	bonding_info = &info->bonding_info;
	dev = ice_pf_to_dev(lag->pf);

	/* interface not active - remove old default VSI rule */
	if (bonding_info->slave.state && lag->pf_rule_id) {
		if (ice_lag_cfg_dflt_fltr(lag, false))
			dev_err(dev, "Error removing old default VSI filter\n");
		if (ice_lag_cfg_drop_fltr(lag, true))
			dev_err(dev, "Error adding new drop filter\n");
		return;
	}

	/* interface becoming active - add new default VSI rule */
	if (!bonding_info->slave.state && !lag->pf_rule_id) {
		if (ice_lag_cfg_dflt_fltr(lag, true))
			dev_err(dev, "Error adding new default VSI filter\n");
		if (lag->lport_rule_idx && ice_lag_cfg_drop_fltr(lag, false))
			dev_err(dev, "Error removing old drop filter\n");
	}
}

/**
 * ice_display_lag_info - print LAG info
 * @lag: LAG info struct
 */
static void ice_display_lag_info(struct ice_lag *lag)
{
	const char *name, *upper, *role, *bonded, *primary;
	struct device *dev = &lag->pf->pdev->dev;

	name = lag->netdev ? netdev_name(lag->netdev) : "unset";
	upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
	primary = lag->primary ? "TRUE" : "FALSE";
	bonded = lag->bonded ? "BONDED" : "UNBONDED";

	switch (lag->role) {
	case ICE_LAG_NONE:
		role = "NONE";
		break;
	case ICE_LAG_PRIMARY:
		role = "PRIMARY";
		break;
	case ICE_LAG_BACKUP:
		role = "BACKUP";
		break;
	case ICE_LAG_UNSET:
		role = "UNSET";
		break;
	default:
		role = "ERROR";
	}

	dev_dbg(dev, "%s %s, upper:%s, role:%s, primary:%s\n", name, bonded,
		upper, role, primary);
}

/**
 * ice_lag_qbuf_recfg - generate a buffer of queues for a reconfigure command
 * @hw: HW struct that contains the queue contexts
 * @qbuf: pointer to buffer to populate
 * @vsi_num: index of the VSI in PF space
 * @numq: number of queues to search for
 * @tc: traffic class that contains the queues
 *
 * function returns the number of valid queues in buffer
 */
static u16
ice_lag_qbuf_recfg(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *qbuf,
		   u16 vsi_num, u16 numq, u8 tc)
{
	struct ice_q_ctx *q_ctx;
	u16 qid, count = 0;
	struct ice_pf *pf;
	int i;

	pf = hw->back;
	for (i = 0; i < numq; i++) {
		q_ctx = ice_get_lan_q_ctx(hw, vsi_num, tc, i);
		if (!q_ctx) {
			dev_dbg(ice_hw_to_dev(hw), "%s queue %d NO Q CONTEXT\n",
				__func__, i);
			continue;
		}
		if (q_ctx->q_teid == ICE_INVAL_TEID) {
			dev_dbg(ice_hw_to_dev(hw), "%s queue %d INVAL TEID\n",
				__func__, i);
			continue;
		}
		if (q_ctx->q_handle == ICE_INVAL_Q_HANDLE) {
			dev_dbg(ice_hw_to_dev(hw), "%s queue %d INVAL Q HANDLE\n",
				__func__, i);
			continue;
		}

		qid = pf->vsi[vsi_num]->txq_map[q_ctx->q_handle];
		qbuf->queue_info[count].q_handle = cpu_to_le16(qid);
		qbuf->queue_info[count].tc = tc;
		qbuf->queue_info[count].q_teid = cpu_to_le32(q_ctx->q_teid);
		count++;
	}

	return count;
}

/**
 * ice_lag_get_sched_parent - locate or create a sched node parent
 * @hw: HW struct for getting parent in
 * @tc: traffic class on parent/node
 */
static struct ice_sched_node *
ice_lag_get_sched_parent(struct ice_hw *hw, u8 tc)
{
	struct ice_sched_node *tc_node, *aggnode, *parent = NULL;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_port_info *pi = hw->port_info;
	struct device *dev;
	u8 aggl, vsil;
	int n;

	dev = ice_hw_to_dev(hw);

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node) {
		dev_warn(dev, "Failure to find TC node for LAG move\n");
		return parent;
	}

	aggnode = ice_sched_get_agg_node(pi, tc_node, ICE_DFLT_AGG_ID);
	if (!aggnode) {
		dev_warn(dev, "Failure to find aggregate node for LAG move\n");
		return parent;
	}

	aggl = ice_sched_get_agg_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);

	for (n = aggl + 1; n < vsil; n++)
		num_nodes[n] = 1;

	for (n = 0; n < aggnode->num_children; n++) {
		parent = ice_sched_get_free_vsi_parent(hw, aggnode->children[n],
						       num_nodes);
		if (parent)
			return parent;
	}

	/* if free parent not found - add one */
	parent = aggnode;
	for (n = aggl + 1; n < vsil; n++) {
		u16 num_nodes_added;
		u32 first_teid;
		int err;

		err = ice_sched_add_nodes_to_layer(pi, tc_node, parent, n,
						   num_nodes[n], &first_teid,
						   &num_nodes_added);
		if (err || num_nodes[n] != num_nodes_added)
			return NULL;

		if (num_nodes_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_teid);
		else
			parent = parent->children[0];
		if (!parent) {
			dev_warn(dev, "Failure to add new parent for LAG move\n");
			return parent;
		}
	}

	return parent;
}

/**
 * ice_lag_move_vf_node_tc - move scheduling nodes for one VF on one TC
 * @lag: lag info struct
 * @oldport: lport of previous nodes location
 * @newport: lport of destination nodes location
 * @vsi_num: array index of VSI in PF space
 * @tc: traffic class to move
 */
static void
ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
			u16 vsi_num, u8 tc)
{
	DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
	struct device *dev = ice_pf_to_dev(lag->pf);
	u16 numq, valq, num_moved, qbuf_size;
	u16 buf_size = __struct_size(buf);
	struct ice_aqc_cfg_txqs_buf *qbuf;
	struct ice_sched_node *n_prt;
	struct ice_hw *new_hw = NULL;
	__le32 teid, parent_teid;
	struct ice_vsi_ctx *ctx;
	u32 tmp_teid;

	ctx = ice_get_vsi_ctx(&lag->pf->hw, vsi_num);
	if (!ctx) {
		dev_warn(dev, "Unable to locate VSI context for LAG failover\n");
		return;
	}

	/* check to see if this VF is enabled on this TC */
	if (!ctx->sched.vsi_node[tc])
		return;

	/* locate HW struct for destination port */
	new_hw = ice_lag_find_hw_by_lport(lag, newport);
	if (!new_hw) {
		dev_warn(dev, "Unable to locate HW struct for LAG node destination\n");
		return;
	}

	numq = ctx->num_lan_q_entries[tc];
	teid = ctx->sched.vsi_node[tc]->info.node_teid;
	tmp_teid = le32_to_cpu(teid);
	parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;
	/* if no teid assigned or numq == 0, then this TC is not active */
	if (!tmp_teid || !numq)
		return;

	/* suspend VSI subtree for Traffic Class "tc" on
	 * this VF's VSI
	 */
	if (ice_sched_suspend_resume_elems(&lag->pf->hw, 1, &tmp_teid, true))
		dev_dbg(dev, "Problem suspending traffic for LAG node move\n");

	/* reconfigure all VF's queues on this Traffic Class
	 * to new port
	 */
	qbuf_size = struct_size(qbuf, queue_info, numq);
	qbuf = kzalloc(qbuf_size, GFP_KERNEL);
	if (!qbuf) {
		dev_warn(dev, "Failure allocating memory for VF queue recfg buffer\n");
		goto resume_traffic;
	}

	/* add the per queue info for the reconfigure command buffer */
	valq = ice_lag_qbuf_recfg(&lag->pf->hw, qbuf, vsi_num, numq, tc);
	if (!valq) {
		dev_dbg(dev, "No valid queues found for LAG failover\n");
		goto qbuf_none;
	}

	if (ice_aq_cfg_lan_txq(&lag->pf->hw, qbuf, qbuf_size, valq, oldport,
			       newport, NULL)) {
		dev_warn(dev, "Failure to configure queues for LAG failover\n");
		goto qbuf_err;
	}

qbuf_none:
	kfree(qbuf);

	/* find new parent in destination port's tree for VF VSI node on this
	 * Traffic Class
	 */
	n_prt = ice_lag_get_sched_parent(new_hw, tc);
	if (!n_prt)
		goto resume_traffic;

	/* Move VF's VSI node for this TC to newport's scheduler tree */
	buf->hdr.src_parent_teid = parent_teid;
	buf->hdr.dest_parent_teid = n_prt->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(1);
	buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
	buf->teid[0] = teid;

	if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
		dev_warn(dev, "Failure to move VF nodes for failover\n");
	else
		ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);

	goto resume_traffic;

qbuf_err:
	kfree(qbuf);

resume_traffic:
	/* restart traffic for VSI node */
	if (ice_sched_suspend_resume_elems(&lag->pf->hw, 1, &tmp_teid, false))
		dev_dbg(dev, "Problem restarting traffic for LAG node move\n");
}

/**
 * ice_lag_build_netdev_list - populate the lag struct's netdev list
 * @lag: local lag struct
 * @ndlist: pointer to netdev list to populate
 */
static void ice_lag_build_netdev_list(struct ice_lag *lag,
				      struct ice_lag_netdev_list *ndlist)
{
	struct ice_lag_netdev_list *nl;
	struct net_device *tmp_nd;

	INIT_LIST_HEAD(&ndlist->node);
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
		nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
		if (!nl)
			break;

		nl->netdev = tmp_nd;
		list_add(&nl->node, &ndlist->node);
	}
	rcu_read_unlock();
	lag->netdev_head = &ndlist->node;
}

/**
 * ice_lag_destroy_netdev_list - free lag struct's netdev list
 * @lag: pointer to local lag struct
 * @ndlist: pointer to lag struct netdev list
 */
static void ice_lag_destroy_netdev_list(struct ice_lag *lag,
					struct ice_lag_netdev_list *ndlist)
{
	struct ice_lag_netdev_list *entry, *n;

	rcu_read_lock();
	list_for_each_entry_safe(entry, n, &ndlist->node, node) {
		list_del(&entry->node);
		kfree(entry);
	}
	rcu_read_unlock();
	lag->netdev_head = NULL;
}

/**
 * ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF
 * @lag: primary interface LAG struct
 * @oldport: lport of previous interface
 * @newport: lport of destination interface
 * @vsi_num: SW index of VF's VSI
 */
static void
ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
			     u16 vsi_num)
{
	u8 tc;

	ice_for_each_traffic_class(tc)
		ice_lag_move_vf_node_tc(lag, oldport, newport, vsi_num, tc);
}

/**
 * ice_lag_move_new_vf_nodes - Move Tx scheduling nodes for a VF if required
 * @vf: the VF to move Tx nodes for
 *
 * Called just after configuring new VF queues. Check whether the VF Tx
 * scheduling nodes need to be updated to fail over to the active port. If so,
 * move them now.
 */
void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
{
	struct ice_lag_netdev_list ndlist;
	u8 pri_port, act_port;
	struct ice_lag *lag;
	struct ice_vsi *vsi;
	struct ice_pf *pf;

	vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	if (WARN_ON(vsi->type != ICE_VSI_VF))
		return;

	pf = vf->pf;
	lag = pf->lag;

	mutex_lock(&pf->lag_mutex);
	if (!lag->bonded)
		goto new_vf_unlock;

	pri_port = pf->hw.port_info->lport;
	act_port = lag->active_port;

	if (lag->upper_netdev)
		ice_lag_build_netdev_list(lag, &ndlist);

	if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
	    lag->bonded && lag->primary && pri_port != act_port &&
	    !list_empty(lag->netdev_head))
		ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);

	ice_lag_destroy_netdev_list(lag, &ndlist);

new_vf_unlock:
	mutex_unlock(&pf->lag_mutex);
}

/**
 * ice_lag_move_vf_nodes - move Tx scheduling nodes for all VFs to new port
 * @lag: lag info struct
 * @oldport: lport of previous interface
 * @newport: lport of destination interface
 */
static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
{
	struct ice_pf *pf;
	int i;

	if (!lag->primary)
		return;

	pf = lag->pf;
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
				   pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
			ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
}

/**
 * ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context
 * @lag: local lag struct
 * @src_prt: lport value for source port
 * @dst_prt: lport value for destination port
 *
 * This function is used to move nodes during an out-of-netdev-event situation,
 * primarily when the driver needs to reconfigure or recreate resources.
 *
 * Must be called while holding the lag_mutex to avoid lag events from
 * processing while out-of-sync moves are happening. Also, paired moves,
 * such as used in a reset flow, should both be called under the same mutex
 * lock to avoid changes between start of reset and end of reset.
 */
void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
{
	struct ice_lag_netdev_list ndlist;

	ice_lag_build_netdev_list(lag, &ndlist);
	ice_lag_move_vf_nodes(lag, src_prt, dst_prt);
	ice_lag_destroy_netdev_list(lag, &ndlist);
}
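/* Control-packet filter: ice_lag_cfg_cp_fltr() below builds a rule from the
 * lacp_train_pkt header (Slow Protocols EtherType 0x8809, used by LACP) on
 * recipe ICE_LAG_SRIOV_CP_RECIPE that forwards matching frames received on
 * this lport to the PF's VSI, presumably so LACPDUs still reach the bonding
 * driver on a port that is not currently the active forwarder.
 */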
#define ICE_LAG_SRIOV_CP_RECIPE		10
#define ICE_LAG_SRIOV_TRAIN_PKT_LEN	16

/**
 * ice_lag_cfg_cp_fltr - configure filter for control packets
 * @lag: local interface's lag struct
 * @add: add or remove rule
 */
static void
ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add)
{
	struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
	struct ice_vsi *vsi;
	u16 buf_len, opc;

	vsi = lag->pf->vsi[0];

	buf_len = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule,
					     ICE_LAG_SRIOV_TRAIN_PKT_LEN);
	s_rule = kzalloc(buf_len, GFP_KERNEL);
	if (!s_rule) {
		netdev_warn(lag->netdev, "-ENOMEM error configuring CP filter\n");
		return;
	}

	if (add) {
		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->recipe_id = cpu_to_le16(ICE_LAG_SRIOV_CP_RECIPE);
		s_rule->src = cpu_to_le16(vsi->port_info->lport);
		s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
					  ICE_SINGLE_ACT_LAN_ENABLE |
					  ICE_SINGLE_ACT_VALID_BIT |
					  FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi->vsi_num));
		s_rule->hdr_len = cpu_to_le16(ICE_LAG_SRIOV_TRAIN_PKT_LEN);
		memcpy(s_rule->hdr_data, lacp_train_pkt, LACP_TRAIN_PKT_LEN);
		opc = ice_aqc_opc_add_sw_rules;
	} else {
		opc = ice_aqc_opc_remove_sw_rules;
		s_rule->index = cpu_to_le16(lag->cp_rule_idx);
	}
	if (ice_aq_sw_rules(&lag->pf->hw, s_rule, buf_len, 1, opc, NULL)) {
		netdev_warn(lag->netdev, "Error %s CP rule for fail-over\n",
			    add ? "ADDING" : "REMOVING");
		goto cp_free;
	}

	if (add)
		lag->cp_rule_idx = le16_to_cpu(s_rule->index);
	else
		lag->cp_rule_idx = 0;

cp_free:
	kfree(s_rule);
}
/**
 * ice_lag_info_event - handle NETDEV_BONDING_INFO event
 * @lag: LAG info struct
 * @ptr: opaque data pointer
 *
 * ptr is to be cast to (netdev_notifier_bonding_info *)
 */
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
{
	struct netdev_notifier_bonding_info *info;
	struct netdev_bonding_info *bonding_info;
	struct net_device *event_netdev;
	const char *lag_netdev_name;

	event_netdev = netdev_notifier_info_to_dev(ptr);
	info = ptr;
	lag_netdev_name = netdev_name(lag->netdev);
	bonding_info = &info->bonding_info;

	if (event_netdev != lag->netdev || !lag->bonded || !lag->upper_netdev)
		return;

	if (bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) {
		netdev_dbg(lag->netdev, "Bonding event recv, but mode not active/backup\n");
		goto lag_out;
	}

	if (strcmp(bonding_info->slave.slave_name, lag_netdev_name)) {
		netdev_dbg(lag->netdev, "Bonding event recv, but secondary info not for us\n");
		goto lag_out;
	}

	if (bonding_info->slave.state)
		ice_lag_set_backup(lag);
	else
		ice_lag_set_primary(lag);

lag_out:
	ice_display_lag_info(lag);
}

/**
 * ice_lag_reclaim_vf_tc - move scheduling nodes back to primary interface
 * @lag: primary interface's lag struct
 * @src_hw: HW struct current node location
 * @vsi_num: VSI index in PF space
 * @tc: traffic class to move
 */
static void
ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
		      u8 tc)
{
	DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
	struct device *dev = ice_pf_to_dev(lag->pf);
	u16 numq, valq, num_moved, qbuf_size;
	u16 buf_size = __struct_size(buf);
	struct ice_aqc_cfg_txqs_buf *qbuf;
	struct ice_sched_node *n_prt;
	__le32 teid, parent_teid;
	struct ice_vsi_ctx *ctx;
	struct ice_hw *hw;
	u32 tmp_teid;

	hw = &lag->pf->hw;
	ctx = ice_get_vsi_ctx(hw, vsi_num);
	if (!ctx) {
		dev_warn(dev, "Unable to locate VSI context for LAG reclaim\n");
		return;
	}

	/* check to see if this VF is enabled on this TC */
	if (!ctx->sched.vsi_node[tc])
		return;

	numq = ctx->num_lan_q_entries[tc];
	teid = ctx->sched.vsi_node[tc]->info.node_teid;
	tmp_teid = le32_to_cpu(teid);
	parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;

	/* if !teid or !numq, then this TC is not active */
	if (!tmp_teid || !numq)
		return;

	/* suspend traffic */
	if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, true))
		dev_dbg(dev, "Problem suspending traffic for LAG node move\n");

	/* reconfig queues for new port */
	qbuf_size = struct_size(qbuf, queue_info, numq);
	qbuf = kzalloc(qbuf_size, GFP_KERNEL);
	if (!qbuf) {
		dev_warn(dev, "Failure allocating memory for VF queue recfg buffer\n");
		goto resume_reclaim;
	}

	/* add the per queue info for the reconfigure command buffer */
	valq = ice_lag_qbuf_recfg(hw, qbuf, vsi_num, numq, tc);
	if (!valq) {
		dev_dbg(dev, "No valid queues found for LAG reclaim\n");
		goto reclaim_none;
	}

	if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq,
			       src_hw->port_info->lport, hw->port_info->lport,
			       NULL)) {
		dev_warn(dev, "Failure to configure queues for LAG failover\n");
		goto reclaim_qerr;
	}

reclaim_none:
	kfree(qbuf);

	/* find parent in primary tree */
	n_prt = ice_lag_get_sched_parent(hw, tc);
	if (!n_prt)
		goto resume_reclaim;

	/* Move node to new parent */
	buf->hdr.src_parent_teid = parent_teid;
	buf->hdr.dest_parent_teid = n_prt->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(1);
	buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
	buf->teid[0] = teid;

	if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
		dev_warn(dev, "Failure to move VF nodes for LAG reclaim\n");
	else
		ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);

	goto resume_reclaim;

reclaim_qerr:
	kfree(qbuf);

resume_reclaim:
	/* restart traffic */
	if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, false))
		dev_warn(dev, "Problem restarting traffic for LAG node reclaim\n");
}

/**
 * ice_lag_reclaim_vf_nodes - reclaim nodes to primary when an interface leaves the bond
 * @lag: primary interface's lag struct
 * @src_hw: HW struct for current node location
 */
static void
ice_lag_reclaim_vf_nodes(struct ice_lag *lag, struct ice_hw *src_hw)
{
	struct ice_pf *pf;
	int i, tc;

	if (!lag->primary || !src_hw)
		return;

	pf = lag->pf;
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
				   pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
			ice_for_each_traffic_class(tc)
				ice_lag_reclaim_vf_tc(lag, src_hw, i, tc);
}

/**
 * ice_lag_link - handle LAG link event
 * @lag: LAG info struct
 */
static void ice_lag_link(struct ice_lag *lag)
{
	struct ice_pf *pf = lag->pf;

	if (lag->bonded)
		dev_warn(ice_pf_to_dev(pf), "%s Already part of a bond\n",
			 netdev_name(lag->netdev));

	lag->bonded = true;
	lag->role = ICE_LAG_UNSET;
	netdev_info(lag->netdev, "Shared SR-IOV resources in bond are active\n");
}

/**
 * ice_lag_unlink - handle unlink event
 * @lag: LAG info struct
 */
static void ice_lag_unlink(struct ice_lag *lag)
{
	u8 pri_port, act_port, loc_port;
	struct ice_pf *pf = lag->pf;

	if (!lag->bonded) {
		netdev_dbg(lag->netdev, "bonding unlink event on non-LAG netdev\n");
		return;
	}

	if (lag->primary) {
		act_port = lag->active_port;
		pri_port = lag->pf->hw.port_info->lport;
		if (act_port != pri_port && act_port != ICE_LAG_INVALID_PORT)
			ice_lag_move_vf_nodes(lag, act_port, pri_port);
		lag->primary = false;
		lag->active_port = ICE_LAG_INVALID_PORT;
	} else {
		struct ice_lag *primary_lag;

		primary_lag = ice_lag_find_primary(lag);
		if (primary_lag) {
			act_port = primary_lag->active_port;
			pri_port = primary_lag->pf->hw.port_info->lport;
			loc_port = pf->hw.port_info->lport;
			if (act_port == loc_port &&
			    act_port != ICE_LAG_INVALID_PORT) {
				ice_lag_reclaim_vf_nodes(primary_lag,
							 &lag->pf->hw);
				primary_lag->active_port = ICE_LAG_INVALID_PORT;
			}
		}
	}

	lag->bonded = false;
	lag->role = ICE_LAG_NONE;
	lag->upper_netdev = NULL;
}

/**
 * ice_lag_link_unlink - helper function to call lag_link/unlink
 * @lag: lag info struct
 * @ptr: opaque pointer data
 */
static void ice_lag_link_unlink(struct ice_lag *lag, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;

	if (netdev != lag->netdev)
		return;

	if (info->linking)
		ice_lag_link(lag);
	else
		ice_lag_unlink(lag);
}
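/* SWID handling: both ports in the bond must operate under the primary
 * interface's switch ID (SWID) so that switch rules created for the bond
 * apply to traffic arriving on either port. ice_lag_primary_swid() marks
 * the primary's SWID as shared; ice_lag_set_swid() subscribes the secondary
 * to it on link and reverts it to the local SWID on unlink.
 */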
/**
 * ice_lag_set_swid - set the SWID on secondary interface
 * @primary_swid: primary interface's SWID
 * @local_lag: local interface's LAG struct
 * @link: Is this a linking activity
 *
 * If link is false, then primary_swid should be expected to not be valid
 * This function should never be called in interrupt context.
 */
static void
ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
		 bool link)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	struct ice_aqc_set_port_params *cmd;
	struct ice_aq_desc desc;
	u16 buf_len, swid;
	int status, i;

	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		dev_err(ice_pf_to_dev(local_lag->pf), "-ENOMEM error setting SWID\n");
		return;
	}

	buf->num_elems = cpu_to_le16(1);
	buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_SWID);
	/* if unlinking, need to free the shared resource */
	if (!link && local_lag->bond_swid) {
		buf->elem[0].e.sw_resp = cpu_to_le16(local_lag->bond_swid);
		status = ice_aq_alloc_free_res(&local_lag->pf->hw, buf,
					       buf_len, ice_aqc_opc_free_res);
		if (status)
			dev_err(ice_pf_to_dev(local_lag->pf), "Error freeing SWID during LAG unlink\n");
		local_lag->bond_swid = 0;
	}

	if (link) {
		buf->res_type |= cpu_to_le16(ICE_LAG_RES_SHARED |
					     ICE_LAG_RES_VALID);
		/* store the primary's SWID in case it leaves bond first */
		local_lag->bond_swid = primary_swid;
		buf->elem[0].e.sw_resp = cpu_to_le16(local_lag->bond_swid);
	} else {
		buf->elem[0].e.sw_resp =
			cpu_to_le16(local_lag->pf->hw.port_info->sw_id);
	}

	status = ice_aq_alloc_free_res(&local_lag->pf->hw, buf, buf_len,
				       ice_aqc_opc_alloc_res);
	if (status)
		dev_err(ice_pf_to_dev(local_lag->pf), "Error subscribing to SWID 0x%04X\n",
			local_lag->bond_swid);

	kfree(buf);

	/* Configure port param SWID to correct value */
	if (link)
		swid = primary_swid;
	else
		swid = local_lag->pf->hw.port_info->sw_id;

	cmd = &desc.params.set_port_params;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);

	cmd->swid = cpu_to_le16(ICE_AQC_PORT_SWID_VALID | swid);
	/* If this is happening in reset context, it is possible that the
	 * primary interface has not finished setting its SWID to SHARED
	 * yet. Allow retries to account for this timing issue between
	 * interfaces.
	 */
	for (i = 0; i < ICE_LAG_RESET_RETRIES; i++) {
		status = ice_aq_send_cmd(&local_lag->pf->hw, &desc, NULL, 0,
					 NULL);
		if (!status)
			break;

		usleep_range(1000, 2000);
	}

	if (status)
		dev_err(ice_pf_to_dev(local_lag->pf), "Error setting SWID in port params %d\n",
			status);
}

/**
 * ice_lag_primary_swid - set/clear the SHARED attrib of primary's SWID
 * @lag: primary interface's lag struct
 * @link: is this a linking activity
 *
 * Implement setting primary SWID as shared using 0x020B
 */
static void ice_lag_primary_swid(struct ice_lag *lag, bool link)
{
	struct ice_hw *hw;
	u16 swid;

	hw = &lag->pf->hw;
	swid = hw->port_info->sw_id;

	if (ice_share_res(hw, ICE_AQC_RES_TYPE_SWID, link, swid))
		dev_warn(ice_pf_to_dev(lag->pf), "Failure to set primary interface shared status\n");
}

/**
 * ice_lag_add_prune_list - Adds event_pf's VSI to primary's prune list
 * @lag: lag info struct
 * @event_pf: PF struct for VSI we are adding to primary's prune list
 */
static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
{
	u16 num_vsi, rule_buf_sz, vsi_list_id, event_vsi_num, prim_vsi_idx;
	struct ice_sw_rule_vsi_list *s_rule = NULL;
	struct device *dev;

	num_vsi = 1;

	dev = ice_pf_to_dev(lag->pf);
	event_vsi_num = event_pf->vsi[0]->vsi_num;
	prim_vsi_idx = lag->pf->vsi[0]->idx;

	if (!ice_find_vsi_list_entry(&lag->pf->hw, ICE_SW_LKUP_VLAN,
				     prim_vsi_idx, &vsi_list_id)) {
		dev_warn(dev, "Could not locate prune list when setting up SRIOV LAG\n");
		return;
	}

	rule_buf_sz = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
	if (!s_rule) {
		dev_warn(dev, "Error allocating space for prune list when configuring SRIOV LAG\n");
		return;
	}

	s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_PRUNE_LIST_SET);
	s_rule->index = cpu_to_le16(vsi_list_id);
	s_rule->number_vsi = cpu_to_le16(num_vsi);
	s_rule->vsi[0] = cpu_to_le16(event_vsi_num);

	if (ice_aq_sw_rules(&event_pf->hw, s_rule, rule_buf_sz, 1,
			    ice_aqc_opc_update_sw_rules, NULL))
		dev_warn(dev, "Error adding VSI prune list\n");
	kfree(s_rule);
}

/**
 * ice_lag_del_prune_list - Remove secondary's vsi from primary's prune list
 * @lag: primary interface's ice_lag struct
 * @event_pf: PF struct for unlinking interface
 */
static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
{
	u16 num_vsi, vsi_num, vsi_idx, rule_buf_sz, vsi_list_id;
	struct ice_sw_rule_vsi_list *s_rule = NULL;
	struct device *dev;

	num_vsi = 1;

	dev = ice_pf_to_dev(lag->pf);
	vsi_num = event_pf->vsi[0]->vsi_num;
	vsi_idx = lag->pf->vsi[0]->idx;

	if (!ice_find_vsi_list_entry(&lag->pf->hw, ICE_SW_LKUP_VLAN,
				     vsi_idx, &vsi_list_id)) {
		dev_warn(dev, "Could not locate prune list when unwinding SRIOV LAG\n");
		return;
	}

	rule_buf_sz = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
	if (!s_rule) {
		dev_warn(dev, "Error allocating prune list when unwinding SRIOV LAG\n");
		return;
	}

	s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR);
	s_rule->index = cpu_to_le16(vsi_list_id);
	s_rule->number_vsi = cpu_to_le16(num_vsi);
	s_rule->vsi[0] = cpu_to_le16(vsi_num);

	if (ice_aq_sw_rules(&event_pf->hw, (struct ice_aqc_sw_rules *)s_rule,
			    rule_buf_sz, 1, ice_aqc_opc_update_sw_rules, NULL))
		dev_warn(dev, "Error clearing VSI prune list\n");

	kfree(s_rule);
}

/**
 * ice_lag_init_feature_support_flag - Check for package and NVM support for LAG
 * @pf: PF struct
 */
static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
{
	struct ice_hw_common_caps *caps;

	caps = &pf->hw.dev_caps.common_cap;
	if (caps->roce_lag)
		ice_set_feature_support(pf, ICE_F_ROCE_LAG);
	else
		ice_clear_feature_support(pf, ICE_F_ROCE_LAG);

	if (caps->sriov_lag && ice_pkg_has_lport_extract(&pf->hw))
		ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
	else
		ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
}

/**
 * ice_lag_changeupper_event - handle LAG changeupper event
 * @lag: LAG info struct
 * @ptr: opaque pointer data
 */
static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct ice_lag *primary_lag;
	struct net_device *netdev;

	info = ptr;
	netdev = netdev_notifier_info_to_dev(ptr);

	/* not for this netdev */
	if (netdev != lag->netdev)
		return;

	primary_lag = ice_lag_find_primary(lag);
	if (info->linking) {
		lag->upper_netdev = info->upper_dev;
		/* If there is not already a primary interface in the LAG,
		 * then mark this one as primary.
		 */
		if (!primary_lag) {
			lag->primary = true;
			/* Configure primary's SWID to be shared */
			ice_lag_primary_swid(lag, true);
			primary_lag = lag;
		} else {
			u16 swid;

			swid = primary_lag->pf->hw.port_info->sw_id;
			ice_lag_set_swid(swid, lag, true);
			ice_lag_add_prune_list(primary_lag, lag->pf);
			ice_lag_cfg_drop_fltr(lag, true);
		}
		/* add filter for primary control packets */
		ice_lag_cfg_cp_fltr(lag, true);
	} else {
		if (!primary_lag && lag->primary)
			primary_lag = lag;

		if (!lag->primary) {
			ice_lag_set_swid(0, lag, false);
		} else {
			if (primary_lag && lag->primary) {
				ice_lag_primary_swid(lag, false);
				ice_lag_del_prune_list(primary_lag, lag->pf);
			}
		}
		/* remove filter for control packets */
		ice_lag_cfg_cp_fltr(lag, false);
	}
}

/**
 * ice_lag_monitor_link - monitor interfaces entering/leaving the aggregate
 * @lag: lag info struct
 * @ptr: opaque data containing notifier event
 *
 * This function only operates after a primary has been set.
 */
static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct ice_hw *prim_hw, *active_hw;
	struct net_device *event_netdev;
	struct ice_pf *pf;
	u8 prim_port;

	if (!lag->primary)
		return;

	event_netdev = netdev_notifier_info_to_dev(ptr);
	if (!netif_is_same_ice(lag->pf, event_netdev))
		return;

	pf = lag->pf;
	prim_hw = &pf->hw;
	prim_port = prim_hw->port_info->lport;

	info = (struct netdev_notifier_changeupper_info *)ptr;
	if (info->upper_dev != lag->upper_netdev)
		return;

	if (!info->linking) {
		/* Since there are only two interfaces allowed in SRIOV+LAG, if
		 * one port is leaving, then nodes need to be on primary
		 * interface.
		 */
		if (prim_port != lag->active_port &&
		    lag->active_port != ICE_LAG_INVALID_PORT) {
			active_hw = ice_lag_find_hw_by_lport(lag,
							     lag->active_port);
			ice_lag_reclaim_vf_nodes(lag, active_hw);
			lag->active_port = ICE_LAG_INVALID_PORT;
		}
	}
}
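/* Active-port tracking: bonding_info events carry slave.state, where zero
 * means the reporting port is the active member. The primary PF records the
 * active lport in lag->active_port, moves VF Tx scheduling nodes to a newly
 * active port, and moves them back to the primary when no port is active.
 */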
/**
 * ice_lag_monitor_active - main PF keep track of which port is active
 * @lag: lag info struct
 * @ptr: opaque data containing notifier event
 *
 * This function is for the primary PF to monitor changes in which port is
 * active and handle changes for SRIOV VF functionality
 */
static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
{
	struct net_device *event_netdev, *event_upper;
	struct netdev_notifier_bonding_info *info;
	struct netdev_bonding_info *bonding_info;
	struct ice_netdev_priv *event_np;
	struct ice_pf *pf, *event_pf;
	u8 prim_port, event_port;

	if (!lag->primary)
		return;

	pf = lag->pf;
	if (!pf)
		return;

	event_netdev = netdev_notifier_info_to_dev(ptr);
	rcu_read_lock();
	event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
	rcu_read_unlock();
	if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev)
		return;

	event_np = netdev_priv(event_netdev);
	event_pf = event_np->vsi->back;
	event_port = event_pf->hw.port_info->lport;
	prim_port = pf->hw.port_info->lport;

	info = (struct netdev_notifier_bonding_info *)ptr;
	bonding_info = &info->bonding_info;

	if (!bonding_info->slave.state) {
		/* if no port is currently active, then nodes and filters exist
		 * on primary port, check if we need to move them
		 */
		if (lag->active_port == ICE_LAG_INVALID_PORT) {
			if (event_port != prim_port)
				ice_lag_move_vf_nodes(lag, prim_port,
						      event_port);
			lag->active_port = event_port;
			return;
		}

		/* active port is already set and is current event port */
		if (lag->active_port == event_port)
			return;
		/* new active port */
		ice_lag_move_vf_nodes(lag, lag->active_port, event_port);
		lag->active_port = event_port;
	} else {
		/* port not set as currently active (e.g. new active port
		 * has already claimed the nodes and filters)
		 */
		if (lag->active_port != event_port)
			return;
		/* This is the case when neither port is active (both link down)
		 * Link down on the bond - set active port to invalid and move
		 * nodes and filters back to primary if not already there
		 */
		if (event_port != prim_port)
			ice_lag_move_vf_nodes(lag, event_port, prim_port);
		lag->active_port = ICE_LAG_INVALID_PORT;
	}
}

/**
 * ice_lag_chk_comp - evaluate bonded interface for feature support
 * @lag: lag info struct
 * @ptr: opaque data for netdev event info
 */
static bool
ice_lag_chk_comp(struct ice_lag *lag, void *ptr)
{
	struct net_device *event_netdev, *event_upper;
	struct netdev_notifier_bonding_info *info;
	struct netdev_bonding_info *bonding_info;
	struct list_head *tmp;
	struct device *dev;
	int count = 0;

	if (!lag->primary)
		return true;

	event_netdev = netdev_notifier_info_to_dev(ptr);
	rcu_read_lock();
	event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
	rcu_read_unlock();
	if (event_upper != lag->upper_netdev)
		return true;

	dev = ice_pf_to_dev(lag->pf);

	/* only supporting switchdev mode for SRIOV VF LAG.
	 * primary interface has to be in switchdev mode
	 */
	if (!ice_is_switchdev_running(lag->pf)) {
		dev_info(dev, "Primary interface not in switchdev mode - VF LAG disabled\n");
		return false;
	}

	info = (struct netdev_notifier_bonding_info *)ptr;
	bonding_info = &info->bonding_info;
	lag->bond_mode = bonding_info->master.bond_mode;
	if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP) {
		dev_info(dev, "Bond Mode not ACTIVE-BACKUP - VF LAG disabled\n");
		return false;
	}

	list_for_each(tmp, lag->netdev_head) {
		struct ice_dcbx_cfg *dcb_cfg, *peer_dcb_cfg;
		struct ice_lag_netdev_list *entry;
		struct ice_netdev_priv *peer_np;
		struct net_device *peer_netdev;
		struct ice_vsi *vsi, *peer_vsi;
		struct ice_pf *peer_pf;

		entry = list_entry(tmp, struct ice_lag_netdev_list, node);
		peer_netdev = entry->netdev;
		if (!netif_is_ice(peer_netdev)) {
			dev_info(dev, "Found %s non-ice netdev in LAG - VF LAG disabled\n",
				 netdev_name(peer_netdev));
			return false;
		}

		count++;
		if (count > 2) {
			dev_info(dev, "Found more than two netdevs in LAG - VF LAG disabled\n");
			return false;
		}

		peer_np = netdev_priv(peer_netdev);
		vsi = ice_get_main_vsi(lag->pf);
		peer_vsi = peer_np->vsi;
		if (lag->pf->pdev->bus != peer_vsi->back->pdev->bus ||
		    lag->pf->pdev->slot != peer_vsi->back->pdev->slot) {
			dev_info(dev, "Found %s on different device in LAG - VF LAG disabled\n",
				 netdev_name(peer_netdev));
			return false;
		}

		dcb_cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
		peer_dcb_cfg = &peer_vsi->port_info->qos_cfg.local_dcbx_cfg;
		if (memcmp(dcb_cfg, peer_dcb_cfg,
			   sizeof(struct ice_dcbx_cfg))) {
			dev_info(dev, "Found %s with different DCB in LAG - VF LAG disabled\n",
				 netdev_name(peer_netdev));
			return false;
		}

		peer_pf = peer_vsi->back;
		if (test_bit(ICE_FLAG_FW_LLDP_AGENT, peer_pf->flags)) {
			dev_warn(dev, "Found %s with FW LLDP agent active - VF LAG disabled\n",
				 netdev_name(peer_netdev));
			return false;
		}
	}

	return true;
}
/**
 * ice_lag_unregister - handle netdev unregister events
 * @lag: LAG info struct
 * @event_netdev: netdev struct for target of notifier event
 */
static void
ice_lag_unregister(struct ice_lag *lag, struct net_device *event_netdev)
{
	struct ice_netdev_priv *np;
	struct ice_pf *event_pf;
	struct ice_lag *p_lag;

	p_lag = ice_lag_find_primary(lag);
	np = netdev_priv(event_netdev);
	event_pf = np->vsi->back;

	if (p_lag) {
		if (p_lag->active_port != p_lag->pf->hw.port_info->lport &&
		    p_lag->active_port != ICE_LAG_INVALID_PORT) {
			struct ice_hw *active_hw;

			active_hw = ice_lag_find_hw_by_lport(lag,
							     p_lag->active_port);
			if (active_hw)
				ice_lag_reclaim_vf_nodes(p_lag, active_hw);
			lag->active_port = ICE_LAG_INVALID_PORT;
		}
	}

	/* primary processing for primary */
	if (lag->primary && lag->netdev == event_netdev)
		ice_lag_primary_swid(lag, false);

	/* primary processing for secondary */
	if (lag->primary && lag->netdev != event_netdev)
		ice_lag_del_prune_list(lag, event_pf);

	/* secondary processing for secondary */
	if (!lag->primary && lag->netdev == event_netdev)
		ice_lag_set_swid(0, lag, false);
}

/**
 * ice_lag_monitor_rdma - set and clear rdma functionality
 * @lag: pointer to lag struct
 * @ptr: opaque data for netdev event info
 */
static void
ice_lag_monitor_rdma(struct ice_lag *lag, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *netdev;

	info = ptr;
	netdev = netdev_notifier_info_to_dev(ptr);

	if (netdev != lag->netdev)
		return;

	if (info->linking)
		ice_clear_rdma_cap(lag->pf);
	else
		ice_set_rdma_cap(lag->pf);
}

/**
 * ice_lag_chk_disabled_bond - monitor interfaces entering/leaving disabled bond
 * @lag: lag info struct
 * @ptr: opaque data containing event
 *
 * as interfaces enter a bond - determine if the bond is currently
 * SRIOV LAG compliant and flag if not. As interfaces leave the
 * bond, reset their compliant status.
 */
static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct ice_lag *prim_lag;

	if (netdev != lag->netdev)
		return;

	if (info->linking) {
		prim_lag = ice_lag_find_primary(lag);
		if (prim_lag &&
		    !ice_is_feature_supported(prim_lag->pf, ICE_F_SRIOV_LAG)) {
			ice_clear_feature_support(lag->pf, ICE_F_SRIOV_LAG);
			netdev_info(netdev, "Interface added to non-compliant SRIOV LAG aggregate\n");
		}
	} else {
		ice_lag_init_feature_support_flag(lag->pf);
	}
}

/**
 * ice_lag_disable_sriov_bond - set members of bond as not supporting SRIOV LAG
 * @lag: primary interface's lag struct
 */
static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
{
	struct ice_netdev_priv *np;
	struct ice_pf *pf;

	np = netdev_priv(lag->netdev);
	pf = np->vsi->back;
	ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
}

/**
 * ice_lag_process_event - process a task assigned to the lag_wq
 * @work: pointer to work_struct
 */
static void ice_lag_process_event(struct work_struct *work)
{
	struct netdev_notifier_changeupper_info *info;
	struct ice_lag_work *lag_work;
	struct net_device *netdev;
	struct list_head *tmp, *n;
	struct ice_pf *pf;

	lag_work = container_of(work, struct ice_lag_work, lag_task);
	pf = lag_work->lag->pf;

	mutex_lock(&pf->lag_mutex);
	lag_work->lag->netdev_head = &lag_work->netdev_list.node;

	switch (lag_work->event) {
	case NETDEV_CHANGEUPPER:
		info = &lag_work->info.changeupper_info;
		ice_lag_chk_disabled_bond(lag_work->lag, info);
		if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) {
			ice_lag_monitor_link(lag_work->lag, info);
			ice_lag_changeupper_event(lag_work->lag, info);
			ice_lag_link_unlink(lag_work->lag, info);
		}
		ice_lag_monitor_rdma(lag_work->lag, info);
		break;
	case NETDEV_BONDING_INFO:
		if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) {
			if (!ice_lag_chk_comp(lag_work->lag,
					      &lag_work->info.bonding_info)) {
				netdev = lag_work->info.bonding_info.info.dev;
				ice_lag_disable_sriov_bond(lag_work->lag);
				ice_lag_unregister(lag_work->lag, netdev);
				goto lag_cleanup;
			}
			ice_lag_monitor_active(lag_work->lag,
					       &lag_work->info.bonding_info);
			ice_lag_cfg_pf_fltrs(lag_work->lag,
					     &lag_work->info.bonding_info);
		}
		ice_lag_info_event(lag_work->lag, &lag_work->info.bonding_info);
		break;
	case NETDEV_UNREGISTER:
		if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) {
			netdev = lag_work->info.bonding_info.info.dev;
			if ((netdev == lag_work->lag->netdev ||
			     lag_work->lag->primary) && lag_work->lag->bonded)
				ice_lag_unregister(lag_work->lag, netdev);
		}
		break;
	default:
		break;
	}

lag_cleanup:
	/* cleanup resources allocated for this work item */
	list_for_each_safe(tmp, n, &lag_work->netdev_list.node) {
		struct ice_lag_netdev_list *entry;

		entry = list_entry(tmp, struct ice_lag_netdev_list, node);
		list_del(&entry->node);
		kfree(entry);
	}
	lag_work->lag->netdev_head = NULL;

	mutex_unlock(&pf->lag_mutex);

	kfree(lag_work);
}
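/* Notifier handling is split into two stages: ice_lag_event_handler() runs
 * in notifier context, snapshots the notifier info and the bond's member
 * list (allocated GFP_ATOMIC under RCU) into an ice_lag_work item and queues
 * it on ice_lag_wq; ice_lag_process_event() then handles the event in
 * process context under pf->lag_mutex.
 */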
/**
 * ice_lag_event_handler - handle LAG events from netdev
 * @notif_blk: notifier block registered by this netdev
 * @event: event type
 * @ptr: opaque data containing notifier event
 */
static int
ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
		      void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct net_device *upper_netdev;
	struct ice_lag_work *lag_work;
	struct ice_lag *lag;

	if (!netif_is_ice(netdev))
		return NOTIFY_DONE;

	if (event != NETDEV_CHANGEUPPER && event != NETDEV_BONDING_INFO &&
	    event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	if (!(netdev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	lag = container_of(notif_blk, struct ice_lag, notif_block);
	if (!lag->netdev)
		return NOTIFY_DONE;

	if (!net_eq(dev_net(netdev), &init_net))
		return NOTIFY_DONE;

	/* This memory will be freed at the end of ice_lag_process_event */
	lag_work = kzalloc(sizeof(*lag_work), GFP_KERNEL);
	if (!lag_work)
		return -ENOMEM;

	lag_work->event_netdev = netdev;
	lag_work->lag = lag;
	lag_work->event = event;
	if (event == NETDEV_CHANGEUPPER) {
		struct netdev_notifier_changeupper_info *info;

		info = ptr;
		upper_netdev = info->upper_dev;
	} else {
		upper_netdev = netdev_master_upper_dev_get(netdev);
	}

	INIT_LIST_HEAD(&lag_work->netdev_list.node);
	if (upper_netdev) {
		struct ice_lag_netdev_list *nd_list;
		struct net_device *tmp_nd;

		rcu_read_lock();
		for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) {
			nd_list = kzalloc(sizeof(*nd_list), GFP_ATOMIC);
			if (!nd_list)
				break;

			nd_list->netdev = tmp_nd;
			list_add(&nd_list->node, &lag_work->netdev_list.node);
		}
		rcu_read_unlock();
	}

	switch (event) {
	case NETDEV_CHANGEUPPER:
		lag_work->info.changeupper_info =
			*((struct netdev_notifier_changeupper_info *)ptr);
		break;
	case NETDEV_BONDING_INFO:
		lag_work->info.bonding_info =
			*((struct netdev_notifier_bonding_info *)ptr);
		break;
	default:
		lag_work->info.notifier_info =
			*((struct netdev_notifier_info *)ptr);
		break;
	}

	INIT_WORK(&lag_work->lag_task, ice_lag_process_event);
	queue_work(ice_lag_wq, &lag_work->lag_task);

	return NOTIFY_DONE;
}

/**
 * ice_register_lag_handler - register LAG handler on netdev
 * @lag: LAG struct
 */
static int ice_register_lag_handler(struct ice_lag *lag)
{
	struct device *dev = ice_pf_to_dev(lag->pf);
	struct notifier_block *notif_blk;

	notif_blk = &lag->notif_block;

	if (!notif_blk->notifier_call) {
		notif_blk->notifier_call = ice_lag_event_handler;
		if (register_netdevice_notifier(notif_blk)) {
			notif_blk->notifier_call = NULL;
			dev_err(dev, "FAIL register LAG event handler!\n");
			return -EINVAL;
		}
		dev_dbg(dev, "LAG event handler registered\n");
	}
	return 0;
}

/**
 * ice_unregister_lag_handler - unregister LAG handler on netdev
 * @lag: LAG struct
 */
static void ice_unregister_lag_handler(struct ice_lag *lag)
{
	struct device *dev = ice_pf_to_dev(lag->pf);
	struct notifier_block *notif_blk;

	notif_blk = &lag->notif_block;
	if (notif_blk->notifier_call) {
		unregister_netdevice_notifier(notif_blk);
		dev_dbg(dev, "LAG event handler unregistered\n");
	}
}
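/* Recipe setup: ice_create_lag_recipe() allocates a recipe index and programs
 * it as a root recipe based on one of the template byte arrays at the top of
 * this file (ice_dflt_vsi_rcp or ice_lport_rcp). ice_init_lag() then maps
 * both new recipes into every profile that already uses the default VSI
 * lookup recipe (see the loop over ice_aq_get_recipe_to_profile()), which
 * presumably makes the LAG rules match the same packet profiles.
 */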
/**
 * ice_create_lag_recipe
 * @hw: pointer to HW struct
 * @rid: pointer to u16 to pass back recipe index
 * @base_recipe: recipe to base the new recipe on
 * @prio: priority for new recipe
 *
 * Return: 0 on success, negative error code on failure
 */
static int ice_create_lag_recipe(struct ice_hw *hw, u16 *rid,
				 const u8 *base_recipe, u8 prio)
{
	struct ice_aqc_recipe_data_elem *new_rcp;
	int err;

	err = ice_alloc_recipe(hw, rid);
	if (err)
		return err;

	new_rcp = kzalloc(ICE_RECIPE_LEN * ICE_MAX_NUM_RECIPES, GFP_KERNEL);
	if (!new_rcp)
		return -ENOMEM;

	memcpy(new_rcp, base_recipe, ICE_RECIPE_LEN);
	new_rcp->content.act_ctrl_fwd_priority = prio;
	new_rcp->content.rid = *rid | ICE_AQ_RECIPE_ID_IS_ROOT;
	new_rcp->recipe_indx = *rid;
	bitmap_zero((unsigned long *)new_rcp->recipe_bitmap,
		    ICE_MAX_NUM_RECIPES);
	set_bit(*rid, (unsigned long *)new_rcp->recipe_bitmap);

	err = ice_aq_add_recipe(hw, new_rcp, 1, NULL);
	if (err)
		*rid = 0;

	kfree(new_rcp);
	return err;
}

/**
 * ice_lag_move_vf_nodes_tc_sync - move a VF's nodes for a tc during reset
 * @lag: primary interface's lag struct
 * @dest_hw: HW struct for destination's interface
 * @vsi_num: VSI index in PF space
 * @tc: traffic class to move
 */
static void
ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
			      u16 vsi_num, u8 tc)
{
	DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
	struct device *dev = ice_pf_to_dev(lag->pf);
	u16 numq, valq, num_moved, qbuf_size;
	u16 buf_size = __struct_size(buf);
	struct ice_aqc_cfg_txqs_buf *qbuf;
	struct ice_sched_node *n_prt;
	__le32 teid, parent_teid;
	struct ice_vsi_ctx *ctx;
	struct ice_hw *hw;
	u32 tmp_teid;

	hw = &lag->pf->hw;
	ctx = ice_get_vsi_ctx(hw, vsi_num);
	if (!ctx) {
		dev_warn(dev, "LAG rebuild failed after reset due to VSI Context failure\n");
		return;
	}

	if (!ctx->sched.vsi_node[tc])
		return;

	numq = ctx->num_lan_q_entries[tc];
	teid = ctx->sched.vsi_node[tc]->info.node_teid;
	tmp_teid = le32_to_cpu(teid);
	parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;

	if (!tmp_teid || !numq)
		return;

	if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, true))
		dev_dbg(dev, "Problem suspending traffic during reset rebuild\n");

	/* reconfig queues for new port */
	qbuf_size = struct_size(qbuf, queue_info, numq);
	qbuf = kzalloc(qbuf_size, GFP_KERNEL);
	if (!qbuf) {
		dev_warn(dev, "Failure allocating VF queue recfg buffer for reset rebuild\n");
		goto resume_sync;
	}

	/* add the per queue info for the reconfigure command buffer */
	valq = ice_lag_qbuf_recfg(hw, qbuf, vsi_num, numq, tc);
	if (!valq) {
		dev_warn(dev, "Failure to reconfig queues for LAG reset rebuild\n");
		goto sync_none;
	}

	if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq, hw->port_info->lport,
			       dest_hw->port_info->lport, NULL)) {
		dev_warn(dev, "Failure to configure queues for LAG reset rebuild\n");
		goto sync_qerr;
	}

sync_none:
	kfree(qbuf);

	/* find parent in destination tree */
	n_prt = ice_lag_get_sched_parent(dest_hw, tc);
	if (!n_prt)
		goto resume_sync;

	/* Move node to new parent */
	buf->hdr.src_parent_teid = parent_teid;
/**
 * ice_lag_move_vf_nodes_tc_sync - move a VF's nodes for a tc during reset
 * @lag: primary interface's lag struct
 * @dest_hw: HW struct for the destination interface
 * @vsi_num: VSI index in PF space
 * @tc: traffic class to move
 */
static void
ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
			      u16 vsi_num, u8 tc)
{
	DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
	struct device *dev = ice_pf_to_dev(lag->pf);
	u16 numq, valq, num_moved, qbuf_size;
	u16 buf_size = __struct_size(buf);
	struct ice_aqc_cfg_txqs_buf *qbuf;
	struct ice_sched_node *n_prt;
	__le32 teid, parent_teid;
	struct ice_vsi_ctx *ctx;
	struct ice_hw *hw;
	u32 tmp_teid;

	hw = &lag->pf->hw;
	ctx = ice_get_vsi_ctx(hw, vsi_num);
	if (!ctx) {
		dev_warn(dev, "LAG rebuild failed after reset due to VSI Context failure\n");
		return;
	}

	if (!ctx->sched.vsi_node[tc])
		return;

	numq = ctx->num_lan_q_entries[tc];
	teid = ctx->sched.vsi_node[tc]->info.node_teid;
	tmp_teid = le32_to_cpu(teid);
	parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;

	if (!tmp_teid || !numq)
		return;

	if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, true))
		dev_dbg(dev, "Problem suspending traffic during reset rebuild\n");

	/* reconfig queues for new port */
	qbuf_size = struct_size(qbuf, queue_info, numq);
	qbuf = kzalloc(qbuf_size, GFP_KERNEL);
	if (!qbuf) {
		dev_warn(dev, "Failure allocating VF queue recfg buffer for reset rebuild\n");
		goto resume_sync;
	}

	/* add the per queue info for the reconfigure command buffer */
	valq = ice_lag_qbuf_recfg(hw, qbuf, vsi_num, numq, tc);
	if (!valq) {
		dev_warn(dev, "Failure to reconfig queues for LAG reset rebuild\n");
		goto sync_none;
	}

	if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq, hw->port_info->lport,
			       dest_hw->port_info->lport, NULL)) {
		dev_warn(dev, "Failure to configure queues for LAG reset rebuild\n");
		goto sync_qerr;
	}

sync_none:
	kfree(qbuf);

	/* find parent in destination tree */
	n_prt = ice_lag_get_sched_parent(dest_hw, tc);
	if (!n_prt)
		goto resume_sync;

	/* Move node to new parent */
	buf->hdr.src_parent_teid = parent_teid;
	buf->hdr.dest_parent_teid = n_prt->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(1);
	buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
	buf->teid[0] = teid;

	if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
		dev_warn(dev, "Failure to move VF nodes for LAG reset rebuild\n");
	else
		ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);

	goto resume_sync;

sync_qerr:
	kfree(qbuf);

resume_sync:
	if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, false))
		dev_warn(dev, "Problem restarting traffic for LAG node reset rebuild\n");
}

/**
 * ice_lag_move_vf_nodes_sync - move VF nodes to the active interface
 * @lag: primary interface's lag struct
 * @dest_hw: HW struct of the currently active (destination) interface
 *
 * This function is used in a reset context, outside of event handling,
 * to move the VF nodes to the secondary interface when that interface
 * is the active interface during a reset rebuild.
 */
static void
ice_lag_move_vf_nodes_sync(struct ice_lag *lag, struct ice_hw *dest_hw)
{
	struct ice_pf *pf;
	int i, tc;

	if (!lag->primary || !dest_hw)
		return;

	pf = lag->pf;
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
				   pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
			ice_for_each_traffic_class(tc)
				ice_lag_move_vf_nodes_tc_sync(lag, dest_hw, i,
							      tc);
}
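/*
 * The two _sync helpers above are driven from the reset path: when a bonded
 * interface is rebuilt, ice_lag_rebuild() (below) holds pf->lag_mutex and,
 * if this secondary port is currently the bond's active port, pulls the
 * primary's VF nodes over with ice_lag_move_vf_nodes_sync().  Per traffic
 * class the move is: suspend the VSI scheduling node, point its Tx queues
 * at the destination lport via ice_aq_cfg_lan_txq(), re-parent the node
 * with ice_aq_move_sched_elems(), then resume it.
 */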
/**
 * ice_init_lag - initialize support for LAG
 * @pf: PF struct
 *
 * Allocate memory for the LAG struct and initialize its elements.
 * Memory will be freed in ice_deinit_lag().
 */
int ice_init_lag(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_lag *lag;
	struct ice_vsi *vsi;
	u64 recipe_bits = 0;
	int n, err;

	ice_lag_init_feature_support_flag(pf);
	if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
		return 0;

	pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
	if (!pf->lag)
		return -ENOMEM;
	lag = pf->lag;

	vsi = ice_get_main_vsi(pf);
	if (!vsi) {
		dev_err(dev, "couldn't get main vsi, link aggregation init fail\n");
		err = -EIO;
		goto lag_error;
	}

	lag->pf = pf;
	lag->netdev = vsi->netdev;
	lag->role = ICE_LAG_NONE;
	lag->active_port = ICE_LAG_INVALID_PORT;
	lag->bonded = false;
	lag->upper_netdev = NULL;
	lag->notif_block.notifier_call = NULL;

	err = ice_register_lag_handler(lag);
	if (err) {
		dev_warn(dev, "INIT LAG: Failed to register event handler\n");
		goto lag_error;
	}

	err = ice_create_lag_recipe(&pf->hw, &lag->pf_recipe,
				    ice_dflt_vsi_rcp, 1);
	if (err)
		goto lag_error;

	err = ice_create_lag_recipe(&pf->hw, &lag->lport_recipe,
				    ice_lport_rcp, 3);
	if (err)
		goto free_rcp_res;

	/* associate recipes to profiles */
	for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
		err = ice_aq_get_recipe_to_profile(&pf->hw, n,
						   (u8 *)&recipe_bits, NULL);
		if (err)
			continue;

		if (recipe_bits & BIT(ICE_SW_LKUP_DFLT)) {
			recipe_bits |= BIT(lag->pf_recipe) |
				       BIT(lag->lport_recipe);
			ice_aq_map_recipe_to_profile(&pf->hw, n,
						     (u8 *)&recipe_bits, NULL);
		}
	}

	ice_display_lag_info(lag);

	dev_dbg(dev, "INIT LAG complete\n");
	return 0;

free_rcp_res:
	ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
			&pf->lag->pf_recipe);
lag_error:
	kfree(lag);
	pf->lag = NULL;
	return err;
}

/**
 * ice_deinit_lag - Clean up LAG
 * @pf: PF struct
 *
 * Clean up kernel LAG info and free memory.
 * This function is meant to only be called on driver remove/shutdown.
 */
void ice_deinit_lag(struct ice_pf *pf)
{
	struct ice_lag *lag;

	lag = pf->lag;

	if (!lag)
		return;

	if (lag->pf)
		ice_unregister_lag_handler(lag);

	flush_workqueue(ice_lag_wq);

	ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
			&pf->lag->pf_recipe);
	ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
			&pf->lag->lport_recipe);

	kfree(lag);

	pf->lag = NULL;
}

/**
 * ice_lag_rebuild - rebuild lag resources after reset
 * @pf: pointer to local pf struct
 *
 * PF resets are promoted to CORER resets when the interface is in an
 * aggregate. This means that we need to rebuild the PF resources for the
 * interface. Since this happens outside the normal event processing, we
 * need to acquire the lag lock.
 *
 * This function will also evaluate the VF resources if this is the primary
 * interface.
 */
void ice_lag_rebuild(struct ice_pf *pf)
{
	struct ice_lag_netdev_list ndlist;
	struct ice_lag *lag, *prim_lag;
	u8 act_port, loc_port;

	if (!pf->lag || !pf->lag->bonded)
		return;

	mutex_lock(&pf->lag_mutex);

	lag = pf->lag;
	if (lag->primary) {
		prim_lag = lag;
	} else {
		ice_lag_build_netdev_list(lag, &ndlist);
		prim_lag = ice_lag_find_primary(lag);
	}

	if (!prim_lag) {
		dev_dbg(ice_pf_to_dev(pf), "No primary interface in aggregate, can't rebuild\n");
		goto lag_rebuild_out;
	}

	act_port = prim_lag->active_port;
	loc_port = lag->pf->hw.port_info->lport;

	/* configure SWID for this port */
	if (lag->primary) {
		ice_lag_primary_swid(lag, true);
	} else {
		ice_lag_set_swid(prim_lag->pf->hw.port_info->sw_id, lag, true);
		ice_lag_add_prune_list(prim_lag, pf);
		if (act_port == loc_port)
			ice_lag_move_vf_nodes_sync(prim_lag, &pf->hw);
	}

	ice_lag_cfg_cp_fltr(lag, true);

	if (lag->pf_rule_id)
		if (ice_lag_cfg_dflt_fltr(lag, true))
			dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");

	ice_clear_rdma_cap(pf);
lag_rebuild_out:
	ice_lag_destroy_netdev_list(lag, &ndlist);
	mutex_unlock(&pf->lag_mutex);
}

/**
 * ice_lag_is_switchdev_running - query switchdev state on aggregate members
 * @pf: pointer to PF structure
 *
 * Check if switchdev is running on any of the interfaces connected to lag.
 */
bool ice_lag_is_switchdev_running(struct ice_pf *pf)
{
	struct ice_lag *lag = pf->lag;
	struct net_device *tmp_nd;

	if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag)
		return false;

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
		struct ice_netdev_priv *priv = netdev_priv(tmp_nd);

		if (!netif_is_ice(tmp_nd) || !priv || !priv->vsi ||
		    !priv->vsi->back)
			continue;

		if (ice_is_switchdev_running(priv->vsi->back)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
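/*
 * Usage note: ice_lag_is_switchdev_running() is non-static so it can be
 * called from elsewhere in the driver. An illustrative (not verbatim)
 * caller that must not proceed while any aggregate member runs switchdev
 * might do:
 *
 *	if (ice_lag_is_switchdev_running(pf))
 *		return -EOPNOTSUPP;
 */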