// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023, Intel Corporation. */

#include "ice.h"
#include "ice_eswitch_br.h"
#include "ice_repr.h"
#include "ice_switch.h"
#include "ice_vlan.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_trace.h"

#define ICE_ESW_BRIDGE_UPDATE_INTERVAL msecs_to_jiffies(1000)

static const struct rhashtable_params ice_fdb_ht_params = {
	.key_offset = offsetof(struct ice_esw_br_fdb_entry, data),
	.key_len = sizeof(struct ice_esw_br_fdb_data),
	.head_offset = offsetof(struct ice_esw_br_fdb_entry, ht_node),
	.automatic_shrinking = true,
};

static bool ice_eswitch_br_is_dev_valid(const struct net_device *dev)
{
	/* Accept only PF netdev, PRs and LAG */
	return ice_is_port_repr_netdev(dev) || netif_is_ice(dev) ||
	       netif_is_lag_master(dev);
}

static struct net_device *
ice_eswitch_br_get_uplink_from_lag(struct net_device *lag_dev)
{
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (netif_is_ice(lower))
			return lower;
	}

	return NULL;
}

static struct ice_esw_br_port *
ice_eswitch_br_netdev_to_port(struct net_device *dev)
{
	if (ice_is_port_repr_netdev(dev)) {
		struct ice_repr *repr = ice_netdev_to_repr(dev);

		return repr->br_port;
	} else if (netif_is_ice(dev) || netif_is_lag_master(dev)) {
		struct net_device *ice_dev;
		struct ice_pf *pf;

		if (netif_is_lag_master(dev))
			ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
		else
			ice_dev = dev;

		if (!ice_dev)
			return NULL;

		pf = ice_netdev_to_pf(ice_dev);

		return pf->br_port;
	}

	return NULL;
}

static void
ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info,
				  u8 pf_id, u16 vf_vsi_idx)
{
	rule_info->sw_act.vsi_handle = vf_vsi_idx;
	rule_info->sw_act.flag |= ICE_FLTR_RX;
	rule_info->sw_act.src = pf_id;
	rule_info->priority = 2;
}

static void
ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info,
				 u16 pf_vsi_idx)
{
	rule_info->sw_act.vsi_handle = pf_vsi_idx;
	rule_info->sw_act.flag |= ICE_FLTR_TX;
	rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
	rule_info->flags_info.act_valid = true;
	rule_info->priority = 2;
}

static int
ice_eswitch_br_rule_delete(struct ice_hw *hw, struct ice_rule_query_data *rule)
{
	int err;

	if (!rule)
		return -EINVAL;

	err = ice_rem_adv_rule_by_id(hw, rule);
	kfree(rule);

	return err;
}

static u16
ice_eswitch_br_get_lkups_cnt(u16 vid)
{
	return ice_eswitch_br_is_vid_valid(vid) ? 2 : 1;
}

static void
ice_eswitch_br_add_vlan_lkup(struct ice_adv_lkup_elem *list, u16 vid)
{
	if (ice_eswitch_br_is_vid_valid(vid)) {
		list[1].type = ICE_VLAN_OFOS;
		list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK);
		list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
	}
}

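/* Build the "forward" half of an offloaded FDB entry: a rule matching the
 * destination MAC (plus the VLAN ID when VLAN filtering supplies one) that
 * forwards hits to the bridge port's VSI. Direction depends on the port
 * type: egress (TX) towards the uplink VSI, ingress (RX, sourced from the
 * PF) towards a VF representor's VSI.
 */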
static struct ice_rule_query_data *
ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type,
			       const unsigned char *mac, u16 vid)
{
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data *rule;
	struct ice_adv_lkup_elem *list;
	u16 lkups_cnt;
	int err;

	lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return ERR_PTR(-ENOMEM);

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list) {
		err = -ENOMEM;
		goto err_list_alloc;
	}

	switch (port_type) {
	case ICE_ESWITCH_BR_UPLINK_PORT:
		ice_eswitch_br_egress_rule_setup(&rule_info, vsi_idx);
		break;
	case ICE_ESWITCH_BR_VF_REPR_PORT:
		ice_eswitch_br_ingress_rule_setup(&rule_info, hw->pf_id,
						  vsi_idx);
		break;
	default:
		err = -EINVAL;
		goto err_add_rule;
	}

	list[0].type = ICE_MAC_OFOS;
	ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac);
	eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr);

	ice_eswitch_br_add_vlan_lkup(list, vid);

	rule_info.need_pass_l2 = true;

	rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
	if (err)
		goto err_add_rule;

	kfree(list);

	return rule;

err_add_rule:
	kfree(list);
err_list_alloc:
	kfree(rule);

	return ERR_PTR(err);
}

static struct ice_rule_query_data *
ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
				 const unsigned char *mac, u16 vid)
{
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data *rule;
	struct ice_adv_lkup_elem *list;
	int err = -ENOMEM;
	u16 lkups_cnt;

	lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		goto err_exit;

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		goto err_list_alloc;

	list[0].type = ICE_MAC_OFOS;
	ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
	eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);

	ice_eswitch_br_add_vlan_lkup(list, vid);

	rule_info.allow_pass_l2 = true;
	rule_info.sw_act.vsi_handle = vsi_idx;
	rule_info.sw_act.fltr_act = ICE_NOP;
	rule_info.priority = 2;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
	if (err)
		goto err_add_rule;

	kfree(list);

	return rule;

err_add_rule:
	kfree(list);
err_list_alloc:
	kfree(rule);
err_exit:
	return ERR_PTR(err);
}

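/* An offloaded FDB entry maps to a pair of advanced switch rules: a
 * forward rule that matches on the destination MAC/VID and redirects to
 * the learned port's VSI, and a guard rule that matches on the source
 * MAC/VID with an ICE_NOP action and allow_pass_l2 set. The two rules are
 * created and deleted together as a single ice_esw_br_flow.
 */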
static struct ice_esw_br_flow *
ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx,
			   int port_type, const unsigned char *mac, u16 vid)
{
	struct ice_rule_query_data *fwd_rule, *guard_rule;
	struct ice_esw_br_flow *flow;
	int err;

	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac,
						  vid);
	err = PTR_ERR_OR_ZERO(fwd_rule);
	if (err) {
		dev_err(dev, "Failed to create eswitch bridge %sgress forward rule, err: %d\n",
			port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
			err);
		goto err_fwd_rule;
	}

	guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac, vid);
	err = PTR_ERR_OR_ZERO(guard_rule);
	if (err) {
		dev_err(dev, "Failed to create eswitch bridge %sgress guard rule, err: %d\n",
			port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
			err);
		goto err_guard_rule;
	}

	flow->fwd_rule = fwd_rule;
	flow->guard_rule = guard_rule;

	return flow;

err_guard_rule:
	ice_eswitch_br_rule_delete(hw, fwd_rule);
err_fwd_rule:
	kfree(flow);

	return ERR_PTR(err);
}

static struct ice_esw_br_fdb_entry *
ice_eswitch_br_fdb_find(struct ice_esw_br *bridge, const unsigned char *mac,
			u16 vid)
{
	struct ice_esw_br_fdb_data data = {
		.vid = vid,
	};

	ether_addr_copy(data.addr, mac);
	return rhashtable_lookup_fast(&bridge->fdb_ht, &data,
				      ice_fdb_ht_params);
}

static void
ice_eswitch_br_flow_delete(struct ice_pf *pf, struct ice_esw_br_flow *flow)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_eswitch_br_rule_delete(&pf->hw, flow->fwd_rule);
	if (err)
		dev_err(dev, "Failed to delete FDB forward rule, err: %d\n",
			err);

	err = ice_eswitch_br_rule_delete(&pf->hw, flow->guard_rule);
	if (err)
		dev_err(dev, "Failed to delete FDB guard rule, err: %d\n",
			err);

	kfree(flow);
}

static struct ice_esw_br_vlan *
ice_esw_br_port_vlan_lookup(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
{
	struct ice_pf *pf = bridge->br_offloads->pf;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_esw_br_port *port;
	struct ice_esw_br_vlan *vlan;

	port = xa_load(&bridge->ports, vsi_idx);
	if (!port) {
		dev_info(dev, "Bridge port lookup failed (vsi=%u)\n", vsi_idx);
		return ERR_PTR(-EINVAL);
	}

	vlan = xa_load(&port->vlans, vid);
	if (!vlan) {
		dev_info(dev, "Bridge port vlan metadata lookup failed (vsi=%u)\n",
			 vsi_idx);
		return ERR_PTR(-EINVAL);
	}

	return vlan;
}

static void
ice_eswitch_br_fdb_entry_delete(struct ice_esw_br *bridge,
				struct ice_esw_br_fdb_entry *fdb_entry)
{
	struct ice_pf *pf = bridge->br_offloads->pf;

	rhashtable_remove_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
			       ice_fdb_ht_params);
	list_del(&fdb_entry->list);

	ice_eswitch_br_flow_delete(pf, fdb_entry->flow);

	kfree(fdb_entry);
}

static void
ice_eswitch_br_fdb_offload_notify(struct net_device *dev,
				  const unsigned char *mac, u16 vid,
				  unsigned long val)
{
	struct switchdev_notifier_fdb_info fdb_info = {
		.addr = mac,
		.vid = vid,
		.offloaded = true,
	};

	call_switchdev_notifiers(val, dev, &fdb_info.info, NULL);
}

static void
ice_eswitch_br_fdb_entry_notify_and_cleanup(struct ice_esw_br *bridge,
					    struct ice_esw_br_fdb_entry *entry)
{
	if (!(entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER))
		ice_eswitch_br_fdb_offload_notify(entry->dev, entry->data.addr,
						  entry->data.vid,
						  SWITCHDEV_FDB_DEL_TO_BRIDGE);
	ice_eswitch_br_fdb_entry_delete(bridge, entry);
}

static void
ice_eswitch_br_fdb_entry_find_and_delete(struct ice_esw_br *bridge,
					 const unsigned char *mac, u16 vid)
{
	struct ice_pf *pf = bridge->br_offloads->pf;
	struct ice_esw_br_fdb_entry *fdb_entry;
	struct device *dev = ice_pf_to_dev(pf);

	fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
	if (!fdb_entry) {
		dev_err(dev, "FDB entry with mac: %pM and vid: %u not found\n",
			mac, vid);
		return;
	}

	trace_ice_eswitch_br_fdb_entry_find_and_delete(fdb_entry);
	ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
}

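/* Learn/offload path, called under rtnl from the deferred FDB work. Any
 * stale entry for the same MAC/VID is replaced, a fwd/guard rule pair is
 * programmed in HW, and the entry is tracked in both the FDB hashtable
 * and the bridge's ageing list. Dynamically learned entries are announced
 * with SWITCHDEV_FDB_ADD_TO_BRIDGE, user-added ones with
 * SWITCHDEV_FDB_OFFLOADED.
 */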
static void
ice_eswitch_br_fdb_entry_create(struct net_device *netdev,
				struct ice_esw_br_port *br_port,
				bool added_by_user,
				const unsigned char *mac, u16 vid)
{
	struct ice_esw_br *bridge = br_port->bridge;
	struct ice_pf *pf = bridge->br_offloads->pf;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_esw_br_fdb_entry *fdb_entry;
	struct ice_esw_br_flow *flow;
	struct ice_esw_br_vlan *vlan;
	struct ice_hw *hw = &pf->hw;
	unsigned long event;
	int err;

	/* untagged filtering is not yet supported */
	if (!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) && vid)
		return;

	if (bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) {
		vlan = ice_esw_br_port_vlan_lookup(bridge, br_port->vsi_idx,
						   vid);
		if (IS_ERR(vlan)) {
			dev_err(dev, "VLAN lookup failed, err: %ld\n",
				PTR_ERR(vlan));
			return;
		}
	}

	fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
	if (fdb_entry)
		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);

	fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
	if (!fdb_entry) {
		err = -ENOMEM;
		goto err_exit;
	}

	flow = ice_eswitch_br_flow_create(dev, hw, br_port->vsi_idx,
					  br_port->type, mac, vid);
	if (IS_ERR(flow)) {
		err = PTR_ERR(flow);
		goto err_add_flow;
	}

	ether_addr_copy(fdb_entry->data.addr, mac);
	fdb_entry->data.vid = vid;
	fdb_entry->br_port = br_port;
	fdb_entry->flow = flow;
	fdb_entry->dev = netdev;
	fdb_entry->last_use = jiffies;
	event = SWITCHDEV_FDB_ADD_TO_BRIDGE;

	if (added_by_user) {
		fdb_entry->flags |= ICE_ESWITCH_BR_FDB_ADDED_BY_USER;
		event = SWITCHDEV_FDB_OFFLOADED;
	}

	err = rhashtable_insert_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
				     ice_fdb_ht_params);
	if (err)
		goto err_fdb_insert;

	list_add(&fdb_entry->list, &bridge->fdb_list);
	trace_ice_eswitch_br_fdb_entry_create(fdb_entry);

	ice_eswitch_br_fdb_offload_notify(netdev, mac, vid, event);

	return;

err_fdb_insert:
	ice_eswitch_br_flow_delete(pf, flow);
err_add_flow:
	kfree(fdb_entry);
err_exit:
	dev_err(dev, "Failed to create fdb entry, err: %d\n", err);
}

static void
ice_eswitch_br_fdb_work_dealloc(struct ice_esw_br_fdb_work *fdb_work)
{
	kfree(fdb_work->fdb_info.addr);
	kfree(fdb_work);
}

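/* Deferred handler for FDB add/del notifications. Runs from the ordered
 * bridge workqueue with rtnl held, so it may sleep and safely modify
 * bridge state; the netdev reference taken at queueing time is dropped
 * here, together with the work item itself.
 */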
static void
ice_eswitch_br_fdb_event_work(struct work_struct *work)
{
	struct ice_esw_br_fdb_work *fdb_work = ice_work_to_fdb_work(work);
	bool added_by_user = fdb_work->fdb_info.added_by_user;
	const unsigned char *mac = fdb_work->fdb_info.addr;
	u16 vid = fdb_work->fdb_info.vid;
	struct ice_esw_br_port *br_port;

	rtnl_lock();

	br_port = ice_eswitch_br_netdev_to_port(fdb_work->dev);
	if (!br_port)
		goto err_exit;

	switch (fdb_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		ice_eswitch_br_fdb_entry_create(fdb_work->dev, br_port,
						added_by_user, mac, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		ice_eswitch_br_fdb_entry_find_and_delete(br_port->bridge,
							 mac, vid);
		break;
	default:
		goto err_exit;
	}

err_exit:
	rtnl_unlock();
	dev_put(fdb_work->dev);
	ice_eswitch_br_fdb_work_dealloc(fdb_work);
}

static struct ice_esw_br_fdb_work *
ice_eswitch_br_fdb_work_alloc(struct switchdev_notifier_fdb_info *fdb_info,
			      struct net_device *dev,
			      unsigned long event)
{
	struct ice_esw_br_fdb_work *work;
	unsigned char *mac;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&work->work, ice_eswitch_br_fdb_event_work);
	memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));

	mac = kzalloc(ETH_ALEN, GFP_ATOMIC);
	if (!mac) {
		kfree(work);
		return ERR_PTR(-ENOMEM);
	}

	ether_addr_copy(mac, fdb_info->addr);
	work->fdb_info.addr = mac;
	work->event = event;
	work->dev = dev;

	return work;
}

static int
ice_eswitch_br_switchdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct ice_esw_br_offloads *br_offloads;
	struct ice_esw_br_fdb_work *work;
	struct netlink_ext_ack *extack;
	struct net_device *upper;

	br_offloads = ice_nb_to_br_offloads(nb, switchdev_nb);
	extack = switchdev_notifier_info_to_extack(ptr);

	upper = netdev_master_upper_dev_get_rcu(dev);
	if (!upper)
		return NOTIFY_DONE;

	if (!netif_is_bridge_master(upper))
		return NOTIFY_DONE;

	if (!ice_eswitch_br_is_dev_valid(dev))
		return NOTIFY_DONE;

	if (!ice_eswitch_br_netdev_to_port(dev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = container_of(info, typeof(*fdb_info), info);

		work = ice_eswitch_br_fdb_work_alloc(fdb_info, dev, event);
		if (IS_ERR(work)) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to init switchdev fdb work");
			return notifier_from_errno(PTR_ERR(work));
		}
		dev_hold(dev);

		queue_work(br_offloads->wq, &work->work);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
{
	struct ice_esw_br_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
}

static void
ice_eswitch_br_vlan_filtering_set(struct ice_esw_br *bridge, bool enable)
{
	if (enable == !!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING))
		return;

	ice_eswitch_br_fdb_flush(bridge);
	if (enable)
		bridge->flags |= ICE_ESWITCH_BR_VLAN_FILTERING;
	else
		bridge->flags &= ~ICE_ESWITCH_BR_VLAN_FILTERING;
}

static void
ice_eswitch_br_clear_pvid(struct ice_esw_br_port *port)
{
	struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, port->pvid, 0);
	struct ice_vsi_vlan_ops *vlan_ops;

	vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);

	vlan_ops->del_vlan(port->vsi, &port_vlan);
	vlan_ops->clear_port_vlan(port->vsi);

	ice_vf_vsi_disable_port_vlan(port->vsi);

	port->pvid = 0;
}

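/* Drop every FDB entry learned on the VLAN's vid, remove the VLAN from
 * the port's xarray and, if it was acting as the port VLAN (pvid), clear
 * the pvid configuration from the VSI as well.
 */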
static void
ice_eswitch_br_vlan_cleanup(struct ice_esw_br_port *port,
			    struct ice_esw_br_vlan *vlan)
{
	struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
	struct ice_esw_br *bridge = port->bridge;

	trace_ice_eswitch_br_vlan_cleanup(vlan);

	list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
		if (vlan->vid == fdb_entry->data.vid)
			ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
	}

	xa_erase(&port->vlans, vlan->vid);
	if (port->pvid == vlan->vid)
		ice_eswitch_br_clear_pvid(port);
	kfree(vlan);
}

static void ice_eswitch_br_port_vlans_flush(struct ice_esw_br_port *port)
{
	struct ice_esw_br_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		ice_eswitch_br_vlan_cleanup(port, vlan);
}

static int
ice_eswitch_br_set_pvid(struct ice_esw_br_port *port,
			struct ice_esw_br_vlan *vlan)
{
	struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, vlan->vid, 0);
	struct device *dev = ice_pf_to_dev(port->vsi->back);
	struct ice_vsi_vlan_ops *vlan_ops;
	int err;

	if (port->pvid == vlan->vid || vlan->vid == 1)
		return 0;

	/* Setting a port VLAN on the uplink isn't supported by HW */
	if (port->type == ICE_ESWITCH_BR_UPLINK_PORT)
		return -EOPNOTSUPP;

	if (port->pvid) {
		dev_info(dev,
			 "Port VLAN (vsi=%u, vid=%u) already exists on the port, remove it before adding a new one\n",
			 port->vsi_idx, port->pvid);
		return -EEXIST;
	}

	ice_vf_vsi_enable_port_vlan(port->vsi);

	vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
	err = vlan_ops->set_port_vlan(port->vsi, &port_vlan);
	if (err)
		return err;

	err = vlan_ops->add_vlan(port->vsi, &port_vlan);
	if (err)
		return err;

	ice_eswitch_br_port_vlans_flush(port);
	port->pvid = vlan->vid;

	return 0;
}

static struct ice_esw_br_vlan *
ice_eswitch_br_vlan_create(u16 vid, u16 flags, struct ice_esw_br_port *port)
{
	struct device *dev = ice_pf_to_dev(port->vsi->back);
	struct ice_esw_br_vlan *vlan;
	int err;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return ERR_PTR(-ENOMEM);

	vlan->vid = vid;
	vlan->flags = flags;
	if ((flags & BRIDGE_VLAN_INFO_PVID) &&
	    (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		err = ice_eswitch_br_set_pvid(port, vlan);
		if (err)
			goto err_set_pvid;
	} else if ((flags & BRIDGE_VLAN_INFO_PVID) ||
		   (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		dev_info(dev, "VLAN push and pop are supported only simultaneously\n");
		err = -EOPNOTSUPP;
		goto err_set_pvid;
	}

	err = xa_insert(&port->vlans, vlan->vid, vlan, GFP_KERNEL);
	if (err)
		goto err_insert;

	trace_ice_eswitch_br_vlan_create(vlan);

	return vlan;

err_insert:
	if (port->pvid)
		ice_eswitch_br_clear_pvid(port);
err_set_pvid:
	kfree(vlan);
	return ERR_PTR(err);
}

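/* switchdev VLAN object add handler. Trunk VLANs cannot be added while a
 * port VLAN (pvid) is configured; an existing entry with different flags
 * is torn down and re-created with the new flags.
 */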
static int
ice_eswitch_br_port_vlan_add(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid,
			     u16 flags, struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *port;
	struct ice_esw_br_vlan *vlan;

	port = xa_load(&bridge->ports, vsi_idx);
	if (!port)
		return -EINVAL;

	if (port->pvid) {
		dev_info(ice_pf_to_dev(port->vsi->back),
			 "Port VLAN (vsi=%u, vid=%u) exists on the port, remove it to add trunk VLANs\n",
			 port->vsi_idx, port->pvid);
		return -EEXIST;
	}

	vlan = xa_load(&port->vlans, vid);
	if (vlan) {
		if (vlan->flags == flags)
			return 0;

		ice_eswitch_br_vlan_cleanup(port, vlan);
	}

	vlan = ice_eswitch_br_vlan_create(vid, flags, port);
	if (IS_ERR(vlan)) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to create VLAN entry, vid: %u, vsi: %u",
				       vid, vsi_idx);
		return PTR_ERR(vlan);
	}

	return 0;
}

static void
ice_eswitch_br_port_vlan_del(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
{
	struct ice_esw_br_port *port;
	struct ice_esw_br_vlan *vlan;

	port = xa_load(&bridge->ports, vsi_idx);
	if (!port)
		return;

	vlan = xa_load(&port->vlans, vid);
	if (!vlan)
		return;

	ice_eswitch_br_vlan_cleanup(port, vlan);
}

static int
ice_eswitch_br_port_obj_add(struct net_device *netdev, const void *ctx,
			    const struct switchdev_obj *obj,
			    struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (!br_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = ice_eswitch_br_port_vlan_add(br_port->bridge,
						   br_port->vsi_idx, vlan->vid,
						   vlan->flags, extack);
		return err;
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_eswitch_br_port_obj_del(struct net_device *netdev, const void *ctx,
			    const struct switchdev_obj *obj)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
	struct switchdev_obj_port_vlan *vlan;

	if (!br_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		ice_eswitch_br_port_vlan_del(br_port->bridge, br_port->vsi_idx,
					     vlan->vid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_eswitch_br_port_obj_attr_set(struct net_device *netdev, const void *ctx,
				 const struct switchdev_attr *attr,
				 struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);

	if (!br_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ice_eswitch_br_vlan_filtering_set(br_port->bridge,
						  attr->u.vlan_filtering);
		return 0;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		br_port->bridge->ageing_time =
			clock_t_to_jiffies(attr->u.ageing_time);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_eswitch_br_event_blocking(struct notifier_block *nb, unsigned long event,
			      void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    ice_eswitch_br_is_dev_valid,
						    ice_eswitch_br_port_obj_add);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    ice_eswitch_br_is_dev_valid,
						    ice_eswitch_br_port_obj_del);
		break;
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     ice_eswitch_br_is_dev_valid,
						     ice_eswitch_br_port_obj_attr_set);
		break;
	default:
		err = 0;
	}

	return notifier_from_errno(err);
}

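/* Tear down a bridge port: delete the FDB entries it owns, detach the
 * br_port pointer from the PF (uplink) or representor (VF), flush its
 * VLANs and free the port structure.
 */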
static void
ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
			   struct ice_esw_br_port *br_port)
{
	struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
	struct ice_vsi *vsi = br_port->vsi;

	list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
		if (br_port == fdb_entry->br_port)
			ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
	}

	if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) {
		vsi->back->br_port = NULL;
	} else {
		struct ice_repr *repr = ice_repr_get_by_vsi(vsi);

		if (repr)
			repr->br_port = NULL;
	}

	xa_erase(&bridge->ports, br_port->vsi_idx);
	ice_eswitch_br_port_vlans_flush(br_port);
	kfree(br_port);
}

static struct ice_esw_br_port *
ice_eswitch_br_port_init(struct ice_esw_br *bridge)
{
	struct ice_esw_br_port *br_port;

	br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
	if (!br_port)
		return ERR_PTR(-ENOMEM);

	xa_init(&br_port->vlans);

	br_port->bridge = bridge;

	return br_port;
}

static int
ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
				 struct ice_repr *repr)
{
	struct ice_esw_br_port *br_port;
	int err;

	br_port = ice_eswitch_br_port_init(bridge);
	if (IS_ERR(br_port))
		return PTR_ERR(br_port);

	br_port->vsi = repr->src_vsi;
	br_port->vsi_idx = br_port->vsi->idx;
	br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
	repr->br_port = br_port;

	err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
	if (err) {
		ice_eswitch_br_port_deinit(bridge, br_port);
		return err;
	}

	return 0;
}

static int
ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->eswitch.uplink_vsi;
	struct ice_esw_br_port *br_port;
	int err;

	br_port = ice_eswitch_br_port_init(bridge);
	if (IS_ERR(br_port))
		return PTR_ERR(br_port);

	br_port->vsi = vsi;
	br_port->vsi_idx = br_port->vsi->idx;
	br_port->type = ICE_ESWITCH_BR_UPLINK_PORT;
	pf->br_port = br_port;

	err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
	if (err) {
		ice_eswitch_br_port_deinit(bridge, br_port);
		return err;
	}

	return 0;
}

static void
ice_eswitch_br_ports_flush(struct ice_esw_br *bridge)
{
	struct ice_esw_br_port *port;
	unsigned long i;

	xa_for_each(&bridge->ports, i, port)
		ice_eswitch_br_port_deinit(bridge, port);
}

static void
ice_eswitch_br_deinit(struct ice_esw_br_offloads *br_offloads,
		      struct ice_esw_br *bridge)
{
	if (!bridge)
		return;

	/* Cleanup all the ports that were added asynchronously
	 * through NETDEV_CHANGEUPPER event.
	 */
	ice_eswitch_br_ports_flush(bridge);
	WARN_ON(!xa_empty(&bridge->ports));
	xa_destroy(&bridge->ports);
	rhashtable_destroy(&bridge->fdb_ht);

	br_offloads->bridge = NULL;
	kfree(bridge);
}

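/* Allocate the single bridge instance for this eswitch: FDB hashtable and
 * ageing list, default ageing time and an empty port xarray. The instance
 * is stored in br_offloads->bridge.
 */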
static struct ice_esw_br *
ice_eswitch_br_init(struct ice_esw_br_offloads *br_offloads, int ifindex)
{
	struct ice_esw_br *bridge;
	int err;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return ERR_PTR(-ENOMEM);

	err = rhashtable_init(&bridge->fdb_ht, &ice_fdb_ht_params);
	if (err) {
		kfree(bridge);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&bridge->fdb_list);
	bridge->br_offloads = br_offloads;
	bridge->ifindex = ifindex;
	bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
	xa_init(&bridge->ports);
	br_offloads->bridge = bridge;

	return bridge;
}

static struct ice_esw_br *
ice_eswitch_br_get(struct ice_esw_br_offloads *br_offloads, int ifindex,
		   struct netlink_ext_ack *extack)
{
	struct ice_esw_br *bridge = br_offloads->bridge;

	if (bridge) {
		if (bridge->ifindex != ifindex) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one bridge is supported per eswitch");
			return ERR_PTR(-EOPNOTSUPP);
		}
		return bridge;
	}

	/* Create the bridge if it doesn't exist yet */
	bridge = ice_eswitch_br_init(br_offloads, ifindex);
	if (IS_ERR(bridge))
		NL_SET_ERR_MSG_MOD(extack, "Failed to init the bridge");

	return bridge;
}

static void
ice_eswitch_br_verify_deinit(struct ice_esw_br_offloads *br_offloads,
			     struct ice_esw_br *bridge)
{
	/* Remove the bridge if it exists and there are no ports left */
	if (!bridge || !xa_empty(&bridge->ports))
		return;

	ice_eswitch_br_deinit(br_offloads, bridge);
}

static int
ice_eswitch_br_port_unlink(struct ice_esw_br_offloads *br_offloads,
			   struct net_device *dev, int ifindex,
			   struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(dev);
	struct ice_esw_br *bridge;

	if (!br_port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port representor is not attached to any bridge");
		return -EINVAL;
	}

	if (br_port->bridge->ifindex != ifindex) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port representor is attached to another bridge");
		return -EINVAL;
	}

	bridge = br_port->bridge;

	trace_ice_eswitch_br_port_unlink(br_port);
	ice_eswitch_br_port_deinit(br_port->bridge, br_port);
	ice_eswitch_br_verify_deinit(br_offloads, bridge);

	return 0;
}

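/* NETDEV_CHANGEUPPER link handler. Creates a VF representor bridge port
 * for a port representor netdev, or an uplink port for the PF netdev; for
 * a LAG master the ice uplink is resolved from its lower devices first.
 * The bridge instance itself is created on demand by ice_eswitch_br_get().
 */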
static int
ice_eswitch_br_port_link(struct ice_esw_br_offloads *br_offloads,
			 struct net_device *dev, int ifindex,
			 struct netlink_ext_ack *extack)
{
	struct ice_esw_br *bridge;
	int err;

	if (ice_eswitch_br_netdev_to_port(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port is already attached to the bridge");
		return -EINVAL;
	}

	bridge = ice_eswitch_br_get(br_offloads, ifindex, extack);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	if (ice_is_port_repr_netdev(dev)) {
		struct ice_repr *repr = ice_netdev_to_repr(dev);

		err = ice_eswitch_br_vf_repr_port_init(bridge, repr);
		trace_ice_eswitch_br_port_link(repr->br_port);
	} else {
		struct net_device *ice_dev;
		struct ice_pf *pf;

		if (netif_is_lag_master(dev))
			ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
		else
			ice_dev = dev;

		if (!ice_dev)
			return 0;

		pf = ice_netdev_to_pf(ice_dev);

		err = ice_eswitch_br_uplink_port_init(bridge, pf);
		trace_ice_eswitch_br_port_link(pf->br_port);
	}
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to init bridge port");
		goto err_port_init;
	}

	return 0;

err_port_init:
	ice_eswitch_br_verify_deinit(br_offloads, bridge);
	return err;
}

static int
ice_eswitch_br_port_changeupper(struct notifier_block *nb, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct ice_esw_br_offloads *br_offloads;
	struct netlink_ext_ack *extack;
	struct net_device *upper;

	br_offloads = ice_nb_to_br_offloads(nb, netdev_nb);

	if (!ice_eswitch_br_is_dev_valid(dev))
		return 0;

	upper = info->upper_dev;
	if (!netif_is_bridge_master(upper))
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (info->linking)
		return ice_eswitch_br_port_link(br_offloads, dev,
						upper->ifindex, extack);
	else
		return ice_eswitch_br_port_unlink(br_offloads, dev,
						  upper->ifindex, extack);
}

static int
ice_eswitch_br_port_event(struct notifier_block *nb,
			  unsigned long event, void *ptr)
{
	int err = 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = ice_eswitch_br_port_changeupper(nb, ptr);
		break;
	}

	return notifier_from_errno(err);
}

static void
ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads = pf->eswitch.br_offloads;

	ASSERT_RTNL();

	if (!br_offloads)
		return;

	ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);

	pf->eswitch.br_offloads = NULL;
	kfree(br_offloads);
}

static struct ice_esw_br_offloads *
ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads;

	ASSERT_RTNL();

	if (pf->eswitch.br_offloads)
		return ERR_PTR(-EEXIST);

	br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
	if (!br_offloads)
		return ERR_PTR(-ENOMEM);

	pf->eswitch.br_offloads = br_offloads;
	br_offloads->pf = pf;

	return br_offloads;
}

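/* Teardown mirrors ice_eswitch_br_offloads_init(): stop the ageing work,
 * unregister all notifiers, destroy the workqueue and finally free the
 * offloads state under rtnl.
 */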
void
ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads;

	br_offloads = pf->eswitch.br_offloads;
	if (!br_offloads)
		return;

	cancel_delayed_work_sync(&br_offloads->update_work);
	unregister_netdevice_notifier(&br_offloads->netdev_nb);
	unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
	unregister_switchdev_notifier(&br_offloads->switchdev_nb);
	destroy_workqueue(br_offloads->wq);
	/* Although the notifier blocks were just unregistered, so no new
	 * events will arrive, some events might still be in progress.
	 * Hold the rtnl lock and wait for them to finish.
	 */
	rtnl_lock();
	ice_eswitch_br_offloads_dealloc(pf);
	rtnl_unlock();
}

static void ice_eswitch_br_update(struct ice_esw_br_offloads *br_offloads)
{
	struct ice_esw_br *bridge = br_offloads->bridge;
	struct ice_esw_br_fdb_entry *entry, *tmp;

	if (!bridge)
		return;

	rtnl_lock();
	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
		if (entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER)
			continue;

		if (time_is_after_eq_jiffies(entry->last_use +
					     bridge->ageing_time))
			continue;

		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
	}
	rtnl_unlock();
}

static void ice_eswitch_br_update_work(struct work_struct *work)
{
	struct ice_esw_br_offloads *br_offloads;

	br_offloads = ice_work_to_br_offloads(work);

	ice_eswitch_br_update(br_offloads);

	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   ICE_ESW_BRIDGE_UPDATE_INTERVAL);
}

int
ice_eswitch_br_offloads_init(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	rtnl_lock();
	br_offloads = ice_eswitch_br_offloads_alloc(pf);
	rtnl_unlock();
	if (IS_ERR(br_offloads)) {
		dev_err(dev, "Failed to init eswitch bridge\n");
		return PTR_ERR(br_offloads);
	}

	br_offloads->wq = alloc_ordered_workqueue("ice_bridge_wq", 0);
	if (!br_offloads->wq) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate bridge workqueue\n");
		goto err_alloc_wq;
	}

	br_offloads->switchdev_nb.notifier_call =
		ice_eswitch_br_switchdev_event;
	err = register_switchdev_notifier(&br_offloads->switchdev_nb);
	if (err) {
		dev_err(dev,
			"Failed to register switchdev notifier\n");
		goto err_reg_switchdev_nb;
	}

	br_offloads->switchdev_blk.notifier_call =
		ice_eswitch_br_event_blocking;
	err = register_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
	if (err) {
		dev_err(dev,
			"Failed to register bridge blocking switchdev notifier\n");
		goto err_reg_switchdev_blk;
	}

	br_offloads->netdev_nb.notifier_call = ice_eswitch_br_port_event;
	err = register_netdevice_notifier(&br_offloads->netdev_nb);
	if (err) {
		dev_err(dev,
			"Failed to register bridge port event notifier\n");
		goto err_reg_netdev_nb;
	}

	INIT_DELAYED_WORK(&br_offloads->update_work,
			  ice_eswitch_br_update_work);
	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   ICE_ESW_BRIDGE_UPDATE_INTERVAL);

	return 0;

err_reg_netdev_nb:
	unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
err_reg_switchdev_blk:
	unregister_switchdev_notifier(&br_offloads->switchdev_nb);
err_reg_switchdev_nb:
	destroy_workqueue(br_offloads->wq);
err_alloc_wq:
	rtnl_lock();
	ice_eswitch_br_offloads_dealloc(pf);
	rtnl_unlock();

	return err;
}