/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>

/**
 *	switchdev_trans_item_enqueue - Enqueue data item to transaction queue
 *
 *	@trans: transaction
 *	@data: pointer to data being queued
 *	@destructor: data destructor
 *	@tritem: transaction item being queued
 *
 *	Enqueue data item to transaction queue. tritem is typically placed in
 *	container pointed at by data pointer. Destructor is called on
 *	transaction abort and after successful commit phase in case
 *	the caller did not dequeue the item before.
 */
void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
				  void *data, void (*destructor)(void const *),
				  struct switchdev_trans_item *tritem)
{
	tritem->data = data;
	tritem->destructor = destructor;
	list_add_tail(&tritem->list, &trans->item_list);
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);

static struct switchdev_trans_item *
__switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
	struct switchdev_trans_item *tritem;

	if (list_empty(&trans->item_list))
		return NULL;
	tritem = list_first_entry(&trans->item_list,
				  struct switchdev_trans_item, list);
	list_del(&tritem->list);
	return tritem;
}

/**
 *	switchdev_trans_item_dequeue - Dequeue data item from transaction queue
 *
 *	@trans: transaction
 */
void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
	struct switchdev_trans_item *tritem;

	tritem = __switchdev_trans_item_dequeue(trans);
	BUG_ON(!tritem);
	return tritem->data;
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);

static void switchdev_trans_init(struct switchdev_trans *trans)
{
	INIT_LIST_HEAD(&trans->item_list);
}

static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
{
	struct switchdev_trans_item *tritem;

	while ((tritem = __switchdev_trans_item_dequeue(trans)))
		tritem->destructor(tritem->data);
}

static void switchdev_trans_items_warn_destroy(struct net_device *dev,
					       struct switchdev_trans *trans)
{
	WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
	     dev->name);
	switchdev_trans_items_destroy(trans);
}
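
/* Illustrative sketch (not part of this file): a driver's attr/obj handler
 * would typically use the transaction item queue to carry memory allocated
 * in the prepare phase over to the commit phase of the transactions driven
 * further down in this file. Names such as example_drv_entry and
 * example_drv_entry_destructor are hypothetical.
 *
 *	static void example_drv_entry_destructor(const void *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	// prepare phase: allocate and enqueue
 *	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 *	if (!entry)
 *		return -ENOMEM;
 *	switchdev_trans_item_enqueue(trans, entry,
 *				     example_drv_entry_destructor,
 *				     &entry->tritem);
 *
 *	// commit phase: take the item back out and use it
 *	entry = switchdev_trans_item_dequeue(trans);
 */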

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[0];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 *	switchdev_deferred_process - Process ops in deferred queue
 *
 *	Called to flush the ops currently queued in deferred ops queue.
 *	rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		dev_put(dfitem->dev);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	dev_hold(dev);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

/**
 *	switchdev_port_attr_get - Get port attribute
 *
 *	@dev: port device
 *	@attr: attribute to get
 */
int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	struct switchdev_attr first = {
		.id = SWITCHDEV_ATTR_ID_UNDEFINED
	};
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_get)
		return ops->switchdev_port_attr_get(dev, attr);

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		return err;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to get attr on
	 * each port. Return -ENODATA if attr values don't
	 * compare across ports.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_attr_get(lower_dev, attr);
		if (err)
			break;
		if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
			first = *attr;
		else if (memcmp(&first, attr, sizeof(*attr)))
			return -ENODATA;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
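
/* Illustrative sketch (not part of this file): a caller asking whether a
 * port has a switch parent ID, mirroring what switchdev_port_same_parent_id()
 * below does for two devices.
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 *	};
 *
 *	if (switchdev_port_attr_get(dev, &attr))
 *		return false;	// not a switchdev port (or attr unsupported)
 *	// attr.u.ppid now holds the switch (parent) ID
 */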

static int __switchdev_port_attr_set(struct net_device *dev,
				     const struct switchdev_attr *attr,
				     struct switchdev_trans *trans)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_set) {
		err = ops->switchdev_port_attr_set(dev, attr, trans);
		goto done;
	}

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		goto done;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to set attr on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_attr_set(lower_dev, attr, trans);
		if (err)
			break;
	}

done:
	if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
		err = 0;

	return err;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr)
{
	struct switchdev_trans trans;
	int err;

	switchdev_trans_init(&trans);

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support. The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	trans.ph_prepare = true;
	err = __switchdev_port_attr_set(dev, attr, &trans);
	if (err) {
		/* Prepare phase failed: abort the transaction. Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		if (err != -EOPNOTSUPP)
			switchdev_trans_items_destroy(&trans);

		return err;
	}

	/* Phase II: commit attr set. This cannot fail as a fault
	 * of driver/device. If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = __switchdev_port_attr_set(dev, attr, &trans);
	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
	     dev->name, attr->id);
	switchdev_trans_items_warn_destroy(dev, &trans);

	return err;
}
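
/* Illustrative sketch (not part of this file): how a driver's
 * switchdev_port_attr_set op is expected to split its work between the two
 * phases driven by switchdev_port_attr_set_now() above. The example_port_*
 * helpers are hypothetical.
 *
 *	static int example_port_attr_set(struct net_device *dev,
 *					 const struct switchdev_attr *attr,
 *					 struct switchdev_trans *trans)
 *	{
 *		switch (attr->id) {
 *		case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
 *			if (trans->ph_prepare)
 *				// phase I: validate/reserve only
 *				return example_port_flags_check(dev,
 *						attr->u.brport_flags);
 *			// phase II: program the hardware, must not fail
 *			return example_port_flags_set(dev,
 *						attr->u.brport_flags);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */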

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *
 *	Use a 2-phase prepare-commit transaction model to ensure
 *	system is not left in a partially updated state due to
 *	failure from driver/device.
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
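
/* Illustrative sketch (not part of this file): setting a port attribute from
 * a caller. With SWITCHDEV_F_DEFER the operation is queued and executed
 * later under rtnl_lock, so the caller may be in atomic context.
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.brport_flags = BR_LEARNING | BR_FLOOD,
 *	};
 *
 *	err = switchdev_port_attr_set(dev, &attr);
 *	// -EOPNOTSUPP means no switchdev support anywhere below dev
 */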

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		return sizeof(struct switchdev_obj_ipv4_fib);
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		return sizeof(struct switchdev_obj_port_fdb);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int __switchdev_port_obj_add(struct net_device *dev,
				    const struct switchdev_obj *obj,
				    struct switchdev_trans *trans)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_add)
		return ops->switchdev_port_obj_add(dev, obj, trans);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to add object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_obj_add(lower_dev, obj, trans);
		if (err)
			break;
	}

	return err;
}

static int switchdev_port_obj_add_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	struct switchdev_trans trans;
	int err;

	ASSERT_RTNL();

	switchdev_trans_init(&trans);

	/* Phase I: prepare for obj add. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support. The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the obj.
	 */

	trans.ph_prepare = true;
	err = __switchdev_port_obj_add(dev, obj, &trans);
	if (err) {
		/* Prepare phase failed: abort the transaction. Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		if (err != -EOPNOTSUPP)
			switchdev_trans_items_destroy(&trans);

		return err;
	}

	/* Phase II: commit obj add. This cannot fail as a fault
	 * of driver/device. If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = __switchdev_port_obj_add(dev, obj, &trans);
	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
	switchdev_trans_items_warn_destroy(dev, &trans);

	return err;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_add_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *
 *	Use a 2-phase prepare-commit transaction model to ensure
 *	system is not left in a partially updated state due to
 *	failure from driver/device.
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_add_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
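
/* Illustrative sketch (not part of this file): adding a VLAN range to a
 * port, the same object switchdev_port_br_afspec() below builds from the
 * bridge netlink afspec.
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid_begin = 100,
 *		.vid_end = 110,
 *	};
 *
 *	err = switchdev_port_obj_add(dev, &vlan.obj);
 */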

static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_del)
		return ops->switchdev_port_obj_del(dev, obj);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to delete object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_obj_del_now(lower_dev, obj);
		if (err)
			break;
	}

	return err;
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

/**
 *	switchdev_port_obj_dump - Dump port objects
 *
 *	@dev: port device
 *	@obj: object to dump
 *	@cb: function to call with a filled object
 *
 *	rtnl_lock must be held.
 */
int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
			    switchdev_obj_dump_cb_t *cb)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	ASSERT_RTNL();

	if (ops && ops->switchdev_port_obj_dump)
		return ops->switchdev_port_obj_dump(dev, obj, cb);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to dump objects on
	 * first port at bottom of stack.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_obj_dump(lower_dev, obj, cb);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);

static RAW_NOTIFIER_HEAD(switchdev_notif_chain);

/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier. This should be used by code
 *	which needs to monitor events happening in particular device.
 *	Return values are same as for atomic_notifier_chain_register().
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 *	Return values are same as for atomic_notifier_chain_unregister().
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
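
/* Illustrative sketch (not part of this file): the bridge (or another
 * listener) registers a notifier block to receive events a driver raises
 * via call_switchdev_notifiers() below. The example_* names are
 * hypothetical; the event values (e.g. SWITCHDEV_FDB_ADD) come from
 * net/switchdev.h.
 *
 *	static int example_switchdev_event(struct notifier_block *nb,
 *					   unsigned long event, void *ptr)
 *	{
 *		struct switchdev_notifier_info *info = ptr;
 *		struct net_device *dev = info->dev;
 *
 *		// handle event (e.g. a learned FDB entry) for dev
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_switchdev_nb = {
 *		.notifier_call = example_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&example_switchdev_nb);
 */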

/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *
 *	Call all switchdev notifier blocks. This should be called by driver
 *	when it needs to propagate hardware event.
 *	Return values are same as for atomic_notifier_call_chain().
 *	rtnl_lock must be held.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info)
{
	int err;

	ASSERT_RTNL();

	info->dev = dev;
	err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
	return err;
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);

struct switchdev_vlan_dump {
	struct switchdev_obj_port_vlan vlan;
	struct sk_buff *skb;
	u32 filter_mask;
	u16 flags;
	u16 begin;
	u16 end;
};

static int switchdev_port_vlan_dump_put(struct switchdev_vlan_dump *dump)
{
	struct bridge_vlan_info vinfo;

	vinfo.flags = dump->flags;

	if (dump->begin == 0 && dump->end == 0) {
		return 0;
	} else if (dump->begin == dump->end) {
		vinfo.vid = dump->begin;
		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
	} else {
		vinfo.vid = dump->begin;
		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
		vinfo.vid = dump->end;
		vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
	}

	return 0;
}

static int switchdev_port_vlan_dump_cb(struct switchdev_obj *obj)
{
	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct switchdev_vlan_dump *dump =
		container_of(vlan, struct switchdev_vlan_dump, vlan);
	int err = 0;

	if (vlan->vid_begin > vlan->vid_end)
		return -EINVAL;

	if (dump->filter_mask & RTEXT_FILTER_BRVLAN) {
		dump->flags = vlan->flags;
		for (dump->begin = dump->end = vlan->vid_begin;
		     dump->begin <= vlan->vid_end;
		     dump->begin++, dump->end++) {
			err = switchdev_port_vlan_dump_put(dump);
			if (err)
				return err;
		}
	} else if (dump->filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) {
		if (dump->begin > vlan->vid_begin &&
		    dump->begin >= vlan->vid_end) {
			if ((dump->begin - 1) == vlan->vid_end &&
			    dump->flags == vlan->flags) {
				/* prepend */
				dump->begin = vlan->vid_begin;
			} else {
				err = switchdev_port_vlan_dump_put(dump);
				dump->flags = vlan->flags;
				dump->begin = vlan->vid_begin;
				dump->end = vlan->vid_end;
			}
		} else if (dump->end <= vlan->vid_begin &&
			   dump->end < vlan->vid_end) {
			if ((dump->end + 1) == vlan->vid_begin &&
			    dump->flags == vlan->flags) {
				/* append */
				dump->end = vlan->vid_end;
			} else {
				err = switchdev_port_vlan_dump_put(dump);
				dump->flags = vlan->flags;
				dump->begin = vlan->vid_begin;
				dump->end = vlan->vid_end;
			}
		} else {
			err = -EINVAL;
		}
	}

	return err;
}
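
/* Illustrative note (not part of this file): with
 * RTEXT_FILTER_BRVLAN_COMPRESSED, switchdev_port_vlan_dump_cb() above merges
 * adjacent VLAN entries that carry identical flags. For example, objects
 * reporting vids 10-12 and then 13-20 with the same flags are emitted as a
 * single IFLA_BRIDGE_VLAN_INFO range 10-20 (a RANGE_BEGIN/RANGE_END pair),
 * while a change in flags or a gap flushes the pending range via
 * switchdev_port_vlan_dump_put() and starts a new one.
 */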

static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev,
				    u32 filter_mask)
{
	struct switchdev_vlan_dump dump = {
		.vlan.obj.orig_dev = dev,
		.vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.skb = skb,
		.filter_mask = filter_mask,
	};
	int err = 0;

	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		err = switchdev_port_obj_dump(dev, &dump.vlan.obj,
					      switchdev_port_vlan_dump_cb);
		if (err)
			goto err_out;
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			/* last one */
			err = switchdev_port_vlan_dump_put(&dump);
	}

err_out:
	return err == -EOPNOTSUPP ? 0 : err;
}

/**
 *	switchdev_port_bridge_getlink - Get bridge port attributes
 *
 *	@dev: port device
 *
 *	Called for SELF on rtnl_bridge_getlink to get bridge port
 *	attributes.
 */
int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				  struct net_device *dev, u32 filter_mask,
				  int nlflags)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
	};
	u16 mode = BRIDGE_MODE_UNDEF;
	u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
				       attr.u.brport_flags, mask, nlflags,
				       filter_mask, switchdev_port_vlan_fill);
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);

static int switchdev_port_br_setflag(struct net_device *dev,
				     struct nlattr *nlattr,
				     unsigned long brport_flag)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
	};
	u8 flag = nla_get_u8(nlattr);
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err)
		return err;

	if (flag)
		attr.u.brport_flags |= brport_flag;
	else
		attr.u.brport_flags &= ~brport_flag;

	return switchdev_port_attr_set(dev, &attr);
}

static const struct nla_policy
switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]		= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]		= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]		= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]		= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]		= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING_SYNC]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD]	= { .type = NLA_U8 },
};

static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
					      struct nlattr *protinfo)
{
	struct nlattr *attr;
	int rem;
	int err;

	err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
				  switchdev_port_bridge_policy);
	if (err)
		return err;

	nla_for_each_nested(attr, protinfo, rem) {
		switch (nla_type(attr)) {
		case IFLA_BRPORT_LEARNING:
			err = switchdev_port_br_setflag(dev, attr,
							BR_LEARNING);
			break;
		case IFLA_BRPORT_LEARNING_SYNC:
			err = switchdev_port_br_setflag(dev, attr,
							BR_LEARNING_SYNC);
			break;
		case IFLA_BRPORT_UNICAST_FLOOD:
			err = switchdev_port_br_setflag(dev, attr, BR_FLOOD);
			break;
		default:
			err = -EOPNOTSUPP;
			break;
		}
		if (err)
			return err;
	}

	return 0;
}
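
/* Illustrative note (not part of this file): the protinfo path above is what
 * a command such as "bridge link set dev sw1p1 learning on self" ends up in;
 * the IFLA_BRPORT_LEARNING attribute is translated by
 * switchdev_port_br_setflag() into a SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS
 * attr_set with BR_LEARNING toggled.
 */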

static int switchdev_port_br_afspec(struct net_device *dev,
				    struct nlattr *afspec,
				    int (*f)(struct net_device *dev,
					     const struct switchdev_obj *obj))
{
	struct nlattr *attr;
	struct bridge_vlan_info *vinfo;
	struct switchdev_obj_port_vlan vlan = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
	};
	int rem;
	int err;

	nla_for_each_nested(attr, afspec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
			continue;
		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
			return -EINVAL;
		vinfo = nla_data(attr);
		if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
			return -EINVAL;
		vlan.flags = vinfo->flags;
		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
			if (vlan.vid_begin)
				return -EINVAL;
			vlan.vid_begin = vinfo->vid;
			/* don't allow range of pvids */
			if (vlan.flags & BRIDGE_VLAN_INFO_PVID)
				return -EINVAL;
		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
			if (!vlan.vid_begin)
				return -EINVAL;
			vlan.vid_end = vinfo->vid;
			if (vlan.vid_end <= vlan.vid_begin)
				return -EINVAL;
			err = f(dev, &vlan.obj);
			if (err)
				return err;
			vlan.vid_begin = 0;
		} else {
			if (vlan.vid_begin)
				return -EINVAL;
			vlan.vid_begin = vinfo->vid;
			vlan.vid_end = vinfo->vid;
			err = f(dev, &vlan.obj);
			if (err)
				return err;
			vlan.vid_begin = 0;
		}
	}

	return 0;
}

/**
 *	switchdev_port_bridge_setlink - Set bridge port attributes
 *
 *	@dev: port device
 *	@nlh: netlink header
 *	@flags: netlink flags
 *
 *	Called for SELF on rtnl_bridge_setlink to set bridge port
 *	attributes.
 */
int switchdev_port_bridge_setlink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *protinfo;
	struct nlattr *afspec;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				   IFLA_PROTINFO);
	if (protinfo) {
		err = switchdev_port_br_setlink_protinfo(dev, protinfo);
		if (err)
			return err;
	}

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				 IFLA_AF_SPEC);
	if (afspec)
		err = switchdev_port_br_afspec(dev, afspec,
					       switchdev_port_obj_add);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);

/**
 *	switchdev_port_bridge_dellink - Del bridge port attributes
 *
 *	@dev: port device
 *	@nlh: netlink header
 *	@flags: netlink flags
 *
 *	Called for SELF on rtnl_bridge_dellink to del bridge port
 *	attributes.
 */
int switchdev_port_bridge_dellink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *afspec;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				 IFLA_AF_SPEC);
	if (afspec)
		return switchdev_port_br_afspec(dev, afspec,
						switchdev_port_obj_del);

	return 0;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);

/**
 *	switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port
 *
 *	@ndm: netlink hdr
 *	@tb: netlink attributes
 *	@dev: port device
 *	@addr: MAC address to add
 *	@vid: VLAN to add
 *	@nlm_flags: netlink flags passed in (NLM_F_*)
 *
 *	Add FDB entry to switch device.
 */
int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev, const unsigned char *addr,
			   u16 vid, u16 nlm_flags)
{
	struct switchdev_obj_port_fdb fdb = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
		.vid = vid,
	};

	ether_addr_copy(fdb.addr, addr);
	return switchdev_port_obj_add(dev, &fdb.obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);
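
/* Illustrative sketch (not part of this file): a switchdev driver can point
 * its net_device_ops at these generic helpers instead of open-coding the
 * netlink handling. The example_netdev_ops name is hypothetical.
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		...
 *		.ndo_fdb_add		= switchdev_port_fdb_add,
 *		.ndo_fdb_del		= switchdev_port_fdb_del,
 *		.ndo_fdb_dump		= switchdev_port_fdb_dump,
 *		.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
 *		.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
 *		.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
 *	};
 */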

/**
 *	switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port
 *
 *	@ndm: netlink hdr
 *	@tb: netlink attributes
 *	@dev: port device
 *	@addr: MAC address to delete
 *	@vid: VLAN to delete
 *
 *	Delete FDB entry from switch device.
 */
int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev, const unsigned char *addr,
			   u16 vid)
{
	struct switchdev_obj_port_fdb fdb = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
		.vid = vid,
	};

	ether_addr_copy(fdb.addr, addr);
	return switchdev_port_obj_del(dev, &fdb.obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);

struct switchdev_fdb_dump {
	struct switchdev_obj_port_fdb fdb;
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int switchdev_port_fdb_dump_cb(struct switchdev_obj *obj)
{
	struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
	struct switchdev_fdb_dump *dump =
		container_of(fdb, struct switchdev_fdb_dump, fdb);
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[0])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = fdb->ndm_state;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, fdb->addr))
		goto nla_put_failure;

	if (fdb->vid && nla_put_u16(dump->skb, NDA_VLAN, fdb->vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

/**
 *	switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries
 *
 *	@skb: netlink skb
 *	@cb: netlink callback
 *	@dev: port device
 *	@filter_dev: filter device
 *	@idx: index to start dumping at
 *
 *	Dump FDB entries from switch device.
 */
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev,
			    struct net_device *filter_dev, int idx)
{
	struct switchdev_fdb_dump dump = {
		.fdb.obj.orig_dev = dev,
		.fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = idx,
	};

	switchdev_port_obj_dump(dev, &dump.fdb.obj, switchdev_port_fdb_dump_cb);
	return dump.idx;
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
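
/* Illustrative sketch (not part of this file): a driver's
 * switchdev_port_obj_dump op fills the caller-provided object for each
 * hardware FDB entry and invokes the callback, which for the FDB case is
 * switchdev_port_fdb_dump_cb() above. The example_port_fdb_dump name and
 * the entry_* values are hypothetical.
 *
 *	static int example_port_fdb_dump(struct net_device *dev,
 *					 struct switchdev_obj_port_fdb *fdb,
 *					 switchdev_obj_dump_cb_t *cb)
 *	{
 *		int err;
 *
 *		// for each FDB entry learned on this port in hardware:
 *		ether_addr_copy(fdb->addr, entry_mac);
 *		fdb->vid = entry_vid;
 *		fdb->ndm_state = NUD_REACHABLE;
 *		err = cb(&fdb->obj);
 *		if (err)
 *			return err;
 *		// ...
 *		return 0;
 *	}
 */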

static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct net_device *port_dev;
	struct list_head *iter;

	/* Recursively search down until we find a sw port dev.
	 * (A sw port dev supports switchdev_port_attr_get).
	 */

	if (ops && ops->switchdev_port_attr_get)
		return dev;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		port_dev = switchdev_get_lowest_dev(lower_dev);
		if (port_dev)
			return port_dev;
	}

	return NULL;
}

static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
	};
	struct switchdev_attr prev_attr;
	struct net_device *dev = NULL;
	int nhsel;

	ASSERT_RTNL();

	/* For this route, all nexthop devs must be on the same switch. */

	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
		const struct fib_nh *nh = &fi->fib_nh[nhsel];

		if (!nh->nh_dev)
			return NULL;

		dev = switchdev_get_lowest_dev(nh->nh_dev);
		if (!dev)
			return NULL;

		attr.orig_dev = dev;
		if (switchdev_port_attr_get(dev, &attr))
			return NULL;

		if (nhsel > 0 &&
		    !netdev_phys_item_id_same(&prev_attr.u.ppid, &attr.u.ppid))
			return NULL;

		prev_attr = attr;
	}

	return dev;
}

/**
 *	switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry
 *
 *	@dst: route's IPv4 destination address
 *	@dst_len: destination address length (prefix length)
 *	@fi: route FIB info structure
 *	@tos: route TOS
 *	@type: route type
 *	@nlflags: netlink flags passed in (NLM_F_*)
 *	@tb_id: route table ID
 *
 *	Add/modify switch IPv4 route entry.
 */
int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 nlflags, u32 tb_id)
{
	struct switchdev_obj_ipv4_fib ipv4_fib = {
		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
		.dst = dst,
		.dst_len = dst_len,
		.tos = tos,
		.type = type,
		.nlflags = nlflags,
		.tb_id = tb_id,
	};
	struct net_device *dev;
	int err = 0;

	memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));

	/* Don't offload route if using custom ip rules or if
	 * IPv4 FIB offloading has been disabled completely.
	 */

#ifdef CONFIG_IP_MULTIPLE_TABLES
	if (fi->fib_net->ipv4.fib_has_custom_rules)
		return 0;
#endif

	if (fi->fib_net->ipv4.fib_offload_disabled)
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;

	ipv4_fib.obj.orig_dev = dev;
	err = switchdev_port_obj_add(dev, &ipv4_fib.obj);
	if (!err)
		fi->fib_flags |= RTNH_F_OFFLOAD;

	return err == -EOPNOTSUPP ? 0 : err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);
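
/* Illustrative sketch (not part of this file): the driver side of the FIB
 * offload above is just another case in the driver's switchdev_port_obj_add
 * handler. The SWITCHDEV_OBJ_IPV4_FIB() container macro is assumed from
 * net/switchdev.h; example_fib_install() is hypothetical.
 *
 *	case SWITCHDEV_OBJ_ID_IPV4_FIB: {
 *		const struct switchdev_obj_ipv4_fib *fib =
 *			SWITCHDEV_OBJ_IPV4_FIB(obj);
 *
 *		if (trans->ph_prepare)
 *			return 0;	// or reserve a hardware route entry
 *		return example_fib_install(dev, fib->dst, fib->dst_len,
 *					   &fib->fi, fib->tb_id);
 *	}
 */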

/**
 *	switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
 *
 *	@dst: route's IPv4 destination address
 *	@dst_len: destination address length (prefix length)
 *	@fi: route FIB info structure
 *	@tos: route TOS
 *	@type: route type
 *	@tb_id: route table ID
 *
 *	Delete IPv4 route entry from switch device.
 */
int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 tb_id)
{
	struct switchdev_obj_ipv4_fib ipv4_fib = {
		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
		.dst = dst,
		.dst_len = dst_len,
		.tos = tos,
		.type = type,
		.nlflags = 0,
		.tb_id = tb_id,
	};
	struct net_device *dev;
	int err = 0;

	memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));

	if (!(fi->fib_flags & RTNH_F_OFFLOAD))
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;

	ipv4_fib.obj.orig_dev = dev;
	err = switchdev_port_obj_del(dev, &ipv4_fib.obj);
	if (!err)
		fi->fib_flags &= ~RTNH_F_OFFLOAD;

	return err == -EOPNOTSUPP ? 0 : err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);

/**
 *	switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
 *
 *	@fi: route FIB info structure
 */
void switchdev_fib_ipv4_abort(struct fib_info *fi)
{
	/* There was a problem installing this route to the offload
	 * device. For now, until we come up with more refined
	 * policy handling, abruptly end IPv4 fib offloading for
	 * the entire net by flushing offload device(s) of all
	 * IPv4 routes, and mark IPv4 fib offloading broken from
	 * this point forward.
	 */

	fib_flush_external(fi->fib_net);
	fi->fib_net->ipv4.fib_offload_disabled = true;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);

static bool switchdev_port_same_parent_id(struct net_device *a,
					  struct net_device *b)
{
	struct switchdev_attr a_attr = {
		.orig_dev = a,
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,
	};
	struct switchdev_attr b_attr = {
		.orig_dev = b,
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,
	};

	if (switchdev_port_attr_get(a, &a_attr) ||
	    switchdev_port_attr_get(b, &b_attr))
		return false;

	return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
}

static u32 switchdev_port_fwd_mark_get(struct net_device *dev,
				       struct net_device *group_dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
		if (lower_dev == dev)
			continue;
		if (switchdev_port_same_parent_id(dev, lower_dev))
			return lower_dev->offload_fwd_mark;
		return switchdev_port_fwd_mark_get(dev, lower_dev);
	}

	return dev->ifindex;
}

static void switchdev_port_fwd_mark_reset(struct net_device *group_dev,
					  u32 old_mark, u32 *reset_mark)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
		if (lower_dev->offload_fwd_mark == old_mark) {
			if (!*reset_mark)
				*reset_mark = lower_dev->ifindex;
			lower_dev->offload_fwd_mark = *reset_mark;
		}
		switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark);
	}
}

/**
 *	switchdev_port_fwd_mark_set - Set port offload forwarding mark
 *
 *	@dev: port device
 *	@group_dev: containing device
 *	@joining: true if dev is joining group; false if leaving group
 *
 *	An ungrouped port's offload mark is just its ifindex. A grouped
 *	port's (member of a bridge, for example) offload mark is the ifindex
 *	of one of the ports in the group with the same parent (switch) ID.
 *	Ports on the same device in the same group will have the same mark.
 *
 *	Example:
 *
 *		br0		ifindex=9
 *		  sw1p1		ifindex=2	mark=2
 *		  sw1p2		ifindex=3	mark=2
 *		  sw2p1		ifindex=4	mark=5
 *		  sw2p2		ifindex=5	mark=5
 *
 *	If sw2p2 leaves the bridge, we'll have:
 *
 *		br0		ifindex=9
 *		  sw1p1		ifindex=2	mark=2
 *		  sw1p2		ifindex=3	mark=2
 *		  sw2p1		ifindex=4	mark=4
 *		  sw2p2		ifindex=5	mark=5
 */
void switchdev_port_fwd_mark_set(struct net_device *dev,
				 struct net_device *group_dev,
				 bool joining)
{
	u32 mark = dev->ifindex;
	u32 reset_mark = 0;

	if (group_dev) {
		ASSERT_RTNL();
		if (joining)
			mark = switchdev_port_fwd_mark_get(dev, group_dev);
		else if (dev->offload_fwd_mark == mark)
			/* Ohoh, this port was the mark reference port,
			 * but it's leaving the group, so reset the
			 * mark for the remaining ports in the group.
			 */
			switchdev_port_fwd_mark_reset(group_dev, mark,
						      &reset_mark);
	}

	dev->offload_fwd_mark = mark;
}
EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set);
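
/* Illustrative sketch (not part of this file): a driver calls
 * switchdev_port_fwd_mark_set() when one of its ports joins or leaves a
 * bridge (e.g. from a NETDEV_CHANGEUPPER handler), and then stamps received
 * skbs with the port's mark so the bridge can skip software-forwarding to
 * ports the hardware has already flooded. The port/bridge_dev names are
 * hypothetical.
 *
 *	// bridge join/leave path
 *	switchdev_port_fwd_mark_set(port->dev, bridge_dev, joining);
 *
 *	// RX path
 *	skb->offload_fwd_mark = port->dev->offload_fwd_mark;
 */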