/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>

/**
 * switchdev_trans_item_enqueue - Enqueue data item to transaction queue
 *
 * @trans: transaction
 * @data: pointer to data being queued
 * @destructor: data destructor
 * @tritem: transaction item being queued
 *
 * Enqueue data item to transaction queue. tritem is typically placed in
 * the container pointed at by the data pointer. Destructor is called on
 * transaction abort and after successful commit phase in case
 * the caller did not dequeue the item before.
 */
void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
                                  void *data, void (*destructor)(void const *),
                                  struct switchdev_trans_item *tritem)
{
        tritem->data = data;
        tritem->destructor = destructor;
        list_add_tail(&tritem->list, &trans->item_list);
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);

static struct switchdev_trans_item *
__switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
        struct switchdev_trans_item *tritem;

        if (list_empty(&trans->item_list))
                return NULL;
        tritem = list_first_entry(&trans->item_list,
                                  struct switchdev_trans_item, list);
        list_del(&tritem->list);
        return tritem;
}

/**
 * switchdev_trans_item_dequeue - Dequeue data item from transaction queue
 *
 * @trans: transaction
 */
void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
        struct switchdev_trans_item *tritem;

        tritem = __switchdev_trans_item_dequeue(trans);
        BUG_ON(!tritem);
        return tritem->data;
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);

static void switchdev_trans_init(struct switchdev_trans *trans)
{
        INIT_LIST_HEAD(&trans->item_list);
}

static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
{
        struct switchdev_trans_item *tritem;

        while ((tritem = __switchdev_trans_item_dequeue(trans)))
                tritem->destructor(tritem->data);
}

static void switchdev_trans_items_warn_destroy(struct net_device *dev,
                                               struct switchdev_trans *trans)
{
        WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
             dev->name);
        switchdev_trans_items_destroy(trans);
}

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
                                       const void *data);

struct switchdev_deferred_item {
        struct list_head list;
        struct net_device *dev;
        switchdev_deferred_func_t *func;
        unsigned long data[0];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
        struct switchdev_deferred_item *dfitem;

        spin_lock_bh(&deferred_lock);
        if (list_empty(&deferred)) {
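                /* Queue is empty, nothing deferred left to process. */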
                dfitem = NULL;
                goto unlock;
        }
        dfitem = list_first_entry(&deferred,
                                  struct switchdev_deferred_item, list);
        list_del(&dfitem->list);
unlock:
        spin_unlock_bh(&deferred_lock);
        return dfitem;
}

/**
 * switchdev_deferred_process - Process ops in deferred queue
 *
 * Called to flush the ops currently queued in deferred ops queue.
 * rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
        struct switchdev_deferred_item *dfitem;

        ASSERT_RTNL();

        while ((dfitem = switchdev_deferred_dequeue())) {
                dfitem->func(dfitem->dev, dfitem->data);
                dev_put(dfitem->dev);
                kfree(dfitem);
        }
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
        rtnl_lock();
        switchdev_deferred_process();
        rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
                                      const void *data, size_t data_len,
                                      switchdev_deferred_func_t *func)
{
        struct switchdev_deferred_item *dfitem;

        dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
        if (!dfitem)
                return -ENOMEM;
        dfitem->dev = dev;
        dfitem->func = func;
        memcpy(dfitem->data, data, data_len);
        dev_hold(dev);
        spin_lock_bh(&deferred_lock);
        list_add_tail(&dfitem->list, &deferred);
        spin_unlock_bh(&deferred_lock);
        schedule_work(&deferred_process_work);
        return 0;
}

/**
 * switchdev_port_attr_get - Get port attribute
 *
 * @dev: port device
 * @attr: attribute to get
 */
int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        struct list_head *iter;
        struct switchdev_attr first = {
                .id = SWITCHDEV_ATTR_ID_UNDEFINED
        };
        int err = -EOPNOTSUPP;

        if (ops && ops->switchdev_port_attr_get)
                return ops->switchdev_port_attr_get(dev, attr);

        if (attr->flags & SWITCHDEV_F_NO_RECURSE)
                return err;

        /* Switch device port(s) may be stacked under
         * bond/team/vlan dev, so recurse down to get attr on
         * each port. Return -ENODATA if attr values don't
         * compare across ports.
         */

        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                err = switchdev_port_attr_get(lower_dev, attr);
                if (err)
                        break;
                if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
                        first = *attr;
                else if (memcmp(&first, attr, sizeof(*attr)))
                        return -ENODATA;
        }

        return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_get);

static int __switchdev_port_attr_set(struct net_device *dev,
                                     const struct switchdev_attr *attr,
                                     struct switchdev_trans *trans)
{
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        if (ops && ops->switchdev_port_attr_set) {
                err = ops->switchdev_port_attr_set(dev, attr, trans);
                goto done;
        }

        if (attr->flags & SWITCHDEV_F_NO_RECURSE)
                goto done;

        /* Switch device port(s) may be stacked under
         * bond/team/vlan dev, so recurse down to set attr on
         * each port.
         */

        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                err = __switchdev_port_attr_set(lower_dev, attr, trans);
                if (err)
                        break;
        }

done:
        if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
                err = 0;

        return err;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
                                       const struct switchdev_attr *attr)
{
        struct switchdev_trans trans;
        int err;

        switchdev_trans_init(&trans);

        /* Phase I: prepare for attr set. Driver/device should fail
         * here if there are going to be issues in the commit phase,
         * such as lack of resources or support. The driver/device
         * should reserve resources needed for the commit phase here,
         * but should not commit the attr.
         */

        trans.ph_prepare = true;
        err = __switchdev_port_attr_set(dev, attr, &trans);
        if (err) {
                /* Prepare phase failed: abort the transaction. Any
                 * resources reserved in the prepare phase are
                 * released.
                 */

                if (err != -EOPNOTSUPP)
                        switchdev_trans_items_destroy(&trans);

                return err;
        }

        /* Phase II: commit attr set. This cannot fail as a fault
         * of driver/device. If it does, it's a bug in the driver/device
         * because the driver said everything was OK in phase I.
         */

        trans.ph_prepare = false;
        err = __switchdev_port_attr_set(dev, attr, &trans);
        WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
             dev->name, attr->id);
        switchdev_trans_items_warn_destroy(dev, &trans);

        return err;
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
                                             const void *data)
{
        const struct switchdev_attr *attr = data;
        int err;

        err = switchdev_port_attr_set_now(dev, attr);
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
                           err, attr->id);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
                                         const struct switchdev_attr *attr)
{
        return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
                                          switchdev_port_attr_set_deferred);
}

/**
 * switchdev_port_attr_set - Set port attribute
 *
 * @dev: port device
 * @attr: attribute to set
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
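 * If SWITCHDEV_F_DEFER is set, the operation is queued and executed
 * later under rtnl_lock from process context, so the caller may be in
 * an atomic section.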
 */
int switchdev_port_attr_set(struct net_device *dev,
                            const struct switchdev_attr *attr)
{
        if (attr->flags & SWITCHDEV_F_DEFER)
                return switchdev_port_attr_set_defer(dev, attr);
        ASSERT_RTNL();
        return switchdev_port_attr_set_now(dev, attr);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                return sizeof(struct switchdev_obj_port_vlan);
        case SWITCHDEV_OBJ_ID_IPV4_FIB:
                return sizeof(struct switchdev_obj_ipv4_fib);
        case SWITCHDEV_OBJ_ID_PORT_FDB:
                return sizeof(struct switchdev_obj_port_fdb);
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                return sizeof(struct switchdev_obj_port_mdb);
        default:
                BUG();
        }
        return 0;
}

static int __switchdev_port_obj_add(struct net_device *dev,
                                    const struct switchdev_obj *obj,
                                    struct switchdev_trans *trans)
{
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        if (ops && ops->switchdev_port_obj_add)
                return ops->switchdev_port_obj_add(dev, obj, trans);

        /* Switch device port(s) may be stacked under
         * bond/team/vlan dev, so recurse down to add object on
         * each port.
         */

        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                err = __switchdev_port_obj_add(lower_dev, obj, trans);
                if (err)
                        break;
        }

        return err;
}

static int switchdev_port_obj_add_now(struct net_device *dev,
                                      const struct switchdev_obj *obj)
{
        struct switchdev_trans trans;
        int err;

        ASSERT_RTNL();

        switchdev_trans_init(&trans);

        /* Phase I: prepare for obj add. Driver/device should fail
         * here if there are going to be issues in the commit phase,
         * such as lack of resources or support. The driver/device
         * should reserve resources needed for the commit phase here,
         * but should not commit the obj.
         */

        trans.ph_prepare = true;
        err = __switchdev_port_obj_add(dev, obj, &trans);
        if (err) {
                /* Prepare phase failed: abort the transaction. Any
                 * resources reserved in the prepare phase are
                 * released.
                 */

                if (err != -EOPNOTSUPP)
                        switchdev_trans_items_destroy(&trans);

                return err;
        }

        /* Phase II: commit obj add. This cannot fail as a fault
         * of driver/device. If it does, it's a bug in the driver/device
         * because the driver said everything was OK in phase I.
         */

        trans.ph_prepare = false;
        err = __switchdev_port_obj_add(dev, obj, &trans);
        WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
        switchdev_trans_items_warn_destroy(dev, &trans);

        return err;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
                                            const void *data)
{
        const struct switchdev_obj *obj = data;
        int err;

        err = switchdev_port_obj_add_now(dev, obj);
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
                           err, obj->id);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
                                        const struct switchdev_obj *obj)
{
        return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
                                          switchdev_port_obj_add_deferred);
}

/**
 * switchdev_port_obj_add - Add port object
 *
 * @dev: port device
 * @obj: object to add
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
                           const struct switchdev_obj *obj)
{
        if (obj->flags & SWITCHDEV_F_DEFER)
                return switchdev_port_obj_add_defer(dev, obj);
        ASSERT_RTNL();
        return switchdev_port_obj_add_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);

static int switchdev_port_obj_del_now(struct net_device *dev,
                                      const struct switchdev_obj *obj)
{
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        if (ops && ops->switchdev_port_obj_del)
                return ops->switchdev_port_obj_del(dev, obj);

        /* Switch device port(s) may be stacked under
         * bond/team/vlan dev, so recurse down to delete object on
         * each port.
         */

        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                err = switchdev_port_obj_del_now(lower_dev, obj);
                if (err)
                        break;
        }

        return err;
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
                                            const void *data)
{
        const struct switchdev_obj *obj = data;
        int err;

        err = switchdev_port_obj_del_now(dev, obj);
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
                           err, obj->id);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
                                        const struct switchdev_obj *obj)
{
        return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
                                          switchdev_port_obj_del_deferred);
}

/**
 * switchdev_port_obj_del - Delete port object
 *
 * @dev: port device
 * @obj: object to delete
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
                           const struct switchdev_obj *obj)
{
        if (obj->flags & SWITCHDEV_F_DEFER)
                return switchdev_port_obj_del_defer(dev, obj);
        ASSERT_RTNL();
        return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

/**
 * switchdev_port_obj_dump - Dump port objects
 *
 * @dev: port device
 * @obj: object to dump
 * @cb: function to call with a filled object
 *
 * rtnl_lock must be held.
 */
int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
                            switchdev_obj_dump_cb_t *cb)
{
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        ASSERT_RTNL();

        if (ops && ops->switchdev_port_obj_dump)
                return ops->switchdev_port_obj_dump(dev, obj, cb);

        /* Switch device port(s) may be stacked under
         * bond/team/vlan dev, so recurse down to dump objects on
         * first port at bottom of stack.
         */

        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                err = switchdev_port_obj_dump(lower_dev, obj, cb);
                break;
        }

        return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);

static DEFINE_MUTEX(switchdev_mutex);
static RAW_NOTIFIER_HEAD(switchdev_notif_chain);

/**
 * register_switchdev_notifier - Register notifier
 * @nb: notifier_block
 *
 * Register switch device notifier. This should be used by code
 * which needs to monitor events happening in a particular device.
 * Return values are same as for atomic_notifier_chain_register().
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
        int err;

        mutex_lock(&switchdev_mutex);
        err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
        mutex_unlock(&switchdev_mutex);
        return err;
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 * unregister_switchdev_notifier - Unregister notifier
 * @nb: notifier_block
 *
 * Unregister switch device notifier.
 * Return values are same as for atomic_notifier_chain_unregister().
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
        int err;

        mutex_lock(&switchdev_mutex);
        err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
        mutex_unlock(&switchdev_mutex);
        return err;
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 * call_switchdev_notifiers - Call notifiers
 * @val: value passed unmodified to notifier function
 * @dev: port device
 * @info: notifier information data
 *
 * Call all network notifier blocks. This should be called by the driver
 * when it needs to propagate a hardware event.
 * Return values are same as for atomic_notifier_call_chain().
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
                             struct switchdev_notifier_info *info)
{
        int err;

        info->dev = dev;
        mutex_lock(&switchdev_mutex);
        err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
        mutex_unlock(&switchdev_mutex);
        return err;
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);

struct switchdev_vlan_dump {
        struct switchdev_obj_port_vlan vlan;
        struct sk_buff *skb;
        u32 filter_mask;
        u16 flags;
        u16 begin;
        u16 end;
};

static int switchdev_port_vlan_dump_put(struct switchdev_vlan_dump *dump)
{
        struct bridge_vlan_info vinfo;

        vinfo.flags = dump->flags;

        if (dump->begin == 0 && dump->end == 0) {
                return 0;
        } else if (dump->begin == dump->end) {
                vinfo.vid = dump->begin;
                if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
                            sizeof(vinfo), &vinfo))
                        return -EMSGSIZE;
        } else {
                vinfo.vid = dump->begin;
                vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
                if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
                            sizeof(vinfo), &vinfo))
                        return -EMSGSIZE;
                vinfo.vid = dump->end;
                vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
                vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_END;
                if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
                            sizeof(vinfo), &vinfo))
                        return -EMSGSIZE;
        }

        return 0;
}

static int switchdev_port_vlan_dump_cb(struct switchdev_obj *obj)
{
        struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
        struct switchdev_vlan_dump *dump =
                container_of(vlan, struct switchdev_vlan_dump, vlan);
        int err = 0;

        if (vlan->vid_begin > vlan->vid_end)
                return -EINVAL;

        if (dump->filter_mask & RTEXT_FILTER_BRVLAN) {
                dump->flags = vlan->flags;
                for (dump->begin = dump->end = vlan->vid_begin;
                     dump->begin <= vlan->vid_end;
                     dump->begin++, dump->end++) {
                        err = switchdev_port_vlan_dump_put(dump);
                        if (err)
                                return err;
                }
        } else if (dump->filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) {
                if (dump->begin > vlan->vid_begin &&
                    dump->begin >= vlan->vid_end) {
                        if ((dump->begin - 1) == vlan->vid_end &&
                            dump->flags == vlan->flags) {
                                /* prepend */
                                dump->begin = vlan->vid_begin;
                        } else {
                                err = switchdev_port_vlan_dump_put(dump);
                                dump->flags = vlan->flags;
                                dump->begin = vlan->vid_begin;
                                dump->end = vlan->vid_end;
                        }
                } else if (dump->end <= vlan->vid_begin &&
                           dump->end < vlan->vid_end) {
                        if ((dump->end + 1) == vlan->vid_begin &&
                            dump->flags == vlan->flags) {
                                /* append */
                                dump->end = vlan->vid_end;
                        } else {
                                err = switchdev_port_vlan_dump_put(dump);
                                dump->flags = vlan->flags;
                                dump->begin = vlan->vid_begin;
                                dump->end = vlan->vid_end;
                        }
                } else {
                        err = -EINVAL;
                }
        }

        return err;
}

static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev,
                                    u32 filter_mask)
{
        struct switchdev_vlan_dump dump = {
                .vlan.obj.orig_dev = dev,
                .vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
                .skb = skb,
                .filter_mask = filter_mask,
        };
        int err = 0;

        if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
            (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
                err = switchdev_port_obj_dump(dev, &dump.vlan.obj,
                                              switchdev_port_vlan_dump_cb);
                if (err)
                        goto err_out;
                if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
                        /* last one */
                        err = switchdev_port_vlan_dump_put(&dump);
        }

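        /* -EOPNOTSUPP from the lower devices means there is no switchdev
         * VLAN info to report, not a failure of the dump itself.
         */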
err_out:
        return err == -EOPNOTSUPP ? 0 : err;
}

/**
 * switchdev_port_bridge_getlink - Get bridge port attributes
 *
 * @dev: port device
 *
 * Called for SELF on rtnl_bridge_getlink to get bridge port
 * attributes.
 */
int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                  struct net_device *dev, u32 filter_mask,
                                  int nlflags)
{
        struct switchdev_attr attr = {
                .orig_dev = dev,
                .id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
        };
        u16 mode = BRIDGE_MODE_UNDEF;
        u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
        int err;

        err = switchdev_port_attr_get(dev, &attr);
        if (err && err != -EOPNOTSUPP)
                return err;

        return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
                                       attr.u.brport_flags, mask, nlflags,
                                       filter_mask, switchdev_port_vlan_fill);
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);

static int switchdev_port_br_setflag(struct net_device *dev,
                                     struct nlattr *nlattr,
                                     unsigned long brport_flag)
{
        struct switchdev_attr attr = {
                .orig_dev = dev,
                .id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
        };
        u8 flag = nla_get_u8(nlattr);
        int err;

        err = switchdev_port_attr_get(dev, &attr);
        if (err)
                return err;

        if (flag)
                attr.u.brport_flags |= brport_flag;
        else
                attr.u.brport_flags &= ~brport_flag;

        return switchdev_port_attr_set(dev, &attr);
}

static const struct nla_policy
switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
        [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
        [IFLA_BRPORT_COST] = { .type = NLA_U32 },
        [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
        [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
        [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
        [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
        [IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 },
        [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
        [IFLA_BRPORT_LEARNING_SYNC] = { .type = NLA_U8 },
        [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
};

static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
                                              struct nlattr *protinfo)
{
        struct nlattr *attr;
        int rem;
        int err;

        err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
                                  switchdev_port_bridge_policy);
        if (err)
                return err;

        nla_for_each_nested(attr, protinfo, rem) {
                switch (nla_type(attr)) {
                case IFLA_BRPORT_LEARNING:
                        err = switchdev_port_br_setflag(dev, attr,
                                                        BR_LEARNING);
                        break;
                case IFLA_BRPORT_LEARNING_SYNC:
                        err = switchdev_port_br_setflag(dev, attr,
                                                        BR_LEARNING_SYNC);
                        break;
                case IFLA_BRPORT_UNICAST_FLOOD:
                        err = switchdev_port_br_setflag(dev, attr, BR_FLOOD);
                        break;
                default:
                        err = -EOPNOTSUPP;
                        break;
                }
                if (err)
                        return err;
        }

        return 0;
}

static int switchdev_port_br_afspec(struct net_device *dev,
                                    struct nlattr *afspec,
                                    int (*f)(struct net_device *dev,
                                             const struct switchdev_obj *obj))
{
        struct nlattr *attr;
        struct bridge_vlan_info *vinfo;
        struct switchdev_obj_port_vlan vlan = {
                .obj.orig_dev = dev,
                .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
        };
        int rem;
        int err;

        nla_for_each_nested(attr, afspec, rem) {
                if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
                        continue;
                if (nla_len(attr) != sizeof(struct bridge_vlan_info))
                        return -EINVAL;
                vinfo = nla_data(attr);
                if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
                        return -EINVAL;
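                /* RANGE_BEGIN opens a VID range and RANGE_END closes it and
                 * applies the object; an entry with neither flag is treated
                 * as a single-VID range.
                 */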
                vlan.flags = vinfo->flags;
                if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
                        if (vlan.vid_begin)
                                return -EINVAL;
                        vlan.vid_begin = vinfo->vid;
                        /* don't allow range of pvids */
                        if (vlan.flags & BRIDGE_VLAN_INFO_PVID)
                                return -EINVAL;
                } else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
                        if (!vlan.vid_begin)
                                return -EINVAL;
                        vlan.vid_end = vinfo->vid;
                        if (vlan.vid_end <= vlan.vid_begin)
                                return -EINVAL;
                        err = f(dev, &vlan.obj);
                        if (err)
                                return err;
                        vlan.vid_begin = 0;
                } else {
                        if (vlan.vid_begin)
                                return -EINVAL;
                        vlan.vid_begin = vinfo->vid;
                        vlan.vid_end = vinfo->vid;
                        err = f(dev, &vlan.obj);
                        if (err)
                                return err;
                        vlan.vid_begin = 0;
                }
        }

        return 0;
}

/**
 * switchdev_port_bridge_setlink - Set bridge port attributes
 *
 * @dev: port device
 * @nlh: netlink header
 * @flags: netlink flags
 *
 * Called for SELF on rtnl_bridge_setlink to set bridge port
 * attributes.
 */
int switchdev_port_bridge_setlink(struct net_device *dev,
                                  struct nlmsghdr *nlh, u16 flags)
{
        struct nlattr *protinfo;
        struct nlattr *afspec;
        int err = 0;

        protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
                                   IFLA_PROTINFO);
        if (protinfo) {
                err = switchdev_port_br_setlink_protinfo(dev, protinfo);
                if (err)
                        return err;
        }

        afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
                                 IFLA_AF_SPEC);
        if (afspec)
                err = switchdev_port_br_afspec(dev, afspec,
                                               switchdev_port_obj_add);

        return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);

/**
 * switchdev_port_bridge_dellink - Delete bridge port attributes
 *
 * @dev: port device
 * @nlh: netlink header
 * @flags: netlink flags
 *
 * Called for SELF on rtnl_bridge_dellink to delete bridge port
 * attributes.
 */
int switchdev_port_bridge_dellink(struct net_device *dev,
                                  struct nlmsghdr *nlh, u16 flags)
{
        struct nlattr *afspec;

        afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
                                 IFLA_AF_SPEC);
        if (afspec)
                return switchdev_port_br_afspec(dev, afspec,
                                                switchdev_port_obj_del);

        return 0;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);

/**
 * switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port
 *
 * @ndm: netlink hdr
 * @tb: netlink attributes
 * @dev: port device
 * @addr: MAC address to add
 * @vid: VLAN to add
 * @nlm_flags: netlink flags
 *
 * Add FDB entry to switch device.
 */
int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                           struct net_device *dev, const unsigned char *addr,
                           u16 vid, u16 nlm_flags)
{
        struct switchdev_obj_port_fdb fdb = {
                .obj.orig_dev = dev,
                .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
                .vid = vid,
        };

        ether_addr_copy(fdb.addr, addr);
        return switchdev_port_obj_add(dev, &fdb.obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);

/**
 * switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port
 *
 * @ndm: netlink hdr
 * @tb: netlink attributes
 * @dev: port device
 * @addr: MAC address to delete
 * @vid: VLAN to delete
 *
 * Delete FDB entry from switch device.
 */
int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
                           struct net_device *dev, const unsigned char *addr,
                           u16 vid)
{
        struct switchdev_obj_port_fdb fdb = {
                .obj.orig_dev = dev,
                .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
                .vid = vid,
        };

        ether_addr_copy(fdb.addr, addr);
        return switchdev_port_obj_del(dev, &fdb.obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);

struct switchdev_fdb_dump {
        struct switchdev_obj_port_fdb fdb;
        struct net_device *dev;
        struct sk_buff *skb;
        struct netlink_callback *cb;
        int idx;
};

static int switchdev_port_fdb_dump_cb(struct switchdev_obj *obj)
{
        struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
        struct switchdev_fdb_dump *dump =
                container_of(fdb, struct switchdev_fdb_dump, fdb);
        u32 portid = NETLINK_CB(dump->cb->skb).portid;
        u32 seq = dump->cb->nlh->nlmsg_seq;
        struct nlmsghdr *nlh;
        struct ndmsg *ndm;

        if (dump->idx < dump->cb->args[0])
                goto skip;

        nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
                        sizeof(*ndm), NLM_F_MULTI);
        if (!nlh)
                return -EMSGSIZE;

        ndm = nlmsg_data(nlh);
        ndm->ndm_family = AF_BRIDGE;
        ndm->ndm_pad1 = 0;
        ndm->ndm_pad2 = 0;
        ndm->ndm_flags = NTF_SELF;
        ndm->ndm_type = 0;
        ndm->ndm_ifindex = dump->dev->ifindex;
        ndm->ndm_state = fdb->ndm_state;

        if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, fdb->addr))
                goto nla_put_failure;

        if (fdb->vid && nla_put_u16(dump->skb, NDA_VLAN, fdb->vid))
                goto nla_put_failure;

        nlmsg_end(dump->skb, nlh);

skip:
        dump->idx++;
        return 0;

nla_put_failure:
        nlmsg_cancel(dump->skb, nlh);
        return -EMSGSIZE;
}

/**
 * switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries
 *
 * @skb: netlink skb
 * @cb: netlink callback
 * @dev: port device
 * @filter_dev: filter device
 * @idx: index to start dumping at
 *
 * Dump FDB entries from switch device.
 */
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                            struct net_device *dev,
                            struct net_device *filter_dev, int idx)
{
        struct switchdev_fdb_dump dump = {
                .fdb.obj.orig_dev = dev,
                .fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
                .dev = dev,
                .skb = skb,
                .cb = cb,
                .idx = idx,
        };

        switchdev_port_obj_dump(dev, &dump.fdb.obj, switchdev_port_fdb_dump_cb);
        return dump.idx;
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);

static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
{
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        struct net_device *port_dev;
        struct list_head *iter;

        /* Recursively search down until we find a sw port dev.
         * (A sw port dev supports switchdev_port_attr_get).
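         * Returns NULL if no switch port device is found in the stack.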
         */

        if (ops && ops->switchdev_port_attr_get)
                return dev;

        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                port_dev = switchdev_get_lowest_dev(lower_dev);
                if (port_dev)
                        return port_dev;
        }

        return NULL;
}

static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
{
        struct switchdev_attr attr = {
                .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
        };
        struct switchdev_attr prev_attr;
        struct net_device *dev = NULL;
        int nhsel;

        ASSERT_RTNL();

        /* For this route, all nexthop devs must be on the same switch. */

        for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
                const struct fib_nh *nh = &fi->fib_nh[nhsel];

                if (!nh->nh_dev)
                        return NULL;

                dev = switchdev_get_lowest_dev(nh->nh_dev);
                if (!dev)
                        return NULL;

                attr.orig_dev = dev;
                if (switchdev_port_attr_get(dev, &attr))
                        return NULL;

                if (nhsel > 0 &&
                    !netdev_phys_item_id_same(&prev_attr.u.ppid, &attr.u.ppid))
                        return NULL;

                prev_attr = attr;
        }

        return dev;
}

/**
 * switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry
 *
 * @dst: route's IPv4 destination address
 * @dst_len: destination address length (prefix length)
 * @fi: route FIB info structure
 * @tos: route TOS
 * @type: route type
 * @nlflags: netlink flags passed in (NLM_F_*)
 * @tb_id: route table ID
 *
 * Add/modify switch IPv4 route entry.
 */
int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
                           u8 tos, u8 type, u32 nlflags, u32 tb_id)
{
        struct switchdev_obj_ipv4_fib ipv4_fib = {
                .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
                .dst = dst,
                .dst_len = dst_len,
                .tos = tos,
                .type = type,
                .nlflags = nlflags,
                .tb_id = tb_id,
        };
        struct net_device *dev;
        int err = 0;

        memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));

        /* Don't offload route if using custom ip rules or if
         * IPv4 FIB offloading has been disabled completely.
         */

#ifdef CONFIG_IP_MULTIPLE_TABLES
        if (fi->fib_net->ipv4.fib_has_custom_rules)
                return 0;
#endif

        if (fi->fib_net->ipv4.fib_offload_disabled)
                return 0;

        dev = switchdev_get_dev_by_nhs(fi);
        if (!dev)
                return 0;

        ipv4_fib.obj.orig_dev = dev;
        err = switchdev_port_obj_add(dev, &ipv4_fib.obj);
        if (!err)
                fi->fib_flags |= RTNH_F_OFFLOAD;

        return err == -EOPNOTSUPP ? 0 : err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);

/**
 * switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
 *
 * @dst: route's IPv4 destination address
 * @dst_len: destination address length (prefix length)
 * @fi: route FIB info structure
 * @tos: route TOS
 * @type: route type
 * @tb_id: route table ID
 *
 * Delete IPv4 route entry from switch device.
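 * Returns 0 on success, when the route was never offloaded, or when the
 * device does not support offload; other driver errors are returned as-is.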
 */
int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
                           u8 tos, u8 type, u32 tb_id)
{
        struct switchdev_obj_ipv4_fib ipv4_fib = {
                .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
                .dst = dst,
                .dst_len = dst_len,
                .tos = tos,
                .type = type,
                .nlflags = 0,
                .tb_id = tb_id,
        };
        struct net_device *dev;
        int err = 0;

        memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));

        if (!(fi->fib_flags & RTNH_F_OFFLOAD))
                return 0;

        dev = switchdev_get_dev_by_nhs(fi);
        if (!dev)
                return 0;

        ipv4_fib.obj.orig_dev = dev;
        err = switchdev_port_obj_del(dev, &ipv4_fib.obj);
        if (!err)
                fi->fib_flags &= ~RTNH_F_OFFLOAD;

        return err == -EOPNOTSUPP ? 0 : err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);

/**
 * switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
 *
 * @fi: route FIB info structure
 */
void switchdev_fib_ipv4_abort(struct fib_info *fi)
{
        /* There was a problem installing this route to the offload
         * device. For now, until we come up with more refined
         * policy handling, abruptly end IPv4 fib offloading for
         * the entire net by flushing offload device(s) of all
         * IPv4 routes, and mark IPv4 fib offloading broken from
         * this point forward.
         */

        fib_flush_external(fi->fib_net);
        fi->fib_net->ipv4.fib_offload_disabled = true;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);

static bool switchdev_port_same_parent_id(struct net_device *a,
                                          struct net_device *b)
{
        struct switchdev_attr a_attr = {
                .orig_dev = a,
                .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
                .flags = SWITCHDEV_F_NO_RECURSE,
        };
        struct switchdev_attr b_attr = {
                .orig_dev = b,
                .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
                .flags = SWITCHDEV_F_NO_RECURSE,
        };

        if (switchdev_port_attr_get(a, &a_attr) ||
            switchdev_port_attr_get(b, &b_attr))
                return false;

        return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
}

static u32 switchdev_port_fwd_mark_get(struct net_device *dev,
                                       struct net_device *group_dev)
{
        struct net_device *lower_dev;
        struct list_head *iter;

        netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
                if (lower_dev == dev)
                        continue;
                if (switchdev_port_same_parent_id(dev, lower_dev))
                        return lower_dev->offload_fwd_mark;
                return switchdev_port_fwd_mark_get(dev, lower_dev);
        }

        return dev->ifindex;
}

static void switchdev_port_fwd_mark_reset(struct net_device *group_dev,
                                          u32 old_mark, u32 *reset_mark)
{
        struct net_device *lower_dev;
        struct list_head *iter;

        netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
                if (lower_dev->offload_fwd_mark == old_mark) {
                        if (!*reset_mark)
                                *reset_mark = lower_dev->ifindex;
                        lower_dev->offload_fwd_mark = *reset_mark;
                }
                switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark);
        }
}

/**
 * switchdev_port_fwd_mark_set - Set port offload forwarding mark
 *
 * @dev: port device
 * @group_dev: containing device
 * @joining: true if dev is joining group; false if leaving group
 *
 * An ungrouped port's offload mark is just its ifindex.
 * A grouped port's (member of a bridge, for example) offload mark is the
 * ifindex of one of the ports in the group with the same parent (switch)
 * ID. Ports on the same device in the same group will have the same mark.
 *
 * Example:
 *
 *   br0          ifindex=9
 *     sw1p1      ifindex=2   mark=2
 *     sw1p2      ifindex=3   mark=2
 *     sw2p1      ifindex=4   mark=5
 *     sw2p2      ifindex=5   mark=5
 *
 * If sw2p2 leaves the bridge, we'll have:
 *
 *   br0          ifindex=9
 *     sw1p1      ifindex=2   mark=2
 *     sw1p2      ifindex=3   mark=2
 *     sw2p1      ifindex=4   mark=4
 *     sw2p2      ifindex=5   mark=5
 */
void switchdev_port_fwd_mark_set(struct net_device *dev,
                                 struct net_device *group_dev,
                                 bool joining)
{
        u32 mark = dev->ifindex;
        u32 reset_mark = 0;

        if (group_dev) {
                ASSERT_RTNL();
                if (joining)
                        mark = switchdev_port_fwd_mark_get(dev, group_dev);
                else if (dev->offload_fwd_mark == mark)
                        /* Ohoh, this port was the mark reference port,
                         * but it's leaving the group, so reset the
                         * mark for the remaining ports in the group.
                         */
                        switchdev_port_fwd_mark_reset(group_dev, mark,
                                                      &reset_mark);
        }

        dev->offload_fwd_mark = mark;
}
EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set);