/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>

/**
 * switchdev_trans_item_enqueue - Enqueue data item to transaction queue
 *
 * @trans: transaction
 * @data: pointer to data being queued
 * @destructor: data destructor
 * @tritem: transaction item being queued
 *
 * Enqueue data item to transaction queue. tritem is typically placed in
 * container pointed at by data pointer. Destructor is called on
 * transaction abort and after a successful commit phase if the caller
 * did not dequeue the item first.
 */
void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
				  void *data, void (*destructor)(void const *),
				  struct switchdev_trans_item *tritem)
{
	tritem->data = data;
	tritem->destructor = destructor;
	list_add_tail(&tritem->list, &trans->item_list);
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);

static struct switchdev_trans_item *
__switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
	struct switchdev_trans_item *tritem;

	if (list_empty(&trans->item_list))
		return NULL;
	tritem = list_first_entry(&trans->item_list,
				  struct switchdev_trans_item, list);
	list_del(&tritem->list);
	return tritem;
}

/**
 * switchdev_trans_item_dequeue - Dequeue data item from transaction queue
 *
 * @trans: transaction
 */
void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
	struct switchdev_trans_item *tritem;

	tritem = __switchdev_trans_item_dequeue(trans);
	BUG_ON(!tritem);
	return tritem->data;
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);

static void switchdev_trans_init(struct switchdev_trans *trans)
{
	INIT_LIST_HEAD(&trans->item_list);
}

static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
{
	struct switchdev_trans_item *tritem;

	while ((tritem = __switchdev_trans_item_dequeue(trans)))
		tritem->destructor(tritem->data);
}

static void switchdev_trans_items_warn_destroy(struct net_device *dev,
					       struct switchdev_trans *trans)
{
	WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
	     dev->name);
	switchdev_trans_items_destroy(trans);
}

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[0];
};
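/* Example (illustrative sketch only): a driver can use the transaction
 * queue so that a prepare-phase allocation survives until the commit
 * phase. All "foo_*" names below are hypothetical, not part of this API:
 *
 *	struct foo_entry {
 *		struct switchdev_trans_item tritem;
 *		u16 vid;
 *	};
 *
 *	static void foo_entry_destructor(const void *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	static struct foo_entry *foo_entry_alloc(struct switchdev_trans *trans,
 *						 u16 vid)
 *	{
 *		struct foo_entry *entry;
 *
 *		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 *		if (!entry)
 *			return NULL;
 *		entry->vid = vid;
 *		switchdev_trans_item_enqueue(trans, entry,
 *					     foo_entry_destructor,
 *					     &entry->tritem);
 *		return entry;
 *	}
 *
 * In the commit phase the driver retrieves the same item with
 * switchdev_trans_item_dequeue() instead of allocating again.
 */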
static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 * switchdev_deferred_process - Process ops in deferred queue
 *
 * Called to flush the ops currently queued in deferred ops queue.
 * rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		dev_put(dfitem->dev);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	dev_hold(dev);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

/**
 * switchdev_port_attr_get - Get port attribute
 *
 * @dev: port device
 * @attr: attribute to get
 */
int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	struct switchdev_attr first = {
		.id = SWITCHDEV_ATTR_ID_UNDEFINED
	};
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_get)
		return ops->switchdev_port_attr_get(dev, attr);

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		return err;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to get attr on
	 * each port. Return -ENODATA if attr values don't
	 * compare across ports.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_attr_get(lower_dev, attr);
		if (err)
			break;
		if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
			first = *attr;
		else if (memcmp(&first, attr, sizeof(*attr)))
			return -ENODATA;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
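/* Example (hypothetical driver sketch): a port driver typically answers
 * at least SWITCHDEV_ATTR_ID_PORT_PARENT_ID, so stacked devices and the
 * FIB offload code below can group ports by switch. "struct foo_port"
 * and its switch_id member are made-up names:
 *
 *	static int foo_port_attr_get(struct net_device *dev,
 *				     struct switchdev_attr *attr)
 *	{
 *		struct foo_port *port = netdev_priv(dev);
 *
 *		switch (attr->id) {
 *		case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
 *			attr->u.ppid.id_len = sizeof(port->switch_id);
 *			memcpy(attr->u.ppid.id, &port->switch_id,
 *			       attr->u.ppid.id_len);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */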
static int __switchdev_port_attr_set(struct net_device *dev,
				     const struct switchdev_attr *attr,
				     struct switchdev_trans *trans)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_set) {
		err = ops->switchdev_port_attr_set(dev, attr, trans);
		goto done;
	}

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		goto done;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to set attr on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_attr_set(lower_dev, attr, trans);
		if (err)
			break;
	}

done:
	if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
		err = 0;

	return err;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr)
{
	struct switchdev_trans trans;
	int err;

	switchdev_trans_init(&trans);

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support. The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	trans.ph_prepare = true;
	err = __switchdev_port_attr_set(dev, attr, &trans);
	if (err) {
		/* Prepare phase failed: abort the transaction. Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		if (err != -EOPNOTSUPP)
			switchdev_trans_items_destroy(&trans);

		return err;
	}

	/* Phase II: commit attr set. This cannot fail as a fault
	 * of driver/device. If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = __switchdev_port_attr_set(dev, attr, &trans);
	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
	     dev->name, attr->id);
	switchdev_trans_items_warn_destroy(dev, &trans);

	return err;
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}
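/* Example (illustrative sketch): both phases driven by
 * switchdev_port_attr_set_now() land in the same driver op, which tells
 * them apart with switchdev_trans_ph_prepare(). In the prepare phase the
 * driver only validates and reserves; the commit phase applies the change.
 * The "foo_*" helpers are hypothetical:
 *
 *	static int foo_port_attr_set(struct net_device *dev,
 *				     const struct switchdev_attr *attr,
 *				     struct switchdev_trans *trans)
 *	{
 *		if (switchdev_trans_ph_prepare(trans))
 *			return foo_port_attr_prepare(dev, attr);
 *		return foo_port_attr_commit(dev, attr);
 *	}
 */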
/**
 * switchdev_port_attr_set - Set port attribute
 *
 * @dev: port device
 * @attr: attribute to set
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 *
 * Unless the SWITCHDEV_F_DEFER flag is set, rtnl_lock must be held
 * and the caller must not be in an atomic section.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
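/* Example (illustrative): a caller that may run in atomic context, such
 * as the bridge updating port STP state, sets SWITCHDEV_F_DEFER so the
 * work above happens later in process context under rtnl_lock:
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(dev, &attr);
 */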
static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		return sizeof(struct switchdev_obj_ipv4_fib);
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		return sizeof(struct switchdev_obj_port_fdb);
	default:
		BUG();
	}
	return 0;
}

static int __switchdev_port_obj_add(struct net_device *dev,
				    const struct switchdev_obj *obj,
				    struct switchdev_trans *trans)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_add)
		return ops->switchdev_port_obj_add(dev, obj, trans);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to add object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_obj_add(lower_dev, obj, trans);
		if (err)
			break;
	}

	return err;
}

static int switchdev_port_obj_add_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	struct switchdev_trans trans;
	int err;

	ASSERT_RTNL();

	switchdev_trans_init(&trans);

	/* Phase I: prepare for obj add. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support. The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the obj.
	 */

	trans.ph_prepare = true;
	err = __switchdev_port_obj_add(dev, obj, &trans);
	if (err) {
		/* Prepare phase failed: abort the transaction. Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		if (err != -EOPNOTSUPP)
			switchdev_trans_items_destroy(&trans);

		return err;
	}

	/* Phase II: commit obj add. This cannot fail as a fault
	 * of driver/device. If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = __switchdev_port_obj_add(dev, obj, &trans);
	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
	switchdev_trans_items_warn_destroy(dev, &trans);

	return err;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_add_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 * switchdev_port_obj_add - Add port object
 *
 * @dev: port device
 * @obj: object to add
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 *
 * Unless the SWITCHDEV_F_DEFER flag is set, rtnl_lock must be held
 * and the caller must not be in an atomic section.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_add_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
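/* Example (illustrative): adding VLANs 10-19 to a port as one range
 * object; this mirrors what switchdev_port_br_afspec() below builds from
 * IFLA_BRIDGE_VLAN_INFO attributes:
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid_begin = 10,
 *		.vid_end = 19,
 *	};
 *
 *	err = switchdev_port_obj_add(dev, &vlan.obj);
 */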
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_del)
		return ops->switchdev_port_obj_del(dev, obj);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to delete object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_obj_del_now(lower_dev, obj);
		if (err)
			break;
	}

	return err;
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 * switchdev_port_obj_del - Delete port object
 *
 * @dev: port device
 * @obj: object to delete
 *
 * Unless the SWITCHDEV_F_DEFER flag is set, rtnl_lock must be held
 * and the caller must not be in an atomic section.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

/**
 * switchdev_port_obj_dump - Dump port objects
 *
 * @dev: port device
 * @obj: object to dump
 * @cb: function to call with a filled object
 *
 * rtnl_lock must be held.
 */
int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
			    switchdev_obj_dump_cb_t *cb)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	ASSERT_RTNL();

	if (ops && ops->switchdev_port_obj_dump)
		return ops->switchdev_port_obj_dump(dev, obj, cb);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to dump objects on
	 * first port at bottom of stack.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_obj_dump(lower_dev, obj, cb);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);

static DEFINE_MUTEX(switchdev_mutex);
static RAW_NOTIFIER_HEAD(switchdev_notif_chain);

/**
 * register_switchdev_notifier - Register notifier
 * @nb: notifier_block
 *
 * Register switch device notifier. This should be used by code
 * which needs to monitor events happening to a particular device.
 * Return values are same as for atomic_notifier_chain_register().
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	mutex_lock(&switchdev_mutex);
	err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 * unregister_switchdev_notifier - Unregister notifier
 * @nb: notifier_block
 *
 * Unregister switch device notifier.
 * Return values are same as for atomic_notifier_chain_unregister().
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	mutex_lock(&switchdev_mutex);
	err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
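/* Example (hypothetical sketch): code interested in events a driver
 * raises via call_switchdev_notifiers() below, e.g. hardware-learned
 * FDB entries, registers a block like this ("foo_*" names invented):
 *
 *	static int foo_switchdev_event(struct notifier_block *nb,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct switchdev_notifier_fdb_info *fdb_info = ptr;
 *
 *		switch (event) {
 *		case SWITCHDEV_FDB_ADD:
 *			netdev_dbg(fdb_info->info.dev, "fdb add %pM vid %u\n",
 *				   fdb_info->addr, fdb_info->vid);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_switchdev_nb = {
 *		.notifier_call = foo_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&foo_switchdev_nb);
 */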
/**
 * call_switchdev_notifiers - Call notifiers
 * @val: value passed unmodified to notifier function
 * @dev: port device
 * @info: notifier information data
 *
 * Call all network notifier blocks. This should be called by a driver
 * when it needs to propagate a hardware event.
 * Return values are same as for atomic_notifier_call_chain().
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info)
{
	int err;

	info->dev = dev;
	mutex_lock(&switchdev_mutex);
	err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);

struct switchdev_vlan_dump {
	struct switchdev_obj_port_vlan vlan;
	struct sk_buff *skb;
	u32 filter_mask;
	u16 flags;
	u16 begin;
	u16 end;
};

static int switchdev_port_vlan_dump_put(struct switchdev_vlan_dump *dump)
{
	struct bridge_vlan_info vinfo;

	vinfo.flags = dump->flags;

	if (dump->begin == 0 && dump->end == 0) {
		return 0;
	} else if (dump->begin == dump->end) {
		vinfo.vid = dump->begin;
		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
	} else {
		vinfo.vid = dump->begin;
		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
		vinfo.vid = dump->end;
		vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
	}

	return 0;
}

static int switchdev_port_vlan_dump_cb(struct switchdev_obj *obj)
{
	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct switchdev_vlan_dump *dump =
		container_of(vlan, struct switchdev_vlan_dump, vlan);
	int err = 0;

	if (vlan->vid_begin > vlan->vid_end)
		return -EINVAL;

	if (dump->filter_mask & RTEXT_FILTER_BRVLAN) {
		dump->flags = vlan->flags;
		for (dump->begin = dump->end = vlan->vid_begin;
		     dump->begin <= vlan->vid_end;
		     dump->begin++, dump->end++) {
			err = switchdev_port_vlan_dump_put(dump);
			if (err)
				return err;
		}
	} else if (dump->filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) {
		if (dump->begin > vlan->vid_begin &&
		    dump->begin >= vlan->vid_end) {
			if ((dump->begin - 1) == vlan->vid_end &&
			    dump->flags == vlan->flags) {
				/* prepend */
				dump->begin = vlan->vid_begin;
			} else {
				err = switchdev_port_vlan_dump_put(dump);
				dump->flags = vlan->flags;
				dump->begin = vlan->vid_begin;
				dump->end = vlan->vid_end;
			}
		} else if (dump->end <= vlan->vid_begin &&
			   dump->end < vlan->vid_end) {
			if ((dump->end + 1) == vlan->vid_begin &&
			    dump->flags == vlan->flags) {
				/* append */
				dump->end = vlan->vid_end;
			} else {
				err = switchdev_port_vlan_dump_put(dump);
				dump->flags = vlan->flags;
				dump->begin = vlan->vid_begin;
				dump->end = vlan->vid_end;
			}
		} else {
			err = -EINVAL;
		}
	}

	return err;
}
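/* Example (hypothetical driver sketch): the driver half of an object
 * dump fills the caller-provided object once per hardware entry and
 * invokes cb; switchdev_port_vlan_dump_cb() above is one such cb.
 * "foo_port" and its vlan_bitmap are made-up names:
 *
 *	static int foo_port_vlan_dump(struct net_device *dev,
 *				      struct switchdev_obj *obj,
 *				      switchdev_obj_dump_cb_t *cb)
 *	{
 *		struct switchdev_obj_port_vlan *vlan =
 *			SWITCHDEV_OBJ_PORT_VLAN(obj);
 *		struct foo_port *port = netdev_priv(dev);
 *		int vid;
 *		int err;
 *
 *		for_each_set_bit(vid, port->vlan_bitmap, VLAN_N_VID) {
 *			vlan->flags = 0;
 *			vlan->vid_begin = vid;
 *			vlan->vid_end = vid;
 *			err = cb(obj);
 *			if (err)
 *				return err;
 *		}
 *		return 0;
 *	}
 */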
static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev,
				    u32 filter_mask)
{
	struct switchdev_vlan_dump dump = {
		.vlan.obj.orig_dev = dev,
		.vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.skb = skb,
		.filter_mask = filter_mask,
	};
	int err = 0;

	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		err = switchdev_port_obj_dump(dev, &dump.vlan.obj,
					      switchdev_port_vlan_dump_cb);
		if (err)
			goto err_out;
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			/* last one */
			err = switchdev_port_vlan_dump_put(&dump);
	}

err_out:
	return err == -EOPNOTSUPP ? 0 : err;
}

/**
 * switchdev_port_bridge_getlink - Get bridge port attributes
 *
 * @dev: port device
 *
 * Called for SELF on rtnl_bridge_getlink to get bridge port
 * attributes.
 */
int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				  struct net_device *dev, u32 filter_mask,
				  int nlflags)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
	};
	u16 mode = BRIDGE_MODE_UNDEF;
	u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
				       attr.u.brport_flags, mask, nlflags,
				       filter_mask, switchdev_port_vlan_fill);
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);

static int switchdev_port_br_setflag(struct net_device *dev,
				     struct nlattr *nlattr,
				     unsigned long brport_flag)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
	};
	u8 flag = nla_get_u8(nlattr);
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err)
		return err;

	if (flag)
		attr.u.brport_flags |= brport_flag;
	else
		attr.u.brport_flags &= ~brport_flag;

	return switchdev_port_attr_set(dev, &attr);
}

static const struct nla_policy
switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]		= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]		= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]		= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]		= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]		= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING_SYNC]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD]	= { .type = NLA_U8 },
};

static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
					      struct nlattr *protinfo)
{
	struct nlattr *attr;
	int rem;
	int err;

	err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
				  switchdev_port_bridge_policy);
	if (err)
		return err;

	nla_for_each_nested(attr, protinfo, rem) {
		switch (nla_type(attr)) {
		case IFLA_BRPORT_LEARNING:
			err = switchdev_port_br_setflag(dev, attr,
							BR_LEARNING);
			break;
		case IFLA_BRPORT_LEARNING_SYNC:
			err = switchdev_port_br_setflag(dev, attr,
							BR_LEARNING_SYNC);
			break;
		case IFLA_BRPORT_UNICAST_FLOOD:
			err = switchdev_port_br_setflag(dev, attr, BR_FLOOD);
			break;
		default:
			err = -EOPNOTSUPP;
			break;
		}
		if (err)
			return err;
	}

	return 0;
}
static int switchdev_port_br_afspec(struct net_device *dev,
				    struct nlattr *afspec,
				    int (*f)(struct net_device *dev,
					     const struct switchdev_obj *obj))
{
	struct nlattr *attr;
	struct bridge_vlan_info *vinfo;
	struct switchdev_obj_port_vlan vlan = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
	};
	int rem;
	int err;

	nla_for_each_nested(attr, afspec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
			continue;
		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
			return -EINVAL;
		vinfo = nla_data(attr);
		if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
			return -EINVAL;
		vlan.flags = vinfo->flags;
		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
			if (vlan.vid_begin)
				return -EINVAL;
			vlan.vid_begin = vinfo->vid;
			/* don't allow range of pvids */
			if (vlan.flags & BRIDGE_VLAN_INFO_PVID)
				return -EINVAL;
		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
			if (!vlan.vid_begin)
				return -EINVAL;
			vlan.vid_end = vinfo->vid;
			if (vlan.vid_end <= vlan.vid_begin)
				return -EINVAL;
			err = f(dev, &vlan.obj);
			if (err)
				return err;
			vlan.vid_begin = 0;
		} else {
			if (vlan.vid_begin)
				return -EINVAL;
			vlan.vid_begin = vinfo->vid;
			vlan.vid_end = vinfo->vid;
			err = f(dev, &vlan.obj);
			if (err)
				return err;
			vlan.vid_begin = 0;
		}
	}

	return 0;
}

/**
 * switchdev_port_bridge_setlink - Set bridge port attributes
 *
 * @dev: port device
 * @nlh: netlink header
 * @flags: netlink flags
 *
 * Called for SELF on rtnl_bridge_setlink to set bridge port
 * attributes.
 */
int switchdev_port_bridge_setlink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *protinfo;
	struct nlattr *afspec;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				   IFLA_PROTINFO);
	if (protinfo) {
		err = switchdev_port_br_setlink_protinfo(dev, protinfo);
		if (err)
			return err;
	}

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				 IFLA_AF_SPEC);
	if (afspec)
		err = switchdev_port_br_afspec(dev, afspec,
					       switchdev_port_obj_add);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);

/**
 * switchdev_port_bridge_dellink - Delete bridge port attributes
 *
 * @dev: port device
 * @nlh: netlink header
 * @flags: netlink flags
 *
 * Called for SELF on rtnl_bridge_dellink to delete bridge port
 * attributes.
 */
int switchdev_port_bridge_dellink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *afspec;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				 IFLA_AF_SPEC);
	if (afspec)
		return switchdev_port_br_afspec(dev, afspec,
						switchdev_port_obj_del);

	return 0;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);

/**
 * switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port
 *
 * @ndm: netlink hdr
 * @tb: netlink attributes
 * @dev: port device
 * @addr: MAC address to add
 * @vid: VLAN to add
 * @nlm_flags: netlink flags passed in (NLM_F_*)
 *
 * Add FDB entry to switch device.
 */
int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev, const unsigned char *addr,
			   u16 vid, u16 nlm_flags)
{
	struct switchdev_obj_port_fdb fdb = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
		.vid = vid,
	};

	ether_addr_copy(fdb.addr, addr);
	return switchdev_port_obj_add(dev, &fdb.obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);
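/* Example (illustrative): drivers normally don't call the FDB helpers
 * directly; they wire them into their netdev ops so RTM_NEWNEIGH /
 * RTM_DELNEIGH / dump requests reach switchdev ("foo" name invented,
 * non-FDB ops omitted):
 *
 *	static const struct net_device_ops foo_port_netdev_ops = {
 *		.ndo_fdb_add	= switchdev_port_fdb_add,
 *		.ndo_fdb_del	= switchdev_port_fdb_del,
 *		.ndo_fdb_dump	= switchdev_port_fdb_dump,
 *	};
 */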
/**
 * switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port
 *
 * @ndm: netlink hdr
 * @tb: netlink attributes
 * @dev: port device
 * @addr: MAC address to delete
 * @vid: VLAN to delete
 *
 * Delete FDB entry from switch device.
 */
int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev, const unsigned char *addr,
			   u16 vid)
{
	struct switchdev_obj_port_fdb fdb = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
		.vid = vid,
	};

	ether_addr_copy(fdb.addr, addr);
	return switchdev_port_obj_del(dev, &fdb.obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);

struct switchdev_fdb_dump {
	struct switchdev_obj_port_fdb fdb;
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int switchdev_port_fdb_dump_cb(struct switchdev_obj *obj)
{
	struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
	struct switchdev_fdb_dump *dump =
		container_of(fdb, struct switchdev_fdb_dump, fdb);
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[0])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state = fdb->ndm_state;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, fdb->addr))
		goto nla_put_failure;

	if (fdb->vid && nla_put_u16(dump->skb, NDA_VLAN, fdb->vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

/**
 * switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries
 *
 * @skb: netlink skb
 * @cb: netlink callback
 * @dev: port device
 * @filter_dev: filter device
 * @idx: index to start dumping at
 *
 * Dump FDB entries from switch device.
 */
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev,
			    struct net_device *filter_dev, int idx)
{
	struct switchdev_fdb_dump dump = {
		.fdb.obj.orig_dev = dev,
		.fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = idx,
	};

	switchdev_port_obj_dump(dev, &dump.fdb.obj, switchdev_port_fdb_dump_cb);
	return dump.idx;
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct net_device *port_dev;
	struct list_head *iter;

	/* Recursively search down until we find a sw port dev.
	 * (A sw port dev supports switchdev_port_attr_get).
	 */

	if (ops && ops->switchdev_port_attr_get)
		return dev;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		port_dev = switchdev_get_lowest_dev(lower_dev);
		if (port_dev)
			return port_dev;
	}

	return NULL;
}

static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
	};
	struct switchdev_attr prev_attr;
	struct net_device *dev = NULL;
	int nhsel;

	ASSERT_RTNL();

	/* For this route, all nexthop devs must be on the same switch. */

	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
		const struct fib_nh *nh = &fi->fib_nh[nhsel];

		if (!nh->nh_dev)
			return NULL;

		dev = switchdev_get_lowest_dev(nh->nh_dev);
		if (!dev)
			return NULL;

		attr.orig_dev = dev;
		if (switchdev_port_attr_get(dev, &attr))
			return NULL;

		if (nhsel > 0 &&
		    !netdev_phys_item_id_same(&prev_attr.u.ppid, &attr.u.ppid))
			return NULL;

		prev_attr = attr;
	}

	return dev;
}

/**
 * switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry
 *
 * @dst: route's IPv4 destination address
 * @dst_len: destination address length (prefix length)
 * @fi: route FIB info structure
 * @tos: route TOS
 * @type: route type
 * @nlflags: netlink flags passed in (NLM_F_*)
 * @tb_id: route table ID
 *
 * Add/modify switch IPv4 route entry.
 */
int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 nlflags, u32 tb_id)
{
	struct switchdev_obj_ipv4_fib ipv4_fib = {
		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
		.dst = dst,
		.dst_len = dst_len,
		.tos = tos,
		.type = type,
		.nlflags = nlflags,
		.tb_id = tb_id,
	};
	struct net_device *dev;
	int err = 0;

	memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));

	/* Don't offload route if using custom ip rules or if
	 * IPv4 FIB offloading has been disabled completely.
	 */

#ifdef CONFIG_IP_MULTIPLE_TABLES
	if (fi->fib_net->ipv4.fib_has_custom_rules)
		return 0;
#endif

	if (fi->fib_net->ipv4.fib_offload_disabled)
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;

	ipv4_fib.obj.orig_dev = dev;
	err = switchdev_port_obj_add(dev, &ipv4_fib.obj);
	if (!err)
		fi->fib_flags |= RTNH_F_OFFLOAD;

	return err == -EOPNOTSUPP ? 0 : err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);
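/* Example (illustrative, simplified): the IPv4 FIB code is the expected
 * caller; fib_table_insert() invokes this roughly as follows, with key
 * and plen describing the prefix being inserted, and falls back to
 * switchdev_fib_ipv4_abort() on failure:
 *
 *	err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type,
 *				     cfg->fc_nlflags, tb->tb_id);
 *	if (err)
 *		switchdev_fib_ipv4_abort(fi);
 */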
/**
 * switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
 *
 * @dst: route's IPv4 destination address
 * @dst_len: destination address length (prefix length)
 * @fi: route FIB info structure
 * @tos: route TOS
 * @type: route type
 * @tb_id: route table ID
 *
 * Delete IPv4 route entry from switch device.
 */
int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 tb_id)
{
	struct switchdev_obj_ipv4_fib ipv4_fib = {
		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
		.dst = dst,
		.dst_len = dst_len,
		.tos = tos,
		.type = type,
		.nlflags = 0,
		.tb_id = tb_id,
	};
	struct net_device *dev;
	int err = 0;

	memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));

	if (!(fi->fib_flags & RTNH_F_OFFLOAD))
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;

	ipv4_fib.obj.orig_dev = dev;
	err = switchdev_port_obj_del(dev, &ipv4_fib.obj);
	if (!err)
		fi->fib_flags &= ~RTNH_F_OFFLOAD;

	return err == -EOPNOTSUPP ? 0 : err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);

/**
 * switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
 *
 * @fi: route FIB info structure
 */
void switchdev_fib_ipv4_abort(struct fib_info *fi)
{
	/* There was a problem installing this route to the offload
	 * device. For now, until we come up with more refined
	 * policy handling, abruptly end IPv4 fib offloading for
	 * the entire net by flushing offload device(s) of all
	 * IPv4 routes, and mark IPv4 fib offloading broken from
	 * this point forward.
	 */

	fib_flush_external(fi->fib_net);
	fi->fib_net->ipv4.fib_offload_disabled = true;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);

static bool switchdev_port_same_parent_id(struct net_device *a,
					  struct net_device *b)
{
	struct switchdev_attr a_attr = {
		.orig_dev = a,
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,
	};
	struct switchdev_attr b_attr = {
		.orig_dev = b,
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,
	};

	if (switchdev_port_attr_get(a, &a_attr) ||
	    switchdev_port_attr_get(b, &b_attr))
		return false;

	return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
}

static u32 switchdev_port_fwd_mark_get(struct net_device *dev,
				       struct net_device *group_dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
		if (lower_dev == dev)
			continue;
		if (switchdev_port_same_parent_id(dev, lower_dev))
			return lower_dev->offload_fwd_mark;
		return switchdev_port_fwd_mark_get(dev, lower_dev);
	}

	return dev->ifindex;
}

static void switchdev_port_fwd_mark_reset(struct net_device *group_dev,
					  u32 old_mark, u32 *reset_mark)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
		if (lower_dev->offload_fwd_mark == old_mark) {
			if (!*reset_mark)
				*reset_mark = lower_dev->ifindex;
			lower_dev->offload_fwd_mark = *reset_mark;
		}
		switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark);
	}
}
/**
 * switchdev_port_fwd_mark_set - Set port offload forwarding mark
 *
 * @dev: port device
 * @group_dev: containing device
 * @joining: true if dev is joining group; false if leaving group
 *
 * An ungrouped port's offload mark is just its ifindex. A grouped
 * port's (member of a bridge, for example) offload mark is the ifindex
 * of one of the ports in the group with the same parent (switch) ID.
 * Ports on the same device in the same group will have the same mark.
 *
 * Example:
 *
 *	br0		ifindex=9
 *	  sw1p1		ifindex=2	mark=2
 *	  sw1p2		ifindex=3	mark=2
 *	  sw2p1		ifindex=4	mark=5
 *	  sw2p2		ifindex=5	mark=5
 *
 * If sw2p2 leaves the bridge, we'll have:
 *
 *	br0		ifindex=9
 *	  sw1p1		ifindex=2	mark=2
 *	  sw1p2		ifindex=3	mark=2
 *	  sw2p1		ifindex=4	mark=4
 *	sw2p2		ifindex=5	mark=5
 */
void switchdev_port_fwd_mark_set(struct net_device *dev,
				 struct net_device *group_dev,
				 bool joining)
{
	u32 mark = dev->ifindex;
	u32 reset_mark = 0;

	if (group_dev) {
		ASSERT_RTNL();
		if (joining)
			mark = switchdev_port_fwd_mark_get(dev, group_dev);
		else if (dev->offload_fwd_mark == mark)
			/* This port was the mark reference port, but it's
			 * leaving the group, so reset the mark for the
			 * remaining ports in the group.
			 */
			switchdev_port_fwd_mark_reset(group_dev, mark,
						      &reset_mark);
	}

	dev->offload_fwd_mark = mark;
}
EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set);
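/* Example (illustrative sketch): a driver calls this from its
 * NETDEV_CHANGEUPPER handler when a port joins or leaves a bridge, and
 * then tags packets it has already forwarded in hardware so the bridge
 * doesn't forward them again ("port" is a hypothetical driver private):
 *
 *	switchdev_port_fwd_mark_set(port->dev, upper_dev, joining);
 *
 * and in the driver's rx path:
 *
 *	skb->offload_fwd_mark = port->dev->offload_fwd_mark;
 */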