// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"

/* Return the fastest (smallest) ageing time configured on any port of @ds,
 * using @ageing_time as the upper bound. Ports with a zero (unset)
 * ageing_time are skipped.
 */
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;

	return ageing_time;
}

/* Handle a DSA_NOTIFIER_AGEING_TIME event: validate the requested value
 * against the driver's advertised min/max (a zero bound means "no limit"),
 * then program the fastest ageing time seen across all ports.
 * Returns -ERANGE for out-of-range requests, the driver's result from
 * set_ageing_time, or 0 when the driver does not implement the op.
 */
static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}

/* Decide whether @dp should have its MTU changed for this notifier:
 * the targeted port itself always matches; CPU and DSA (cascade) ports
 * also match, unless the notifier was targeted at a single switch.
 */
static bool dsa_port_mtu_match(struct dsa_port *dp,
			       struct dsa_notifier_mtu_info *info)
{
	if (dp->ds->index == info->sw_index && dp->index == info->port)
		return true;

	/* Do not propagate to other switches in the tree if the notifier was
	 * targeted for a single switch.
	 */
	if (info->targeted_match)
		return false;

	if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
		return true;

	return false;
}

/* Handle a DSA_NOTIFIER_MTU event: apply the MTU change to every matching
 * port of @ds, stopping at (and returning) the first driver error.
 */
static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	struct dsa_port *dp;
	int ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_mtu_match(dp, info)) {
			ret = ds->ops->port_change_mtu(ds, dp->index,
						       info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle a DSA_NOTIFIER_BRIDGE_JOIN event. The switch that owns the
 * joining port uses port_bridge_join (mandatory for it); other switches
 * in the fabric are informed via crosschip_bridge_join when available.
 * Finally the tag_8021q code gets a chance to react.
 */
static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index) {
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->port, info->br);
		if (err)
			return err;
	}

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
						     info->sw_index,
						     info->port, info->br);
		if (err)
			return err;
	}

	return dsa_tag_8021q_bridge_join(ds, info);
}

/* Handle a DSA_NOTIFIER_BRIDGE_LEAVE event: the counterpart of
 * dsa_switch_bridge_join(). Additionally restores the port's standalone
 * VLAN filtering state if it no longer matches the bridge it left,
 * honoring switches whose vlan_filtering setting is global rather than
 * per port.
 */
static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	bool vlan_filtering;
	struct dsa_port *dp;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index &&
	    ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->port, info->br);

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->tree_index,
						info->sw_index, info->port,
						info->br);

	/* Standalone operation may need vlan_filtering on (for 8021q uppers)
	 * while the bridge had it off, or vice versa.
	 */
	if (ds->needs_standalone_vlan_filtering &&
	    !br_vlan_enabled(info->br)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(info->br)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon slave ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port leaves the last
	 * VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(dp, ds) {
			struct net_device *br = dsa_port_bridge_dev_get(dp);

			if (br && br_vlan_enabled(br)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (change_vlan_filtering) {
		err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
					      vlan_filtering, &extack);
		if (extack._msg)
			dev_err(ds->dev, "port %d: %s\n", info->port,
				extack._msg);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return dsa_tag_8021q_bridge_leave(ds, info);
}

/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 * DSA links) that sit between the targeted port on which the notifier was
 * emitted and its dedicated CPU port.
179 */ 180 static bool dsa_port_host_address_match(struct dsa_port *dp, 181 int info_sw_index, int info_port) 182 { 183 struct dsa_port *targeted_dp, *cpu_dp; 184 struct dsa_switch *targeted_ds; 185 186 targeted_ds = dsa_switch_find(dp->ds->dst->index, info_sw_index); 187 targeted_dp = dsa_to_port(targeted_ds, info_port); 188 cpu_dp = targeted_dp->cpu_dp; 189 190 if (dsa_switch_is_upstream_of(dp->ds, targeted_ds)) 191 return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index, 192 cpu_dp->index); 193 194 return false; 195 } 196 197 static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list, 198 const unsigned char *addr, 199 u16 vid) 200 { 201 struct dsa_mac_addr *a; 202 203 list_for_each_entry(a, addr_list, list) 204 if (ether_addr_equal(a->addr, addr) && a->vid == vid) 205 return a; 206 207 return NULL; 208 } 209 210 static int dsa_port_do_mdb_add(struct dsa_port *dp, 211 const struct switchdev_obj_port_mdb *mdb) 212 { 213 struct dsa_switch *ds = dp->ds; 214 struct dsa_mac_addr *a; 215 int port = dp->index; 216 int err = 0; 217 218 /* No need to bother with refcounting for user ports */ 219 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) 220 return ds->ops->port_mdb_add(ds, port, mdb); 221 222 mutex_lock(&dp->addr_lists_lock); 223 224 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid); 225 if (a) { 226 refcount_inc(&a->refcount); 227 goto out; 228 } 229 230 a = kzalloc(sizeof(*a), GFP_KERNEL); 231 if (!a) { 232 err = -ENOMEM; 233 goto out; 234 } 235 236 err = ds->ops->port_mdb_add(ds, port, mdb); 237 if (err) { 238 kfree(a); 239 goto out; 240 } 241 242 ether_addr_copy(a->addr, mdb->addr); 243 a->vid = mdb->vid; 244 refcount_set(&a->refcount, 1); 245 list_add_tail(&a->list, &dp->mdbs); 246 247 out: 248 mutex_unlock(&dp->addr_lists_lock); 249 250 return err; 251 } 252 253 static int dsa_port_do_mdb_del(struct dsa_port *dp, 254 const struct switchdev_obj_port_mdb *mdb) 255 { 256 struct dsa_switch *ds = dp->ds; 257 struct dsa_mac_addr *a; 258 
int port = dp->index; 259 int err = 0; 260 261 /* No need to bother with refcounting for user ports */ 262 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) 263 return ds->ops->port_mdb_del(ds, port, mdb); 264 265 mutex_lock(&dp->addr_lists_lock); 266 267 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid); 268 if (!a) { 269 err = -ENOENT; 270 goto out; 271 } 272 273 if (!refcount_dec_and_test(&a->refcount)) 274 goto out; 275 276 err = ds->ops->port_mdb_del(ds, port, mdb); 277 if (err) { 278 refcount_set(&a->refcount, 1); 279 goto out; 280 } 281 282 list_del(&a->list); 283 kfree(a); 284 285 out: 286 mutex_unlock(&dp->addr_lists_lock); 287 288 return err; 289 } 290 291 static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr, 292 u16 vid) 293 { 294 struct dsa_switch *ds = dp->ds; 295 struct dsa_mac_addr *a; 296 int port = dp->index; 297 int err = 0; 298 299 /* No need to bother with refcounting for user ports */ 300 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) 301 return ds->ops->port_fdb_add(ds, port, addr, vid); 302 303 mutex_lock(&dp->addr_lists_lock); 304 305 a = dsa_mac_addr_find(&dp->fdbs, addr, vid); 306 if (a) { 307 refcount_inc(&a->refcount); 308 goto out; 309 } 310 311 a = kzalloc(sizeof(*a), GFP_KERNEL); 312 if (!a) { 313 err = -ENOMEM; 314 goto out; 315 } 316 317 err = ds->ops->port_fdb_add(ds, port, addr, vid); 318 if (err) { 319 kfree(a); 320 goto out; 321 } 322 323 ether_addr_copy(a->addr, addr); 324 a->vid = vid; 325 refcount_set(&a->refcount, 1); 326 list_add_tail(&a->list, &dp->fdbs); 327 328 out: 329 mutex_unlock(&dp->addr_lists_lock); 330 331 return err; 332 } 333 334 static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr, 335 u16 vid) 336 { 337 struct dsa_switch *ds = dp->ds; 338 struct dsa_mac_addr *a; 339 int port = dp->index; 340 int err = 0; 341 342 /* No need to bother with refcounting for user ports */ 343 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) 344 return 
ds->ops->port_fdb_del(ds, port, addr, vid); 345 346 mutex_lock(&dp->addr_lists_lock); 347 348 a = dsa_mac_addr_find(&dp->fdbs, addr, vid); 349 if (!a) { 350 err = -ENOENT; 351 goto out; 352 } 353 354 if (!refcount_dec_and_test(&a->refcount)) 355 goto out; 356 357 err = ds->ops->port_fdb_del(ds, port, addr, vid); 358 if (err) { 359 refcount_set(&a->refcount, 1); 360 goto out; 361 } 362 363 list_del(&a->list); 364 kfree(a); 365 366 out: 367 mutex_unlock(&dp->addr_lists_lock); 368 369 return err; 370 } 371 372 static int dsa_switch_host_fdb_add(struct dsa_switch *ds, 373 struct dsa_notifier_fdb_info *info) 374 { 375 struct dsa_port *dp; 376 int err = 0; 377 378 if (!ds->ops->port_fdb_add) 379 return -EOPNOTSUPP; 380 381 dsa_switch_for_each_port(dp, ds) { 382 if (dsa_port_host_address_match(dp, info->sw_index, 383 info->port)) { 384 err = dsa_port_do_fdb_add(dp, info->addr, info->vid); 385 if (err) 386 break; 387 } 388 } 389 390 return err; 391 } 392 393 static int dsa_switch_host_fdb_del(struct dsa_switch *ds, 394 struct dsa_notifier_fdb_info *info) 395 { 396 struct dsa_port *dp; 397 int err = 0; 398 399 if (!ds->ops->port_fdb_del) 400 return -EOPNOTSUPP; 401 402 dsa_switch_for_each_port(dp, ds) { 403 if (dsa_port_host_address_match(dp, info->sw_index, 404 info->port)) { 405 err = dsa_port_do_fdb_del(dp, info->addr, info->vid); 406 if (err) 407 break; 408 } 409 } 410 411 return err; 412 } 413 414 static int dsa_switch_fdb_add(struct dsa_switch *ds, 415 struct dsa_notifier_fdb_info *info) 416 { 417 int port = dsa_towards_port(ds, info->sw_index, info->port); 418 struct dsa_port *dp = dsa_to_port(ds, port); 419 420 if (!ds->ops->port_fdb_add) 421 return -EOPNOTSUPP; 422 423 return dsa_port_do_fdb_add(dp, info->addr, info->vid); 424 } 425 426 static int dsa_switch_fdb_del(struct dsa_switch *ds, 427 struct dsa_notifier_fdb_info *info) 428 { 429 int port = dsa_towards_port(ds, info->sw_index, info->port); 430 struct dsa_port *dp = dsa_to_port(ds, port); 431 432 if 
(!ds->ops->port_fdb_del) 433 return -EOPNOTSUPP; 434 435 return dsa_port_do_fdb_del(dp, info->addr, info->vid); 436 } 437 438 static int dsa_switch_hsr_join(struct dsa_switch *ds, 439 struct dsa_notifier_hsr_info *info) 440 { 441 if (ds->index == info->sw_index && ds->ops->port_hsr_join) 442 return ds->ops->port_hsr_join(ds, info->port, info->hsr); 443 444 return -EOPNOTSUPP; 445 } 446 447 static int dsa_switch_hsr_leave(struct dsa_switch *ds, 448 struct dsa_notifier_hsr_info *info) 449 { 450 if (ds->index == info->sw_index && ds->ops->port_hsr_leave) 451 return ds->ops->port_hsr_leave(ds, info->port, info->hsr); 452 453 return -EOPNOTSUPP; 454 } 455 456 static int dsa_switch_lag_change(struct dsa_switch *ds, 457 struct dsa_notifier_lag_info *info) 458 { 459 if (ds->index == info->sw_index && ds->ops->port_lag_change) 460 return ds->ops->port_lag_change(ds, info->port); 461 462 if (ds->index != info->sw_index && ds->ops->crosschip_lag_change) 463 return ds->ops->crosschip_lag_change(ds, info->sw_index, 464 info->port); 465 466 return 0; 467 } 468 469 static int dsa_switch_lag_join(struct dsa_switch *ds, 470 struct dsa_notifier_lag_info *info) 471 { 472 if (ds->index == info->sw_index && ds->ops->port_lag_join) 473 return ds->ops->port_lag_join(ds, info->port, info->lag, 474 info->info); 475 476 if (ds->index != info->sw_index && ds->ops->crosschip_lag_join) 477 return ds->ops->crosschip_lag_join(ds, info->sw_index, 478 info->port, info->lag, 479 info->info); 480 481 return -EOPNOTSUPP; 482 } 483 484 static int dsa_switch_lag_leave(struct dsa_switch *ds, 485 struct dsa_notifier_lag_info *info) 486 { 487 if (ds->index == info->sw_index && ds->ops->port_lag_leave) 488 return ds->ops->port_lag_leave(ds, info->port, info->lag); 489 490 if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave) 491 return ds->ops->crosschip_lag_leave(ds, info->sw_index, 492 info->port, info->lag); 493 494 return -EOPNOTSUPP; 495 } 496 497 static int dsa_switch_mdb_add(struct 
dsa_switch *ds, 498 struct dsa_notifier_mdb_info *info) 499 { 500 int port = dsa_towards_port(ds, info->sw_index, info->port); 501 struct dsa_port *dp = dsa_to_port(ds, port); 502 503 if (!ds->ops->port_mdb_add) 504 return -EOPNOTSUPP; 505 506 return dsa_port_do_mdb_add(dp, info->mdb); 507 } 508 509 static int dsa_switch_mdb_del(struct dsa_switch *ds, 510 struct dsa_notifier_mdb_info *info) 511 { 512 int port = dsa_towards_port(ds, info->sw_index, info->port); 513 struct dsa_port *dp = dsa_to_port(ds, port); 514 515 if (!ds->ops->port_mdb_del) 516 return -EOPNOTSUPP; 517 518 return dsa_port_do_mdb_del(dp, info->mdb); 519 } 520 521 static int dsa_switch_host_mdb_add(struct dsa_switch *ds, 522 struct dsa_notifier_mdb_info *info) 523 { 524 struct dsa_port *dp; 525 int err = 0; 526 527 if (!ds->ops->port_mdb_add) 528 return -EOPNOTSUPP; 529 530 dsa_switch_for_each_port(dp, ds) { 531 if (dsa_port_host_address_match(dp, info->sw_index, 532 info->port)) { 533 err = dsa_port_do_mdb_add(dp, info->mdb); 534 if (err) 535 break; 536 } 537 } 538 539 return err; 540 } 541 542 static int dsa_switch_host_mdb_del(struct dsa_switch *ds, 543 struct dsa_notifier_mdb_info *info) 544 { 545 struct dsa_port *dp; 546 int err = 0; 547 548 if (!ds->ops->port_mdb_del) 549 return -EOPNOTSUPP; 550 551 dsa_switch_for_each_port(dp, ds) { 552 if (dsa_port_host_address_match(dp, info->sw_index, 553 info->port)) { 554 err = dsa_port_do_mdb_del(dp, info->mdb); 555 if (err) 556 break; 557 } 558 } 559 560 return err; 561 } 562 563 static bool dsa_port_vlan_match(struct dsa_port *dp, 564 struct dsa_notifier_vlan_info *info) 565 { 566 if (dp->ds->index == info->sw_index && dp->index == info->port) 567 return true; 568 569 if (dsa_port_is_dsa(dp)) 570 return true; 571 572 return false; 573 } 574 575 static int dsa_switch_vlan_add(struct dsa_switch *ds, 576 struct dsa_notifier_vlan_info *info) 577 { 578 struct dsa_port *dp; 579 int err; 580 581 if (!ds->ops->port_vlan_add) 582 return -EOPNOTSUPP; 583 584 
dsa_switch_for_each_port(dp, ds) { 585 if (dsa_port_vlan_match(dp, info)) { 586 err = ds->ops->port_vlan_add(ds, dp->index, info->vlan, 587 info->extack); 588 if (err) 589 return err; 590 } 591 } 592 593 return 0; 594 } 595 596 static int dsa_switch_vlan_del(struct dsa_switch *ds, 597 struct dsa_notifier_vlan_info *info) 598 { 599 if (!ds->ops->port_vlan_del) 600 return -EOPNOTSUPP; 601 602 if (ds->index == info->sw_index) 603 return ds->ops->port_vlan_del(ds, info->port, info->vlan); 604 605 /* Do not deprogram the DSA links as they may be used as conduit 606 * for other VLAN members in the fabric. 607 */ 608 return 0; 609 } 610 611 static int dsa_switch_change_tag_proto(struct dsa_switch *ds, 612 struct dsa_notifier_tag_proto_info *info) 613 { 614 const struct dsa_device_ops *tag_ops = info->tag_ops; 615 struct dsa_port *dp, *cpu_dp; 616 int err; 617 618 if (!ds->ops->change_tag_protocol) 619 return -EOPNOTSUPP; 620 621 ASSERT_RTNL(); 622 623 dsa_switch_for_each_cpu_port(cpu_dp, ds) { 624 err = ds->ops->change_tag_protocol(ds, cpu_dp->index, 625 tag_ops->proto); 626 if (err) 627 return err; 628 629 dsa_port_set_tag_protocol(cpu_dp, tag_ops); 630 } 631 632 /* Now that changing the tag protocol can no longer fail, let's update 633 * the remaining bits which are "duplicated for faster access", and the 634 * bits that depend on the tagger, such as the MTU. 
635 */ 636 dsa_switch_for_each_user_port(dp, ds) { 637 struct net_device *slave = dp->slave; 638 639 dsa_slave_setup_tagger(slave); 640 641 /* rtnl_mutex is held in dsa_tree_change_tag_proto */ 642 dsa_slave_change_mtu(slave, slave->mtu); 643 } 644 645 return 0; 646 } 647 648 static int dsa_switch_mrp_add(struct dsa_switch *ds, 649 struct dsa_notifier_mrp_info *info) 650 { 651 if (!ds->ops->port_mrp_add) 652 return -EOPNOTSUPP; 653 654 if (ds->index == info->sw_index) 655 return ds->ops->port_mrp_add(ds, info->port, info->mrp); 656 657 return 0; 658 } 659 660 static int dsa_switch_mrp_del(struct dsa_switch *ds, 661 struct dsa_notifier_mrp_info *info) 662 { 663 if (!ds->ops->port_mrp_del) 664 return -EOPNOTSUPP; 665 666 if (ds->index == info->sw_index) 667 return ds->ops->port_mrp_del(ds, info->port, info->mrp); 668 669 return 0; 670 } 671 672 static int 673 dsa_switch_mrp_add_ring_role(struct dsa_switch *ds, 674 struct dsa_notifier_mrp_ring_role_info *info) 675 { 676 if (!ds->ops->port_mrp_add) 677 return -EOPNOTSUPP; 678 679 if (ds->index == info->sw_index) 680 return ds->ops->port_mrp_add_ring_role(ds, info->port, 681 info->mrp); 682 683 return 0; 684 } 685 686 static int 687 dsa_switch_mrp_del_ring_role(struct dsa_switch *ds, 688 struct dsa_notifier_mrp_ring_role_info *info) 689 { 690 if (!ds->ops->port_mrp_del) 691 return -EOPNOTSUPP; 692 693 if (ds->index == info->sw_index) 694 return ds->ops->port_mrp_del_ring_role(ds, info->port, 695 info->mrp); 696 697 return 0; 698 } 699 700 static int dsa_switch_event(struct notifier_block *nb, 701 unsigned long event, void *info) 702 { 703 struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb); 704 int err; 705 706 switch (event) { 707 case DSA_NOTIFIER_AGEING_TIME: 708 err = dsa_switch_ageing_time(ds, info); 709 break; 710 case DSA_NOTIFIER_BRIDGE_JOIN: 711 err = dsa_switch_bridge_join(ds, info); 712 break; 713 case DSA_NOTIFIER_BRIDGE_LEAVE: 714 err = dsa_switch_bridge_leave(ds, info); 715 break; 716 case 
DSA_NOTIFIER_FDB_ADD: 717 err = dsa_switch_fdb_add(ds, info); 718 break; 719 case DSA_NOTIFIER_FDB_DEL: 720 err = dsa_switch_fdb_del(ds, info); 721 break; 722 case DSA_NOTIFIER_HOST_FDB_ADD: 723 err = dsa_switch_host_fdb_add(ds, info); 724 break; 725 case DSA_NOTIFIER_HOST_FDB_DEL: 726 err = dsa_switch_host_fdb_del(ds, info); 727 break; 728 case DSA_NOTIFIER_HSR_JOIN: 729 err = dsa_switch_hsr_join(ds, info); 730 break; 731 case DSA_NOTIFIER_HSR_LEAVE: 732 err = dsa_switch_hsr_leave(ds, info); 733 break; 734 case DSA_NOTIFIER_LAG_CHANGE: 735 err = dsa_switch_lag_change(ds, info); 736 break; 737 case DSA_NOTIFIER_LAG_JOIN: 738 err = dsa_switch_lag_join(ds, info); 739 break; 740 case DSA_NOTIFIER_LAG_LEAVE: 741 err = dsa_switch_lag_leave(ds, info); 742 break; 743 case DSA_NOTIFIER_MDB_ADD: 744 err = dsa_switch_mdb_add(ds, info); 745 break; 746 case DSA_NOTIFIER_MDB_DEL: 747 err = dsa_switch_mdb_del(ds, info); 748 break; 749 case DSA_NOTIFIER_HOST_MDB_ADD: 750 err = dsa_switch_host_mdb_add(ds, info); 751 break; 752 case DSA_NOTIFIER_HOST_MDB_DEL: 753 err = dsa_switch_host_mdb_del(ds, info); 754 break; 755 case DSA_NOTIFIER_VLAN_ADD: 756 err = dsa_switch_vlan_add(ds, info); 757 break; 758 case DSA_NOTIFIER_VLAN_DEL: 759 err = dsa_switch_vlan_del(ds, info); 760 break; 761 case DSA_NOTIFIER_MTU: 762 err = dsa_switch_mtu(ds, info); 763 break; 764 case DSA_NOTIFIER_TAG_PROTO: 765 err = dsa_switch_change_tag_proto(ds, info); 766 break; 767 case DSA_NOTIFIER_MRP_ADD: 768 err = dsa_switch_mrp_add(ds, info); 769 break; 770 case DSA_NOTIFIER_MRP_DEL: 771 err = dsa_switch_mrp_del(ds, info); 772 break; 773 case DSA_NOTIFIER_MRP_ADD_RING_ROLE: 774 err = dsa_switch_mrp_add_ring_role(ds, info); 775 break; 776 case DSA_NOTIFIER_MRP_DEL_RING_ROLE: 777 err = dsa_switch_mrp_del_ring_role(ds, info); 778 break; 779 case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD: 780 err = dsa_switch_tag_8021q_vlan_add(ds, info); 781 break; 782 case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL: 783 err = 
dsa_switch_tag_8021q_vlan_del(ds, info); 784 break; 785 default: 786 err = -EOPNOTSUPP; 787 break; 788 } 789 790 if (err) 791 dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n", 792 event, err); 793 794 return notifier_from_errno(err); 795 } 796 797 int dsa_switch_register_notifier(struct dsa_switch *ds) 798 { 799 ds->nb.notifier_call = dsa_switch_event; 800 801 return raw_notifier_chain_register(&ds->dst->nh, &ds->nb); 802 } 803 804 void dsa_switch_unregister_notifier(struct dsa_switch *ds) 805 { 806 int err; 807 808 err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb); 809 if (err) 810 dev_err(ds->dev, "failed to unregister notifier (%d)\n", err); 811 } 812