// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa.h"
#include "netlink.h"
#include "port.h"
#include "switch.h"
#include "tag_8021q.h"
#include "trace.h"
#include "user.h"

static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;

	return ageing_time;
}

static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}

static bool dsa_port_mtu_match(struct dsa_port *dp,
			       struct dsa_notifier_mtu_info *info)
{
	return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
}

static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	struct dsa_port *dp;
	int ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_mtu_match(dp, info)) {
			ret = ds->ops->port_change_mtu(ds, dp->index,
						       info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	int err;

	if (info->dp->ds == ds) {
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->dp->index,
						info->bridge,
						&info->tx_fwd_offload,
						info->extack);
		if (err)
			return err;
	}

	if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds,
						     info->dp->ds->dst->index,
						     info->dp->ds->index,
						     info->dp->index,
						     info->bridge,
						     info->extack);
		if (err)
			return err;
	}

	return 0;
}

static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);

	if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
						info->dp->ds->index,
						info->dp->index,
						info->bridge);

	return 0;
}
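/* Illustrative sketch, not part of this file: a driver servicing the
 * set_ageing_time() hook called from dsa_switch_ageing_time() above would
 * typically convert the requested interval into its native hardware unit.
 * The foo_* names below are hypothetical.
 *
 *	static int foo_set_ageing_time(struct dsa_switch *ds,
 *				       unsigned int msecs)
 *	{
 *		// Hardware counts age in seconds; round up.
 *		return foo_write(ds->priv, FOO_AGEING_REG,
 *				 DIV_ROUND_UP(msecs, 1000));
 *	}
 *
 * Drivers advertise their supported range by setting ds->ageing_time_min
 * and ds->ageing_time_max, which dsa_switch_ageing_time() validates before
 * calling into the hardware.
 */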
/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 * DSA links) that sit between the targeted port on which the notifier was
 * emitted and its dedicated CPU port.
 */
static bool dsa_port_host_address_match(struct dsa_port *dp,
					const struct dsa_port *targeted_dp)
{
	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
						     cpu_dp->index);

	return false;
}

static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr, u16 vid,
					      struct dsa_db db)
{
	struct dsa_mac_addr *a;

	list_for_each_entry(a, addr_list, list)
		if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
		    dsa_db_equal(&a->db, &db))
			return a;

	return NULL;
}

static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_mdb_add(ds, port, mdb, db);
		trace_dsa_mdb_add_hw(dp, mdb->addr, mdb->vid, &db, err);

		return err;
	}

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		trace_dsa_mdb_add_bump(dp, mdb->addr, mdb->vid, &db,
				       &a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_mdb_add(ds, port, mdb, db);
	trace_dsa_mdb_add_hw(dp, mdb->addr, mdb->vid, &db, err);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_mdb_del(ds, port, mdb, db);
		trace_dsa_mdb_del_hw(dp, mdb->addr, mdb->vid, &db, err);

		return err;
	}

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (!a) {
		trace_dsa_mdb_del_not_found(dp, mdb->addr, mdb->vid, &db);
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount)) {
		trace_dsa_mdb_del_drop(dp, mdb->addr, mdb->vid, &db,
				       &a->refcount);
		goto out;
	}

	err = ds->ops->port_mdb_del(ds, port, mdb, db);
	trace_dsa_mdb_del_hw(dp, mdb->addr, mdb->vid, &db, err);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
		trace_dsa_fdb_add_hw(dp, addr, vid, &db, err);

		return err;
	}

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		trace_dsa_fdb_add_bump(dp, addr, vid, &db, &a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
	trace_dsa_fdb_add_hw(dp, addr, vid, &db, err);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
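/* Illustrative walkthrough of the shared-port refcounting implemented by
 * dsa_port_do_fdb_add() above and dsa_port_do_fdb_del() below (hypothetical
 * scenario): when two user ports install the same {addr, vid, db} entry
 * towards their common CPU port, only the first call programs the hardware;
 * the second just bumps the refcount, and deletion is symmetric:
 *
 *	dsa_port_do_fdb_add(cpu_dp, addr, vid, db);	// hw write, refcount 1
 *	dsa_port_do_fdb_add(cpu_dp, addr, vid, db);	// no hw write, refcount 2
 *	dsa_port_do_fdb_del(cpu_dp, addr, vid, db);	// no hw write, refcount 1
 *	dsa_port_do_fdb_del(cpu_dp, addr, vid, db);	// hw delete, entry freed
 */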
static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
		trace_dsa_fdb_del_hw(dp, addr, vid, &db, err);

		return err;
	}

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (!a) {
		trace_dsa_fdb_del_not_found(dp, addr, vid, &db);
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount)) {
		trace_dsa_fdb_del_drop(dp, addr, vid, &db, &a->refcount);
		goto out;
	}

	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
	trace_dsa_fdb_del_hw(dp, addr, vid, &db, err);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		trace_dsa_lag_fdb_add_bump(lag->dev, addr, vid, &db,
					   &a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
	trace_dsa_lag_fdb_add_hw(lag->dev, addr, vid, &db, err);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &lag->fdbs);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}

static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (!a) {
		trace_dsa_lag_fdb_del_not_found(lag->dev, addr, vid, &db);
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount)) {
		trace_dsa_lag_fdb_del_drop(lag->dev, addr, vid, &db,
					   &a->refcount);
		goto out;
	}

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
	trace_dsa_lag_fdb_del_hw(lag->dev, addr, vid, &db, err);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
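/* Note on the deletion error paths above: if the hardware rejects the
 * removal, the refcount is reset to 1 instead of being left at 0, so the
 * software list keeps tracking the entry that is still programmed in
 * hardware and a later deletion can retry cleanly.
 */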
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				err = dsa_switch_do_lag_fdb_add(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_add(dp, info->addr,
							  info->vid, info->db);
			}
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				err = dsa_switch_do_lag_fdb_del(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_del(dp, info->addr,
							  info->vid, info->db);
			}
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
}

static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_add)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_add(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}

static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_del)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_del(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}

static int dsa_switch_lag_change(struct dsa_switch *ds,
				 struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_change)
		return ds->ops->port_lag_change(ds, info->dp->index);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
		return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
						     info->dp->index);

	return 0;
}
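/* Illustrative sketch (hypothetical foo_* driver, not part of this file):
 * a port_lag_join() implementation, as invoked from dsa_switch_lag_join()
 * below, typically validates the kernel's transmit policy before accepting
 * the offload:
 *
 *	static int foo_port_lag_join(struct dsa_switch *ds, int port,
 *				     struct dsa_lag lag,
 *				     struct netdev_lag_upper_info *info,
 *				     struct netlink_ext_ack *extack)
 *	{
 *		if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
 *			NL_SET_ERR_MSG_MOD(extack,
 *					   "Can only offload hash LAGs");
 *			return -EOPNOTSUPP;
 *		}
 *		return foo_lag_set_member(ds->priv, port, lag.id, true);
 *	}
 *
 * Returning -EOPNOTSUPP makes DSA fall back to handling the LAG in
 * software rather than offloading it.
 */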
static int dsa_switch_lag_join(struct dsa_switch *ds,
			       struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_join)
		return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
					      info->info, info->extack);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
		return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
						   info->dp->index, info->lag,
						   info->info, info->extack);

	return -EOPNOTSUPP;
}

static int dsa_switch_lag_leave(struct dsa_switch *ds,
				struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_leave)
		return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
		return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
						    info->dp->index, info->lag);

	return -EOPNOTSUPP;
}

static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_add(dp, info->mdb, info->db);
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_del(dp, info->mdb, info->db);
}

static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
			if (err)
				break;
		}
	}

	return err;
}

/* Port VLANs match on the targeted port and on all DSA ports */
static bool dsa_port_vlan_match(struct dsa_port *dp,
				struct dsa_notifier_vlan_info *info)
{
	return dsa_port_is_dsa(dp) || dp == info->dp;
}
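/* Example of the matching rule above (illustrative): for a VLAN added on a
 * user port of switch 0 in a multi-chip fabric, dsa_port_vlan_match()
 * selects that port itself plus every DSA interconnect port, so that
 * traffic tagged with the new VLAN can cross the chip-to-chip links.
 */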
/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
 * (upstream and downstream) of that switch and its upstream switches.
 */
static bool dsa_port_host_vlan_match(struct dsa_port *dp,
				     const struct dsa_port *targeted_dp)
{
	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
		return dsa_port_is_dsa(dp) || dp == cpu_dp;

	return false;
}

struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_vlan *v;

	list_for_each_entry(v, vlan_list, list)
		if (v->vid == vlan->vid)
			return v;

	return NULL;
}

static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_vlan_add(ds, port, vlan, extack);
		trace_dsa_vlan_add_hw(dp, vlan, err);

		return err;
	}

	/* No need to propagate on shared ports the existing VLANs that were
	 * re-notified after just the flags have changed. This would cause a
	 * refcount bump which we need to avoid, since it unbalances the
	 * additions with the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		refcount_inc(&v->refcount);
		trace_dsa_vlan_add_bump(dp, vlan, &v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	trace_dsa_vlan_add_hw(dp, vlan, err);
	if (err) {
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}

static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_vlan_del(ds, port, vlan);
		trace_dsa_vlan_del_hw(dp, vlan, err);

		return err;
	}

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		trace_dsa_vlan_del_not_found(dp, vlan);
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&v->refcount)) {
		trace_dsa_vlan_del_drop(dp, vlan, &v->refcount);
		goto out;
	}

	err = ds->ops->port_vlan_del(ds, port, vlan);
	trace_dsa_vlan_del_hw(dp, vlan, err);
	if (err) {
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}
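/* Illustrative sketch (hypothetical foo_* driver, not part of this file):
 * the port_vlan_add() hook invoked above receives the bridge VLAN flags
 * and maps them onto hardware state:
 *
 *	static int foo_port_vlan_add(struct dsa_switch *ds, int port,
 *				     const struct switchdev_obj_port_vlan *vlan,
 *				     struct netlink_ext_ack *extack)
 *	{
 *		bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
 *		bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
 *
 *		return foo_vlan_set(ds->priv, port, vlan->vid,
 *				    untagged, pvid);
 *	}
 */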
static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info->dp)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info->dp)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	if (err)
		return err;

	dsa_switch_for_each_cpu_port(cpu_dp, ds)
		dsa_port_set_tag_protocol(cpu_dp, tag_ops);

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *user = dp->user;

		dsa_user_setup_tagger(user);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_user_change_mtu(user, user->mtu);
	}

	return 0;
}
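/* Illustrative sketch (hypothetical foo_* driver and tag protocol, not part
 * of this file): a change_tag_protocol() implementation, as called from
 * dsa_switch_change_tag_proto() above, reconfigures the frame format
 * exchanged with the CPU port:
 *
 *	static int foo_change_tag_protocol(struct dsa_switch *ds,
 *					   enum dsa_tag_protocol proto)
 *	{
 *		switch (proto) {
 *		case DSA_TAG_PROTO_FOO:
 *			return foo_set_header_mode(ds->priv, FOO_HDR_LONG);
 *		default:
 *			return -EPROTONOSUPPORT;
 *		}
 *	}
 */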
/* We use the same cross-chip notifiers to inform both the tagger side and
 * the switch side of connection and disconnection events.
 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 * switch side doesn't support connecting to this tagger, and therefore, the
 * fact that we don't disconnect the tagger side doesn't constitute a memory
 * leak: the tagger will still operate with persistent per-switch memory, just
 * with the switch side unconnected to it. What does constitute a hard error is
 * when the switch side supports connecting but fails.
 */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}

static int
dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
				struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;

	/* Notify the tagger about the disconnection from this switch */
	if (tag_ops->disconnect && ds->tagger_data)
		tag_ops->disconnect(ds);

	/* No need to notify the switch, since it shouldn't have any
	 * resources to tear down
	 */
	return 0;
}

static int
dsa_switch_conduit_state_change(struct dsa_switch *ds,
				struct dsa_notifier_conduit_state_info *info)
{
	if (!ds->ops->conduit_state_change)
		return 0;

	ds->ops->conduit_state_change(ds, info->conduit, info->operational);

	return 0;
}
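/* All cross-chip notifiers funnel through dsa_switch_event() below: the
 * notifier chain of a tree invokes it once per member switch, and any
 * nonzero error stops the chain via notifier_from_errno().
 */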
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_CONDUIT_STATE_CHANGE:
		err = dsa_switch_conduit_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}

/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The only alternative, traversing the tree through
 * its ports list, does not uniquely list the member switches.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}

/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 *
 * WARNING: this function is not reliable during probe time, because probing
 * between trees is asynchronous and not all DSA trees might have probed.
 */
int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		err = dsa_tree_notify(dst, e, v);
		if (err)
			break;
	}

	return err;
}

int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}
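/* Illustrative end-to-end flow (the port-side helpers live in port.c, not
 * in this file): a port operation such as dsa_port_vlan_add() fills in a
 * struct dsa_notifier_vlan_info and emits DSA_NOTIFIER_VLAN_ADD on its
 * tree; dsa_tree_notify() then runs dsa_switch_event() once for every
 * switch registered via dsa_switch_register_notifier(), and each switch
 * programs the ports selected by dsa_port_vlan_match().
 */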