// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Linus Lüssing
 */

#include "multicast.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/icmpv6.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/addrconf.h>
#include <net/genetlink.h>
#include <net/if_inet6.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "bridge_loop_avoidance.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "send.h"
#include "soft-interface.h"
#include "translation-table.h"
#include "tvlv.h"

static void batadv_mcast_mla_update(struct work_struct *work);

/**
 * batadv_mcast_start_timer() - schedule the multicast periodic worker
 * @bat_priv: the bat priv with all the soft interface information
 */
static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
{
	queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
			   msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
}

/**
 * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
 * @soft_iface: netdev struct of the mesh interface
 *
 * If the given soft interface has a bridge on top then the refcount
 * of the according net device is increased.
 *
 * Return: NULL if no such bridge exists. Otherwise the net device of the
 * bridge.
 */
static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
{
	struct net_device *upper = soft_iface;

	rcu_read_lock();
	do {
		upper = netdev_master_upper_dev_get_rcu(upper);
	} while (upper && !(upper->priv_flags & IFF_EBRIDGE));

	dev_hold(upper);
	rcu_read_unlock();

	return upper;
}

/**
 * batadv_mcast_mla_rtr_flags_softif_get_ipv4() - get mcast router flags from
 *  node for IPv4
 * @dev: the interface to check
 *
 * Checks the presence of an IPv4 multicast router on this node.
 *
 * Caller needs to hold rcu read lock.
 *
 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR4 otherwise.
 */
static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);

	if (in_dev && IN_DEV_MFORWARD(in_dev))
		return BATADV_NO_FLAGS;
	else
		return BATADV_MCAST_WANT_NO_RTR4;
}

/**
 * batadv_mcast_mla_rtr_flags_softif_get_ipv6() - get mcast router flags from
 *  node for IPv6
 * @dev: the interface to check
 *
 * Checks the presence of an IPv6 multicast router on this node.
 *
 * Caller needs to hold rcu read lock.
 *
 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR6 otherwise.
 */
#if IS_ENABLED(CONFIG_IPV6_MROUTE)
static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
{
	struct inet6_dev *in6_dev = __in6_dev_get(dev);

	if (in6_dev && in6_dev->cnf.mc_forwarding)
		return BATADV_NO_FLAGS;
	else
		return BATADV_MCAST_WANT_NO_RTR6;
}
#else
static inline u8
batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
{
	return BATADV_MCAST_WANT_NO_RTR6;
}
#endif

/**
 * batadv_mcast_mla_rtr_flags_softif_get() - get mcast router flags from node
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers on this
 * node.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and IPv6 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv,
						struct net_device *bridge)
{
	struct net_device *dev = bridge ? bridge : bat_priv->soft_iface;
	u8 flags = BATADV_NO_FLAGS;

	rcu_read_lock();

	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv4(dev);
	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv6(dev);

	rcu_read_unlock();

	return flags;
}

/**
 * batadv_mcast_mla_rtr_flags_bridge_get() - get mcast router flags from bridge
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers behind a bridge.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and IPv6 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
						struct net_device *bridge)
{
	struct net_device *dev = bat_priv->soft_iface;
	u8 flags = BATADV_NO_FLAGS;

	if (!bridge)
		return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;

	if (!br_multicast_has_router_adjacent(dev, ETH_P_IP))
		flags |= BATADV_MCAST_WANT_NO_RTR4;
	if (!br_multicast_has_router_adjacent(dev, ETH_P_IPV6))
		flags |= BATADV_MCAST_WANT_NO_RTR6;

	return flags;
}

/**
 * batadv_mcast_mla_rtr_flags_get() - get multicast router flags
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers on this
 * node or behind its bridge.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and IPv6 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv,
					 struct net_device *bridge)
{
	u8 flags = BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;

	flags &= batadv_mcast_mla_rtr_flags_softif_get(bat_priv, bridge);
	flags &= batadv_mcast_mla_rtr_flags_bridge_get(bat_priv, bridge);

	return flags;
}
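
/* Illustration (example values, not taken from the original sources): how the
 * softif and bridge results combine in batadv_mcast_mla_rtr_flags_get(). A
 * WANT_NO_RTR flag only survives the bitwise AND if *both* checks reported
 * the corresponding router as absent, e.g.:
 *
 *   softif: BATADV_MCAST_WANT_NO_RTR4                              (local IPv6 mroute active)
 *   bridge: BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6  (nothing behind the bridge)
 *   result: BATADV_MCAST_WANT_NO_RTR4
 *
 * i.e. an IPv6 multicast router was detected on this node, so only the
 * "no IPv4 multicast router" claim is announced.
 */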

/**
 * batadv_mcast_mla_flags_get() - get the new multicast flags
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: A set of flags for the current/next TVLV, querier and
 * bridge state.
 */
static struct batadv_mcast_mla_flags
batadv_mcast_mla_flags_get(struct batadv_priv *bat_priv)
{
	struct net_device *dev = bat_priv->soft_iface;
	struct batadv_mcast_querier_state *qr4, *qr6;
	struct batadv_mcast_mla_flags mla_flags;
	struct net_device *bridge;

	bridge = batadv_mcast_get_bridge(dev);

	memset(&mla_flags, 0, sizeof(mla_flags));
	mla_flags.enabled = 1;
	mla_flags.tvlv_flags |= batadv_mcast_mla_rtr_flags_get(bat_priv,
							       bridge);

	if (!bridge)
		return mla_flags;

	dev_put(bridge);

	mla_flags.bridged = 1;
	qr4 = &mla_flags.querier_ipv4;
	qr6 = &mla_flags.querier_ipv6;

	if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
		pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");

	qr4->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
	qr4->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);

	qr6->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
	qr6->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);

	mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;

	/* 1) If no querier exists at all, then multicast listeners on
	 *    our local TT clients behind the bridge will keep silent.
	 * 2) If the selected querier is on one of our local TT clients,
	 *    behind the bridge, then this querier might shadow multicast
	 *    listeners on our local TT clients, behind this bridge.
	 *
	 * In both cases, we will signalize other batman nodes that
	 * we need all multicast traffic of the according protocol.
	 */
	if (!qr4->exists || qr4->shadowing) {
		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV4;
		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR4;
	}

	if (!qr6->exists || qr6->shadowing) {
		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV6;
		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR6;
	}

	return mla_flags;
}
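
/* Example (illustrative): on a node with a bridge on top of bat0 and no
 * IGMP/MLD querier present anywhere, batadv_mcast_mla_flags_get() ends up
 * with bridged = 1 and
 *
 *   tvlv_flags = BATADV_MCAST_WANT_ALL_UNSNOOPABLES |
 *                BATADV_MCAST_WANT_ALL_IPV4 |
 *                BATADV_MCAST_WANT_ALL_IPV6
 *
 * that is, the node asks for all multicast traffic because listeners behind
 * the bridge cannot be detected reliably without a querier.
 */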

/**
 * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
 * @mcast_addr: the multicast address to check
 * @mcast_list: the list with multicast addresses to search in
 *
 * Return: true if the given address is already in the given list.
 * Otherwise returns false.
 */
static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
					  struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;

	hlist_for_each_entry(mcast_entry, mcast_list, list)
		if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
			return true;

	return false;
}

/**
 * batadv_mcast_mla_softif_get_ipv4() - get softif IPv4 multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of IPv4 multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int
batadv_mcast_mla_softif_get_ipv4(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	struct batadv_hw_addr *new;
	struct in_device *in_dev;
	u8 mcast_addr[ETH_ALEN];
	struct ip_mc_list *pmc;
	int ret = 0;

	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
		return 0;

	rcu_read_lock();

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		return 0;
	}

	for (pmc = rcu_dereference(in_dev->mc_list); pmc;
	     pmc = rcu_dereference(pmc->next_rcu)) {
		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
		    ipv4_is_local_multicast(pmc->multiaddr))
			continue;

		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
		    !ipv4_is_local_multicast(pmc->multiaddr))
			continue;

		ip_eth_mc_map(pmc->multiaddr, mcast_addr);

		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	rcu_read_unlock();

	return ret;
}

/**
 * batadv_mcast_mla_softif_get_ipv6() - get softif IPv6 multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of IPv6 multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
#if IS_ENABLED(CONFIG_IPV6)
static int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	struct batadv_hw_addr *new;
	struct inet6_dev *in6_dev;
	u8 mcast_addr[ETH_ALEN];
	struct ifmcaddr6 *pmc6;
	int ret = 0;

	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
		return 0;

	rcu_read_lock();

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev) {
		rcu_read_unlock();
		return 0;
	}

	for (pmc6 = rcu_dereference(in6_dev->mc_list);
	     pmc6;
	     pmc6 = rcu_dereference(pmc6->next)) {
		if (IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) <
		    IPV6_ADDR_SCOPE_LINKLOCAL)
			continue;

		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
		    ipv6_addr_is_ll_all_nodes(&pmc6->mca_addr))
			continue;

		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
		    IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) >
		    IPV6_ADDR_SCOPE_LINKLOCAL)
			continue;

		ipv6_eth_mc_map(&pmc6->mca_addr, mcast_addr);

		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	rcu_read_unlock();

	return ret;
}
#else
static inline int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	return 0;
}
#endif

/**
 * batadv_mcast_mla_softif_get() - get softif multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * If there is a bridge interface on top of dev, collect from that one
 * instead. Just like with IP addresses and routes, multicast listeners
 * will(/should) register to the bridge interface instead of an
 * enslaved bat0.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int
batadv_mcast_mla_softif_get(struct net_device *dev,
			    struct hlist_head *mcast_list,
			    struct batadv_mcast_mla_flags *flags)
{
	struct net_device *bridge = batadv_mcast_get_bridge(dev);
	int ret4, ret6 = 0;

	if (bridge)
		dev = bridge;

	ret4 = batadv_mcast_mla_softif_get_ipv4(dev, mcast_list, flags);
	if (ret4 < 0)
		goto out;

	ret6 = batadv_mcast_mla_softif_get_ipv6(dev, mcast_list, flags);
	if (ret6 < 0) {
		ret4 = 0;
		goto out;
	}

out:
	dev_put(bridge);

	return ret4 + ret6;
}

/**
 * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
 * @dst: destination to write to - a multicast MAC address
 * @src: source to read from - a multicast IP address
 *
 * Converts a given multicast IPv4/IPv6 address from a bridge
 * to its matching multicast MAC address and copies it into the given
 * destination buffer.
 *
 * Caller needs to make sure the destination buffer can hold
 * at least ETH_ALEN bytes.
 */
static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
{
	if (src->proto == htons(ETH_P_IP))
		ip_eth_mc_map(src->dst.ip4, dst);
#if IS_ENABLED(CONFIG_IPV6)
	else if (src->proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&src->dst.ip6, dst);
#endif
	else
		eth_zero_addr(dst);
}
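
/* Example (illustrative) of the well-known IP-to-MAC group mappings performed
 * above, as defined by RFC 1112 and RFC 2464:
 *
 *   224.1.2.3 -> 01:00:5e:01:02:03  (lower 23 bits of the IPv4 group address)
 *   ff02::1   -> 33:33:00:00:00:01  (lower 32 bits of the IPv6 group address)
 */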

/**
 * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
 * @dev: a bridge slave whose bridge to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of multicast listeners residing
 * on foreign, non-mesh devices which we gave access to our mesh via
 * a bridge on top of the given soft interface, dev, in the given
 * mcast_list.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int batadv_mcast_mla_bridge_get(struct net_device *dev,
				       struct hlist_head *mcast_list,
				       struct batadv_mcast_mla_flags *flags)
{
	struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
	struct br_ip_list *br_ip_entry, *tmp;
	u8 tvlv_flags = flags->tvlv_flags;
	struct batadv_hw_addr *new;
	u8 mcast_addr[ETH_ALEN];
	int ret;

	/* we don't need to detect these devices/listeners, the IGMP/MLD
	 * snooping code of the Linux bridge already does that for us
	 */
	ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
	if (ret < 0)
		goto out;

	list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
		if (br_ip_entry->addr.proto == htons(ETH_P_IP)) {
			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
				continue;

			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
			    ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
				continue;

			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
			    !ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
				continue;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (br_ip_entry->addr.proto == htons(ETH_P_IPV6)) {
			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
				continue;

			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
			    ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.dst.ip6))
				continue;

			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
			    IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.dst.ip6) >
			    IPV6_ADDR_SCOPE_LINKLOCAL)
				continue;
		}
#endif

		batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
	}

out:
	list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
		list_del(&br_ip_entry->list);
		kfree(br_ip_entry);
	}

	return ret;
}

/**
 * batadv_mcast_mla_list_free() - free a list of multicast addresses
 * @mcast_list: the list to free
 *
 * Removes and frees all items in the given mcast_list.
 */
static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}

/**
 * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which should _not_ be removed
 *
 * Retracts the announcement of any multicast listener from the
 * translation table except the ones listed in the given mcast_list.
 *
 * If mcast_list is NULL then all are retracted.
 */
static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
					struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
				  list) {
		if (mcast_list &&
		    batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  mcast_list))
			continue;

		batadv_tt_local_remove(bat_priv, mcast_entry->addr,
				       BATADV_NO_FLAGS,
				       "mcast TT outdated", false);

		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}

/**
 * batadv_mcast_mla_tt_add() - add multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which are going to get added
 *
 * Adds multicast listener announcements from the given mcast_list to the
 * translation table if they have not been added yet.
 */
static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
				    struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	if (!mcast_list)
		return;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  &bat_priv->mcast.mla_list))
			continue;

		if (!batadv_tt_local_add(bat_priv->soft_iface,
					 mcast_entry->addr, BATADV_NO_FLAGS,
					 BATADV_NULL_IFINDEX, BATADV_NO_MARK))
			continue;

		hlist_del(&mcast_entry->list);
		hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
	}
}

/**
 * batadv_mcast_querier_log() - debug output regarding the querier status on
 *  link
 * @bat_priv: the bat priv with all the soft interface information
 * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
 * @old_state: the previous querier state on our link
 * @new_state: the new querier state on our link
 *
 * Outputs debug messages to the logging facility with log level 'mcast'
 * regarding changes to the querier status on the link which are relevant
 * to our multicast optimizations.
 *
 * Usually this is about whether a querier appeared or vanished in
 * our mesh or whether the querier is in the suboptimal position of being
 * behind our local bridge segment: Snooping switches will directly
 * forward listener reports to the querier, therefore batman-adv and
 * the bridge will potentially not see these listeners - the querier is
 * potentially shadowing listeners from us then.
 *
 * This is only interesting for nodes with a bridge on top of their
 * soft interface.
 */
static void
batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
			 struct batadv_mcast_querier_state *old_state,
			 struct batadv_mcast_querier_state *new_state)
{
	if (!old_state->exists && new_state->exists)
		batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
			    str_proto);
	else if (old_state->exists && !new_state->exists)
		batadv_info(bat_priv->soft_iface,
			    "%s Querier disappeared - multicast optimizations disabled\n",
			    str_proto);
	else if (!bat_priv->mcast.mla_flags.bridged && !new_state->exists)
		batadv_info(bat_priv->soft_iface,
			    "No %s Querier present - multicast optimizations disabled\n",
			    str_proto);

	if (new_state->exists) {
		if ((!old_state->shadowing && new_state->shadowing) ||
		    (!old_state->exists && new_state->shadowing))
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is behind our bridged segment: Might shadow listeners\n",
				   str_proto);
		else if (old_state->shadowing && !new_state->shadowing)
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is not behind our bridged segment\n",
				   str_proto);
	}
}

/**
 * batadv_mcast_bridge_log() - debug output for topology changes in bridged
 *  setups
 * @bat_priv: the bat priv with all the soft interface information
 * @new_flags: flags indicating the new multicast state
 *
 * If no bridges are ever used on this node, then this function does nothing.
 *
 * Otherwise this function outputs debug information to the 'mcast' log level
 * which might be relevant to our multicast optimizations.
 *
 * More precisely, it outputs information when a bridge interface is added or
 * removed from a soft interface. And when a bridge is present, it further
 * outputs information about the querier state which is relevant for the
 * multicast flags this node is going to set.
 */
static void
batadv_mcast_bridge_log(struct batadv_priv *bat_priv,
			struct batadv_mcast_mla_flags *new_flags)
{
	struct batadv_mcast_mla_flags *old_flags = &bat_priv->mcast.mla_flags;

	if (!old_flags->bridged && new_flags->bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge added: Setting Unsnoopables(U)-flag\n");
	else if (old_flags->bridged && !new_flags->bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge removed: Unsetting Unsnoopables(U)-flag\n");

	if (new_flags->bridged) {
		batadv_mcast_querier_log(bat_priv, "IGMP",
					 &old_flags->querier_ipv4,
					 &new_flags->querier_ipv4);
		batadv_mcast_querier_log(bat_priv, "MLD",
					 &old_flags->querier_ipv6,
					 &new_flags->querier_ipv6);
	}
}

/**
 * batadv_mcast_flags_log() - output debug information about mcast flag changes
 * @bat_priv: the bat priv with all the soft interface information
 * @flags: TVLV flags indicating the new multicast state
 *
 * Whenever the multicast TVLV flags this node announces change, this function
 * should be used to notify userspace about the change.
 */
static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
{
	bool old_enabled = bat_priv->mcast.mla_flags.enabled;
	u8 old_flags = bat_priv->mcast.mla_flags.tvlv_flags;
	char str_old_flags[] = "[.... . ]";

	sprintf(str_old_flags, "[%c%c%c%s%s]",
		(old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
		!(old_flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
		!(old_flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");

	batadv_dbg(BATADV_DBG_MCAST, bat_priv,
		   "Changing multicast flags from '%s' to '[%c%c%c%s%s]'\n",
		   old_enabled ? str_old_flags : "<undefined>",
		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
		   !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
		   !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
}
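
/* Example (illustrative) of a resulting log line:
 *
 *   "Changing multicast flags from '[U... . ]' to '[U46R4R6]'"
 *
 * 'U', '4' and '6' stand for the WANT_ALL_UNSNOOPABLES/IPV4/IPV6 flags being
 * set, while 'R4'/'R6' indicate that routable IPv4/IPv6 multicast traffic is
 * wanted (i.e. the corresponding WANT_NO_RTR flag is unset).
 */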

/**
 * batadv_mcast_mla_flags_update() - update multicast flags
 * @bat_priv: the bat priv with all the soft interface information
 * @flags: flags indicating the new multicast state
 *
 * Updates the own multicast tvlv with our current multicast related settings,
 * capabilities and inabilities.
 */
static void
batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv,
			      struct batadv_mcast_mla_flags *flags)
{
	struct batadv_tvlv_mcast_data mcast_data;

	if (!memcmp(flags, &bat_priv->mcast.mla_flags, sizeof(*flags)))
		return;

	batadv_mcast_bridge_log(bat_priv, flags);
	batadv_mcast_flags_log(bat_priv, flags->tvlv_flags);

	mcast_data.flags = flags->tvlv_flags;
	memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));

	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
				       &mcast_data, sizeof(mcast_data));

	bat_priv->mcast.mla_flags = *flags;
}

/**
 * __batadv_mcast_mla_update() - update the own MLAs
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
 * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
 * ensured by the non-parallel execution of the worker this function
 * belongs to.
 */
static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
{
	struct net_device *soft_iface = bat_priv->soft_iface;
	struct hlist_head mcast_list = HLIST_HEAD_INIT;
	struct batadv_mcast_mla_flags flags;
	int ret;

	flags = batadv_mcast_mla_flags_get(bat_priv);

	ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list, &flags);
	if (ret < 0)
		goto out;

	ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list, &flags);
	if (ret < 0)
		goto out;

	spin_lock(&bat_priv->mcast.mla_lock);
	batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
	batadv_mcast_mla_flags_update(bat_priv, &flags);
	spin_unlock(&bat_priv->mcast.mla_lock);

out:
	batadv_mcast_mla_list_free(&mcast_list);
}

/**
 * batadv_mcast_mla_update() - update the own MLAs
 * @work: kernel work struct
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * In the end, reschedules the work timer.
 */
static void batadv_mcast_mla_update(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv_mcast *priv_mcast;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
	bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);

	__batadv_mcast_mla_update(bat_priv);
	batadv_mcast_start_timer(bat_priv);
}

/**
 * batadv_mcast_is_report_ipv4() - check for IGMP reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid IGMP report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
{
	if (ip_mc_check_igmp(skb) < 0)
		return false;

	switch (igmp_hdr(skb)->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv4 packet to check
 * @is_unsnoopable: stores whether the destination is unsnoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given IPv4 packet has the potential to be forwarded with a
 * mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
 * allocation failure.
 */
static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable,
					     int *is_routable)
{
	struct iphdr *iphdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
		return -ENOMEM;

	if (batadv_mcast_is_report_ipv4(skb))
		return -EINVAL;

	iphdr = ip_hdr(skb);

	/* link-local multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 2.1.2.2)
	 */
	if (ipv4_is_local_multicast(iphdr->daddr))
		*is_unsnoopable = true;
	else
		*is_routable = ETH_P_IP;

	return 0;
}
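
/* For illustration: with the checks above, IPv4 destinations in the
 * link-local range 224.0.0.0/24 (see ipv4_is_local_multicast()) are flagged
 * as unsnoopable, while any other multicast destination is considered
 * routable instead.
 */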

/**
 * batadv_mcast_is_report_ipv6() - check for MLD reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid MLD report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
{
	if (ipv6_mc_check_mld(skb) < 0)
		return false;

	switch (icmp6_hdr(skb)->icmp6_type) {
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MLD2_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv6 packet to check
 * @is_unsnoopable: stores whether the destination is unsnoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given IPv6 packet has the potential to be forwarded with a
 * mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
 * allocation failure.
 */
static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable,
					     int *is_routable)
{
	struct ipv6hdr *ip6hdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
		return -ENOMEM;

	if (batadv_mcast_is_report_ipv6(skb))
		return -EINVAL;

	ip6hdr = ipv6_hdr(skb);

	if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return -EINVAL;

	/* link-local-all-nodes multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 3, paragraph 3)
	 */
	if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
		*is_unsnoopable = true;
	else if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) > IPV6_ADDR_SCOPE_LINKLOCAL)
		*is_routable = ETH_P_IPV6;

	return 0;
}

/**
 * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast frame to check
 * @is_unsnoopable: stores whether the destination is unsnoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given multicast ethernet frame has the potential to be
 * forwarded with a mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
 * allocation failure.
 */
static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
					struct sk_buff *skb,
					bool *is_unsnoopable,
					int *is_routable)
{
	struct ethhdr *ethhdr = eth_hdr(skb);

	if (!atomic_read(&bat_priv->multicast_mode))
		return -EINVAL;

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
							 is_unsnoopable,
							 is_routable);
	case ETH_P_IPV6:
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EINVAL;

		return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
							 is_unsnoopable,
							 is_routable);
	default:
		return -EINVAL;
	}
}

/**
 * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
 *  interest
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: ethernet header of a packet
 *
 * Return: the number of nodes which want all IPv4 multicast traffic if the
 * given ethhdr is from an IPv4 packet or the number of nodes which want all
 * IPv6 traffic if it matches an IPv6 packet.
 */
static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
					       struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
	case ETH_P_IPV6:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
	default:
		/* we shouldn't be here... */
		return 0;
	}
}

/**
 * batadv_mcast_forw_rtr_count() - count nodes with a multicast router
 * @bat_priv: the bat priv with all the soft interface information
 * @protocol: the ethernet protocol type to count multicast routers for
 *
 * Return: the number of nodes which want all routable IPv4 multicast traffic
 * if the protocol is ETH_P_IP or the number of nodes which want all routable
 * IPv6 traffic if the protocol is ETH_P_IPV6. Otherwise returns 0.
 */
static int batadv_mcast_forw_rtr_count(struct batadv_priv *bat_priv,
				       int protocol)
{
	switch (protocol) {
	case ETH_P_IP:
		return atomic_read(&bat_priv->mcast.num_want_all_rtr4);
	case ETH_P_IPV6:
		return atomic_read(&bat_priv->mcast.num_want_all_rtr6);
	default:
		return 0;
	}
}

/**
 * batadv_mcast_forw_tt_node_get() - get a multicast tt node
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: the ether header containing the multicast destination
 *
 * Return: an orig_node matching the multicast address provided by ethhdr
 * via a translation table lookup. This increases the returned node's
 * refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
			      struct ethhdr *ethhdr)
{
	return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
					BATADV_NO_FLAGS);
}

/**
 * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_ipv4_list,
				 mcast_want_all_ipv4_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
 * and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_ipv6_list,
				 mcast_want_all_ipv6_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: an ethernet header to determine the protocol family from
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
 * BATADV_MCAST_WANT_ALL_IPV6 flag set, depending on the provided ethhdr, and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
			      struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_ipv4_node_get(bat_priv);
	case ETH_P_IPV6:
		return batadv_mcast_forw_ipv6_node_get(bat_priv);
	default:
		/* we shouldn't be here... */
		return NULL;
	}
}

/**
 * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
 * set and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_unsnoopables_list,
				 mcast_want_all_unsnoopables_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_rtr4_node_get() - get a node with an ipv4 mcast router flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_NO_RTR4 flag unset and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_rtr4_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_rtr4_list,
				 mcast_want_all_rtr4_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_rtr6_node_get() - get a node with an ipv6 mcast router flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_NO_RTR6 flag unset
 * and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_rtr6_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_rtr6_list,
				 mcast_want_all_rtr6_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_rtr_node_get() - get a node with an ipv4/ipv6 router flag
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: an ethernet header to determine the protocol family from
 *
 * Return: an orig_node which has no BATADV_MCAST_WANT_NO_RTR4 or
 * BATADV_MCAST_WANT_NO_RTR6 flag, depending on the provided ethhdr, set and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
			       struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_rtr4_node_get(bat_priv);
	case ETH_P_IPV6:
		return batadv_mcast_forw_rtr6_node_get(bat_priv);
	default:
		/* we shouldn't be here... */
		return NULL;
	}
}

/**
 * batadv_mcast_forw_mode() - check on how to forward a multicast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: The multicast packet to check
 * @orig: an originator to be set to forward the skb to
 *
 * Return: the forwarding mode as enum batadv_forw_mode and in case of
 * BATADV_FORW_SINGLE set the orig to the single originator the skb
 * should be forwarded to.
 */
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
		       struct batadv_orig_node **orig)
{
	int ret, tt_count, ip_count, unsnoop_count, total_count;
	bool is_unsnoopable = false;
	unsigned int mcast_fanout;
	struct ethhdr *ethhdr;
	int is_routable = 0;
	int rtr_count = 0;

	ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
					   &is_routable);
	if (ret == -ENOMEM)
		return BATADV_FORW_NONE;
	else if (ret < 0)
		return BATADV_FORW_ALL;

	ethhdr = eth_hdr(skb);

	tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
					       BATADV_NO_FLAGS);
	ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
	unsnoop_count = !is_unsnoopable ? 0 :
			atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
	rtr_count = batadv_mcast_forw_rtr_count(bat_priv, is_routable);

	total_count = tt_count + ip_count + unsnoop_count + rtr_count;

	switch (total_count) {
	case 1:
		if (tt_count)
			*orig = batadv_mcast_forw_tt_node_get(bat_priv, ethhdr);
		else if (ip_count)
			*orig = batadv_mcast_forw_ip_node_get(bat_priv, ethhdr);
		else if (unsnoop_count)
			*orig = batadv_mcast_forw_unsnoop_node_get(bat_priv);
		else if (rtr_count)
			*orig = batadv_mcast_forw_rtr_node_get(bat_priv,
							       ethhdr);

		if (*orig)
			return BATADV_FORW_SINGLE;

		fallthrough;
	case 0:
		return BATADV_FORW_NONE;
	default:
		mcast_fanout = atomic_read(&bat_priv->multicast_fanout);

		if (!unsnoop_count && total_count <= mcast_fanout)
			return BATADV_FORW_SOME;
	}

	return BATADV_FORW_ALL;
}
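
/* Example (illustrative): for a group with two TT entries (tt_count = 2), one
 * want-all-IPv4 node (ip_count = 1) and no unsnoopable or router interest,
 * total_count is 3. With the default multicast_fanout of 16 this yields
 * BATADV_FORW_SOME, i.e. three individual unicast transmissions instead of a
 * classic flood.
 */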

/**
 * batadv_mcast_forw_send_orig() - send a multicast packet to an originator
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to send
 * @vid: the vlan identifier
 * @orig_node: the originator to send the packet to
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
				struct sk_buff *skb,
				unsigned short vid,
				struct batadv_orig_node *orig_node)
{
	/* Avoid sending multicast-in-unicast packets to other BLA
	 * gateways - they already got the frame from the LAN side
	 * we share with them.
	 * TODO: Refactor to take BLA into account earlier, to avoid
	 * reducing the mcast_fanout count.
	 */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) {
		dev_kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}

/**
 * batadv_mcast_forw_tt() - forwards a packet to multicast listeners
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any multicast
 * listener registered in the translation table. A transmission is performed
 * via a batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
		     unsigned short vid)
{
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;
	struct batadv_tt_orig_list_entry *orig_entry;
	struct batadv_tt_global_entry *tt_global;
	const u8 *addr = eth_hdr(skb)->h_dest;

	tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
	if (!tt_global)
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid,
					    orig_entry->orig_node);
	}
	rcu_read_unlock();

	batadv_tt_global_entry_put(tt_global);

out:
	return ret;
}

/**
 * batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_ALL_IPV4 flag set. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_ipv4_list,
				 mcast_want_all_ipv4_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: The multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_ALL_IPV6 flag set. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_ipv6_list,
				 mcast_want_all_ipv6_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_ALL_IPV4 or BATADV_MCAST_WANT_ALL_IPV6 flag set. A
 * transmission is performed via a batman-adv unicast packet for each such
 * destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
static int
batadv_mcast_forw_want_all(struct batadv_priv *bat_priv,
			   struct sk_buff *skb, unsigned short vid)
{
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
	case ETH_P_IPV6:
		return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
	default:
		/* we shouldn't be here... */
		return NET_XMIT_DROP;
	}
}

/**
 * batadv_mcast_forw_want_all_rtr4() - forward to nodes with want-all-rtr4
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_NO_RTR4 flag unset. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_rtr4_list,
				 mcast_want_all_rtr4_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_all_rtr6() - forward to nodes with want-all-rtr6
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: The multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_NO_RTR6 flag unset. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_rtr6_list,
				 mcast_want_all_rtr6_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_rtr() - forward packet to nodes in a want-all-rtr list
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_NO_RTR4 or BATADV_MCAST_WANT_NO_RTR6 flag unset. A
 * transmission is performed via a batman-adv unicast packet for each such
 * destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
static int
batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
			   struct sk_buff *skb, unsigned short vid)
{
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_want_all_rtr4(bat_priv, skb, vid);
	case ETH_P_IPV6:
		return batadv_mcast_forw_want_all_rtr6(bat_priv, skb, vid);
	default:
		/* we shouldn't be here... */
		return NET_XMIT_DROP;
	}
}

/**
 * batadv_mcast_forw_send() - send packet to any detected multicast recipient
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node that signaled
 * interest in it, that is either via the translation table or the according
 * want-all flags. A transmission is performed via a batman-adv unicast packet
 * for each such destination node.
 *
 * The given skb is consumed/freed.
 *
 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	int ret;

	ret = batadv_mcast_forw_tt(bat_priv, skb, vid);
	if (ret != NET_XMIT_SUCCESS) {
		kfree_skb(skb);
		return ret;
	}

	ret = batadv_mcast_forw_want_all(bat_priv, skb, vid);
	if (ret != NET_XMIT_SUCCESS) {
		kfree_skb(skb);
		return ret;
	}

	ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
	if (ret != NET_XMIT_SUCCESS) {
		kfree_skb(skb);
		return ret;
	}

	consume_skb(skb);
	return ret;
}

/**
 * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
 * orig, has toggled then this method updates the counter and the list
 * accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
					     struct batadv_orig_node *orig,
					     u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
		atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
		atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}
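
/* Example (illustrative): if an originator's TVLV update clears its
 * previously set BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag, the second branch
 * above decrements num_want_all_unsnoopables and unlinks the originator from
 * want_all_unsnoopables_list - and vice versa when the flag is newly set.
 * The same toggle pattern is repeated below for the IPv4/IPv6 and router
 * variants.
 */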

/**
 * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node whose multicast state might have changed
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
 * orig, has toggled then this method updates the counter and the list
 * accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
					     struct batadv_orig_node *orig,
					     u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
		atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
		atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}

/**
 * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node whose multicast state might have changed
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
 * toggled then this method updates the counter and the list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
		atomic_inc(&bat_priv->mcast.num_want_all_ipv4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) {
		atomic_dec(&bat_priv->mcast.num_want_all_ipv4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}

/**
 * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node whose multicast state might have changed
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
 * toggled then this method updates the counter and the list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
		atomic_inc(&bat_priv->mcast.num_want_all_ipv6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
		atomic_dec(&bat_priv->mcast.num_want_all_ipv6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}
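
/* Locking of the want_* lists, as used by the batadv_mcast_want_*_update()
 * helpers: orig->mcast_handler_lock serializes the flag transition of a
 * single originator, bat_priv->mcast.want_lists_lock protects the list heads
 * against concurrent updates for different originators, and readers such as
 * batadv_mcast_forw_want_all_rtr6() above walk the lists under
 * rcu_read_lock() only. The num_want_all_* counters are updated atomically.
 */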

/**
 * batadv_mcast_want_rtr4_update() - update want-all-rtr4 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node whose multicast state might have changed
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has
 * toggled then this method updates the counter and the list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_rtr4_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_rtr4_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag set to unset */
	if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR4) &&
	    orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4) {
		atomic_inc(&bat_priv->mcast.num_want_all_rtr4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag unset to set */
	} else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR4 &&
		   !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4)) {
		atomic_dec(&bat_priv->mcast.num_want_all_rtr4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}
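
/* Note the inverted semantics compared to the BATADV_MCAST_WANT_ALL_* lists
 * above: the want-all-rtr lists collect originators which have the
 * corresponding BATADV_MCAST_WANT_NO_RTR* flag unset, i.e. nodes which still
 * want all routable multicast traffic. The hypothetical helper below, which
 * is not compiled, merely restates that relation.
 */
#if 0
static bool
batadv_mcast_orig_wants_rtr4_example(const struct batadv_orig_node *orig)
{
	/* listed in want_all_rtr4_list <=> WANT_NO_RTR4 is not announced */
	return !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4);
}
#endif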

/**
 * batadv_mcast_want_rtr6_update() - update want-all-rtr6 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node whose multicast state might have changed
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has
 * toggled then this method updates the counter and the list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_rtr6_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_rtr6_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_rtr6_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag set to unset */
	if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR6) &&
	    orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6) {
		atomic_inc(&bat_priv->mcast.num_want_all_rtr6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag unset to set */
	} else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR6 &&
		   !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6)) {
		atomic_dec(&bat_priv->mcast.num_want_all_rtr6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}

/**
 * batadv_mcast_tvlv_flags_get() - get multicast flags from an OGM TVLV
 * @enabled: whether the originator has multicast TVLV support enabled
 * @tvlv_value: tvlv buffer containing the multicast flags
 * @tvlv_value_len: tvlv buffer length
 *
 * Return: multicast flags for the given tvlv buffer
 */
static u8
batadv_mcast_tvlv_flags_get(bool enabled, void *tvlv_value, u16 tvlv_value_len)
{
	u8 mcast_flags = BATADV_NO_FLAGS;

	if (enabled && tvlv_value && tvlv_value_len >= sizeof(mcast_flags))
		mcast_flags = *(u8 *)tvlv_value;

	if (!enabled) {
		mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4;
		mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6;
	}

	/* remove redundant flags to avoid sending duplicate packets later */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)
		mcast_flags |= BATADV_MCAST_WANT_NO_RTR4;

	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)
		mcast_flags |= BATADV_MCAST_WANT_NO_RTR6;

	return mcast_flags;
}
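
/* Worked example, added for clarity: for an originator without multicast
 * TVLV support, enabled is false and the TVLV buffer is ignored, so
 * batadv_mcast_tvlv_flags_get() returns
 *   BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6 |
 *   BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6
 * A node which already receives all IPv4/IPv6 multicast does not need to be
 * listed in the want-all-rtr lists as well, which is why the WANT_NO_RTR*
 * bits are forced on above.
 */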

/**
 * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node of the ogm
 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
 * @tvlv_value: tvlv buffer containing the multicast data
 * @tvlv_value_len: tvlv buffer length
 */
static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 flags,
					  void *tvlv_value,
					  u16 tvlv_value_len)
{
	bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
	u8 mcast_flags;

	mcast_flags = batadv_mcast_tvlv_flags_get(orig_mcast_enabled,
						  tvlv_value, tvlv_value_len);

	spin_lock_bh(&orig->mcast_handler_lock);

	if (orig_mcast_enabled &&
	    !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
		set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
	} else if (!orig_mcast_enabled &&
		   test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
		clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
	}

	set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);

	batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
	batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
	batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
	batadv_mcast_want_rtr4_update(bat_priv, orig, mcast_flags);
	batadv_mcast_want_rtr6_update(bat_priv, orig, mcast_flags);

	orig->mcast_flags = mcast_flags;
	spin_unlock_bh(&orig->mcast_handler_lock);
}

/**
 * batadv_mcast_init() - initialize the multicast optimizations structures
 * @bat_priv: the bat priv with all the soft interface information
 */
void batadv_mcast_init(struct batadv_priv *bat_priv)
{
	batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
				     NULL, BATADV_TVLV_MCAST, 2,
				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);

	INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
	batadv_mcast_start_timer(bat_priv);
}

/**
 * batadv_mcast_mesh_info_put() - put multicast info into a netlink message
 * @msg: buffer for the message
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 or error code.
 */
int batadv_mcast_mesh_info_put(struct sk_buff *msg,
			       struct batadv_priv *bat_priv)
{
	u32 flags = bat_priv->mcast.mla_flags.tvlv_flags;
	u32 flags_priv = BATADV_NO_FLAGS;

	if (bat_priv->mcast.mla_flags.bridged) {
		flags_priv |= BATADV_MCAST_FLAGS_BRIDGED;

		if (bat_priv->mcast.mla_flags.querier_ipv4.exists)
			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS;
		if (bat_priv->mcast.mla_flags.querier_ipv6.exists)
			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS;
		if (bat_priv->mcast.mla_flags.querier_ipv4.shadowing)
			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING;
		if (bat_priv->mcast.mla_flags.querier_ipv6.shadowing)
			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING;
	}

	if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) ||
	    nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv))
		return -EMSGSIZE;

	return 0;
}

/**
 * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table
 *  to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @orig_node: originator to dump the multicast flags of
 *
 * Return: 0 or error code.
 */
static int
batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid,
			      struct netlink_callback *cb,
			      struct batadv_orig_node *orig_node)
{
	void *hdr;

	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
			  &batadv_netlink_family, NLM_F_MULTI,
			  BATADV_CMD_GET_MCAST_FLAGS);
	if (!hdr)
		return -ENOBUFS;

	genl_dump_check_consistent(cb, hdr);

	if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
		    orig_node->orig)) {
		genlmsg_cancel(msg, hdr);
		return -EMSGSIZE;
	}

	if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
		     &orig_node->capabilities)) {
		if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS,
				orig_node->mcast_flags)) {
			genlmsg_cancel(msg, hdr);
			return -EMSGSIZE;
		}
	}

	genlmsg_end(msg, hdr);
	return 0;
}

/**
 * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags
 *  table to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @hash: hash to dump
 * @bucket: bucket index to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 or error code.
 */
static int
batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid,
			       struct netlink_callback *cb,
			       struct batadv_hashtable *hash,
			       unsigned int bucket, long *idx_skip)
{
	struct batadv_orig_node *orig_node;
	long idx = 0;

	spin_lock_bh(&hash->list_locks[bucket]);
	cb->seq = atomic_read(&hash->generation) << 1 | 1;

	hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
		if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
			      &orig_node->capa_initialized))
			continue;

		if (idx < *idx_skip)
			goto skip;

		if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) {
			spin_unlock_bh(&hash->list_locks[bucket]);
			*idx_skip = idx;

			return -EMSGSIZE;
		}

skip:
		idx++;
	}
	spin_unlock_bh(&hash->list_locks[bucket]);

	return 0;
}

/**
 * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @bat_priv: the bat priv with all the soft interface information
 * @bucket: current bucket to dump
 * @idx: index in current bucket to the next entry to dump
 *
 * Return: length of the message built so far (msg->len).
 */
static int
__batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
			  struct netlink_callback *cb,
			  struct batadv_priv *bat_priv, long *bucket, long *idx)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	long bucket_tmp = *bucket;
	long idx_tmp = *idx;

	while (bucket_tmp < hash->size) {
		if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
						   bucket_tmp, &idx_tmp))
			break;

		bucket_tmp++;
		idx_tmp = 0;
	}

	*bucket = bucket_tmp;
	*idx = idx_tmp;

	return msg->len;
}
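
/* Note on dump resumption, added for clarity: a netlink dump may be split
 * across several calls. The bucket/index pair kept in cb->args[0] and
 * cb->args[1] (see batadv_mcast_flags_dump() below) records where the
 * previous call stopped, and cb->seq is derived from the originator hash
 * generation counter so that genl_dump_check_consistent() can set
 * NLM_F_DUMP_INTR on later messages if the table changed while the dump was
 * running.
 */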

/**
 * batadv_mcast_netlink_get_primary() - get primary interface from netlink
 *  callback
 * @cb: netlink callback structure
 * @primary_if: the primary interface pointer to return the result in
 *
 * Return: 0 or error code.
 */
static int
batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
				 struct batadv_hard_iface **primary_if)
{
	struct batadv_hard_iface *hard_iface = NULL;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;
	int ifindex;
	int ret = 0;

	ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);

	hard_iface = batadv_primary_if_get_selected(bat_priv);
	if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

out:
	dev_put(soft_iface);

	if (!ret && primary_if)
		*primary_if = hard_iface;
	else
		batadv_hardif_put(hard_iface);

	return ret;
}

/**
 * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length or a negative error code.
 */
int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	int portid = NETLINK_CB(cb->skb).portid;
	struct batadv_priv *bat_priv;
	long *bucket = &cb->args[0];
	long *idx = &cb->args[1];
	int ret;

	ret = batadv_mcast_netlink_get_primary(cb, &primary_if);
	if (ret)
		return ret;

	bat_priv = netdev_priv(primary_if->soft_iface);
	ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);

	batadv_hardif_put(primary_if);
	return ret;
}

/**
 * batadv_mcast_free() - free the multicast optimizations structures
 * @bat_priv: the bat priv with all the soft interface information
 */
void batadv_mcast_free(struct batadv_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->mcast.work);

	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);

	/* safely calling outside of worker, as worker was canceled above */
	batadv_mcast_mla_tt_retract(bat_priv, NULL);
}

/**
 * batadv_mcast_purge_orig() - reset originator global mcast state modifications
 * @orig: the originator which is going to get purged
 */
void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
{
	struct batadv_priv *bat_priv = orig->bat_priv;

	spin_lock_bh(&orig->mcast_handler_lock);

	batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
	batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
	batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
	batadv_mcast_want_rtr4_update(bat_priv, orig,
				      BATADV_MCAST_WANT_NO_RTR4);
	batadv_mcast_want_rtr6_update(bat_priv, orig,
				      BATADV_MCAST_WANT_NO_RTR6);

	spin_unlock_bh(&orig->mcast_handler_lock);
}
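
/* Illustrative lifecycle sketch, not part of the original file: the expected
 * call order of the public entry points above. Where exactly these are wired
 * into the soft-interface setup/teardown and originator purge paths is not
 * shown in this file and is assumed here; the block is not compiled.
 */
#if 0
static void batadv_mcast_lifecycle_example(struct batadv_priv *bat_priv,
					   struct batadv_orig_node *orig)
{
	/* on soft interface setup: register the TVLV handler and start the
	 * periodic MLA worker
	 */
	batadv_mcast_init(bat_priv);

	/* whenever an originator goes away: drop it from all want-* lists */
	batadv_mcast_purge_orig(orig);

	/* on soft interface teardown: stop the worker and unregister the
	 * TVLV container/handler
	 */
	batadv_mcast_free(bat_priv);
}
#endif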