// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Linus Lüssing
 */

#include "multicast.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/icmpv6.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sprintf.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/addrconf.h>
#include <net/genetlink.h>
#include <net/if_inet6.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "bridge_loop_avoidance.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "send.h"
#include "translation-table.h"
#include "tvlv.h"

static void batadv_mcast_mla_update(struct work_struct *work);

/**
 * batadv_mcast_start_timer() - schedule the multicast periodic worker
 * @bat_priv: the bat priv with all the soft interface information
 */
static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
{
	queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
			   msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
}

/**
 * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
 * @soft_iface: netdev struct of the mesh interface
 *
 * If the given soft interface has a bridge on top then the refcount
 * of the according net device is increased.
 *
 * Return: NULL if no such bridge exists. Otherwise the net device of the
 * bridge.
 */
static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
{
	struct net_device *upper = soft_iface;

	rcu_read_lock();
	do {
		upper = netdev_master_upper_dev_get_rcu(upper);
	} while (upper && !netif_is_bridge_master(upper));

	dev_hold(upper);
	rcu_read_unlock();

	return upper;
}

/**
 * batadv_mcast_mla_rtr_flags_softif_get_ipv4() - get mcast router flags from
 *  node for IPv4
 * @dev: the interface to check
 *
 * Checks the presence of an IPv4 multicast router on this node.
 *
 * Caller needs to hold rcu read lock.
 *
 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR4 otherwise.
 */
static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);

	if (in_dev && IN_DEV_MFORWARD(in_dev))
		return BATADV_NO_FLAGS;
	else
		return BATADV_MCAST_WANT_NO_RTR4;
}

/**
 * batadv_mcast_mla_rtr_flags_softif_get_ipv6() - get mcast router flags from
 *  node for IPv6
 * @dev: the interface to check
 *
 * Checks the presence of an IPv6 multicast router on this node.
 *
 * Caller needs to hold rcu read lock.
 *
 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR6 otherwise.
 */
#if IS_ENABLED(CONFIG_IPV6_MROUTE)
static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
{
	struct inet6_dev *in6_dev = __in6_dev_get(dev);

	if (in6_dev && atomic_read(&in6_dev->cnf.mc_forwarding))
		return BATADV_NO_FLAGS;
	else
		return BATADV_MCAST_WANT_NO_RTR6;
}
#else
static inline u8
batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
{
	return BATADV_MCAST_WANT_NO_RTR6;
}
#endif

/**
 * batadv_mcast_mla_rtr_flags_softif_get() - get mcast router flags from node
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers on this
 * node.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv,
						struct net_device *bridge)
{
	struct net_device *dev = bridge ? bridge : bat_priv->soft_iface;
	u8 flags = BATADV_NO_FLAGS;

	rcu_read_lock();

	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv4(dev);
	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv6(dev);

	rcu_read_unlock();

	return flags;
}

/**
 * batadv_mcast_mla_rtr_flags_bridge_get() - get mcast router flags from bridge
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers behind a bridge.
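 *
 * Note (added for clarity, derived from the code below): without a bridge
 * on top both BATADV_MCAST_WANT_NO_RTR4 and BATADV_MCAST_WANT_NO_RTR6 are
 * returned right away, i.e. "no multicast router behind a bridge" is then
 * assumed unconditionally.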
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
						struct net_device *bridge)
{
	struct net_device *dev = bat_priv->soft_iface;
	u8 flags = BATADV_NO_FLAGS;

	if (!bridge)
		return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;

	if (!br_multicast_has_router_adjacent(dev, ETH_P_IP))
		flags |= BATADV_MCAST_WANT_NO_RTR4;
	if (!br_multicast_has_router_adjacent(dev, ETH_P_IPV6))
		flags |= BATADV_MCAST_WANT_NO_RTR6;

	return flags;
}

/**
 * batadv_mcast_mla_rtr_flags_get() - get multicast router flags
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers on this
 * node or behind its bridge.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv,
					 struct net_device *bridge)
{
	u8 flags = BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;

	flags &= batadv_mcast_mla_rtr_flags_softif_get(bat_priv, bridge);
	flags &= batadv_mcast_mla_rtr_flags_bridge_get(bat_priv, bridge);

	return flags;
}

/**
 * batadv_mcast_mla_forw_flags_get() - get multicast forwarding flags
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Checks if all active hard interfaces have an MTU larger or equal to 1280
 * bytes (IPv6 minimum MTU).
 *
 * Return: BATADV_MCAST_HAVE_MC_PTYPE_CAPA if yes, BATADV_NO_FLAGS otherwise.
 */
static u8 batadv_mcast_mla_forw_flags_get(struct batadv_priv *bat_priv)
{
	const struct batadv_hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		if (hard_iface->net_dev->mtu < IPV6_MIN_MTU) {
			rcu_read_unlock();
			return BATADV_NO_FLAGS;
		}
	}
	rcu_read_unlock();

	return BATADV_MCAST_HAVE_MC_PTYPE_CAPA;
}

/**
 * batadv_mcast_mla_flags_get() - get the new multicast flags
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: A set of flags for the current/next TVLV, querier and
 * bridge state.
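 *
 * As an illustration (added note, derived from the checks in this function):
 * on a bridged node without any IGMP/MLD querier the returned tvlv_flags
 * will typically contain BATADV_MCAST_WANT_ALL_UNSNOOPABLES,
 * BATADV_MCAST_WANT_ALL_IPV4 and BATADV_MCAST_WANT_ALL_IPV6, with the
 * corresponding no-router flags cleared again.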
 */
static struct batadv_mcast_mla_flags
batadv_mcast_mla_flags_get(struct batadv_priv *bat_priv)
{
	struct net_device *dev = bat_priv->soft_iface;
	struct batadv_mcast_querier_state *qr4, *qr6;
	struct batadv_mcast_mla_flags mla_flags;
	struct net_device *bridge;

	bridge = batadv_mcast_get_bridge(dev);

	memset(&mla_flags, 0, sizeof(mla_flags));
	mla_flags.enabled = 1;
	mla_flags.tvlv_flags |= batadv_mcast_mla_rtr_flags_get(bat_priv,
							       bridge);
	mla_flags.tvlv_flags |= batadv_mcast_mla_forw_flags_get(bat_priv);

	if (!bridge)
		return mla_flags;

	dev_put(bridge);

	mla_flags.bridged = 1;
	qr4 = &mla_flags.querier_ipv4;
	qr6 = &mla_flags.querier_ipv6;

	if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
		pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");

	qr4->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
	qr4->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);

	qr6->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
	qr6->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);

	mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;

	/* 1) If no querier exists at all, then multicast listeners on
	 *    our local TT clients behind the bridge will keep silent.
	 * 2) If the selected querier is on one of our local TT clients,
	 *    behind the bridge, then this querier might shadow multicast
	 *    listeners on our local TT clients, behind this bridge.
	 *
	 * In both cases, we will signalize other batman nodes that
	 * we need all multicast traffic of the according protocol.
	 */
	if (!qr4->exists || qr4->shadowing) {
		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV4;
		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR4;
	}

	if (!qr6->exists || qr6->shadowing) {
		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV6;
		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR6;
	}

	return mla_flags;
}

/**
 * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
 * @mcast_addr: the multicast address to check
 * @mcast_list: the list with multicast addresses to search in
 *
 * Return: true if the given address is already in the given list.
 * Otherwise returns false.
 */
static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
					  struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;

	hlist_for_each_entry(mcast_entry, mcast_list, list)
		if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
			return true;

	return false;
}

/**
 * batadv_mcast_mla_softif_get_ipv4() - get softif IPv4 multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of IPv4 multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int
batadv_mcast_mla_softif_get_ipv4(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	struct batadv_hw_addr *new;
	struct in_device *in_dev;
	u8 mcast_addr[ETH_ALEN];
	struct ip_mc_list *pmc;
	int ret = 0;

	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
		return 0;

	rcu_read_lock();

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		return 0;
	}

	for (pmc = rcu_dereference(in_dev->mc_list); pmc;
	     pmc = rcu_dereference(pmc->next_rcu)) {
		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
		    ipv4_is_local_multicast(pmc->multiaddr))
			continue;

		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
		    !ipv4_is_local_multicast(pmc->multiaddr))
			continue;

		ip_eth_mc_map(pmc->multiaddr, mcast_addr);

		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	rcu_read_unlock();

	return ret;
}

/**
 * batadv_mcast_mla_softif_get_ipv6() - get softif IPv6 multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of IPv6 multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
#if IS_ENABLED(CONFIG_IPV6)
static int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	struct batadv_hw_addr *new;
	struct inet6_dev *in6_dev;
	u8 mcast_addr[ETH_ALEN];
	struct ifmcaddr6 *pmc6;
	int ret = 0;

	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
		return 0;

	rcu_read_lock();

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev) {
		rcu_read_unlock();
		return 0;
	}

	for (pmc6 = rcu_dereference(in6_dev->mc_list);
	     pmc6;
	     pmc6 = rcu_dereference(pmc6->next)) {
		if (IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) <
		    IPV6_ADDR_SCOPE_LINKLOCAL)
			continue;

		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
		    ipv6_addr_is_ll_all_nodes(&pmc6->mca_addr))
			continue;

		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
		    IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) >
		    IPV6_ADDR_SCOPE_LINKLOCAL)
			continue;

		ipv6_eth_mc_map(&pmc6->mca_addr, mcast_addr);

		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	rcu_read_unlock();

	return ret;
}
#else
static inline int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	return 0;
}
#endif

/**
 * batadv_mcast_mla_softif_get() - get softif multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * If there is a bridge interface on top of dev, collect from that one
 * instead. Just like with IP addresses and routes, multicast listeners
 * will(/should) register to the bridge interface instead of an
 * enslaved bat0.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int
batadv_mcast_mla_softif_get(struct net_device *dev,
			    struct hlist_head *mcast_list,
			    struct batadv_mcast_mla_flags *flags)
{
	struct net_device *bridge = batadv_mcast_get_bridge(dev);
	int ret4, ret6 = 0;

	if (bridge)
		dev = bridge;

	ret4 = batadv_mcast_mla_softif_get_ipv4(dev, mcast_list, flags);
	if (ret4 < 0)
		goto out;

	ret6 = batadv_mcast_mla_softif_get_ipv6(dev, mcast_list, flags);
	if (ret6 < 0) {
		ret4 = 0;
		goto out;
	}

out:
	dev_put(bridge);

	return ret4 + ret6;
}

/**
 * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
 * @dst: destination to write to - a multicast MAC address
 * @src: source to read from - a multicast IP address
 *
 * Converts a given multicast IPv4/IPv6 address from a bridge
 * to its matching multicast MAC address and copies it into the given
 * destination buffer.
 *
 * Caller needs to make sure the destination buffer can hold
 * at least ETH_ALEN bytes.
 */
static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
{
	if (src->proto == htons(ETH_P_IP))
		ip_eth_mc_map(src->dst.ip4, dst);
#if IS_ENABLED(CONFIG_IPV6)
	else if (src->proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&src->dst.ip6, dst);
#endif
	else
		eth_zero_addr(dst);
}

/**
 * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
 * @dev: a bridge slave whose bridge to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of multicast listeners residing
 * on foreign, non-mesh devices which we gave access to our mesh via
 * a bridge on top of the given soft interface, dev, in the given
 * mcast_list.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int batadv_mcast_mla_bridge_get(struct net_device *dev,
				       struct hlist_head *mcast_list,
				       struct batadv_mcast_mla_flags *flags)
{
	struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
	struct br_ip_list *br_ip_entry, *tmp;
	u8 tvlv_flags = flags->tvlv_flags;
	struct batadv_hw_addr *new;
	u8 mcast_addr[ETH_ALEN];
	int ret;

	/* we don't need to detect these devices/listeners, the IGMP/MLD
	 * snooping code of the Linux bridge already does that for us
	 */
	ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
	if (ret < 0)
		goto out;

	list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
		if (br_ip_entry->addr.proto == htons(ETH_P_IP)) {
			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
				continue;

			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
			    ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
				continue;

			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
			    !ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
				continue;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (br_ip_entry->addr.proto == htons(ETH_P_IPV6)) {
			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
				continue;

			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
			    ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.dst.ip6))
				continue;

			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
			    IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.dst.ip6) >
			    IPV6_ADDR_SCOPE_LINKLOCAL)
				continue;
		}
#endif

		batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
	}

out:
	list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
		list_del(&br_ip_entry->list);
		kfree(br_ip_entry);
	}

	return ret;
}

/**
 * batadv_mcast_mla_list_free() - free a list of multicast addresses
 * @mcast_list: the list to free
 *
 * Removes and frees all items in the given mcast_list.
 */
static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}

/**
 * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which should _not_ be removed
 *
 * Retracts the announcement of any multicast listener from the
 * translation table except the ones listed in the given mcast_list.
 *
 * If mcast_list is NULL then all are retracted.
 */
static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
					struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
				  list) {
		if (mcast_list &&
		    batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  mcast_list))
			continue;

		batadv_tt_local_remove(bat_priv, mcast_entry->addr,
				       BATADV_NO_FLAGS,
				       "mcast TT outdated", false);

		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}

/**
 * batadv_mcast_mla_tt_add() - add multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which are going to get added
 *
 * Adds multicast listener announcements from the given mcast_list to the
 * translation table if they have not been added yet.
 */
static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
				    struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	if (!mcast_list)
		return;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  &bat_priv->mcast.mla_list))
			continue;

		if (!batadv_tt_local_add(bat_priv->soft_iface,
					 mcast_entry->addr, BATADV_NO_FLAGS,
					 BATADV_NULL_IFINDEX, BATADV_NO_MARK))
			continue;

		hlist_del(&mcast_entry->list);
		hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
	}
}

/**
 * batadv_mcast_querier_log() - debug output regarding the querier status on
 *  link
 * @bat_priv: the bat priv with all the soft interface information
 * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
 * @old_state: the previous querier state on our link
 * @new_state: the new querier state on our link
 *
 * Outputs debug messages to the logging facility with log level 'mcast'
 * regarding changes to the querier status on the link which are relevant
 * to our multicast optimizations.
 *
 * Usually this is about whether a querier appeared or vanished in
 * our mesh or whether the querier is in the suboptimal position of being
 * behind our local bridge segment: Snooping switches will directly
 * forward listener reports to the querier, therefore batman-adv and
 * the bridge will potentially not see these listeners - the querier is
 * potentially shadowing listeners from us then.
 *
 * This is only interesting for nodes with a bridge on top of their
 * soft interface.
 */
static void
batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
			 struct batadv_mcast_querier_state *old_state,
			 struct batadv_mcast_querier_state *new_state)
{
	if (!old_state->exists && new_state->exists)
		batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
			    str_proto);
	else if (old_state->exists && !new_state->exists)
		batadv_info(bat_priv->soft_iface,
			    "%s Querier disappeared - multicast optimizations disabled\n",
			    str_proto);
	else if (!bat_priv->mcast.mla_flags.bridged && !new_state->exists)
		batadv_info(bat_priv->soft_iface,
			    "No %s Querier present - multicast optimizations disabled\n",
			    str_proto);

	if (new_state->exists) {
		if ((!old_state->shadowing && new_state->shadowing) ||
		    (!old_state->exists && new_state->shadowing))
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is behind our bridged segment: Might shadow listeners\n",
				   str_proto);
		else if (old_state->shadowing && !new_state->shadowing)
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is not behind our bridged segment\n",
				   str_proto);
	}
}

/**
 * batadv_mcast_bridge_log() - debug output for topology changes in bridged
 *  setups
 * @bat_priv: the bat priv with all the soft interface information
 * @new_flags: flags indicating the new multicast state
 *
 * If no bridges are ever used on this node, then this function does nothing.
 *
 * Otherwise this function outputs debug information to the 'mcast' log level
 * which might be relevant to our multicast optimizations.
 *
 * More precisely, it outputs information when a bridge interface is added or
 * removed from a soft interface. And when a bridge is present, it further
 * outputs information about the querier state which is relevant for the
 * multicast flags this node is going to set.
 */
static void
batadv_mcast_bridge_log(struct batadv_priv *bat_priv,
			struct batadv_mcast_mla_flags *new_flags)
{
	struct batadv_mcast_mla_flags *old_flags = &bat_priv->mcast.mla_flags;

	if (!old_flags->bridged && new_flags->bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge added: Setting Unsnoopables(U)-flag\n");
	else if (old_flags->bridged && !new_flags->bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge removed: Unsetting Unsnoopables(U)-flag\n");

	if (new_flags->bridged) {
		batadv_mcast_querier_log(bat_priv, "IGMP",
					 &old_flags->querier_ipv4,
					 &new_flags->querier_ipv4);
		batadv_mcast_querier_log(bat_priv, "MLD",
					 &old_flags->querier_ipv6,
					 &new_flags->querier_ipv6);
	}
}

/**
 * batadv_mcast_flags_log() - output debug information about mcast flag changes
 * @bat_priv: the bat priv with all the soft interface information
 * @flags: TVLV flags indicating the new multicast state
 *
 * Whenever the multicast TVLV flags this node announces change, this function
 * should be used to notify userspace about the change.
 */
static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
{
	bool old_enabled = bat_priv->mcast.mla_flags.enabled;
	u8 old_flags = bat_priv->mcast.mla_flags.tvlv_flags;
	char str_old_flags[] = "[.... . .]";

	sprintf(str_old_flags, "[%c%c%c%s%s%c]",
		(old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
		!(old_flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
		!(old_flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ",
		!(old_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA) ? 'P' : '.');

	batadv_dbg(BATADV_DBG_MCAST, bat_priv,
		   "Changing multicast flags from '%s' to '[%c%c%c%s%s%c]'\n",
		   old_enabled ? str_old_flags : "<undefined>",
		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
		   !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
		   !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ",
		   !(flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA) ? 'P' : '.');
}

/**
 * batadv_mcast_mla_flags_update() - update multicast flags
 * @bat_priv: the bat priv with all the soft interface information
 * @flags: flags indicating the new multicast state
 *
 * Updates the own multicast tvlv with our current multicast related settings,
 * capabilities and inabilities.
 */
static void
batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv,
			      struct batadv_mcast_mla_flags *flags)
{
	struct batadv_tvlv_mcast_data mcast_data;

	if (!memcmp(flags, &bat_priv->mcast.mla_flags, sizeof(*flags)))
		return;

	batadv_mcast_bridge_log(bat_priv, flags);
	batadv_mcast_flags_log(bat_priv, flags->tvlv_flags);

	mcast_data.flags = flags->tvlv_flags;
	memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));

	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
				       &mcast_data, sizeof(mcast_data));

	bat_priv->mcast.mla_flags = *flags;
}

/**
 * __batadv_mcast_mla_update() - update the own MLAs
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
 * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
 * ensured by the non-parallel execution of the worker this function
 * belongs to.
 */
static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
{
	struct net_device *soft_iface = bat_priv->soft_iface;
	struct hlist_head mcast_list = HLIST_HEAD_INIT;
	struct batadv_mcast_mla_flags flags;
	int ret;

	flags = batadv_mcast_mla_flags_get(bat_priv);

	ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list, &flags);
	if (ret < 0)
		goto out;

	ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list, &flags);
	if (ret < 0)
		goto out;

	spin_lock(&bat_priv->mcast.mla_lock);
	batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
	batadv_mcast_mla_flags_update(bat_priv, &flags);
	spin_unlock(&bat_priv->mcast.mla_lock);

out:
	batadv_mcast_mla_list_free(&mcast_list);
}

/**
 * batadv_mcast_mla_update() - update the own MLAs
 * @work: kernel work struct
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * In the end, reschedules the work timer.
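 *
 * (The worker is rescheduled with a delay of BATADV_MCAST_WORK_PERIOD via
 * batadv_mcast_start_timer(), see above.)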
 */
static void batadv_mcast_mla_update(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv_mcast *priv_mcast;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
	bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);

	__batadv_mcast_mla_update(bat_priv);
	batadv_mcast_start_timer(bat_priv);
}

/**
 * batadv_mcast_is_report_ipv4() - check for IGMP reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid IGMP report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
{
	if (ip_mc_check_igmp(skb) < 0)
		return false;

	switch (igmp_hdr(skb)->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv4 packet to check
 * @is_unsnoopable: stores whether the destination is unsnoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given IPv4 packet has the potential to be forwarded with a
 * mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
 * allocation failure.
 */
static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable,
					     int *is_routable)
{
	struct iphdr *iphdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
		return -ENOMEM;

	if (batadv_mcast_is_report_ipv4(skb))
		return -EINVAL;

	iphdr = ip_hdr(skb);

	/* link-local multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 2.1.2.2)
	 */
	if (ipv4_is_local_multicast(iphdr->daddr))
		*is_unsnoopable = true;
	else
		*is_routable = ETH_P_IP;

	return 0;
}

/**
 * batadv_mcast_is_report_ipv6() - check for MLD reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid MLD report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
{
	if (ipv6_mc_check_mld(skb) < 0)
		return false;

	switch (icmp6_hdr(skb)->icmp6_type) {
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MLD2_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv6 packet to check
 * @is_unsnoopable: stores whether the destination is unsnoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given IPv6 packet has the potential to be forwarded with a
 * mode more optimal than classic flooding.
 *
 * Return: If so then 0.
 * Otherwise -EINVAL or -ENOMEM in case of memory allocation failure.
 */
static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable,
					     int *is_routable)
{
	struct ipv6hdr *ip6hdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
		return -ENOMEM;

	if (batadv_mcast_is_report_ipv6(skb))
		return -EINVAL;

	ip6hdr = ipv6_hdr(skb);

	if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return -EINVAL;

	/* link-local-all-nodes multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 3, paragraph 3)
	 */
	if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
		*is_unsnoopable = true;
	else if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) > IPV6_ADDR_SCOPE_LINKLOCAL)
		*is_routable = ETH_P_IPV6;

	return 0;
}

/**
 * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast frame to check
 * @is_unsnoopable: stores whether the destination is unsnoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given multicast ethernet frame has the potential to be
 * forwarded with a mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
 * allocation failure.
 */
static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
					struct sk_buff *skb,
					bool *is_unsnoopable,
					int *is_routable)
{
	struct ethhdr *ethhdr = eth_hdr(skb);

	if (!atomic_read(&bat_priv->multicast_mode))
		return -EINVAL;

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
							 is_unsnoopable,
							 is_routable);
	case ETH_P_IPV6:
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EINVAL;

		return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
							 is_unsnoopable,
							 is_routable);
	default:
		return -EINVAL;
	}
}

/**
 * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
 *  interest
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: ethernet header of a packet
 *
 * Return: the number of nodes which want all IPv4 multicast traffic if the
 * given ethhdr is from an IPv4 packet or the number of nodes which want all
 * IPv6 traffic if it matches an IPv6 packet.
 */
static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
					       struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
	case ETH_P_IPV6:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
	default:
		/* we shouldn't be here... */
		return 0;
	}
}

/**
 * batadv_mcast_forw_rtr_count() - count nodes with a multicast router
 * @bat_priv: the bat priv with all the soft interface information
 * @protocol: the ethernet protocol type to count multicast routers for
 *
 * Return: the number of nodes which want all routable IPv4 multicast traffic
 * if the protocol is ETH_P_IP or the number of nodes which want all routable
 * IPv6 traffic if the protocol is ETH_P_IPV6.
 * Otherwise returns 0.
 */
static int batadv_mcast_forw_rtr_count(struct batadv_priv *bat_priv,
				       int protocol)
{
	switch (protocol) {
	case ETH_P_IP:
		return atomic_read(&bat_priv->mcast.num_want_all_rtr4);
	case ETH_P_IPV6:
		return atomic_read(&bat_priv->mcast.num_want_all_rtr6);
	default:
		return 0;
	}
}

/**
 * batadv_mcast_forw_mode_by_count() - get forwarding mode by count
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to check
 * @vid: the vlan identifier
 * @is_routable: stores whether the destination is routable
 * @count: the number of originators the multicast packet needs to be sent to
 *
 * For a multicast packet with multiple destination originators, checks which
 * mode to use. For BATADV_FORW_MCAST it also encapsulates the packet with a
 * complete batman-adv multicast header.
 *
 * Return:
 *	BATADV_FORW_MCAST: If all nodes have multicast packet routing
 *	capabilities and an MTU >= 1280 on all hard interfaces (including us)
 *	and the encapsulated multicast packet with all destination addresses
 *	would still fit into an 1280 bytes batman-adv multicast packet
 *	(excluding the outer ethernet frame) and we could successfully push
 *	the full batman-adv multicast packet header.
 *	BATADV_FORW_UCASTS: If the packet cannot be sent in a batman-adv
 *	multicast packet and the amount of batman-adv unicast packets needed
 *	is smaller or equal to the configured multicast fanout.
 *	BATADV_FORW_BCAST: Otherwise.
 */
static enum batadv_forw_mode
batadv_mcast_forw_mode_by_count(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid,
				int is_routable, int count)
{
	unsigned int mcast_hdrlen = batadv_mcast_forw_packet_hdrlen(count);
	u8 own_tvlv_flags = bat_priv->mcast.mla_flags.tvlv_flags;

	if (!atomic_read(&bat_priv->mcast.num_no_mc_ptype_capa) &&
	    own_tvlv_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA &&
	    skb->len + mcast_hdrlen <= IPV6_MIN_MTU &&
	    batadv_mcast_forw_push(bat_priv, skb, vid, is_routable, count))
		return BATADV_FORW_MCAST;

	if (count <= atomic_read(&bat_priv->multicast_fanout))
		return BATADV_FORW_UCASTS;

	return BATADV_FORW_BCAST;
}

/**
 * batadv_mcast_forw_mode() - check on how to forward a multicast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to check
 * @vid: the vlan identifier
 * @is_routable: stores whether the destination is routable
 *
 * Return: The forwarding mode as enum batadv_forw_mode.
 */
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
		       unsigned short vid, int *is_routable)
{
	int ret, tt_count, ip_count, unsnoop_count, total_count;
	bool is_unsnoopable = false;
	struct ethhdr *ethhdr;
	int rtr_count = 0;

	ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
					   is_routable);
	if (ret == -ENOMEM)
		return BATADV_FORW_NONE;
	else if (ret < 0)
		return BATADV_FORW_BCAST;

	ethhdr = eth_hdr(skb);

	tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
					       BATADV_NO_FLAGS);
	ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
	unsnoop_count = !is_unsnoopable ? 0 :
		atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
	rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);

	total_count = tt_count + ip_count + unsnoop_count + rtr_count;

	if (!total_count)
		return BATADV_FORW_NONE;
	else if (unsnoop_count)
		return BATADV_FORW_BCAST;

	return batadv_mcast_forw_mode_by_count(bat_priv, skb, vid, *is_routable,
					       total_count);
}

/**
 * batadv_mcast_forw_send_orig() - send a multicast packet to an originator
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to send
 * @vid: the vlan identifier
 * @orig_node: the originator to send the packet to
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
static int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
				       struct sk_buff *skb,
				       unsigned short vid,
				       struct batadv_orig_node *orig_node)
{
	/* Avoid sending multicast-in-unicast packets to other BLA
	 * gateways - they already got the frame from the LAN side
	 * we share with them.
	 * TODO: Refactor to take BLA into account earlier, to avoid
	 * reducing the mcast_fanout count.
	 */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) {
		dev_kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}

/**
 * batadv_mcast_forw_tt() - forwards a packet to multicast listeners
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any multicast
 * listener registered in the translation table. A transmission is performed
 * via a batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
		     unsigned short vid)
{
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;
	struct batadv_tt_orig_list_entry *orig_entry;
	struct batadv_tt_global_entry *tt_global;
	const u8 *addr = eth_hdr(skb)->h_dest;

	tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
	if (!tt_global)
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid,
					    orig_entry->orig_node);
	}
	rcu_read_unlock();

	batadv_tt_global_entry_put(tt_global);

out:
	return ret;
}

/**
 * batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_ALL_IPV4 flag set. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_ipv4_list,
				 mcast_want_all_ipv4_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_ALL_IPV6 flag set. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_ipv6_list,
				 mcast_want_all_ipv6_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_ALL_IPV4 or BATADV_MCAST_WANT_ALL_IPV6 flag set. A
 * transmission is performed via a batman-adv unicast packet for each such
 * destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
static int
batadv_mcast_forw_want_all(struct batadv_priv *bat_priv,
			   struct sk_buff *skb, unsigned short vid)
{
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
	case ETH_P_IPV6:
		return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
	default:
		/* we shouldn't be here... */
		return NET_XMIT_DROP;
	}
}

/**
 * batadv_mcast_forw_want_all_rtr4() - forward to nodes with want-all-rtr4
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_NO_RTR4 flag unset. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_rtr4_list,
				 mcast_want_all_rtr4_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_all_rtr6() - forward to nodes with want-all-rtr6
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_NO_RTR6 flag unset. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_rtr6_list,
				 mcast_want_all_rtr6_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_rtr() - forward packet to nodes in a want-all-rtr list
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_NO_RTR4 or BATADV_MCAST_WANT_NO_RTR6 flag unset. A
 * transmission is performed via a batman-adv unicast packet for each such
 * destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
static int
batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
			   struct sk_buff *skb, unsigned short vid)
{
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_want_all_rtr4(bat_priv, skb, vid);
	case ETH_P_IPV6:
		return batadv_mcast_forw_want_all_rtr6(bat_priv, skb, vid);
	default:
		/* we shouldn't be here... */
		return NET_XMIT_DROP;
	}
}

/**
 * batadv_mcast_forw_send() - send packet to any detected multicast recipient
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 * @is_routable: stores whether the destination is routable
 *
 * Sends copies of a frame with multicast destination to any node that signaled
 * interest in it, that is either via the translation table or the according
 * want-all flags. A transmission is performed via a batman-adv unicast packet
 * for each such destination node.
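 *
 * More precisely (summarising the calls below): one copy is transmitted per
 * matching translation table entry, per node with a matching want-all flag
 * and, for routable destinations, per node with a matching want-all-rtr
 * flag - see batadv_mcast_forw_tt(), batadv_mcast_forw_want_all() and
 * batadv_mcast_forw_want_rtr().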
 *
 * The given skb is consumed/freed.
 *
 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid, int is_routable)
{
	int ret;

	ret = batadv_mcast_forw_tt(bat_priv, skb, vid);
	if (ret != NET_XMIT_SUCCESS) {
		kfree_skb(skb);
		return ret;
	}

	ret = batadv_mcast_forw_want_all(bat_priv, skb, vid);
	if (ret != NET_XMIT_SUCCESS) {
		kfree_skb(skb);
		return ret;
	}

	if (!is_routable)
		goto skip_mc_router;

	ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
	if (ret != NET_XMIT_SUCCESS) {
		kfree_skb(skb);
		return ret;
	}

skip_mc_router:
	consume_skb(skb);
	return ret;
}

/**
 * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
 * orig, has toggled then this method updates the counter and the list
 * accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
					     struct batadv_orig_node *orig,
					     u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
		atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
		atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}

/**
 * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
 * toggled then this method updates the counter and the list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
		atomic_inc(&bat_priv->mcast.num_want_all_ipv4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) {
		atomic_dec(&bat_priv->mcast.num_want_all_ipv4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}

/**
 * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
 * toggled then this method updates the counter and the list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
		atomic_inc(&bat_priv->mcast.num_want_all_ipv6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
		atomic_dec(&bat_priv->mcast.num_want_all_ipv6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}

/**
 * batadv_mcast_want_rtr4_update() - update want-all-rtr4 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has
 * toggled then this method updates the counter and the list accordingly.
1735 * 1736 * Caller needs to hold orig->mcast_handler_lock. 1737 */ 1738 static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv, 1739 struct batadv_orig_node *orig, 1740 u8 mcast_flags) 1741 { 1742 struct hlist_node *node = &orig->mcast_want_all_rtr4_node; 1743 struct hlist_head *head = &bat_priv->mcast.want_all_rtr4_list; 1744 1745 lockdep_assert_held(&orig->mcast_handler_lock); 1746 1747 /* switched from flag set to unset */ 1748 if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR4) && 1749 orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4) { 1750 atomic_inc(&bat_priv->mcast.num_want_all_rtr4); 1751 1752 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1753 /* flag checks above + mcast_handler_lock prevents this */ 1754 WARN_ON(!hlist_unhashed(node)); 1755 1756 hlist_add_head_rcu(node, head); 1757 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1758 /* switched from flag unset to set */ 1759 } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR4 && 1760 !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4)) { 1761 atomic_dec(&bat_priv->mcast.num_want_all_rtr4); 1762 1763 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1764 /* flag checks above + mcast_handler_lock prevents this */ 1765 WARN_ON(hlist_unhashed(node)); 1766 1767 hlist_del_init_rcu(node); 1768 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1769 } 1770 } 1771 1772 /** 1773 * batadv_mcast_want_rtr6_update() - update want-all-rtr6 counter and list 1774 * @bat_priv: the bat priv with all the soft interface information 1775 * @orig: the orig_node which multicast state might have changed of 1776 * @mcast_flags: flags indicating the new multicast state 1777 * 1778 * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has 1779 * toggled then this method updates the counter and the list accordingly. 1780 * 1781 * Caller needs to hold orig->mcast_handler_lock. 
1782 */ 1783 static void batadv_mcast_want_rtr6_update(struct batadv_priv *bat_priv, 1784 struct batadv_orig_node *orig, 1785 u8 mcast_flags) 1786 { 1787 struct hlist_node *node = &orig->mcast_want_all_rtr6_node; 1788 struct hlist_head *head = &bat_priv->mcast.want_all_rtr6_list; 1789 1790 lockdep_assert_held(&orig->mcast_handler_lock); 1791 1792 /* switched from flag set to unset */ 1793 if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR6) && 1794 orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6) { 1795 atomic_inc(&bat_priv->mcast.num_want_all_rtr6); 1796 1797 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1798 /* flag checks above + mcast_handler_lock prevents this */ 1799 WARN_ON(!hlist_unhashed(node)); 1800 1801 hlist_add_head_rcu(node, head); 1802 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1803 /* switched from flag unset to set */ 1804 } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR6 && 1805 !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6)) { 1806 atomic_dec(&bat_priv->mcast.num_want_all_rtr6); 1807 1808 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1809 /* flag checks above + mcast_handler_lock prevents this */ 1810 WARN_ON(hlist_unhashed(node)); 1811 1812 hlist_del_init_rcu(node); 1813 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1814 } 1815 } 1816 1817 /** 1818 * batadv_mcast_have_mc_ptype_update() - update multicast packet type counter 1819 * @bat_priv: the bat priv with all the soft interface information 1820 * @orig: the orig_node which multicast state might have changed of 1821 * @mcast_flags: flags indicating the new multicast state 1822 * 1823 * If the BATADV_MCAST_HAVE_MC_PTYPE_CAPA flag of this originator, orig, has 1824 * toggled then this method updates the counter accordingly. 1825 */ 1826 static void batadv_mcast_have_mc_ptype_update(struct batadv_priv *bat_priv, 1827 struct batadv_orig_node *orig, 1828 u8 mcast_flags) 1829 { 1830 lockdep_assert_held(&orig->mcast_handler_lock); 1831 1832 /* switched from flag set to unset */ 1833 if (!(mcast_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA) && 1834 orig->mcast_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA) 1835 atomic_inc(&bat_priv->mcast.num_no_mc_ptype_capa); 1836 /* switched from flag unset to set */ 1837 else if (mcast_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA && 1838 !(orig->mcast_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA)) 1839 atomic_dec(&bat_priv->mcast.num_no_mc_ptype_capa); 1840 } 1841 1842 /** 1843 * batadv_mcast_tvlv_flags_get() - get multicast flags from an OGM TVLV 1844 * @enabled: whether the originator has multicast TVLV support enabled 1845 * @tvlv_value: tvlv buffer containing the multicast flags 1846 * @tvlv_value_len: tvlv buffer length 1847 * 1848 * Return: multicast flags for the given tvlv buffer 1849 */ 1850 static u8 1851 batadv_mcast_tvlv_flags_get(bool enabled, void *tvlv_value, u16 tvlv_value_len) 1852 { 1853 u8 mcast_flags = BATADV_NO_FLAGS; 1854 1855 if (enabled && tvlv_value && tvlv_value_len >= sizeof(mcast_flags)) 1856 mcast_flags = *(u8 *)tvlv_value; 1857 1858 if (!enabled) { 1859 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4; 1860 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6; 1861 } 1862 1863 /* remove redundant flags to avoid sending duplicate packets later */ 1864 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) 1865 mcast_flags |= BATADV_MCAST_WANT_NO_RTR4; 1866 1867 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) 1868 mcast_flags |= BATADV_MCAST_WANT_NO_RTR6; 1869 1870 return mcast_flags; 1871 } 1872 1873 /** 1874 * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container 1875 * 
@bat_priv: the bat priv with all the soft interface information 1876 * @orig: the orig_node of the ogm 1877 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags) 1878 * @tvlv_value: tvlv buffer containing the multicast data 1879 * @tvlv_value_len: tvlv buffer length 1880 */ 1881 static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv, 1882 struct batadv_orig_node *orig, 1883 u8 flags, 1884 void *tvlv_value, 1885 u16 tvlv_value_len) 1886 { 1887 bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND); 1888 u8 mcast_flags; 1889 1890 mcast_flags = batadv_mcast_tvlv_flags_get(orig_mcast_enabled, 1891 tvlv_value, tvlv_value_len); 1892 1893 spin_lock_bh(&orig->mcast_handler_lock); 1894 1895 if (orig_mcast_enabled && 1896 !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) { 1897 set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities); 1898 } else if (!orig_mcast_enabled && 1899 test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) { 1900 clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities); 1901 } 1902 1903 set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized); 1904 1905 batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags); 1906 batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags); 1907 batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags); 1908 batadv_mcast_want_rtr4_update(bat_priv, orig, mcast_flags); 1909 batadv_mcast_want_rtr6_update(bat_priv, orig, mcast_flags); 1910 batadv_mcast_have_mc_ptype_update(bat_priv, orig, mcast_flags); 1911 1912 orig->mcast_flags = mcast_flags; 1913 spin_unlock_bh(&orig->mcast_handler_lock); 1914 } 1915 1916 /** 1917 * batadv_mcast_init() - initialize the multicast optimizations structures 1918 * @bat_priv: the bat priv with all the soft interface information 1919 */ 1920 void batadv_mcast_init(struct batadv_priv *bat_priv) 1921 { 1922 batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler, 1923 NULL, NULL, BATADV_TVLV_MCAST, 2, 1924 BATADV_TVLV_HANDLER_OGM_CIFNOTFND); 1925 batadv_tvlv_handler_register(bat_priv, NULL, NULL, 1926 batadv_mcast_forw_tracker_tvlv_handler, 1927 BATADV_TVLV_MCAST_TRACKER, 1, 1928 BATADV_TVLV_HANDLER_OGM_CIFNOTFND); 1929 1930 INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update); 1931 batadv_mcast_start_timer(bat_priv); 1932 } 1933 1934 /** 1935 * batadv_mcast_mesh_info_put() - put multicast info into a netlink message 1936 * @msg: buffer for the message 1937 * @bat_priv: the bat priv with all the soft interface information 1938 * 1939 * Return: 0 or error code. 
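 *
 * BATADV_ATTR_MCAST_FLAGS carries the node's own multicast TVLV flags,
 * whereas BATADV_ATTR_MCAST_FLAGS_PRIV carries the bridge and querier state
 * which is only of local interest and not announced via the multicast TVLV.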
1940 */ 1941 int batadv_mcast_mesh_info_put(struct sk_buff *msg, 1942 struct batadv_priv *bat_priv) 1943 { 1944 u32 flags = bat_priv->mcast.mla_flags.tvlv_flags; 1945 u32 flags_priv = BATADV_NO_FLAGS; 1946 1947 if (bat_priv->mcast.mla_flags.bridged) { 1948 flags_priv |= BATADV_MCAST_FLAGS_BRIDGED; 1949 1950 if (bat_priv->mcast.mla_flags.querier_ipv4.exists) 1951 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS; 1952 if (bat_priv->mcast.mla_flags.querier_ipv6.exists) 1953 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS; 1954 if (bat_priv->mcast.mla_flags.querier_ipv4.shadowing) 1955 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING; 1956 if (bat_priv->mcast.mla_flags.querier_ipv6.shadowing) 1957 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING; 1958 } 1959 1960 if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) || 1961 nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv)) 1962 return -EMSGSIZE; 1963 1964 return 0; 1965 } 1966 1967 /** 1968 * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table 1969 * to a netlink socket 1970 * @msg: buffer for the message 1971 * @portid: netlink port 1972 * @cb: Control block containing additional options 1973 * @orig_node: originator to dump the multicast flags of 1974 * 1975 * Return: 0 or error code. 1976 */ 1977 static int 1978 batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid, 1979 struct netlink_callback *cb, 1980 struct batadv_orig_node *orig_node) 1981 { 1982 void *hdr; 1983 1984 hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq, 1985 &batadv_netlink_family, NLM_F_MULTI, 1986 BATADV_CMD_GET_MCAST_FLAGS); 1987 if (!hdr) 1988 return -ENOBUFS; 1989 1990 genl_dump_check_consistent(cb, hdr); 1991 1992 if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, 1993 orig_node->orig)) { 1994 genlmsg_cancel(msg, hdr); 1995 return -EMSGSIZE; 1996 } 1997 1998 if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, 1999 &orig_node->capabilities)) { 2000 if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, 2001 orig_node->mcast_flags)) { 2002 genlmsg_cancel(msg, hdr); 2003 return -EMSGSIZE; 2004 } 2005 } 2006 2007 genlmsg_end(msg, hdr); 2008 return 0; 2009 } 2010 2011 /** 2012 * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags 2013 * table to a netlink socket 2014 * @msg: buffer for the message 2015 * @portid: netlink port 2016 * @cb: Control block containing additional options 2017 * @hash: hash to dump 2018 * @bucket: bucket index to dump 2019 * @idx_skip: How many entries to skip 2020 * 2021 * Return: 0 or error code. 
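 *
 * If the message buffer runs out of space then *idx_skip is set to the index
 * of the entry which could not be dumped, so that a subsequent dump round
 * can resume with exactly that entry.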
2022 */ 2023 static int 2024 batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid, 2025 struct netlink_callback *cb, 2026 struct batadv_hashtable *hash, 2027 unsigned int bucket, long *idx_skip) 2028 { 2029 struct batadv_orig_node *orig_node; 2030 long idx = 0; 2031 2032 spin_lock_bh(&hash->list_locks[bucket]); 2033 cb->seq = atomic_read(&hash->generation) << 1 | 1; 2034 2035 hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) { 2036 if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, 2037 &orig_node->capa_initialized)) 2038 continue; 2039 2040 if (idx < *idx_skip) 2041 goto skip; 2042 2043 if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) { 2044 spin_unlock_bh(&hash->list_locks[bucket]); 2045 *idx_skip = idx; 2046 2047 return -EMSGSIZE; 2048 } 2049 2050 skip: 2051 idx++; 2052 } 2053 spin_unlock_bh(&hash->list_locks[bucket]); 2054 2055 return 0; 2056 } 2057 2058 /** 2059 * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket 2060 * @msg: buffer for the message 2061 * @portid: netlink port 2062 * @cb: Control block containing additional options 2063 * @bat_priv: the bat priv with all the soft interface information 2064 * @bucket: current bucket to dump 2065 * @idx: index in current bucket to the next entry to dump 2066 * 2067 * Return: 0 or error code. 2068 */ 2069 static int 2070 __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid, 2071 struct netlink_callback *cb, 2072 struct batadv_priv *bat_priv, long *bucket, long *idx) 2073 { 2074 struct batadv_hashtable *hash = bat_priv->orig_hash; 2075 long bucket_tmp = *bucket; 2076 long idx_tmp = *idx; 2077 2078 while (bucket_tmp < hash->size) { 2079 if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash, 2080 bucket_tmp, &idx_tmp)) 2081 break; 2082 2083 bucket_tmp++; 2084 idx_tmp = 0; 2085 } 2086 2087 *bucket = bucket_tmp; 2088 *idx = idx_tmp; 2089 2090 return msg->len; 2091 } 2092 2093 /** 2094 * batadv_mcast_netlink_get_primary() - get primary interface from netlink 2095 * callback 2096 * @cb: netlink callback structure 2097 * @primary_if: the primary interface pointer to return the result in 2098 * 2099 * Return: 0 or error code. 2100 */ 2101 static int 2102 batadv_mcast_netlink_get_primary(struct netlink_callback *cb, 2103 struct batadv_hard_iface **primary_if) 2104 { 2105 struct batadv_hard_iface *hard_iface = NULL; 2106 struct net_device *soft_iface; 2107 struct batadv_priv *bat_priv; 2108 int ret = 0; 2109 2110 soft_iface = batadv_netlink_get_softif(cb); 2111 if (IS_ERR(soft_iface)) 2112 return PTR_ERR(soft_iface); 2113 2114 bat_priv = netdev_priv(soft_iface); 2115 2116 hard_iface = batadv_primary_if_get_selected(bat_priv); 2117 if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) { 2118 ret = -ENOENT; 2119 goto out; 2120 } 2121 2122 out: 2123 dev_put(soft_iface); 2124 2125 if (!ret && primary_if) 2126 *primary_if = hard_iface; 2127 else 2128 batadv_hardif_put(hard_iface); 2129 2130 return ret; 2131 } 2132 2133 /** 2134 * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket 2135 * @msg: buffer for the message 2136 * @cb: callback structure containing arguments 2137 * 2138 * Return: message length. 
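 *
 * The bucket and the in-bucket index reached so far are preserved in
 * cb->args[0] and cb->args[1] between calls, allowing large originator
 * tables to be dumped across multiple netlink messages.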
2139 */ 2140 int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb) 2141 { 2142 struct batadv_hard_iface *primary_if = NULL; 2143 int portid = NETLINK_CB(cb->skb).portid; 2144 struct batadv_priv *bat_priv; 2145 long *bucket = &cb->args[0]; 2146 long *idx = &cb->args[1]; 2147 int ret; 2148 2149 ret = batadv_mcast_netlink_get_primary(cb, &primary_if); 2150 if (ret) 2151 return ret; 2152 2153 bat_priv = netdev_priv(primary_if->soft_iface); 2154 ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx); 2155 2156 batadv_hardif_put(primary_if); 2157 return ret; 2158 } 2159 2160 /** 2161 * batadv_mcast_free() - free the multicast optimizations structures 2162 * @bat_priv: the bat priv with all the soft interface information 2163 */ 2164 void batadv_mcast_free(struct batadv_priv *bat_priv) 2165 { 2166 cancel_delayed_work_sync(&bat_priv->mcast.work); 2167 2168 batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2); 2169 batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST_TRACKER, 1); 2170 batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2); 2171 2172 /* safely calling outside of worker, as worker was canceled above */ 2173 batadv_mcast_mla_tt_retract(bat_priv, NULL); 2174 } 2175 2176 /** 2177 * batadv_mcast_purge_orig() - reset originator global mcast state modifications 2178 * @orig: the originator which is going to get purged 2179 */ 2180 void batadv_mcast_purge_orig(struct batadv_orig_node *orig) 2181 { 2182 struct batadv_priv *bat_priv = orig->bat_priv; 2183 2184 spin_lock_bh(&orig->mcast_handler_lock); 2185 2186 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS); 2187 batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS); 2188 batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS); 2189 batadv_mcast_want_rtr4_update(bat_priv, orig, 2190 BATADV_MCAST_WANT_NO_RTR4); 2191 batadv_mcast_want_rtr6_update(bat_priv, orig, 2192 BATADV_MCAST_WANT_NO_RTR6); 2193 batadv_mcast_have_mc_ptype_update(bat_priv, orig, 2194 BATADV_MCAST_HAVE_MC_PTYPE_CAPA); 2195 2196 spin_unlock_bh(&orig->mcast_handler_lock); 2197 } 2198
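
/* A note on batadv_mcast_purge_orig(): the flag values passed to the update
 * helpers are deliberately those of a "not interested" originator, that is
 * no BATADV_MCAST_WANT_ALL_* flags but BATADV_MCAST_WANT_NO_RTR4/6 and
 * BATADV_MCAST_HAVE_MC_PTYPE_CAPA set. This makes each helper take the
 * removal path, dropping the originator from all want-lists and decrementing
 * the matching counters where needed.
 */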