1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Bridge multicast support.
4 *
5 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
6 */
7
8 #include <linux/err.h>
9 #include <linux/export.h>
10 #include <linux/if_ether.h>
11 #include <linux/igmp.h>
12 #include <linux/in.h>
13 #include <linux/jhash.h>
14 #include <linux/kernel.h>
15 #include <linux/log2.h>
16 #include <linux/netdevice.h>
17 #include <linux/netfilter_bridge.h>
18 #include <linux/random.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/timer.h>
23 #include <linux/inetdevice.h>
24 #include <linux/mroute.h>
25 #include <net/ip.h>
26 #include <net/switchdev.h>
27 #if IS_ENABLED(CONFIG_IPV6)
28 #include <linux/icmpv6.h>
29 #include <net/ipv6.h>
30 #include <net/mld.h>
31 #include <net/ip6_checksum.h>
32 #include <net/addrconf.h>
33 #endif
34 #include <trace/events/bridge.h>
35
36 #include "br_private.h"
37 #include "br_private_mcast_eht.h"
38
/* rhashtable parameters for the bridge MDB: entries are keyed by the full
 * struct br_ip group address embedded in net_bridge_mdb_entry, so padding
 * in br_ip must be zeroed by lookup callers (they memset their keys).
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};
45
/* rhashtable parameters for the S,G-per-port table, keyed by
 * net_bridge_port_group_sg_key (port + S,G address).
 */
static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};
52
/* Forward declarations for helpers defined later in this file. Note that
 * br_ip6_multicast_add_router() is declared outside the CONFIG_IPV6 guard
 * while br_ip6_multicast_leave_group() is inside it — presumably the former
 * has an unconditional definition; confirm against the definitions below.
 */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);

static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack);
88
89 static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge * br,struct net_bridge_port_group_sg_key * sg_p)90 br_sg_port_find(struct net_bridge *br,
91 struct net_bridge_port_group_sg_key *sg_p)
92 {
93 lockdep_assert_held_once(&br->multicast_lock);
94
95 return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
96 br_sg_port_rht_params);
97 }
98
br_mdb_ip_get_rcu(struct net_bridge * br,struct br_ip * dst)99 static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
100 struct br_ip *dst)
101 {
102 return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
103 }
104
br_mdb_ip_get(struct net_bridge * br,struct br_ip * dst)105 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
106 struct br_ip *dst)
107 {
108 struct net_bridge_mdb_entry *ent;
109
110 lockdep_assert_held_once(&br->multicast_lock);
111
112 rcu_read_lock();
113 ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
114 rcu_read_unlock();
115
116 return ent;
117 }
118
br_mdb_ip4_get(struct net_bridge * br,__be32 dst,__u16 vid)119 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
120 __be32 dst, __u16 vid)
121 {
122 struct br_ip br_dst;
123
124 memset(&br_dst, 0, sizeof(br_dst));
125 br_dst.dst.ip4 = dst;
126 br_dst.proto = htons(ETH_P_IP);
127 br_dst.vid = vid;
128
129 return br_mdb_ip_get(br, &br_dst);
130 }
131
132 #if IS_ENABLED(CONFIG_IPV6)
br_mdb_ip6_get(struct net_bridge * br,const struct in6_addr * dst,__u16 vid)133 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
134 const struct in6_addr *dst,
135 __u16 vid)
136 {
137 struct br_ip br_dst;
138
139 memset(&br_dst, 0, sizeof(br_dst));
140 br_dst.dst.ip6 = *dst;
141 br_dst.proto = htons(ETH_P_IPV6);
142 br_dst.vid = vid;
143
144 return br_mdb_ip_get(br, &br_dst);
145 }
146 #endif
147
/* Find the MDB entry an ingress skb should be forwarded by. For IGMPv3
 * (IPv4) and MLDv2 (IPv6) an exact S,G lookup is tried first, falling back
 * to the *,G entry; non-IP traffic is looked up by destination MAC.
 * Returns NULL when snooping is disabled for this context or when the skb
 * was already classified as an IGMP/MLD control packet. Uses the RCU-side
 * lookup helper, so the caller is expected to be in an RCU read-side
 * section — confirm against callers.
 */
struct net_bridge_mdb_entry *
br_mdb_entry_skb_get(struct net_bridge_mcast *brmctx, struct sk_buff *skb,
		     u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return NULL;

	/* control packets are handled separately, never forwarded via MDB */
	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			/* prefer an exact S,G match before the *,G one */
			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			/* prefer an exact S,G match before the *,G one */
			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		/* non-IP traffic: fall back to a MAC-keyed lookup */
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}
200
/* IMPORTANT: this function must be used only when the contexts cannot be
 * passed down (e.g. timer) and must be used for read-only purposes because
 * the vlan snooping option can change, so it can return any context
 * (non-vlan or vlan). Its initial intended purpose is to read timer values
 * from the *current* context based on the option. At worst that could lead
 * to inconsistent timers when the contexts are changed, i.e. src timer
 * which needs to re-arm with a specific delay taken from the old context
 */
static struct net_bridge_mcast_port *
br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
{
	/* default: the port's own (non-vlan) multicast context */
	struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&pg->key.port->br->multicast_lock);

	/* if vlan snooping is disabled use the port's multicast context */
	if (!pg->key.addr.vid ||
	    !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		goto out;

	/* locking is tricky here, due to different rules for multicast and
	 * vlans we need to take rcu to find the vlan and make sure it has
	 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under
	 * multicast_lock which must be already held here, so the vlan's pmctx
	 * can safely be used on return
	 */
	rcu_read_lock();
	vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;
	else
		pmctx = NULL;	/* vlan gone or mcast disabled on it */
	rcu_read_unlock();
out:
	return pmctx;
}
238
239 static struct net_bridge_mcast_port *
br_multicast_port_vid_to_port_ctx(struct net_bridge_port * port,u16 vid)240 br_multicast_port_vid_to_port_ctx(struct net_bridge_port *port, u16 vid)
241 {
242 struct net_bridge_mcast_port *pmctx = NULL;
243 struct net_bridge_vlan *vlan;
244
245 lockdep_assert_held_once(&port->br->multicast_lock);
246
247 /* Take RCU to access the vlan. */
248 rcu_read_lock();
249
250 vlan = br_vlan_find(nbp_vlan_group_rcu(port), vid);
251 if (vlan)
252 pmctx = &vlan->port_mcast_ctx;
253
254 rcu_read_unlock();
255
256 return pmctx;
257 }
258
259 /* when snooping we need to check if the contexts should be used
260 * in the following order:
261 * - if pmctx is non-NULL (port), check if it should be used
262 * - if pmctx is NULL (bridge), check if brmctx should be used
263 */
264 static bool
br_multicast_ctx_should_use(const struct net_bridge_mcast * brmctx,const struct net_bridge_mcast_port * pmctx)265 br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
266 const struct net_bridge_mcast_port *pmctx)
267 {
268 if (!netif_running(brmctx->br->dev))
269 return false;
270
271 if (pmctx)
272 return !br_multicast_port_ctx_state_disabled(pmctx);
273 else
274 return !br_multicast_ctx_vlan_disabled(brmctx);
275 }
276
br_port_group_equal(struct net_bridge_port_group * p,struct net_bridge_port * port,const unsigned char * src)277 static bool br_port_group_equal(struct net_bridge_port_group *p,
278 struct net_bridge_port *port,
279 const unsigned char *src)
280 {
281 if (p->key.port != port)
282 return false;
283
284 if (!(port->flags & BR_MULTICAST_TO_UNICAST))
285 return true;
286
287 return ether_addr_equal(src, p->eth_addr);
288 }
289
__fwd_add_star_excl(struct net_bridge_mcast_port * pmctx,struct net_bridge_port_group * pg,struct br_ip * sg_ip)290 static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
291 struct net_bridge_port_group *pg,
292 struct br_ip *sg_ip)
293 {
294 struct net_bridge_port_group_sg_key sg_key;
295 struct net_bridge_port_group *src_pg;
296 struct net_bridge_mcast *brmctx;
297
298 memset(&sg_key, 0, sizeof(sg_key));
299 brmctx = br_multicast_port_ctx_get_global(pmctx);
300 sg_key.port = pg->key.port;
301 sg_key.addr = *sg_ip;
302 if (br_sg_port_find(brmctx->br, &sg_key))
303 return;
304
305 src_pg = __br_multicast_add_group(brmctx, pmctx,
306 sg_ip, pg->eth_addr,
307 MCAST_INCLUDE, false, false);
308 if (IS_ERR_OR_NULL(src_pg) ||
309 src_pg->rt_protocol != RTPROT_KERNEL)
310 return;
311
312 src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
313 }
314
__fwd_del_star_excl(struct net_bridge_port_group * pg,struct br_ip * sg_ip)315 static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
316 struct br_ip *sg_ip)
317 {
318 struct net_bridge_port_group_sg_key sg_key;
319 struct net_bridge *br = pg->key.port->br;
320 struct net_bridge_port_group *src_pg;
321
322 memset(&sg_key, 0, sizeof(sg_key));
323 sg_key.port = pg->key.port;
324 sg_key.addr = *sg_ip;
325 src_pg = br_sg_port_find(br, &sg_key);
326 if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
327 src_pg->rt_protocol != RTPROT_KERNEL)
328 return;
329
330 br_multicast_find_del_pg(br, src_pg);
331 }
332
/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	/* only makes sense for *,G groups */
	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;
	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		return;

	/* sg_ip starts as the *,G address; only the source varies below */
	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;

	/* walk all other port groups of this *,G and sync their installed
	 * sources with @pg's new filter mode
	 */
	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pmctx, pg, &sg_ip);
				break;
			}
		}
	}
}
385
386 /* called when adding a new S,G with host_joined == false by default */
br_multicast_sg_host_state(struct net_bridge_mdb_entry * star_mp,struct net_bridge_port_group * sg)387 static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
388 struct net_bridge_port_group *sg)
389 {
390 struct net_bridge_mdb_entry *sg_mp;
391
392 if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
393 return;
394 if (!star_mp->host_joined)
395 return;
396
397 sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
398 if (!sg_mp)
399 return;
400 sg_mp->host_joined = true;
401 }
402
/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	/* only meaningful for a *,G entry */
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	/* sg_ip starts as the *,G address; only the source varies below */
	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			/* skip sources without an installed S,G entry */
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			/* mirror the *,G host join state onto the S,G */
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}
432
br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry * sgmp)433 static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
434 {
435 struct net_bridge_port_group __rcu **pp;
436 struct net_bridge_port_group *p;
437
438 /* *,G exclude ports are only added to S,G entries */
439 if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
440 return;
441
442 /* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
443 * we should ignore perm entries since they're managed by user-space
444 */
445 for (pp = &sgmp->ports;
446 (p = mlock_dereference(*pp, sgmp->br)) != NULL;
447 pp = &p->next)
448 if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
449 MDB_PG_FLAGS_PERMANENT)))
450 return;
451
452 /* currently the host can only have joined the *,G which means
453 * we treat it as EXCLUDE {}, so for an S,G it's considered a
454 * STAR_EXCLUDE entry and we can safely leave it
455 */
456 sgmp->host_joined = false;
457
458 for (pp = &sgmp->ports;
459 (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
460 if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
461 br_multicast_del_pg(sgmp, p, pp);
462 else
463 pp = &p->next;
464 }
465 }
466
br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry * star_mp,struct net_bridge_port_group * sg)467 void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
468 struct net_bridge_port_group *sg)
469 {
470 struct net_bridge_port_group_sg_key sg_key;
471 struct net_bridge *br = star_mp->br;
472 struct net_bridge_mcast_port *pmctx;
473 struct net_bridge_port_group *pg;
474 struct net_bridge_mcast *brmctx;
475
476 if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
477 return;
478 if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
479 return;
480
481 br_multicast_sg_host_state(star_mp, sg);
482 memset(&sg_key, 0, sizeof(sg_key));
483 sg_key.addr = sg->key.addr;
484 /* we need to add all exclude ports to the S,G */
485 for (pg = mlock_dereference(star_mp->ports, br);
486 pg;
487 pg = mlock_dereference(pg->next, br)) {
488 struct net_bridge_port_group *src_pg;
489
490 if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
491 continue;
492
493 sg_key.port = pg->key.port;
494 if (br_sg_port_find(br, &sg_key))
495 continue;
496
497 pmctx = br_multicast_pg_to_port_ctx(pg);
498 if (!pmctx)
499 continue;
500 brmctx = br_multicast_port_ctx_get_global(pmctx);
501
502 src_pg = __br_multicast_add_group(brmctx, pmctx,
503 &sg->key.addr,
504 sg->eth_addr,
505 MCAST_INCLUDE, false, false);
506 if (IS_ERR_OR_NULL(src_pg) ||
507 src_pg->rt_protocol != RTPROT_KERNEL)
508 continue;
509 src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
510 }
511 }
512
br_multicast_fwd_src_add(struct net_bridge_group_src * src)513 static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
514 {
515 struct net_bridge_mdb_entry *star_mp;
516 struct net_bridge_mcast_port *pmctx;
517 struct net_bridge_port_group *sg;
518 struct net_bridge_mcast *brmctx;
519 struct br_ip sg_ip;
520
521 if (src->flags & BR_SGRP_F_INSTALLED)
522 return;
523
524 memset(&sg_ip, 0, sizeof(sg_ip));
525 pmctx = br_multicast_pg_to_port_ctx(src->pg);
526 if (!pmctx)
527 return;
528 brmctx = br_multicast_port_ctx_get_global(pmctx);
529 sg_ip = src->pg->key.addr;
530 sg_ip.src = src->addr.src;
531
532 sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
533 src->pg->eth_addr, MCAST_INCLUDE, false,
534 !timer_pending(&src->timer));
535 if (IS_ERR_OR_NULL(sg))
536 return;
537 src->flags |= BR_SGRP_F_INSTALLED;
538 sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;
539
540 /* if it was added by user-space as perm we can skip next steps */
541 if (sg->rt_protocol != RTPROT_KERNEL &&
542 (sg->flags & MDB_PG_FLAGS_PERMANENT))
543 return;
544
545 /* the kernel is now responsible for removing this S,G */
546 timer_delete(&sg->timer);
547 star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
548 if (!star_mp)
549 return;
550
551 br_multicast_sg_add_exclude_ports(star_mp, sg);
552 }
553
br_multicast_fwd_src_remove(struct net_bridge_group_src * src,bool fastleave)554 static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
555 bool fastleave)
556 {
557 struct net_bridge_port_group *p, *pg = src->pg;
558 struct net_bridge_port_group __rcu **pp;
559 struct net_bridge_mdb_entry *mp;
560 struct br_ip sg_ip;
561
562 memset(&sg_ip, 0, sizeof(sg_ip));
563 sg_ip = pg->key.addr;
564 sg_ip.src = src->addr.src;
565
566 mp = br_mdb_ip_get(src->br, &sg_ip);
567 if (!mp)
568 return;
569
570 for (pp = &mp->ports;
571 (p = mlock_dereference(*pp, src->br)) != NULL;
572 pp = &p->next) {
573 if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
574 continue;
575
576 if (p->rt_protocol != RTPROT_KERNEL &&
577 (p->flags & MDB_PG_FLAGS_PERMANENT) &&
578 !(src->flags & BR_SGRP_F_USER_ADDED))
579 break;
580
581 if (fastleave)
582 p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
583 br_multicast_del_pg(mp, p, pp);
584 break;
585 }
586 src->flags &= ~BR_SGRP_F_INSTALLED;
587 }
588
/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	/* permanent entries are user-managed; don't toggle their state */
	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* a running source timer means traffic should be forwarded */
	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	/* notify user-space only on an actual state change */
	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}
622
br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc * gc)623 static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
624 {
625 struct net_bridge_mdb_entry *mp;
626
627 mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
628 WARN_ON(!hlist_unhashed(&mp->mdb_node));
629 WARN_ON(mp->ports);
630
631 timer_shutdown_sync(&mp->timer);
632 kfree_rcu(mp, rcu);
633 }
634
br_multicast_del_mdb_entry(struct net_bridge_mdb_entry * mp)635 static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
636 {
637 struct net_bridge *br = mp->br;
638
639 rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
640 br_mdb_rht_params);
641 hlist_del_init_rcu(&mp->mdb_node);
642 hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
643 queue_work(system_long_wq, &br->mcast_gc_work);
644 }
645
br_multicast_group_expired(struct timer_list * t)646 static void br_multicast_group_expired(struct timer_list *t)
647 {
648 struct net_bridge_mdb_entry *mp = timer_container_of(mp, t, timer);
649 struct net_bridge *br = mp->br;
650
651 spin_lock(&br->multicast_lock);
652 if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
653 timer_pending(&mp->timer))
654 goto out;
655
656 br_multicast_host_leave(mp, true);
657
658 if (mp->ports)
659 goto out;
660 br_multicast_del_mdb_entry(mp);
661 out:
662 spin_unlock(&br->multicast_lock);
663 }
664
br_multicast_destroy_group_src(struct net_bridge_mcast_gc * gc)665 static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
666 {
667 struct net_bridge_group_src *src;
668
669 src = container_of(gc, struct net_bridge_group_src, mcast_gc);
670 WARN_ON(!hlist_unhashed(&src->node));
671
672 timer_shutdown_sync(&src->timer);
673 kfree_rcu(src, rcu);
674 }
675
__br_multicast_del_group_src(struct net_bridge_group_src * src)676 void __br_multicast_del_group_src(struct net_bridge_group_src *src)
677 {
678 struct net_bridge *br = src->pg->key.port->br;
679
680 hlist_del_init_rcu(&src->node);
681 src->pg->src_ents--;
682 hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
683 queue_work(system_long_wq, &br->mcast_gc_work);
684 }
685
br_multicast_del_group_src(struct net_bridge_group_src * src,bool fastleave)686 void br_multicast_del_group_src(struct net_bridge_group_src *src,
687 bool fastleave)
688 {
689 br_multicast_fwd_src_remove(src, fastleave);
690 __br_multicast_del_group_src(src);
691 }
692
693 static int
br_multicast_port_ngroups_inc_one(struct net_bridge_mcast_port * pmctx,struct netlink_ext_ack * extack,const char * what)694 br_multicast_port_ngroups_inc_one(struct net_bridge_mcast_port *pmctx,
695 struct netlink_ext_ack *extack,
696 const char *what)
697 {
698 u32 max = READ_ONCE(pmctx->mdb_max_entries);
699 u32 n = READ_ONCE(pmctx->mdb_n_entries);
700
701 /* enforce the max limit when it's a port pmctx or a port-vlan pmctx
702 * with snooping enabled
703 */
704 if (!br_multicast_port_ctx_vlan_disabled(pmctx) && max && n >= max) {
705 NL_SET_ERR_MSG_FMT_MOD(extack, "%s is already in %u groups, and mcast_max_groups=%u",
706 what, n, max);
707 return -E2BIG;
708 }
709
710 WRITE_ONCE(pmctx->mdb_n_entries, n + 1);
711 return 0;
712 }
713
br_multicast_port_ngroups_dec_one(struct net_bridge_mcast_port * pmctx)714 static void br_multicast_port_ngroups_dec_one(struct net_bridge_mcast_port *pmctx)
715 {
716 u32 n = READ_ONCE(pmctx->mdb_n_entries);
717
718 WARN_ON_ONCE(n == 0);
719 WRITE_ONCE(pmctx->mdb_n_entries, n - 1);
720 }
721
br_multicast_port_ngroups_inc(struct net_bridge_port * port,const struct br_ip * group,struct netlink_ext_ack * extack)722 static int br_multicast_port_ngroups_inc(struct net_bridge_port *port,
723 const struct br_ip *group,
724 struct netlink_ext_ack *extack)
725 {
726 struct net_bridge_mcast_port *pmctx;
727 int err;
728
729 lockdep_assert_held_once(&port->br->multicast_lock);
730
731 /* Always count on the port context. */
732 err = br_multicast_port_ngroups_inc_one(&port->multicast_ctx, extack,
733 "Port");
734 if (err) {
735 trace_br_mdb_full(port->dev, group);
736 return err;
737 }
738
739 /* Only count on the VLAN context if VID is given */
740 if (!group->vid)
741 return 0;
742
743 pmctx = br_multicast_port_vid_to_port_ctx(port, group->vid);
744 if (!pmctx)
745 return 0;
746
747 err = br_multicast_port_ngroups_inc_one(pmctx, extack, "Port-VLAN");
748 if (err) {
749 trace_br_mdb_full(port->dev, group);
750 goto dec_one_out;
751 }
752
753 return 0;
754
755 dec_one_out:
756 br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
757 return err;
758 }
759
br_multicast_port_ngroups_dec(struct net_bridge_port * port,u16 vid)760 static void br_multicast_port_ngroups_dec(struct net_bridge_port *port, u16 vid)
761 {
762 struct net_bridge_mcast_port *pmctx;
763
764 lockdep_assert_held_once(&port->br->multicast_lock);
765
766 if (vid) {
767 pmctx = br_multicast_port_vid_to_port_ctx(port, vid);
768 if (pmctx)
769 br_multicast_port_ngroups_dec_one(pmctx);
770 }
771 br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
772 }
773
br_multicast_ngroups_get(const struct net_bridge_mcast_port * pmctx)774 u32 br_multicast_ngroups_get(const struct net_bridge_mcast_port *pmctx)
775 {
776 return READ_ONCE(pmctx->mdb_n_entries);
777 }
778
br_multicast_ngroups_set_max(struct net_bridge_mcast_port * pmctx,u32 max)779 void br_multicast_ngroups_set_max(struct net_bridge_mcast_port *pmctx, u32 max)
780 {
781 WRITE_ONCE(pmctx->mdb_max_entries, max);
782 }
783
br_multicast_ngroups_get_max(const struct net_bridge_mcast_port * pmctx)784 u32 br_multicast_ngroups_get_max(const struct net_bridge_mcast_port *pmctx)
785 {
786 return READ_ONCE(pmctx->mdb_max_entries);
787 }
788
br_multicast_destroy_port_group(struct net_bridge_mcast_gc * gc)789 static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
790 {
791 struct net_bridge_port_group *pg;
792
793 pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
794 WARN_ON(!hlist_unhashed(&pg->mglist));
795 WARN_ON(!hlist_empty(&pg->src_list));
796
797 timer_shutdown_sync(&pg->rexmit_timer);
798 timer_shutdown_sync(&pg->timer);
799 kfree_rcu(pg, rcu);
800 }
801
br_multicast_del_pg(struct net_bridge_mdb_entry * mp,struct net_bridge_port_group * pg,struct net_bridge_port_group __rcu ** pp)802 void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
803 struct net_bridge_port_group *pg,
804 struct net_bridge_port_group __rcu **pp)
805 {
806 struct net_bridge *br = pg->key.port->br;
807 struct net_bridge_group_src *ent;
808 struct hlist_node *tmp;
809
810 rcu_assign_pointer(*pp, pg->next);
811 hlist_del_init(&pg->mglist);
812 br_multicast_eht_clean_sets(pg);
813 hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
814 br_multicast_del_group_src(ent, false);
815 br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
816 if (!br_multicast_is_star_g(&mp->addr)) {
817 rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
818 br_sg_port_rht_params);
819 br_multicast_sg_del_exclude_ports(mp);
820 } else {
821 br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
822 }
823 br_multicast_port_ngroups_dec(pg->key.port, pg->key.addr.vid);
824 hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
825 queue_work(system_long_wq, &br->mcast_gc_work);
826
827 if (!mp->ports && !mp->host_joined && netif_running(br->dev))
828 mod_timer(&mp->timer, jiffies);
829 }
830
br_multicast_find_del_pg(struct net_bridge * br,struct net_bridge_port_group * pg)831 static void br_multicast_find_del_pg(struct net_bridge *br,
832 struct net_bridge_port_group *pg)
833 {
834 struct net_bridge_port_group __rcu **pp;
835 struct net_bridge_mdb_entry *mp;
836 struct net_bridge_port_group *p;
837
838 mp = br_mdb_ip_get(br, &pg->key.addr);
839 if (WARN_ON(!mp))
840 return;
841
842 for (pp = &mp->ports;
843 (p = mlock_dereference(*pp, br)) != NULL;
844 pp = &p->next) {
845 if (p != pg)
846 continue;
847
848 br_multicast_del_pg(mp, pg, pp);
849 return;
850 }
851
852 WARN_ON(1);
853 }
854
br_multicast_port_group_expired(struct timer_list * t)855 static void br_multicast_port_group_expired(struct timer_list *t)
856 {
857 struct net_bridge_port_group *pg = timer_container_of(pg, t, timer);
858 struct net_bridge_group_src *src_ent;
859 struct net_bridge *br = pg->key.port->br;
860 struct hlist_node *tmp;
861 bool changed;
862
863 spin_lock(&br->multicast_lock);
864 if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
865 hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
866 goto out;
867
868 changed = !!(pg->filter_mode == MCAST_EXCLUDE);
869 pg->filter_mode = MCAST_INCLUDE;
870 hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
871 if (!timer_pending(&src_ent->timer)) {
872 br_multicast_del_group_src(src_ent, false);
873 changed = true;
874 }
875 }
876
877 if (hlist_empty(&pg->src_list)) {
878 br_multicast_find_del_pg(br, pg);
879 } else if (changed) {
880 struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);
881
882 if (changed && br_multicast_is_star_g(&pg->key.addr))
883 br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
884
885 if (WARN_ON(!mp))
886 goto out;
887 br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
888 }
889 out:
890 spin_unlock(&br->multicast_lock);
891 }
892
br_multicast_gc(struct hlist_head * head)893 static void br_multicast_gc(struct hlist_head *head)
894 {
895 struct net_bridge_mcast_gc *gcent;
896 struct hlist_node *tmp;
897
898 hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
899 hlist_del_init(&gcent->gc_node);
900 gcent->destroy(gcent);
901 }
902 }
903
__br_multicast_query_handle_vlan(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct sk_buff * skb)904 static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
905 struct net_bridge_mcast_port *pmctx,
906 struct sk_buff *skb)
907 {
908 struct net_bridge_vlan *vlan = NULL;
909
910 if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
911 vlan = pmctx->vlan;
912 else if (br_multicast_ctx_is_vlan(brmctx))
913 vlan = brmctx->vlan;
914
915 if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
916 u16 vlan_proto;
917
918 if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
919 return;
920 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
921 }
922 }
923
/* Build an IGMP query skb: Ethernet + IPv4 (with Router Alert option) +
 * IGMPv2 or IGMPv3 header.
 * @ip_dst/@group: destination address and queried group (0 = general query)
 * @with_srcs: for IGMPv3, append sources from @pg's source list
 * @over_lmqt: select only sources whose timer expires after (true) or
 *	not after (false) the last member query time
 * @sflag: value for the IGMPv3 "suppress router-side processing" flag
 * @igmp_type: out, set to the IGMP message type for stats accounting
 * @need_rexmit: out, set true if a selected source still has
 *	retransmissions pending after this query
 * Returns the skb with data pulled past the Ethernet header, or NULL
 * (no matching sources, over-MTU, or allocation failure).
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			/* Count the sources on the requested side of the
			 * last member query time that still need rexmits,
			 * to size the v3 header.
			 */
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* + 4 for the Router Alert IP option */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;	/* 24 bytes: 20-byte header + 4-byte RA option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	/* Router Alert option directly after the fixed header */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);	/* header + option, matches ihl = 6 */

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		/* Group-specific queries use the shorter last-member time */
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* Second pass: fill in the sources counted above and
		 * consume one rexmit credit per source.
		 */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* Both passes ran under multicast_lock, counts must agree */
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
1069
1070 #if IS_ENABLED(CONFIG_IPV6)
/* Build an MLD query skb: Ethernet + IPv6 + hop-by-hop Router Alert +
 * MLDv1 or MLDv2 query.  Mirrors br_ip4_multicast_alloc_query().
 * @over_llqt: select sources relative to the last listener query time
 * Returns the skb with data pulled past the Ethernet header, or NULL
 * (no matching sources, over-MTU, no usable IPv6 source address, or
 * allocation failure).
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			/* Count sources on the requested side of the last
			 * listener query time that still need rexmits.
			 */
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* + 8 for the hop-by-hop extension header carrying Router Alert */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	/* version 6, traffic class 0, flow label 0 */
	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	/* No link-local source address means we can't act as querier;
	 * remember that on the bridge.
	 */
	if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
			       &ip6h->daddr, 0, &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	/* General queries get the longer response interval */
	interval = ipv6_addr_any(group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* Second pass: fill the sources counted above and consume
		 * one rexmit credit per source.
		 */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* Both passes ran under multicast_lock, counts must agree */
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
1234 #endif
1235
br_multicast_alloc_query(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct net_bridge_port_group * pg,struct br_ip * ip_dst,struct br_ip * group,bool with_srcs,bool over_lmqt,u8 sflag,u8 * igmp_type,bool * need_rexmit)1236 static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
1237 struct net_bridge_mcast_port *pmctx,
1238 struct net_bridge_port_group *pg,
1239 struct br_ip *ip_dst,
1240 struct br_ip *group,
1241 bool with_srcs, bool over_lmqt,
1242 u8 sflag, u8 *igmp_type,
1243 bool *need_rexmit)
1244 {
1245 __be32 ip4_dst;
1246
1247 switch (group->proto) {
1248 case htons(ETH_P_IP):
1249 ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
1250 return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
1251 ip4_dst, group->dst.ip4,
1252 with_srcs, over_lmqt,
1253 sflag, igmp_type,
1254 need_rexmit);
1255 #if IS_ENABLED(CONFIG_IPV6)
1256 case htons(ETH_P_IPV6): {
1257 struct in6_addr ip6_dst;
1258
1259 if (ip_dst)
1260 ip6_dst = ip_dst->dst.ip6;
1261 else
1262 ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
1263 htonl(1));
1264
1265 return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
1266 &ip6_dst, &group->dst.ip6,
1267 with_srcs, over_lmqt,
1268 sflag, igmp_type,
1269 need_rexmit);
1270 }
1271 #endif
1272 }
1273 return NULL;
1274 }
1275
br_multicast_new_group(struct net_bridge * br,struct br_ip * group)1276 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
1277 struct br_ip *group)
1278 {
1279 struct net_bridge_mdb_entry *mp;
1280 int err;
1281
1282 mp = br_mdb_ip_get(br, group);
1283 if (mp)
1284 return mp;
1285
1286 if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
1287 trace_br_mdb_full(br->dev, group);
1288 br_mc_disabled_update(br->dev, false, NULL);
1289 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
1290 return ERR_PTR(-E2BIG);
1291 }
1292
1293 mp = kzalloc_obj(*mp, GFP_ATOMIC);
1294 if (unlikely(!mp))
1295 return ERR_PTR(-ENOMEM);
1296
1297 mp->br = br;
1298 mp->addr = *group;
1299 mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
1300 timer_setup(&mp->timer, br_multicast_group_expired, 0);
1301 err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
1302 br_mdb_rht_params);
1303 if (err) {
1304 kfree(mp);
1305 mp = ERR_PTR(err);
1306 } else {
1307 hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
1308 }
1309
1310 return mp;
1311 }
1312
/* Timer callback: a per-source membership timed out.  In INCLUDE mode
 * the source is deleted (and the whole port group too, if it was the
 * last one); in EXCLUDE mode the source only stops being forwarded.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = timer_container_of(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	/* Bail if the entry was unlinked, the bridge is down, or the
	 * timer was re-armed while we raced to get the lock.
	 */
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		/* Last source gone: remove the whole port group */
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}
1337
1338 struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group * pg,struct br_ip * ip)1339 br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
1340 {
1341 struct net_bridge_group_src *ent;
1342
1343 switch (ip->proto) {
1344 case htons(ETH_P_IP):
1345 hlist_for_each_entry(ent, &pg->src_list, node)
1346 if (ip->src.ip4 == ent->addr.src.ip4)
1347 return ent;
1348 break;
1349 #if IS_ENABLED(CONFIG_IPV6)
1350 case htons(ETH_P_IPV6):
1351 hlist_for_each_entry(ent, &pg->src_list, node)
1352 if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
1353 return ent;
1354 break;
1355 #endif
1356 }
1357
1358 return NULL;
1359 }
1360
1361 struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group * pg,struct br_ip * src_ip)1362 br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
1363 {
1364 struct net_bridge_group_src *grp_src;
1365
1366 if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
1367 return NULL;
1368
1369 switch (src_ip->proto) {
1370 case htons(ETH_P_IP):
1371 if (ipv4_is_zeronet(src_ip->src.ip4) ||
1372 ipv4_is_multicast(src_ip->src.ip4))
1373 return NULL;
1374 break;
1375 #if IS_ENABLED(CONFIG_IPV6)
1376 case htons(ETH_P_IPV6):
1377 if (ipv6_addr_any(&src_ip->src.ip6) ||
1378 ipv6_addr_is_multicast(&src_ip->src.ip6))
1379 return NULL;
1380 break;
1381 #endif
1382 }
1383
1384 grp_src = kzalloc_obj(*grp_src, GFP_ATOMIC);
1385 if (unlikely(!grp_src))
1386 return NULL;
1387
1388 grp_src->pg = pg;
1389 grp_src->br = pg->key.port->br;
1390 grp_src->addr = *src_ip;
1391 grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
1392 timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);
1393
1394 hlist_add_head_rcu(&grp_src->node, &pg->src_list);
1395 pg->src_ents++;
1396
1397 return grp_src;
1398 }
1399
/* Allocate a new port group for (@port, @group), charging it against the
 * per-port group limit first.  (S, G) entries are additionally inserted
 * into the bridge's sg_port hash table.  Returns the group or NULL, with
 * @extack describing the failure.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			const struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group *pg;
	int err;

	err = br_multicast_port_ngroups_inc(port, group, extack);
	if (err)
		return NULL;

	pg = kzalloc_obj(*pg, GFP_ATOMIC);
	if (unlikely(!pg)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		goto err_dec_ngroups;
	}

	pg->key.port = port;
	pg->key.addr = *group;
	pg->flags = flags;
	pg->filter_mode = filter_mode;
	pg->rt_protocol = rt_protocol;
	pg->eht_host_tree = RB_ROOT;
	pg->eht_set_tree = RB_ROOT;
	pg->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&pg->src_list);

	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &pg->rhnode,
					  br_sg_port_rht_params)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't insert new port group");
		goto err_free_pg;
	}

	rcu_assign_pointer(pg->next, next);
	timer_setup(&pg->timer, br_multicast_port_group_expired, 0);
	timer_setup(&pg->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&pg->mglist, &port->mglist);

	if (src)
		memcpy(pg->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(pg->eth_addr);

	return pg;

err_free_pg:
	kfree(pg);
err_dec_ngroups:
	br_multicast_port_ngroups_dec(port, group->vid);
	return NULL;
}
1458
/* Unlink and free a port group created by br_multicast_new_port_group(),
 * uncharging it from the per-port group accounting.
 */
void br_multicast_del_port_group(struct net_bridge_port_group *p)
{
	struct net_bridge_port *port = p->key.port;
	__u16 vid = p->key.addr.vid;

	hlist_del_init(&p->mglist);
	/* (S, G) entries were also keyed in the sg_port hash table */
	if (!br_multicast_is_star_g(&p->key.addr))
		rhashtable_remove_fast(&port->br->sg_port_tbl, &p->rhnode,
				       br_sg_port_rht_params);
	kfree(p);
	br_multicast_port_ngroups_dec(port, vid);
}
1471
/* Mark @mp as joined by the bridge device itself (optionally notifying
 * userspace on the first join) and refresh its membership timer.  L2
 * groups are static and keep no timer.
 */
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
			    struct net_bridge_mdb_entry *mp, bool notify)
{
	bool first_join = !mp->host_joined;

	if (first_join) {
		mp->host_joined = true;
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (!br_group_is_l2(&mp->addr))
		mod_timer(&mp->timer,
			  jiffies + brmctx->multicast_membership_interval);
}
1488
/* Clear the bridge device's own membership of @mp, optionally notifying
 * userspace.  No-op if the host had not joined.
 */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(&mp->addr))
		br_multicast_star_g_host_state(mp);
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}
1500
/* Add a membership for @group under multicast_lock.
 * With no port context the bridge device itself joins.  Otherwise the
 * port group is found or created in the mdb entry's port list.
 * @igmpv2_mldv1: report came from a v2 IGMP / v1 MLD host, so arm the
 *	whole-group membership timer
 * @blocked: new entries start with forwarding blocked
 * Returns the port group, NULL (host join / inactive context), or an
 * ERR_PTR on failure.
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_multicast_new_group(brmctx->br, group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	if (!pmctx) {
		br_multicast_host_join(brmctx, mp, true);
		goto out;
	}

	/* Walk the port list looking for an existing entry; the list is
	 * kept ordered by port pointer (descending), so stop at the
	 * insertion point once we pass our port.
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, pmctx->port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL, NULL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);

found:
	/* v2/v1 hosts carry no source lists: refresh the group timer */
	if (igmpv2_mldv1)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);

out:
	return p;
}
1555
/* Locked wrapper around __br_multicast_add_group() that converts its
 * port-group result into an error code for report processing.
 */
static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&brmctx->br->multicast_lock);
	pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = PTR_ERR_OR_ZERO(pg);
	spin_unlock(&brmctx->br->multicast_lock);

	return err;
}
1575
/* Process an IPv4 join for @group on vlan @vid.  Link-local multicast
 * (224.0.0.x) is never snooped.  @igmpv2 selects EXCLUDE filter mode,
 * since IGMPv2 reports carry no source lists.
 */
static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	/* br_ip is used as a hash key: zero it fully, padding included */
	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE,
				      igmpv2);
}
1598
1599 #if IS_ENABLED(CONFIG_IPV6)
/* Process an IPv6 join for @group on vlan @vid.  The link-local all-nodes
 * address is never snooped.  @mldv1 selects EXCLUDE filter mode, since
 * MLDv1 reports carry no source lists.
 */
static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	/* br_ip is used as a hash key: zero it fully, padding included */
	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE,
				      mldv1);
}
1622 #endif
1623
/* Unlink a router-list node if it is hashed.  Returns true if an entry
 * was actually removed (callers use this to decide on notification).
 */
static bool br_multicast_rport_del(struct hlist_node *rlist)
{
	if (hlist_unhashed(rlist))
		return false;

	hlist_del_init_rcu(rlist);
	return true;
}
1632
/* Remove the port from the IPv4 multicast router list */
static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
	return br_multicast_rport_del(&pmctx->ip4_rlist);
}
1637
/* Remove the port from the IPv6 multicast router list; reports "nothing
 * removed" when IPv6 is compiled out.
 */
static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
	return false;
#endif
}
1646
br_multicast_router_expired(struct net_bridge_mcast_port * pmctx,struct timer_list * t,struct hlist_node * rlist)1647 static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
1648 struct timer_list *t,
1649 struct hlist_node *rlist)
1650 {
1651 struct net_bridge *br = pmctx->port->br;
1652 bool del;
1653
1654 spin_lock(&br->multicast_lock);
1655 if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
1656 pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
1657 timer_pending(t))
1658 goto out;
1659
1660 del = br_multicast_rport_del(rlist);
1661 br_multicast_rport_del_notify(pmctx, del);
1662 out:
1663 spin_unlock(&br->multicast_lock);
1664 }
1665
/* Per-port IPv4 multicast router timer expired */
static void br_ip4_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = timer_container_of(pmctx, t,
								 ip4_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}
1673
1674 #if IS_ENABLED(CONFIG_IPV6)
/* Per-port IPv6 multicast router timer expired */
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = timer_container_of(pmctx, t,
								 ip6_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
1682 #endif
1683
/* Propagate the bridge's "a multicast router is present" state to
 * offloading drivers via switchdev (deferred).
 */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}
1696
br_multicast_local_router_expired(struct net_bridge_mcast * brmctx,struct timer_list * timer)1697 static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
1698 struct timer_list *timer)
1699 {
1700 spin_lock(&brmctx->br->multicast_lock);
1701 if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
1702 brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
1703 br_ip4_multicast_is_router(brmctx) ||
1704 br_ip6_multicast_is_router(brmctx))
1705 goto out;
1706
1707 br_mc_router_state_change(brmctx->br, false);
1708 out:
1709 spin_unlock(&brmctx->br->multicast_lock);
1710 }
1711
/* Bridge-level IPv4 local router timer expired */
static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t,
							     ip4_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
1719
1720 #if IS_ENABLED(CONFIG_IPV6)
/* Bridge-level IPv6 local router timer expired */
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t,
							     ip6_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
1728 #endif
1729
br_multicast_querier_expired(struct net_bridge_mcast * brmctx,struct bridge_mcast_own_query * query)1730 static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
1731 struct bridge_mcast_own_query *query)
1732 {
1733 spin_lock(&brmctx->br->multicast_lock);
1734 if (!netif_running(brmctx->br->dev) ||
1735 br_multicast_ctx_vlan_global_disabled(brmctx) ||
1736 !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
1737 goto out;
1738
1739 br_multicast_start_querier(brmctx, query);
1740
1741 out:
1742 spin_unlock(&brmctx->br->multicast_lock);
1743 }
1744
/* The IPv4 other-querier timer expired */
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t,
							     ip4_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}
1752
1753 #if IS_ENABLED(CONFIG_IPV6)
/* The IPv6 other-querier timer expired */
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t,
							     ip6_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
1761 #endif
1762
/* Intentionally empty expiry handler: the query-delay timer appears to
 * matter only through its pending state elsewhere — NOTE(review):
 * confirm against the timer's users.
 */
static void br_multicast_query_delay_expired(struct timer_list *t)
{
}
1766
/* Record our own query's source address (taken from the query skb we are
 * about to loop back) as the context's querier address.
 */
static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
1778
/* Build and emit one query.  With a port context the skb is transmitted
 * out of that port through the NF_BR_LOCAL_OUT hook; without one, the
 * bridge selects itself as querier and loops the skb back via netif_rx().
 * When sending a group-and-source query with the suppress flag set
 * (sflag != 0), a second query covering the sources at-or-below the last
 * member query time is sent immediately after the over-LMQT one.
 */
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
				       with_srcs, over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		/* Follow up with the under-LMQT (suppress flag honoured)
		 * source query — only once, since over_lmqt is cleared.
		 */
		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, group, skb);
		br_multicast_count(brmctx->br, NULL, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
1822
/* Take a consistent snapshot of the querier (ifindex + address) into
 * @dest using the seqcount retry loop, so readers never observe a torn
 * update from br_multicast_update_querier().
 */
static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
				      struct bridge_mcast_querier *dest)
{
	unsigned int seq;

	memset(dest, 0, sizeof(*dest));
	do {
		seq = read_seqcount_begin(&querier->seq);
		dest->port_ifidx = querier->port_ifidx;
		memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
	} while (read_seqcount_retry(&querier->seq, seq));
}
1835
/* Publish a new querier (port ifindex + source address) under the
 * seqcount write side, paired with br_multicast_read_querier().
 */
static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
					struct bridge_mcast_querier *querier,
					int ifindex,
					struct br_ip *saddr)
{
	write_seqcount_begin(&querier->seq);
	querier->port_ifidx = ifindex;
	memcpy(&querier->addr, saddr, sizeof(*saddr));
	write_seqcount_end(&querier->seq);
}
1846
/* Send a general query for @own_query's family and arm the next
 * own-query timer.  Only runs when this context is active, snooping is
 * enabled, we are configured as querier, and no foreign querier's timer
 * is pending.
 */
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct bridge_mcast_querier *querier;
	struct br_ip br_group;
	unsigned long time;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    !brmctx->multicast_querier)
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	/* Map own_query to its family's querier/other-query state */
	if (pmctx ? (own_query == &pmctx->ip4_own_query) :
		    (own_query == &brmctx->ip4_own_query)) {
		querier = &brmctx->ip4_querier;
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		querier = &brmctx->ip6_querier;
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	/* Without CONFIG_IPV6 the else branch above is compiled out, so
	 * an IPv6 own_query leaves other_query NULL and we bail here.
	 */
	if (!other_query || timer_pending(&other_query->timer))
		return;

	/* we're about to select ourselves as querier */
	if (!pmctx && querier->port_ifidx) {
		struct br_ip zeroip = {};

		br_multicast_update_querier(brmctx, querier, 0, &zeroip);
	}

	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
				  0, NULL);

	/* Startup-phase queries go out at the shorter startup interval */
	time = jiffies;
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}
1895
1896 static void
br_multicast_port_query_expired(struct net_bridge_mcast_port * pmctx,struct bridge_mcast_own_query * query)1897 br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
1898 struct bridge_mcast_own_query *query)
1899 {
1900 struct net_bridge *br = pmctx->port->br;
1901 struct net_bridge_mcast *brmctx;
1902
1903 spin_lock(&br->multicast_lock);
1904 if (br_multicast_port_ctx_state_stopped(pmctx))
1905 goto out;
1906
1907 brmctx = br_multicast_port_ctx_get_global(pmctx);
1908 if (query->startup_sent < brmctx->multicast_startup_query_count)
1909 query->startup_sent++;
1910
1911 br_multicast_send_query(brmctx, pmctx, query);
1912
1913 out:
1914 spin_unlock(&br->multicast_lock);
1915 }
1916
/* Per-port IPv4 own-query timer expired */
static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = timer_container_of(pmctx, t,
								 ip4_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}
1924
1925 #if IS_ENABLED(CONFIG_IPV6)
/* Per-port IPv6 own-query timer expired */
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = timer_container_of(pmctx, t,
								 ip6_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
1933 #endif
1934
/* Per-group query retransmit timer: while we are the active querier,
 * retransmit pending group-specific and group-and-source-specific queries
 * for this port group and re-arm the timer if more retransmissions remain.
 * Runs in timer context, hence the plain spin_lock.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = timer_container_of(pg, t,
							      rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		goto out;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!brmctx->multicast_querier)
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	/* another querier is active - leave querying to it */
	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		/* group-specific query (with_srcs == false) */
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	/* group-and-source-specific query; need_rexmit is set if any source
	 * still has retransmissions pending
	 */
	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
					     brmctx->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}
1981
/* Push the bridge's multicast-enabled state down to switchdev drivers.
 * switchdev models the inverse flag ("mc_disabled"), hence the negation.
 */
static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {};

	attr.orig_dev = dev;
	attr.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED;
	attr.flags = SWITCHDEV_F_DEFER;
	attr.u.mc_disabled = !value;

	return switchdev_port_attr_set(dev, &attr, extack);
}
1994
/* Initialize a port multicast context.  @vlan is NULL for the port-global
 * context and non-NULL when the context belongs to a port vlan.
 */
void br_multicast_port_ctx_init(struct net_bridge_port *port,
				struct net_bridge_vlan *vlan,
				struct net_bridge_mcast_port *pmctx)
{
	pmctx->port = port;
	pmctx->vlan = vlan;
	pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	timer_setup(&pmctx->ip4_mc_router_timer,
		    br_ip4_multicast_router_expired, 0);
	timer_setup(&pmctx->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&pmctx->ip6_mc_router_timer,
		    br_ip6_multicast_router_expired, 0);
	timer_setup(&pmctx->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
	/* initialize mdb_n_entries if a new port vlan is being created */
	if (vlan) {
		struct net_bridge_port_group *pg;
		u32 n = 0;

		/* count the port's existing groups that belong to this vlan */
		spin_lock_bh(&port->br->multicast_lock);
		hlist_for_each_entry(pg, &port->mglist, mglist)
			if (pg->key.addr.vid == vlan->vid)
				n++;
		WRITE_ONCE(pmctx->mdb_n_entries, n);
		spin_unlock_bh(&port->br->multicast_lock);
	}
}
2025
/* Tear down a port multicast context: synchronously stop the router timers
 * outside the lock (so their handlers cannot run afterwards), then drop the
 * context from the router port lists and notify if anything was removed.
 */
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	bool del = false;

#if IS_ENABLED(CONFIG_IPV6)
	timer_delete_sync(&pmctx->ip6_mc_router_timer);
#endif
	timer_delete_sync(&pmctx->ip4_mc_router_timer);

	spin_lock_bh(&br->multicast_lock);
	del |= br_ip6_multicast_rport_del(pmctx);
	del |= br_ip4_multicast_rport_del(pmctx);
	br_multicast_rport_del_notify(pmctx, del);
	spin_unlock_bh(&br->multicast_lock);
}
2042
/* Per-port multicast setup when a port joins the bridge.
 * Returns 0 on success or a negative errno; a switchdev -EOPNOTSUPP from
 * the driver is deliberately tolerated.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
	br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);

	/* sync the bridge's current mcast-enabled state to the driver */
	err = br_mc_disabled_update(port->dev,
				    br_opt_get(port->br,
					       BROPT_MULTICAST_ENABLED),
				    NULL);
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
2063
/* Per-port multicast teardown when a port leaves the bridge. */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	/* wait for deferred group destruction before freeing port state */
	flush_work(&br->mcast_gc_work);
	br_multicast_port_ctx_deinit(&port->multicast_ctx);
	free_percpu(port->mcast_stats);
}
2079
/* Restart own queries: reset the startup counter and arm the query timer to
 * fire immediately.
 * NOTE(review): the timer_delete_sync_try()/timer_delete() sequence appears
 * to guard against rearming a timer that is concurrently being shut down -
 * confirm against the timer_delete_sync_try() API semantics.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (timer_delete_sync_try(&query->timer) >= 0 ||
	    timer_delete(&query->timer))
		mod_timer(&query->timer, jiffies);
}
2088
/* Enable a port context: kick IGMP/MLD own queries and, for a permanently
 * configured router port, re-add it to the router lists.  Only acts when
 * multicast is enabled and the bridge device is running.  Caller holds the
 * bridge multicast lock.
 */
static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !netif_running(br->dev))
		return;

	br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}
}
2108
br_multicast_enable_port_ctx(struct net_bridge_mcast_port * pmctx)2109 static void br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
2110 {
2111 struct net_bridge *br = pmctx->port->br;
2112
2113 spin_lock_bh(&br->multicast_lock);
2114 if (br_multicast_port_ctx_is_vlan(pmctx) &&
2115 !(pmctx->vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED)) {
2116 spin_unlock_bh(&br->multicast_lock);
2117 return;
2118 }
2119 __br_multicast_enable_port_ctx(pmctx);
2120 spin_unlock_bh(&br->multicast_lock);
2121 }
2122
/* Disable a port context: flush its non-permanent groups (for a vlan
 * context, only the groups of that vlan), drop it from the router port
 * lists and stop all of its timers.  Caller holds the bridge multicast
 * lock.
 */
static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(pmctx->port->br, pg);

	del |= br_ip4_multicast_rport_del(pmctx);
	timer_delete(&pmctx->ip4_mc_router_timer);
	timer_delete(&pmctx->ip4_own_query.timer);
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	timer_delete(&pmctx->ip6_mc_router_timer);
	timer_delete(&pmctx->ip6_own_query.timer);
#endif
	br_multicast_rport_del_notify(pmctx, del);
}
2145
br_multicast_disable_port_ctx(struct net_bridge_mcast_port * pmctx)2146 static void br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
2147 {
2148 struct net_bridge *br = pmctx->port->br;
2149
2150 spin_lock_bh(&br->multicast_lock);
2151 if (br_multicast_port_ctx_is_vlan(pmctx) &&
2152 !(pmctx->vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED)) {
2153 spin_unlock_bh(&br->multicast_lock);
2154 return;
2155 }
2156
2157 __br_multicast_disable_port_ctx(pmctx);
2158 spin_unlock_bh(&br->multicast_lock);
2159 }
2160
/* Enable or disable multicast processing for a port.  With per-vlan
 * snooping active, toggle every vlan context on the port (enabling only
 * vlans whose STP state allows it); otherwise toggle the port-global
 * context.
 */
static void br_multicast_toggle_port(struct net_bridge_port *port, bool on)
{
#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
	if (br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		rcu_read_lock();
		vg = nbp_vlan_group_rcu(port);
		if (!vg) {
			rcu_read_unlock();
			return;
		}

		/* iterate each vlan, toggle vlan multicast context */
		list_for_each_entry_rcu(vlan, &vg->vlan_list, vlist) {
			struct net_bridge_mcast_port *pmctx =
						&vlan->port_mcast_ctx;
			u8 state = br_vlan_get_state(vlan);
			/* enable vlan multicast context when state is
			 * LEARNING or FORWARDING
			 */
			if (on && br_vlan_state_allowed(state, true))
				br_multicast_enable_port_ctx(pmctx);
			else
				br_multicast_disable_port_ctx(pmctx);
		}
		rcu_read_unlock();
		return;
	}
#endif
	/* toggle port multicast context when vlan snooping is disabled */
	if (on)
		br_multicast_enable_port_ctx(&port->multicast_ctx);
	else
		br_multicast_disable_port_ctx(&port->multicast_ctx);
}
2198
/* Enable multicast processing on a port (e.g. when it comes up). */
void br_multicast_enable_port(struct net_bridge_port *port)
{
	br_multicast_toggle_port(port, true);
}
2203
/* Disable multicast processing on a port (e.g. when it goes down). */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	br_multicast_toggle_port(port, false);
}
2208
__grp_src_delete_marked(struct net_bridge_port_group * pg)2209 static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
2210 {
2211 struct net_bridge_group_src *ent;
2212 struct hlist_node *tmp;
2213 int deleted = 0;
2214
2215 hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
2216 if (ent->flags & BR_SGRP_F_DELETE) {
2217 br_multicast_del_group_src(ent, false);
2218 deleted++;
2219 }
2220
2221 return deleted;
2222 }
2223
/* Update a source entry's timer and refresh its forwarding handling. */
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}
2230
/* Send a group-and-source-specific query covering all sources marked with
 * BR_SGRP_F_SEND, lowering their timers to the last member query time, and
 * schedule retransmissions.  Queries are only sent while we are the active
 * querier for this context.
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			/* only lower timers that expire after lmqt */
			if (ent->timer.expires > lmqt) {
				/* arm per-source retransmissions only when
				 * we are the active querier
				 */
				if (brmctx->multicast_querier &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	if (!brmctx->multicast_querier ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	/* schedule the next retransmission, keeping an earlier deadline */
	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}
2277
/* Send a group-specific query for @pg (when we are the active querier) and
 * schedule its retransmissions; in EXCLUDE mode also lower the group timer
 * to the last member query time.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (brmctx->multicast_querier &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		/* one query is sent now, the rest via the rexmit timer */
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	/* in EXCLUDE mode lower the group timer to lmqt if it is later */
	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}
2312
2313 /* State Msg type New state Actions
2314 * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI
2315 * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI
2316 * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI
2317 */
br_multicast_isinc_allow(const struct net_bridge_mcast * brmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2318 static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
2319 struct net_bridge_port_group *pg, void *h_addr,
2320 void *srcs, u32 nsrcs, size_t addr_size,
2321 int grec_type)
2322 {
2323 struct net_bridge_group_src *ent;
2324 unsigned long now = jiffies;
2325 bool changed = false;
2326 struct br_ip src_ip;
2327 u32 src_idx;
2328
2329 memset(&src_ip, 0, sizeof(src_ip));
2330 src_ip.proto = pg->key.addr.proto;
2331 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2332 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2333 ent = br_multicast_find_group_src(pg, &src_ip);
2334 if (!ent) {
2335 ent = br_multicast_new_group_src(pg, &src_ip);
2336 if (ent)
2337 changed = true;
2338 }
2339
2340 if (ent)
2341 __grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
2342 }
2343
2344 if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2345 grec_type))
2346 changed = true;
2347
2348 return changed;
2349 }
2350
2351 /* State Msg type New state Actions
2352 * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
2353 * Delete (A-B)
2354 * Group Timer=GMI
2355 */
__grp_src_isexc_incl(const struct net_bridge_mcast * brmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2356 static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
2357 struct net_bridge_port_group *pg, void *h_addr,
2358 void *srcs, u32 nsrcs, size_t addr_size,
2359 int grec_type)
2360 {
2361 struct net_bridge_group_src *ent;
2362 struct br_ip src_ip;
2363 u32 src_idx;
2364
2365 hlist_for_each_entry(ent, &pg->src_list, node)
2366 ent->flags |= BR_SGRP_F_DELETE;
2367
2368 memset(&src_ip, 0, sizeof(src_ip));
2369 src_ip.proto = pg->key.addr.proto;
2370 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2371 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2372 ent = br_multicast_find_group_src(pg, &src_ip);
2373 if (ent)
2374 ent->flags &= ~BR_SGRP_F_DELETE;
2375 else
2376 ent = br_multicast_new_group_src(pg, &src_ip);
2377 if (ent)
2378 br_multicast_fwd_src_handle(ent);
2379 }
2380
2381 br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2382 grec_type);
2383
2384 __grp_src_delete_marked(pg);
2385 }
2386
2387 /* State Msg type New state Actions
2388 * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
2389 * Delete (X-A)
2390 * Delete (Y-A)
2391 * Group Timer=GMI
2392 */
__grp_src_isexc_excl(const struct net_bridge_mcast * brmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2393 static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
2394 struct net_bridge_port_group *pg, void *h_addr,
2395 void *srcs, u32 nsrcs, size_t addr_size,
2396 int grec_type)
2397 {
2398 struct net_bridge_group_src *ent;
2399 unsigned long now = jiffies;
2400 bool changed = false;
2401 struct br_ip src_ip;
2402 u32 src_idx;
2403
2404 hlist_for_each_entry(ent, &pg->src_list, node)
2405 ent->flags |= BR_SGRP_F_DELETE;
2406
2407 memset(&src_ip, 0, sizeof(src_ip));
2408 src_ip.proto = pg->key.addr.proto;
2409 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2410 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2411 ent = br_multicast_find_group_src(pg, &src_ip);
2412 if (ent) {
2413 ent->flags &= ~BR_SGRP_F_DELETE;
2414 } else {
2415 ent = br_multicast_new_group_src(pg, &src_ip);
2416 if (ent) {
2417 __grp_src_mod_timer(ent,
2418 now + br_multicast_gmi(brmctx));
2419 changed = true;
2420 }
2421 }
2422 }
2423
2424 if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2425 grec_type))
2426 changed = true;
2427
2428 if (__grp_src_delete_marked(pg))
2429 changed = true;
2430
2431 return changed;
2432 }
2433
br_multicast_isexc(const struct net_bridge_mcast * brmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2434 static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
2435 struct net_bridge_port_group *pg, void *h_addr,
2436 void *srcs, u32 nsrcs, size_t addr_size,
2437 int grec_type)
2438 {
2439 bool changed = false;
2440
2441 switch (pg->filter_mode) {
2442 case MCAST_INCLUDE:
2443 __grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2444 grec_type);
2445 br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
2446 changed = true;
2447 break;
2448 case MCAST_EXCLUDE:
2449 changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
2450 addr_size, grec_type);
2451 break;
2452 }
2453
2454 pg->filter_mode = MCAST_EXCLUDE;
2455 mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));
2456
2457 return changed;
2458 }
2459
2460 /* State Msg type New state Actions
2461 * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
2462 * Send Q(G,A-B)
2463 */
__grp_src_toin_incl(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2464 static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
2465 struct net_bridge_mcast_port *pmctx,
2466 struct net_bridge_port_group *pg, void *h_addr,
2467 void *srcs, u32 nsrcs, size_t addr_size,
2468 int grec_type)
2469 {
2470 u32 src_idx, to_send = pg->src_ents;
2471 struct net_bridge_group_src *ent;
2472 unsigned long now = jiffies;
2473 bool changed = false;
2474 struct br_ip src_ip;
2475
2476 hlist_for_each_entry(ent, &pg->src_list, node)
2477 ent->flags |= BR_SGRP_F_SEND;
2478
2479 memset(&src_ip, 0, sizeof(src_ip));
2480 src_ip.proto = pg->key.addr.proto;
2481 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2482 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2483 ent = br_multicast_find_group_src(pg, &src_ip);
2484 if (ent) {
2485 ent->flags &= ~BR_SGRP_F_SEND;
2486 to_send--;
2487 } else {
2488 ent = br_multicast_new_group_src(pg, &src_ip);
2489 if (ent)
2490 changed = true;
2491 }
2492 if (ent)
2493 __grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
2494 }
2495
2496 if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2497 grec_type))
2498 changed = true;
2499
2500 if (to_send)
2501 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2502
2503 return changed;
2504 }
2505
2506 /* State Msg type New state Actions
2507 * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
2508 * Send Q(G,X-A)
2509 * Send Q(G)
2510 */
__grp_src_toin_excl(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2511 static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
2512 struct net_bridge_mcast_port *pmctx,
2513 struct net_bridge_port_group *pg, void *h_addr,
2514 void *srcs, u32 nsrcs, size_t addr_size,
2515 int grec_type)
2516 {
2517 u32 src_idx, to_send = pg->src_ents;
2518 struct net_bridge_group_src *ent;
2519 unsigned long now = jiffies;
2520 bool changed = false;
2521 struct br_ip src_ip;
2522
2523 hlist_for_each_entry(ent, &pg->src_list, node)
2524 if (timer_pending(&ent->timer))
2525 ent->flags |= BR_SGRP_F_SEND;
2526
2527 memset(&src_ip, 0, sizeof(src_ip));
2528 src_ip.proto = pg->key.addr.proto;
2529 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2530 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2531 ent = br_multicast_find_group_src(pg, &src_ip);
2532 if (ent) {
2533 if (timer_pending(&ent->timer)) {
2534 ent->flags &= ~BR_SGRP_F_SEND;
2535 to_send--;
2536 }
2537 } else {
2538 ent = br_multicast_new_group_src(pg, &src_ip);
2539 if (ent)
2540 changed = true;
2541 }
2542 if (ent)
2543 __grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
2544 }
2545
2546 if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2547 grec_type))
2548 changed = true;
2549
2550 if (to_send)
2551 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2552
2553 __grp_send_query_and_rexmit(brmctx, pmctx, pg);
2554
2555 return changed;
2556 }
2557
br_multicast_toin(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2558 static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
2559 struct net_bridge_mcast_port *pmctx,
2560 struct net_bridge_port_group *pg, void *h_addr,
2561 void *srcs, u32 nsrcs, size_t addr_size,
2562 int grec_type)
2563 {
2564 bool changed = false;
2565
2566 switch (pg->filter_mode) {
2567 case MCAST_INCLUDE:
2568 changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
2569 nsrcs, addr_size, grec_type);
2570 break;
2571 case MCAST_EXCLUDE:
2572 changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
2573 nsrcs, addr_size, grec_type);
2574 break;
2575 }
2576
2577 if (br_multicast_eht_should_del_pg(pg)) {
2578 pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
2579 br_multicast_find_del_pg(pg->key.port->br, pg);
2580 /* a notification has already been sent and we shouldn't
2581 * access pg after the delete so we have to return false
2582 */
2583 changed = false;
2584 }
2585
2586 return changed;
2587 }
2588
2589 /* State Msg type New state Actions
2590 * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
2591 * Delete (A-B)
2592 * Send Q(G,A*B)
2593 * Group Timer=GMI
2594 */
__grp_src_toex_incl(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2595 static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
2596 struct net_bridge_mcast_port *pmctx,
2597 struct net_bridge_port_group *pg, void *h_addr,
2598 void *srcs, u32 nsrcs, size_t addr_size,
2599 int grec_type)
2600 {
2601 struct net_bridge_group_src *ent;
2602 u32 src_idx, to_send = 0;
2603 struct br_ip src_ip;
2604
2605 hlist_for_each_entry(ent, &pg->src_list, node)
2606 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
2607
2608 memset(&src_ip, 0, sizeof(src_ip));
2609 src_ip.proto = pg->key.addr.proto;
2610 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2611 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2612 ent = br_multicast_find_group_src(pg, &src_ip);
2613 if (ent) {
2614 ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
2615 BR_SGRP_F_SEND;
2616 to_send++;
2617 } else {
2618 ent = br_multicast_new_group_src(pg, &src_ip);
2619 }
2620 if (ent)
2621 br_multicast_fwd_src_handle(ent);
2622 }
2623
2624 br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2625 grec_type);
2626
2627 __grp_src_delete_marked(pg);
2628 if (to_send)
2629 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2630 }
2631
2632 /* State Msg type New state Actions
2633 * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
2634 * Delete (X-A)
2635 * Delete (Y-A)
2636 * Send Q(G,A-Y)
2637 * Group Timer=GMI
2638 */
__grp_src_toex_excl(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2639 static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
2640 struct net_bridge_mcast_port *pmctx,
2641 struct net_bridge_port_group *pg, void *h_addr,
2642 void *srcs, u32 nsrcs, size_t addr_size,
2643 int grec_type)
2644 {
2645 struct net_bridge_group_src *ent;
2646 u32 src_idx, to_send = 0;
2647 bool changed = false;
2648 struct br_ip src_ip;
2649
2650 hlist_for_each_entry(ent, &pg->src_list, node)
2651 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
2652
2653 memset(&src_ip, 0, sizeof(src_ip));
2654 src_ip.proto = pg->key.addr.proto;
2655 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2656 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2657 ent = br_multicast_find_group_src(pg, &src_ip);
2658 if (ent) {
2659 ent->flags &= ~BR_SGRP_F_DELETE;
2660 } else {
2661 ent = br_multicast_new_group_src(pg, &src_ip);
2662 if (ent) {
2663 __grp_src_mod_timer(ent, pg->timer.expires);
2664 changed = true;
2665 }
2666 }
2667 if (ent && timer_pending(&ent->timer)) {
2668 ent->flags |= BR_SGRP_F_SEND;
2669 to_send++;
2670 }
2671 }
2672
2673 if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2674 grec_type))
2675 changed = true;
2676
2677 if (__grp_src_delete_marked(pg))
2678 changed = true;
2679 if (to_send)
2680 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2681
2682 return changed;
2683 }
2684
br_multicast_toex(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2685 static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
2686 struct net_bridge_mcast_port *pmctx,
2687 struct net_bridge_port_group *pg, void *h_addr,
2688 void *srcs, u32 nsrcs, size_t addr_size,
2689 int grec_type)
2690 {
2691 bool changed = false;
2692
2693 switch (pg->filter_mode) {
2694 case MCAST_INCLUDE:
2695 __grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
2696 addr_size, grec_type);
2697 br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
2698 changed = true;
2699 break;
2700 case MCAST_EXCLUDE:
2701 changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
2702 nsrcs, addr_size, grec_type);
2703 break;
2704 }
2705
2706 pg->filter_mode = MCAST_EXCLUDE;
2707 mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));
2708
2709 return changed;
2710 }
2711
2712 /* State Msg type New state Actions
2713 * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
2714 */
__grp_src_block_incl(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2715 static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
2716 struct net_bridge_mcast_port *pmctx,
2717 struct net_bridge_port_group *pg, void *h_addr,
2718 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
2719 {
2720 struct net_bridge_group_src *ent;
2721 u32 src_idx, to_send = 0;
2722 bool changed = false;
2723 struct br_ip src_ip;
2724
2725 hlist_for_each_entry(ent, &pg->src_list, node)
2726 ent->flags &= ~BR_SGRP_F_SEND;
2727
2728 memset(&src_ip, 0, sizeof(src_ip));
2729 src_ip.proto = pg->key.addr.proto;
2730 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2731 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2732 ent = br_multicast_find_group_src(pg, &src_ip);
2733 if (ent) {
2734 ent->flags |= BR_SGRP_F_SEND;
2735 to_send++;
2736 }
2737 }
2738
2739 if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2740 grec_type))
2741 changed = true;
2742
2743 if (to_send)
2744 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2745
2746 return changed;
2747 }
2748
2749 /* State Msg type New state Actions
2750 * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
2751 * Send Q(G,A-Y)
2752 */
__grp_src_block_excl(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2753 static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
2754 struct net_bridge_mcast_port *pmctx,
2755 struct net_bridge_port_group *pg, void *h_addr,
2756 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
2757 {
2758 struct net_bridge_group_src *ent;
2759 u32 src_idx, to_send = 0;
2760 bool changed = false;
2761 struct br_ip src_ip;
2762
2763 hlist_for_each_entry(ent, &pg->src_list, node)
2764 ent->flags &= ~BR_SGRP_F_SEND;
2765
2766 memset(&src_ip, 0, sizeof(src_ip));
2767 src_ip.proto = pg->key.addr.proto;
2768 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2769 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2770 ent = br_multicast_find_group_src(pg, &src_ip);
2771 if (!ent) {
2772 ent = br_multicast_new_group_src(pg, &src_ip);
2773 if (ent) {
2774 __grp_src_mod_timer(ent, pg->timer.expires);
2775 changed = true;
2776 }
2777 }
2778 if (ent && timer_pending(&ent->timer)) {
2779 ent->flags |= BR_SGRP_F_SEND;
2780 to_send++;
2781 }
2782 }
2783
2784 if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2785 grec_type))
2786 changed = true;
2787
2788 if (to_send)
2789 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2790
2791 return changed;
2792 }
2793
br_multicast_block(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2794 static bool br_multicast_block(struct net_bridge_mcast *brmctx,
2795 struct net_bridge_mcast_port *pmctx,
2796 struct net_bridge_port_group *pg, void *h_addr,
2797 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
2798 {
2799 bool changed = false;
2800
2801 switch (pg->filter_mode) {
2802 case MCAST_INCLUDE:
2803 changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
2804 nsrcs, addr_size, grec_type);
2805 break;
2806 case MCAST_EXCLUDE:
2807 changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
2808 nsrcs, addr_size, grec_type);
2809 break;
2810 }
2811
2812 if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
2813 br_multicast_eht_should_del_pg(pg)) {
2814 if (br_multicast_eht_should_del_pg(pg))
2815 pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
2816 br_multicast_find_del_pg(pg->key.port->br, pg);
2817 /* a notification has already been sent and we shouldn't
2818 * access pg after the delete so we have to return false
2819 */
2820 changed = false;
2821 }
2822
2823 return changed;
2824 }
2825
2826 static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry * mp,struct net_bridge_port * p,const unsigned char * src)2827 br_multicast_find_port(struct net_bridge_mdb_entry *mp,
2828 struct net_bridge_port *p,
2829 const unsigned char *src)
2830 {
2831 struct net_bridge *br __maybe_unused = mp->br;
2832 struct net_bridge_port_group *pg;
2833
2834 for (pg = mlock_dereference(mp->ports, br);
2835 pg;
2836 pg = mlock_dereference(pg->next, br))
2837 if (br_port_group_equal(pg, p, src))
2838 return pg;
2839
2840 return NULL;
2841 }
2842
/* Parse an IGMPv3 membership report and apply every valid group record
 * to the bridge mdb.  Records with unknown types are skipped.  An empty
 * TO_INCLUDE/IS_INCLUDE record means "leave"; anything else results in a
 * group join.  When the bridge snoops in IGMPv3 mode and a port context
 * exists, per-source state (allow/block/to_in/to_ex/is_in/is_ex) is also
 * updated under br->multicast_lock and an mdb notification is sent if
 * anything changed.
 *
 * Returns 0 on success, -EINVAL on truncated/malformed packets, or the
 * error from br_ip4_multicast_add_group().
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		/* make sure the fixed part of this group record is present */
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* ... and its source list (4 bytes per IPv4 address) */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record type - skip it */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* an empty include list is a leave in v2 mode or
			 * when there's no port context to track sources on
			 */
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		/* per-source processing only applies to v3 port contexts */
		if (!pmctx || igmpv2)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip4_get(brmctx->br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}
2963
2964 #if IS_ENABLED(CONFIG_IPV6)
/* Parse an MLDv2 membership report and apply every valid group record to
 * the bridge mdb - the IPv6 counterpart of br_ip4_multicast_igmp3_report().
 * Each record's nsrcs field is read via skb_header_pointer() before the
 * record itself is pulled, since the record length depends on it.  When
 * the bridge snoops in MLDv2 mode and a port context exists, per-source
 * state is updated under br->multicast_lock and an mdb notification is
 * sent if anything changed.
 *
 * Returns 0 on success, -EINVAL on truncated/malformed packets, or the
 * error from br_ip6_multicast_add_group().
 */
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	struct mld2_report *mld2r;
	const unsigned char *src;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
		return -EINVAL;

	mld2r = (struct mld2_report *)icmp6_hdr(skb);
	num = ntohs(mld2r->mld2r_ngrec);
	len = skb_transport_offset(skb) + sizeof(*mld2r);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		/* the nsrcs field itself must lie within the ICMPv6 payload */
		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		/* full record size: header plus nsrcs in6_addr sources */
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record type - skip it */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* an empty include list is a leave in MLDv1 mode or
			 * when there's no port context to track sources on
			 */
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		/* per-source processing only applies to MLDv2 port contexts */
		if (!pmctx || mldv1)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}
3105 #endif
3106
/* Querier election: decide whether the querier that sent a query from
 * @saddr should become (or remain) the selected querier for its protocol.
 * A source address numerically lower than or equal to the currently
 * recorded one wins immediately (for IPv4 a zero recorded address means
 * "no querier yet" and also yields).  Otherwise the new querier is only
 * accepted when neither the own-query nor the other-querier timer is
 * running.  Returns true if the querier record was updated.
 */
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct br_ip *saddr)
{
	/* 0 identifies the bridge itself (no port) as the querier port */
	int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
	struct timer_list *own_timer, *other_timer;
	struct bridge_mcast_querier *querier;

	switch (saddr->proto) {
	case htons(ETH_P_IP):
		querier = &brmctx->ip4_querier;
		own_timer = &brmctx->ip4_own_query.timer;
		other_timer = &brmctx->ip4_other_query.timer;
		if (!querier->addr.src.ip4 ||
		    ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
			goto update;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		querier = &brmctx->ip6_querier;
		own_timer = &brmctx->ip6_own_query.timer;
		other_timer = &brmctx->ip6_other_query.timer;
		if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
			goto update;
		break;
#endif
	default:
		return false;
	}

	/* higher address: only take over if no election is in progress */
	if (!timer_pending(own_timer) && !timer_pending(other_timer))
		goto update;

	return false;

update:
	br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);

	return true;
}
3147
3148 static struct net_bridge_port *
__br_multicast_get_querier_port(struct net_bridge * br,const struct bridge_mcast_querier * querier)3149 __br_multicast_get_querier_port(struct net_bridge *br,
3150 const struct bridge_mcast_querier *querier)
3151 {
3152 int port_ifidx = READ_ONCE(querier->port_ifidx);
3153 struct net_bridge_port *p;
3154 struct net_device *dev;
3155
3156 if (port_ifidx == 0)
3157 return NULL;
3158
3159 dev = dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
3160 if (!dev)
3161 return NULL;
3162 p = br_port_get_rtnl_rcu(dev);
3163 if (!p || p->br != br)
3164 return NULL;
3165
3166 return p;
3167 }
3168
br_multicast_querier_state_size(void)3169 size_t br_multicast_querier_state_size(void)
3170 {
3171 return nla_total_size(0) + /* nest attribute */
3172 nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
3173 nla_total_size(sizeof(int)) + /* BRIDGE_QUERIER_IP_PORT */
3174 nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
3175 #if IS_ENABLED(CONFIG_IPV6)
3176 nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
3177 nla_total_size(sizeof(int)) + /* BRIDGE_QUERIER_IPV6_PORT */
3178 nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
3179 #endif
3180 0;
3181 }
3182
/* protected by rtnl or rcu */
/* Dump the IPv4 (and, if enabled, IPv6) querier state of @brmctx into a
 * netlink nest of type @nest_attr.  State for a protocol is emitted only
 * when the bridge is configured as querier or an other-querier timer is
 * running.  The nest is cancelled again when it ended up empty.
 *
 * Returns 0 on success or -EMSGSIZE if the skb ran out of room.
 */
int br_multicast_dump_querier_state(struct sk_buff *skb,
				    const struct net_bridge_mcast *brmctx,
				    int nest_attr)
{
	struct bridge_mcast_querier querier = {};
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return 0;

	nest = nla_nest_start(skb, nest_attr);
	if (!nest)
		return -EMSGSIZE;

	/* RCU protects the querier port lookups below; every error path
	 * must drop it before jumping to out_err
	 */
	rcu_read_lock();
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip4_other_query.timer))
		goto out_v6;

	br_multicast_read_querier(&brmctx->ip4_querier, &querier);
	if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
			    querier.addr.src.ip4)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip4_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
			       br_timer_value(&brmctx->ip4_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}

out_v6:
#if IS_ENABLED(CONFIG_IPV6)
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip6_other_query.timer))
		goto out;

	br_multicast_read_querier(&brmctx->ip6_querier, &querier);
	if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS,
			     &querier.addr.src.ip6)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip6_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER,
			       br_timer_value(&brmctx->ip6_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT,
			       p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}
out:
#endif
	rcu_read_unlock();
	nla_nest_end(skb, nest);
	/* drop the nest entirely if nothing was put into it */
	if (!nla_len(nest))
		nla_nest_cancel(skb, nest);

	return 0;

out_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
3258
/* Refresh the other-querier-present state after seeing a foreign query:
 * always re-arm the other-querier timer for a full querier interval, and
 * additionally start the delay timer with the query's max response delay
 * if no other-querier timer was running yet.
 */
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		mod_timer(&query->delay_timer, jiffies + max_delay);

	mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}
3269
/* Push the port's multicast-router state change down to switchdev so
 * offloading hardware can mirror it.  The attribute is set deferred.
 */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.orig_dev = p->dev;
	attr.id = SWITCHDEV_ATTR_ID_PORT_MROUTER;
	attr.flags = SWITCHDEV_F_DEFER;
	attr.u.mrouter = is_mc_router;

	switchdev_port_attr_set(p->dev, &attr, NULL);
}
3282
/* Map a router-list node back to its bridge port.  The node may belong to
 * either the IPv4 or (when enabled) the IPv6 router list, which embed the
 * node at different offsets in struct net_bridge_mcast_port.
 */
static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
			     struct hlist_head *mc_router_list,
			     struct hlist_node *rlist)
{
	struct net_bridge_mcast_port *pmctx;

#if IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}
3301
3302 static struct hlist_node *
br_multicast_get_rport_slot(struct net_bridge_mcast * brmctx,struct net_bridge_port * port,struct hlist_head * mc_router_list)3303 br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
3304 struct net_bridge_port *port,
3305 struct hlist_head *mc_router_list)
3306
3307 {
3308 struct hlist_node *slot = NULL;
3309 struct net_bridge_port *p;
3310 struct hlist_node *rlist;
3311
3312 hlist_for_each(rlist, mc_router_list) {
3313 p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);
3314
3315 if ((unsigned long)port >= (unsigned long)p)
3316 break;
3317
3318 slot = rlist;
3319 }
3320
3321 return slot;
3322 }
3323
br_multicast_no_router_otherpf(struct net_bridge_mcast_port * pmctx,struct hlist_node * rnode)3324 static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
3325 struct hlist_node *rnode)
3326 {
3327 #if IS_ENABLED(CONFIG_IPV6)
3328 if (rnode != &pmctx->ip6_rlist)
3329 return hlist_unhashed(&pmctx->ip6_rlist);
3330 else
3331 return hlist_unhashed(&pmctx->ip4_rlist);
3332 #else
3333 return true;
3334 #endif
3335 }
3336
/* Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	/* already on the list - nothing to do */
	if (!hlist_unhashed(rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);

	if (slot)
		hlist_add_behind_rcu(rlist, slot);
	else
		hlist_add_head_rcu(rlist, mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rlist)) {
		br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
		br_port_mc_router_state_change(pmctx->port, true);
	}
}
3367
/* IPv4 wrapper: add the port to the IPv4 router list.
 * List is maintained ordered by pointer value and locked by
 * br->multicast_lock and RCU (see br_multicast_add_router()).
 */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
				&brmctx->ip4_mc_router_list);
}
3378
/* IPv6 wrapper: add the port to the IPv6 router list (no-op without
 * CONFIG_IPV6).  List is maintained ordered by pointer value and locked
 * by br->multicast_lock and RCU (see br_multicast_add_router()).
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}
3391
/* Mark a multicast router as seen and (re-)arm its expiry @timer.
 * With no port context (@pmctx == NULL) the bridge itself is the router:
 * only refresh in TEMP_QUERY mode, raising the bridge router state when
 * it wasn't a router for either family yet.  For ports, skip those
 * administratively set to DISABLED or PERM and otherwise insert the port
 * into @mc_router_list before arming the timer.
 */
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

	if (!pmctx) {
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(brmctx->br, true);
			mod_timer(timer, now + brmctx->multicast_querier_interval);
		}
		return;
	}

	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, now + brmctx->multicast_querier_interval);
}
3420
br_ip4_multicast_mark_router(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx)3421 static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
3422 struct net_bridge_mcast_port *pmctx)
3423 {
3424 struct timer_list *timer = &brmctx->ip4_mc_router_timer;
3425 struct hlist_node *rlist = NULL;
3426
3427 if (pmctx) {
3428 timer = &pmctx->ip4_mc_router_timer;
3429 rlist = &pmctx->ip4_rlist;
3430 }
3431
3432 br_multicast_mark_router(brmctx, pmctx, timer, rlist,
3433 &brmctx->ip4_mc_router_list);
3434 }
3435
br_ip6_multicast_mark_router(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx)3436 static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
3437 struct net_bridge_mcast_port *pmctx)
3438 {
3439 #if IS_ENABLED(CONFIG_IPV6)
3440 struct timer_list *timer = &brmctx->ip6_mc_router_timer;
3441 struct hlist_node *rlist = NULL;
3442
3443 if (pmctx) {
3444 timer = &pmctx->ip6_mc_router_timer;
3445 rlist = &pmctx->ip6_rlist;
3446 }
3447
3448 br_multicast_mark_router(brmctx, pmctx, timer, rlist,
3449 &brmctx->ip6_mc_router_list);
3450 #endif
3451 }
3452
/* React to an IGMP general query from another querier: if it wins the
 * election, refresh the other-querier timers and mark the sending port
 * (or bridge) as an IPv4 multicast router.
 */
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (br_multicast_select_querier(brmctx, pmctx, saddr)) {
		br_multicast_update_query_timer(brmctx, query, max_delay);
		br_ip4_multicast_mark_router(brmctx, pmctx);
	}
}
3466
3467 #if IS_ENABLED(CONFIG_IPV6)
/* React to an MLD general query from another querier: if it wins the
 * election, refresh the other-querier timers and mark the sending port
 * (or bridge) as an IPv6 multicast router.
 */
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (br_multicast_select_querier(brmctx, pmctx, saddr)) {
		br_multicast_update_query_timer(brmctx, query, max_delay);
		br_ip6_multicast_mark_router(brmctx, pmctx);
	}
}
3481 #endif
3482
/* Handle a received IGMP query.  General queries (group == 0) feed the
 * querier election; group-specific queries lower the membership timers of
 * the matching mdb entry and its port groups so that silent members
 * expire after last_member_count * max_delay.  The query version is
 * deduced from the transport length: exactly sizeof(igmphdr) is v1/v2,
 * anything at least sizeof(igmpv3_query) is v3.
 */
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv1/v2: response time is in the code field */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* IGMPv1 (code == 0): fixed delay, always general */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		/* ignore queries with sources, and suppressed (S-flag)
		 * group-specific queries when we snoop in v3 mode
		 */
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* general query - run the querier election */
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						&brmctx->ip4_other_query,
						&saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	/* only shorten timers, never extend them */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     timer_delete_sync_try(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		/* NOTE(review): by C precedence the '&&' below binds only to
		 * the timer_delete_sync_try() branch, so the IGMP version /
		 * filter-mode check does not gate the pending-timer case -
		 * presumably intentional, but worth confirming.
		 */
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    timer_delete_sync_try(&p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}
3562
3563 #if IS_ENABLED(CONFIG_IPV6)
/* Handle a received MLD query - the IPv6 counterpart of
 * br_ip4_multicast_query().  General queries (any-address group) feed the
 * querier election; group-specific queries lower the membership timers of
 * the matching mdb entry and its port groups.  The version is deduced
 * from the transport length: exactly sizeof(mld_msg) is MLDv1, anything
 * longer is treated as MLDv2.  Returns 0 or -EINVAL on truncation.
 */
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		/* ignore suppressed (S-flag) group-specific queries when we
		 * snoop in MLDv2 mode
		 */
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		/* general query - run the querier election */
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						&brmctx->ip6_other_query,
						&saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	/* only shorten timers, never extend them */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     timer_delete_sync_try(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		/* NOTE(review): by C precedence the '&&' below binds only to
		 * the timer_delete_sync_try() branch, so the MLD version /
		 * filter-mode check does not gate the pending-timer case -
		 * presumably intentional, but worth confirming.
		 */
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    timer_delete_sync_try(&p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
	return err;
}
3652 #endif
3653
/* Process a leave for @group.  On fast-leave ports the matching
 * non-permanent port group is deleted immediately.  Otherwise, if no
 * other querier is active, optionally send our own group-specific query
 * (when we are querier) and lower the relevant membership timers so the
 * entry expires after last_member_count * last_member_interval unless a
 * report refreshes it.  With no port context the host-joined timer of the
 * mdb entry itself is lowered instead.
 */
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_mdb_ip_get(brmctx->br, group);
	if (!mp)
		goto out;

	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* fast leave: drop the matching port group right away */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* another querier is handling this network - leave it to them */
	if (timer_pending(&other_query->timer))
		goto out;

	if (brmctx->multicast_querier) {
		/* send a group-specific query to see if members remain */
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL && pmctx != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     timer_delete_sync_try(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	if (!pmctx) {
		/* host leave: only lower the entry's host-joined timer */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     timer_delete_sync_try(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		/* only shorten the timer, never extend it */
		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     timer_delete_sync_try(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
3755
/* IPv4 leave: build a br_ip key for @group and forward to
 * br_multicast_leave_group().  Link-local multicast groups are never
 * aged out, so leaves for them are ignored.
 */
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct bridge_mcast_own_query *own_query;
	struct br_ip grp;

	if (ipv4_is_local_multicast(group))
		return;

	/* zero the whole key (including padding) for mdb lookups */
	memset(&grp, 0, sizeof(grp));
	grp.proto = htons(ETH_P_IP);
	grp.dst.ip4 = group;
	grp.vid = vid;

	own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;

	br_multicast_leave_group(brmctx, pmctx, &grp,
				 &brmctx->ip4_other_query,
				 own_query, src);
}
3779
3780 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 leave: build a br_ip key for @group and forward to
 * br_multicast_leave_group().  Leaves for the link-local all-nodes
 * address are ignored.
 */
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct bridge_mcast_own_query *own_query;
	struct br_ip grp;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	/* zero the whole key (including padding) for mdb lookups */
	memset(&grp, 0, sizeof(grp));
	grp.proto = htons(ETH_P_IPV6);
	grp.dst.ip6 = *group;
	grp.vid = vid;

	own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;

	br_multicast_leave_group(brmctx, pmctx, &grp,
				 &brmctx->ip6_other_query,
				 own_query, src);
}
3804 #endif
3805
br_multicast_err_count(const struct net_bridge * br,const struct net_bridge_port * p,__be16 proto)3806 static void br_multicast_err_count(const struct net_bridge *br,
3807 const struct net_bridge_port *p,
3808 __be16 proto)
3809 {
3810 struct bridge_mcast_stats __percpu *stats;
3811 struct bridge_mcast_stats *pstats;
3812
3813 if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
3814 return;
3815
3816 if (p)
3817 stats = p->mcast_stats;
3818 else
3819 stats = br->mcast_stats;
3820 if (WARN_ON(!stats))
3821 return;
3822
3823 pstats = this_cpu_ptr(stats);
3824
3825 u64_stats_update_begin(&pstats->syncp);
3826 switch (proto) {
3827 case htons(ETH_P_IP):
3828 pstats->mstats.igmp_parse_errors++;
3829 break;
3830 #if IS_ENABLED(CONFIG_IPV6)
3831 case htons(ETH_P_IPV6):
3832 pstats->mstats.mld_parse_errors++;
3833 break;
3834 #endif
3835 }
3836 u64_stats_update_end(&pstats->syncp);
3837 }
3838
/* Inspect a packet sent to the all-PIM-routers address: if it is a valid
 * PIMv2 Hello, mark the ingress port (or the bridge) as an IPv4 multicast
 * router.
 */
static void br_multicast_pim(struct net_bridge_mcast *brmctx,
			     struct net_bridge_mcast_port *pmctx,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}
3855
br_ip4_multicast_mrd_rcv(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct sk_buff * skb)3856 static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
3857 struct net_bridge_mcast_port *pmctx,
3858 struct sk_buff *skb)
3859 {
3860 if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
3861 igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
3862 return -ENOMSG;
3863
3864 spin_lock(&brmctx->br->multicast_lock);
3865 br_ip4_multicast_mark_router(brmctx, pmctx);
3866 spin_unlock(&brmctx->br->multicast_lock);
3867
3868 return 0;
3869 }
3870
/* Snoop an IPv4 multicast packet.
 *
 * ip_mc_check_igmp() validates the packet first:
 *  - -ENOMSG: no IGMP payload; non-link-local multicast is flagged
 *    mrouters_only, while PIM hellos and MRD advertisements may still
 *    update router-port state.  Returns 0 so forwarding continues.
 *  - other negative: malformed packet; counted as a parse error and
 *    the error is propagated.
 *  - 0: valid IGMP; dispatch on the message type below.
 *
 * Returns 0 or a negative errno from validation/group processing.
 */
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* membership reports go to router ports only */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
						 src, true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
3926
3927 #if IS_ENABLED(CONFIG_IPV6)
/* Process an IPv6 Multicast Router Discovery packet: only MRD
 * Advertisements mark the source as an IPv6 multicast router.
 */
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}
3939
/* Snoop an IPv6 multicast packet; the MLD counterpart of
 * br_multicast_ipv4_rcv().
 *
 * ipv6_mc_check_mld() validates the packet:
 *  - -ENOMSG/-ENODATA: no usable MLD payload; non-all-nodes traffic is
 *    flagged mrouters_only, and -ENODATA to the all-snoopers address is
 *    checked for MRD advertisements.  Returns 0 so forwarding continues.
 *  - other negative: malformed; counted as a parse error and propagated.
 *  - 0: valid MLD; dispatch on the message type below.
 */
static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		/* membership reports go to router ports only */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
						 vid, src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
					     src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
3993 #endif
3994
/* Entry point for multicast snooping on received traffic.
 *
 * @brmctx/@pmctx are in/out: when per-vlan snooping is enabled and a
 * vlan is given, they are retargeted to the vlan's bridge- and
 * port-level multicast contexts so all further processing uses the
 * per-vlan state.  Returns 0 when snooping is disabled (globally or for
 * this vlan) or the protocol is not handled; otherwise the protocol
 * handler's result.
 */
int br_multicast_rcv(struct net_bridge_mcast **brmctx,
		     struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		/* vlan-level snooping switched off for this vlan */
		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}
4041
/* Common own-query timer handler: while still within the startup phase
 * bump the startup counter, then (re)send a general query.  Skipped for
 * vlan contexts whose snooping has been disabled.
 */
static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (br_multicast_ctx_vlan_disabled(brmctx))
		goto out;

	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, NULL, query);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
4056
/* Timer callback for the context's IPv4 own-query timer. */
static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t,
							     ip4_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip4_own_query);
}
4064
4065 #if IS_ENABLED(CONFIG_IPV6)
/* Timer callback for the context's IPv6 own-query timer. */
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t,
							     ip6_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip6_own_query);
}
4073 #endif
4074
/* Deferred destruction of multicast objects: splice the bridge's gc
 * list onto a private list under the multicast lock, then free the
 * entries outside of it via br_multicast_gc().
 */
static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}
4087
/* Initialize a multicast context with its default timing parameters and
 * set up all IGMP/MLD timers.  @vlan is NULL for the bridge-global
 * context, non-NULL for a per-vlan one.
 */
void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

	/* timing defaults in jiffies; startup interval is a quarter of
	 * the regular query interval
	 */
	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	brmctx->ip4_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_other_query.delay_timer,
		    br_multicast_query_delay_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_other_query.delay_timer,
		    br_multicast_query_delay_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}
4133
/* Tear down a multicast context by synchronously stopping its timers. */
void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}
4138
/* One-time bridge-level multicast setup: defaults, the global multicast
 * context, list heads and the deferred-gc work item.  Snooping is on by
 * default (BROPT_MULTICAST_ENABLED).
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, &br->multicast_ctx);

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}
4153
/* Join the IGMP all-snoopers group on the bridge device so that MRD
 * messages addressed to it are delivered.  Silently does nothing if
 * the device has no inet data.
 */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
4164
4165 #if IS_ENABLED(CONFIG_IPV6)
/* Join the MLD all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	/* ff02::6a - link-local all-snoopers */
	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
4173 #else
/* No-op stub when IPv6 support is compiled out. */
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
4177 #endif
4178
/* Join both the IPv4 and IPv6 all-snoopers groups.  Must be called
 * without the multicast lock held - see the comment in
 * br_multicast_toggle().
 */
void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}
4184
/* Leave the IGMP all-snoopers group.  Unlike join, a missing in_device
 * here is unexpected (we joined earlier), hence the WARN_ON.
 */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
4195
4196 #if IS_ENABLED(CONFIG_IPV6)
/* Leave the MLD all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	/* ff02::6a - link-local all-snoopers */
	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
4204 #else
/* No-op stub when IPv6 support is compiled out. */
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
4208 #endif
4209
/* Leave both all-snoopers groups; counterpart of
 * br_multicast_join_snoopers().
 */
void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
4215
__br_multicast_open_query(struct net_bridge * br,struct bridge_mcast_own_query * query)4216 static void __br_multicast_open_query(struct net_bridge *br,
4217 struct bridge_mcast_own_query *query)
4218 {
4219 query->startup_sent = 0;
4220
4221 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
4222 return;
4223
4224 mod_timer(&query->timer, jiffies);
4225 }
4226
/* Open a multicast context: kick off its IPv4 (and, when built, IPv6)
 * own-query cycles.
 */
static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}
4234
/* Start multicast processing for the bridge (RTNL held).  With per-vlan
 * snooping enabled, walk the bridge vlan entries and open each enabled
 * vlan context; otherwise open the single bridge-global context.
 */
void br_multicast_open(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				/* only bridge-level vlan entries whose
				 * snooping is not disabled
				 */
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_open(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_open(&br->multicast_ctx);
	}
}
4258
/* Synchronously stop every timer belonging to a multicast context. */
static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	timer_delete_sync(&brmctx->ip4_mc_router_timer);
	timer_delete_sync(&brmctx->ip4_other_query.timer);
	timer_delete_sync(&brmctx->ip4_other_query.delay_timer);
	timer_delete_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	timer_delete_sync(&brmctx->ip6_mc_router_timer);
	timer_delete_sync(&brmctx->ip6_other_query.timer);
	timer_delete_sync(&brmctx->ip6_other_query.delay_timer);
	timer_delete_sync(&brmctx->ip6_own_query.timer);
#endif
}
4272
/* React to a port vlan's state change @state: when per-vlan snooping is
 * active and the new state allows forwarding, enable the vlan's port
 * multicast context.  Master (bridge-level) vlans are not affected.
 */
void br_multicast_update_vlan_mcast_ctx(struct net_bridge_vlan *v, u8 state)
{
#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
	struct net_bridge *br;

	if (!br_vlan_should_use(v))
		return;

	if (br_vlan_is_master(v))
		return;

	br = v->port->br;

	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		return;

	if (br_vlan_state_allowed(state, true))
		br_multicast_enable_port_ctx(&v->port_mcast_ctx);

	/* Multicast is not disabled for the vlan when it goes in
	 * blocking state because the timers will expire and stop by
	 * themselves without sending more queries.
	 */
#endif
}
4298
/* Enable/disable multicast snooping for a single vlan entry.  For a
 * master (bridge-level) vlan this opens/stops its own context; for a
 * port vlan it enables/disables the per-port context.  The
 * BR_VLFLAG_MCAST_ENABLED flag is flipped under the multicast lock.
 */
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		/* nothing to do for non-bridge entries or when the vlan's
		 * snooping is globally disabled
		 */
		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}
4343
/* Toggle multicast snooping for a master vlan and all of its per-port
 * instances across the bridge's ports.
 */
static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge_port *p;

	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(p, &vlan->br->port_list, list) {
		struct net_bridge_vlan *vport;

		vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
		if (!vport)
			continue;
		br_multicast_toggle_one_vlan(vport, on);
	}

	/* finally the bridge-level entry itself, if present */
	if (br_vlan_is_brentry(vlan))
		br_multicast_toggle_one_vlan(vlan, on);
}
4363
/* Switch between global and per-vlan multicast snooping.  Requires vlan
 * filtering to be enabled when turning vlan snooping on.  The non-vlan
 * contexts are stopped (or reopened) accordingly, then every vlan is
 * toggled.  Returns 0 on success or -EINVAL.
 */
int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port_ctx(&p->multicast_ctx);
		else
			br_multicast_enable_port_ctx(&p->multicast_ctx);
	}

	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}
4402
/* Flip the per-vlan global snooping flag and propagate the change to
 * the vlan's contexts.  Returns false when the flag already had the
 * requested value, true when it was changed.
 */
bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

	return true;
}
4418
/* Stop multicast processing for the bridge (RTNL held); mirror of
 * br_multicast_open() - stops either every enabled vlan context or the
 * bridge-global one.
 */
void br_multicast_stop(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_stop(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_stop(&br->multicast_ctx);
	}
}
4442
/* Final multicast teardown when the bridge device is deleted: remove
 * every mdb entry, run the pending gc inline, cancel the gc work, and
 * wait out RCU grace periods.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_ctx_deinit(&br->multicast_ctx);
	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	/* NOTE(review): presumably waits for outstanding call_rcu
	 * callbacks on mdb objects before the bridge is freed - confirm
	 */
	rcu_barrier();
}
4461
/* Set the bridge-level multicast router mode for @brmctx.  DISABLED and
 * PERM stop the learned-router timers and fix the state; TEMP_QUERY
 * returns to query-based learning.  Returns 0, or -EINVAL for an
 * unsupported value (e.g. TEMP at bridge level).
 */
int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&brmctx->br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM);
		timer_delete(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
		timer_delete(&brmctx->ip6_mc_router_timer);
#endif
		brmctx->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(brmctx->br, false);
		brmctx->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}
4491
/* Send an RTM_DELMDB router-port notification once @pmctx has been
 * removed from both the IPv4 and IPv6 router lists (@deleted tells
 * whether any removal actually happened).
 */
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
	if (!deleted)
		return;

	/* For backwards compatibility for now, only notify if there is
	 * no multicast router anymore for both IPv4 and IPv6.
	 */
	if (!hlist_unhashed(&pmctx->ip4_rlist))
		return;
#if IS_ENABLED(CONFIG_IPV6)
	if (!hlist_unhashed(&pmctx->ip6_rlist))
		return;
#endif

	br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB);
	br_port_mc_router_state_change(pmctx->port, false);

	/* don't allow timer refresh */
	if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
4515
/* Set the per-port multicast router mode.  Re-setting the current value
 * only refreshes the TEMP router timers; otherwise the router lists and
 * timers are updated to match the new mode.  Returns 0 or -EINVAL.
 */
int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
				 unsigned long val)
{
	struct net_bridge_mcast *brmctx;
	unsigned long now = jiffies;
	int err = -EINVAL;
	bool del = false;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	spin_lock_bh(&brmctx->br->multicast_lock);
	if (pmctx->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
			mod_timer(&pmctx->ip4_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
			mod_timer(&pmctx->ip6_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#endif
		}
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		/* never a router port: drop from router lists, stop timers */
		pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
		del |= br_ip4_multicast_rport_del(pmctx);
		timer_delete(&pmctx->ip4_mc_router_timer);
		del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		timer_delete(&pmctx->ip6_mc_router_timer);
#endif
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* back to query-based learning */
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		del |= br_ip4_multicast_rport_del(pmctx);
		del |= br_ip6_multicast_rport_del(pmctx);
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_PERM:
		/* permanent router port: no timers, always listed */
		pmctx->multicast_router = MDB_RTR_TYPE_PERM;
		timer_delete(&pmctx->ip4_mc_router_timer);
		br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		timer_delete(&pmctx->ip6_mc_router_timer);
#endif
		br_ip6_multicast_add_router(brmctx, pmctx);
		break;
	case MDB_RTR_TYPE_TEMP:
		/* temporary router port, refreshed by router traffic */
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
		br_ip4_multicast_mark_router(brmctx, pmctx);
		br_ip6_multicast_mark_router(brmctx, pmctx);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}
4579
/* Route a vlan's mcast_router change to the appropriate context: the
 * vlan's own bridge-level context for master vlans, its per-port
 * context otherwise.  Returns the underlying setter's result.
 */
int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router)
{
	if (br_vlan_is_master(v))
		return br_multicast_set_router(&v->br_mcast_ctx, mcast_router);

	return br_multicast_set_port_router(&v->port_mcast_ctx, mcast_router);
}
4592
/* (Re)start own queries for @brmctx and enable the matching own-query
 * state on every active port.  Per-vlan contexts resolve the query
 * state through each port's vlan entry; the walk is done under RCU.
 */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	/* ignore contexts that don't match the current snooping mode */
	if (!br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

	__br_multicast_open_query(brmctx->br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
		struct bridge_mcast_own_query *ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
		struct bridge_mcast_own_query *ip6_own_query;
#endif

		if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx))
			continue;

		if (br_multicast_ctx_is_vlan(brmctx)) {
			struct net_bridge_vlan *vlan;

			vlan = br_vlan_find(nbp_vlan_group_rcu(port),
					    brmctx->vlan->vid);
			if (!vlan ||
			    br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx))
				continue;

			ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query;
#endif
		} else {
			ip4_own_query = &port->multicast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &port->multicast_ctx.ip6_own_query;
#endif
		}

		/* @query identifies which protocol family is being started */
		if (query == &brmctx->ip4_own_query)
			br_multicast_enable(ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(ip6_own_query);
#endif
	}
	rcu_read_unlock();
}
4642
/* Tear down multicast group state on every port of the bridge. */
static void br_multicast_del_grps(struct net_bridge *br)
{
	struct net_bridge_port *port;

	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_disable_port_ctx(&port->multicast_ctx);
}
4650
/* Enable/disable multicast snooping on the bridge
 * (BROPT_MULTICAST_ENABLED).  Disabling tears down per-port group
 * state; enabling (when the device is running) reopens the contexts and
 * per-port state.  Joining/leaving the all-snoopers groups happens
 * after the lock is dropped - see the comment below.  Returns 0 or a
 * negative errno from the switchdev notification.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port *port;
	bool change_snoopers = false;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	/* drivers that can't offload are tolerated (-EOPNOTSUPP) */
	err = br_mc_disabled_update(br->dev, val, extack);
	if (err == -EOPNOTSUPP)
		err = 0;
	if (err)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		br_multicast_del_grps(br);
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port_ctx(&port->multicast_ctx);

	change_snoopers = true;

unlock:
	spin_unlock_bh(&br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled. Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return err;
}
4708
br_multicast_enabled(const struct net_device * dev)4709 bool br_multicast_enabled(const struct net_device *dev)
4710 {
4711 struct net_bridge *br = netdev_priv(dev);
4712
4713 return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
4714 }
4715 EXPORT_SYMBOL_GPL(br_multicast_enabled);
4716
/* Report whether the bridge itself currently acts as a multicast
 * router, evaluated for the global context under the multicast lock.
 */
bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);
4728
/* Enable or disable the bridge's own IGMP/MLD querier for @brmctx.
 * When enabling and no other querier is currently known (no pending
 * other_query timer), arm the delay timer for one response interval
 * before our queries take effect, then start own queries for both
 * protocol families.  Always returns 0.
 */
int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&brmctx->br->multicast_lock);
	if (brmctx->multicast_querier == val)
		goto unlock;

	WRITE_ONCE(brmctx->multicast_querier, val);
	if (!val)
		goto unlock;

	max_delay = brmctx->multicast_query_response_interval;

	if (!timer_pending(&brmctx->ip4_other_query.timer))
		mod_timer(&brmctx->ip4_other_query.delay_timer,
			  jiffies + max_delay);

	br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&brmctx->ip6_other_query.timer))
		mod_timer(&brmctx->ip6_other_query.delay_timer,
			  jiffies + max_delay);

	br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}
4764
br_multicast_set_igmp_version(struct net_bridge_mcast * brmctx,unsigned long val)4765 int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx,
4766 unsigned long val)
4767 {
4768 /* Currently we support only version 2 and 3 */
4769 switch (val) {
4770 case 2:
4771 case 3:
4772 break;
4773 default:
4774 return -EINVAL;
4775 }
4776
4777 spin_lock_bh(&brmctx->br->multicast_lock);
4778 brmctx->multicast_igmp_version = val;
4779 spin_unlock_bh(&brmctx->br->multicast_lock);
4780
4781 return 0;
4782 }
4783
4784 #if IS_ENABLED(CONFIG_IPV6)
br_multicast_set_mld_version(struct net_bridge_mcast * brmctx,unsigned long val)4785 int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
4786 unsigned long val)
4787 {
4788 /* Currently we support version 1 and 2 */
4789 switch (val) {
4790 case 1:
4791 case 2:
4792 break;
4793 default:
4794 return -EINVAL;
4795 }
4796
4797 spin_lock_bh(&brmctx->br->multicast_lock);
4798 brmctx->multicast_mld_version = val;
4799 spin_unlock_bh(&brmctx->br->multicast_lock);
4800
4801 return 0;
4802 }
4803 #endif
4804
/* Set the multicast query interval from a clock_t value, clamped to
 * [BR_MULTICAST_QUERY_INTVL_MIN, BR_MULTICAST_QUERY_INTVL_MAX]; a
 * clamped value is logged via br_info().
 */
void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
	}

	if (intvl_jiffies > BR_MULTICAST_QUERY_INTVL_MAX) {
		br_info(brmctx->br,
			"trying to set multicast query interval above maximum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MAX),
			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MAX));
		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MAX;
	}

	brmctx->multicast_query_interval = intvl_jiffies;
}
4828
br_multicast_set_startup_query_intvl(struct net_bridge_mcast * brmctx,unsigned long val)4829 void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
4830 unsigned long val)
4831 {
4832 unsigned long intvl_jiffies = clock_t_to_jiffies(val);
4833
4834 if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
4835 br_info(brmctx->br,
4836 "trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
4837 jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
4838 jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
4839 intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
4840 }
4841
4842 if (intvl_jiffies > BR_MULTICAST_STARTUP_QUERY_INTVL_MAX) {
4843 br_info(brmctx->br,
4844 "trying to set multicast startup query interval above maximum, setting to %lu (%ums)\n",
4845 jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX),
4846 jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX));
4847 intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MAX;
4848 }
4849
4850 brmctx->multicast_startup_query_interval = intvl_jiffies;
4851 }
4852
/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	/* Walk all ports of the same bridge, skipping @dev itself; note that
	 * 'port' is reused as the iteration cursor from here on.
	 */
	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			/* GFP_ATOMIC: we allocate under rcu_read_lock() */
			entry = kmalloc_obj(*entry, GFP_ATOMIC);
			if (!entry)
				/* OOM: return the partial count so far */
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
4909
4910 /**
4911 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
4912 * @dev: The bridge port providing the bridge on which to check for a querier
4913 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4914 *
4915 * Checks whether the given interface has a bridge on top and if so returns
4916 * true if a valid querier exists anywhere on the bridged link layer.
4917 * Otherwise returns false.
4918 */
br_multicast_has_querier_anywhere(struct net_device * dev,int proto)4919 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
4920 {
4921 struct net_bridge *br;
4922 struct net_bridge_port *port;
4923 struct ethhdr eth;
4924 bool ret = false;
4925
4926 rcu_read_lock();
4927 if (!netif_is_bridge_port(dev))
4928 goto unlock;
4929
4930 port = br_port_get_rcu(dev);
4931 if (!port || !port->br)
4932 goto unlock;
4933
4934 br = port->br;
4935
4936 memset(ð, 0, sizeof(eth));
4937 eth.h_proto = htons(proto);
4938
4939 ret = br_multicast_querier_exists(&br->multicast_ctx, ð, NULL);
4940
4941 unlock:
4942 rcu_read_unlock();
4943 return ret;
4944 }
4945 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
4946
/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast *brmctx;
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;
	int port_ifidx;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;
	brmctx = &br->multicast_ctx;

	switch (proto) {
	case ETH_P_IP:
		/* A querier counts as "adjacent" only while the other-querier
		 * timer is still pending and the selected querier was not
		 * seen on this very port.
		 */
		port_ifidx = brmctx->ip4_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip4_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		/* Same check as above, on the MLD querier state */
		port_ifidx = brmctx->ip6_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip6_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#endif
	default:
		/* Unknown family (or ETH_P_IPV6 with CONFIG_IPV6 disabled) */
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
5000
/**
 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
 * @dev: The bridge port adjacent to which to check for a multicast router
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a multicast router is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	port = br_port_get_check_rcu(dev);
	if (!port)
		goto unlock;

	brmctx = &port->br->multicast_ctx;
	switch (proto) {
	case ETH_P_IP:
		/* Any IPv4 multicast router on a port other than @dev? */
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
					 ip4_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		/* Same check, on the IPv6 multicast router list */
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
					 ip6_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#endif
	default:
		/* when compiled without IPv6 support, be conservative and
		 * always assume presence of an IPv6 multicast router
		 */
		ret = true;
	}

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);
5058
/* Account one IGMP/MLD packet of @type in the per-cpu @stats, in direction
 * @dir. The caller already classified the packet type; this function only
 * needs to tell the different query versions apart.
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* transport payload length = total length - IP header */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			/* Anything other than a plain igmphdr is a v3 query */
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				/* v1 queries carry a zero max-resp code */
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* payload after any IPv6 extension headers */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* Anything longer than a plain mld_msg is a v2 query */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}
5129
/* Count one IGMP/MLD packet of @type in direction @dir, using the per-port
 * statistics when @p is given or the bridge-level ones otherwise. No-op
 * when @type is zero or multicast stats are disabled on @br.
 */
void br_multicast_count(struct net_bridge *br,
			const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	stats = p ? p->mcast_stats : br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}
5149
br_multicast_init_stats(struct net_bridge * br)5150 int br_multicast_init_stats(struct net_bridge *br)
5151 {
5152 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
5153 if (!br->mcast_stats)
5154 return -ENOMEM;
5155
5156 return 0;
5157 }
5158
/* Release the per-cpu statistics allocated by br_multicast_init_stats() */
void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}
5163
5164 /* noinline for https://llvm.org/pr45802#c9 */
mcast_stats_add_dir(u64 * dst,u64 * src)5165 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
5166 {
5167 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
5168 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
5169 }
5170
/* Aggregate the per-cpu multicast statistics of @p (or of @br when @p is
 * NULL) across all possible CPUs into @dest.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	/* Per-port stats when a port is given, bridge-level stats otherwise */
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* Take a consistent snapshot of this CPU's counters; retry
		 * if a writer updated them concurrently.
		 */
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			u64_stats_copy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}
5216
br_mdb_hash_init(struct net_bridge * br)5217 int br_mdb_hash_init(struct net_bridge *br)
5218 {
5219 int err;
5220
5221 err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
5222 if (err)
5223 return err;
5224
5225 err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
5226 if (err) {
5227 rhashtable_destroy(&br->sg_port_tbl);
5228 return err;
5229 }
5230
5231 return 0;
5232 }
5233
/* Destroy the rhashtables set up by br_mdb_hash_init() */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}
5239