xref: /linux/net/bridge/br_multicast_eht.c (revision be54f8c558027a218423134dd9b8c7c46d92204a)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 // Copyright (c) 2020, Nikolay Aleksandrov <nikolay@nvidia.com>
3 #include <linux/err.h>
4 #include <linux/export.h>
5 #include <linux/if_ether.h>
6 #include <linux/igmp.h>
7 #include <linux/in.h>
8 #include <linux/jhash.h>
9 #include <linux/kernel.h>
10 #include <linux/log2.h>
11 #include <linux/netdevice.h>
12 #include <linux/netfilter_bridge.h>
13 #include <linux/random.h>
14 #include <linux/rculist.h>
15 #include <linux/skbuff.h>
16 #include <linux/slab.h>
17 #include <linux/timer.h>
18 #include <linux/inetdevice.h>
19 #include <linux/mroute.h>
20 #include <net/ip.h>
21 #include <net/switchdev.h>
22 #if IS_ENABLED(CONFIG_IPV6)
23 #include <linux/icmpv6.h>
24 #include <net/ipv6.h>
25 #include <net/mld.h>
26 #include <net/ip6_checksum.h>
27 #include <net/addrconf.h>
28 #endif
29 
30 #include "br_private.h"
31 #include "br_private_mcast_eht.h"
32 
33 static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
34 					   union net_bridge_eht_addr *src_addr,
35 					   union net_bridge_eht_addr *h_addr);
36 static void br_multicast_create_eht_set_entry(const struct net_bridge_mcast *brmctx,
37 					      struct net_bridge_port_group *pg,
38 					      union net_bridge_eht_addr *src_addr,
39 					      union net_bridge_eht_addr *h_addr,
40 					      int filter_mode,
41 					      bool allow_zero_src);
42 
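/* Look up the EHT host entry for @h_addr in the port group's host rbtree.
 * Hosts are ordered by a full memcmp() of the address union, so callers
 * zero-fill unused bytes (see br_multicast_eht_handle()). Returns NULL if
 * the host isn't tracked yet.
 */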
43 static struct net_bridge_group_eht_host *
44 br_multicast_eht_host_lookup(struct net_bridge_port_group *pg,
45 			     union net_bridge_eht_addr *h_addr)
46 {
47 	struct rb_node *node = pg->eht_host_tree.rb_node;
48 
49 	while (node) {
50 		struct net_bridge_group_eht_host *this;
51 		int result;
52 
53 		this = rb_entry(node, struct net_bridge_group_eht_host,
54 				rb_node);
55 		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
56 		if (result < 0)
57 			node = node->rb_left;
58 		else if (result > 0)
59 			node = node->rb_right;
60 		else
61 			return this;
62 	}
63 
64 	return NULL;
65 }
66 
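/* Return the tracked host's current filter mode; a host we don't know
 * about yet is treated as MCAST_INCLUDE.
 */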
67 static int br_multicast_eht_host_filter_mode(struct net_bridge_port_group *pg,
68 					     union net_bridge_eht_addr *h_addr)
69 {
70 	struct net_bridge_group_eht_host *eht_host;
71 
72 	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
73 	if (!eht_host)
74 		return MCAST_INCLUDE;
75 
76 	return eht_host->filter_mode;
77 }
78 
79 static struct net_bridge_group_eht_set_entry *
80 br_multicast_eht_set_entry_lookup(struct net_bridge_group_eht_set *eht_set,
81 				  union net_bridge_eht_addr *h_addr)
82 {
83 	struct rb_node *node = eht_set->entry_tree.rb_node;
84 
85 	while (node) {
86 		struct net_bridge_group_eht_set_entry *this;
87 		int result;
88 
89 		this = rb_entry(node, struct net_bridge_group_eht_set_entry,
90 				rb_node);
91 		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
92 		if (result < 0)
93 			node = node->rb_left;
94 		else if (result > 0)
95 			node = node->rb_right;
96 		else
97 			return this;
98 	}
99 
100 	return NULL;
101 }
102 
103 static struct net_bridge_group_eht_set *
104 br_multicast_eht_set_lookup(struct net_bridge_port_group *pg,
105 			    union net_bridge_eht_addr *src_addr)
106 {
107 	struct rb_node *node = pg->eht_set_tree.rb_node;
108 
109 	while (node) {
110 		struct net_bridge_group_eht_set *this;
111 		int result;
112 
113 		this = rb_entry(node, struct net_bridge_group_eht_set,
114 				rb_node);
115 		result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
116 		if (result < 0)
117 			node = node->rb_left;
118 		else if (result > 0)
119 			node = node->rb_right;
120 		else
121 			return this;
122 	}
123 
124 	return NULL;
125 }
126 
127 static void __eht_destroy_host(struct net_bridge_group_eht_host *eht_host)
128 {
129 	WARN_ON(!hlist_empty(&eht_host->set_entries));
130 
131 	br_multicast_eht_hosts_dec(eht_host->pg);
132 
133 	rb_erase(&eht_host->rb_node, &eht_host->pg->eht_host_tree);
134 	RB_CLEAR_NODE(&eht_host->rb_node);
135 	kfree(eht_host);
136 }
137 
138 static void br_multicast_destroy_eht_set_entry(struct net_bridge_mcast_gc *gc)
139 {
140 	struct net_bridge_group_eht_set_entry *set_h;
141 
142 	set_h = container_of(gc, struct net_bridge_group_eht_set_entry, mcast_gc);
143 	WARN_ON(!RB_EMPTY_NODE(&set_h->rb_node));
144 
145 	timer_shutdown_sync(&set_h->timer);
146 	kfree(set_h);
147 }
148 
149 static void br_multicast_destroy_eht_set(struct net_bridge_mcast_gc *gc)
150 {
151 	struct net_bridge_group_eht_set *eht_set;
152 
153 	eht_set = container_of(gc, struct net_bridge_group_eht_set, mcast_gc);
154 	WARN_ON(!RB_EMPTY_NODE(&eht_set->rb_node));
155 	WARN_ON(!RB_EMPTY_ROOT(&eht_set->entry_tree));
156 
157 	timer_shutdown_sync(&eht_set->timer);
158 	kfree(eht_set);
159 }
160 
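/* Unlink a set entry from its source set and from its host, adjust the
 * host's entry count (the auto-created zero-source entry is not counted)
 * and queue the entry for deferred destruction. The host itself is
 * destroyed once its last entry is gone.
 */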
161 static void __eht_del_set_entry(struct net_bridge_group_eht_set_entry *set_h)
162 {
163 	struct net_bridge_group_eht_host *eht_host = set_h->h_parent;
164 	union net_bridge_eht_addr zero_addr;
165 
166 	rb_erase(&set_h->rb_node, &set_h->eht_set->entry_tree);
167 	RB_CLEAR_NODE(&set_h->rb_node);
168 	hlist_del_init(&set_h->host_list);
169 	memset(&zero_addr, 0, sizeof(zero_addr));
170 	if (memcmp(&set_h->h_addr, &zero_addr, sizeof(zero_addr)))
171 		eht_host->num_entries--;
172 	hlist_add_head(&set_h->mcast_gc.gc_node, &set_h->br->mcast_gc_list);
173 	queue_work(system_long_wq, &set_h->br->mcast_gc_work);
174 
175 	if (hlist_empty(&eht_host->set_entries))
176 		__eht_destroy_host(eht_host);
177 }
178 
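/* Flush all entries of a source set, then unlink the set from the port
 * group and queue it for deferred destruction via the mcast GC work.
 */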
179 static void br_multicast_del_eht_set(struct net_bridge_group_eht_set *eht_set)
180 {
181 	struct net_bridge_group_eht_set_entry *set_h;
182 	struct rb_node *node;
183 
184 	while ((node = rb_first(&eht_set->entry_tree))) {
185 		set_h = rb_entry(node, struct net_bridge_group_eht_set_entry,
186 				 rb_node);
187 		__eht_del_set_entry(set_h);
188 	}
189 
190 	rb_erase(&eht_set->rb_node, &eht_set->pg->eht_set_tree);
191 	RB_CLEAR_NODE(&eht_set->rb_node);
192 	hlist_add_head(&eht_set->mcast_gc.gc_node, &eht_set->br->mcast_gc_list);
193 	queue_work(system_long_wq, &eht_set->br->mcast_gc_work);
194 }
195 
196 void br_multicast_eht_clean_sets(struct net_bridge_port_group *pg)
197 {
198 	struct net_bridge_group_eht_set *eht_set;
199 	struct rb_node *node;
200 
201 	while ((node = rb_first(&pg->eht_set_tree))) {
202 		eht_set = rb_entry(node, struct net_bridge_group_eht_set,
203 				   rb_node);
204 		br_multicast_del_eht_set(eht_set);
205 	}
206 }
207 
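/* Per-entry expiry timer: delete the (source, host) entry unless it was
 * already unlinked or its timer has been re-armed in the meantime.
 */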
208 static void br_multicast_eht_set_entry_expired(struct timer_list *t)
209 {
210 	struct net_bridge_group_eht_set_entry *set_h = timer_container_of(set_h,
211 									  t,
212 									  timer);
213 	struct net_bridge *br = set_h->br;
214 
215 	spin_lock(&br->multicast_lock);
216 	if (RB_EMPTY_NODE(&set_h->rb_node) || timer_pending(&set_h->timer))
217 		goto out;
218 
219 	br_multicast_del_eht_set_entry(set_h->eht_set->pg,
220 				       &set_h->eht_set->src_addr,
221 				       &set_h->h_addr);
222 out:
223 	spin_unlock(&br->multicast_lock);
224 }
225 
226 static void br_multicast_eht_set_expired(struct timer_list *t)
227 {
228 	struct net_bridge_group_eht_set *eht_set = timer_container_of(eht_set,
229 								      t,
230 								      timer);
231 	struct net_bridge *br = eht_set->br;
232 
233 	spin_lock(&br->multicast_lock);
234 	if (RB_EMPTY_NODE(&eht_set->rb_node) || timer_pending(&eht_set->timer))
235 		goto out;
236 
237 	br_multicast_del_eht_set(eht_set);
238 out:
239 	spin_unlock(&br->multicast_lock);
240 }
241 
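/* Find or create the host entry for @h_addr. Creation is refused once the
 * per-port EHT hosts limit is reached, and uses GFP_ATOMIC as this runs
 * under the bridge multicast lock.
 */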
242 static struct net_bridge_group_eht_host *
243 __eht_lookup_create_host(struct net_bridge_port_group *pg,
244 			 union net_bridge_eht_addr *h_addr,
245 			 unsigned char filter_mode)
246 {
247 	struct rb_node **link = &pg->eht_host_tree.rb_node, *parent = NULL;
248 	struct net_bridge_group_eht_host *eht_host;
249 
250 	while (*link) {
251 		struct net_bridge_group_eht_host *this;
252 		int result;
253 
254 		this = rb_entry(*link, struct net_bridge_group_eht_host,
255 				rb_node);
256 		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
257 		parent = *link;
258 		if (result < 0)
259 			link = &((*link)->rb_left);
260 		else if (result > 0)
261 			link = &((*link)->rb_right);
262 		else
263 			return this;
264 	}
265 
266 	if (br_multicast_eht_hosts_over_limit(pg))
267 		return NULL;
268 
269 	eht_host = kzalloc(sizeof(*eht_host), GFP_ATOMIC);
270 	if (!eht_host)
271 		return NULL;
272 
273 	memcpy(&eht_host->h_addr, h_addr, sizeof(*h_addr));
274 	INIT_HLIST_HEAD(&eht_host->set_entries);
275 	eht_host->pg = pg;
276 	eht_host->filter_mode = filter_mode;
277 
278 	rb_link_node(&eht_host->rb_node, parent, link);
279 	rb_insert_color(&eht_host->rb_node, &pg->eht_host_tree);
280 
281 	br_multicast_eht_hosts_inc(pg);
282 
283 	return eht_host;
284 }
285 
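/* Find or create the entry for @eht_host within @eht_set. Except for the
 * auto-created zero-source entry, a host is limited to PG_SRC_ENT_LIMIT
 * entries.
 */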
286 static struct net_bridge_group_eht_set_entry *
287 __eht_lookup_create_set_entry(struct net_bridge *br,
288 			      struct net_bridge_group_eht_set *eht_set,
289 			      struct net_bridge_group_eht_host *eht_host,
290 			      bool allow_zero_src)
291 {
292 	struct rb_node **link = &eht_set->entry_tree.rb_node, *parent = NULL;
293 	struct net_bridge_group_eht_set_entry *set_h;
294 
295 	while (*link) {
296 		struct net_bridge_group_eht_set_entry *this;
297 		int result;
298 
299 		this = rb_entry(*link, struct net_bridge_group_eht_set_entry,
300 				rb_node);
301 		result = memcmp(&eht_host->h_addr, &this->h_addr,
302 				sizeof(union net_bridge_eht_addr));
303 		parent = *link;
304 		if (result < 0)
305 			link = &((*link)->rb_left);
306 		else if (result > 0)
307 			link = &((*link)->rb_right);
308 		else
309 			return this;
310 	}
311 
312 	/* always allow auto-created zero entry */
313 	if (!allow_zero_src && eht_host->num_entries >= PG_SRC_ENT_LIMIT)
314 		return NULL;
315 
316 	set_h = kzalloc(sizeof(*set_h), GFP_ATOMIC);
317 	if (!set_h)
318 		return NULL;
319 
320 	memcpy(&set_h->h_addr, &eht_host->h_addr,
321 	       sizeof(union net_bridge_eht_addr));
322 	set_h->mcast_gc.destroy = br_multicast_destroy_eht_set_entry;
323 	set_h->eht_set = eht_set;
324 	set_h->h_parent = eht_host;
325 	set_h->br = br;
326 	timer_setup(&set_h->timer, br_multicast_eht_set_entry_expired, 0);
327 
328 	hlist_add_head(&set_h->host_list, &eht_host->set_entries);
329 	rb_link_node(&set_h->rb_node, parent, link);
330 	rb_insert_color(&set_h->rb_node, &eht_set->entry_tree);
331 	/* we must not count the auto-created zero entry otherwise we won't be
332 	 * able to track the full list of PG_SRC_ENT_LIMIT entries
333 	 */
334 	if (!allow_zero_src)
335 		eht_host->num_entries++;
336 
337 	return set_h;
338 }
339 
340 static struct net_bridge_group_eht_set *
341 __eht_lookup_create_set(struct net_bridge_port_group *pg,
342 			union net_bridge_eht_addr *src_addr)
343 {
344 	struct rb_node **link = &pg->eht_set_tree.rb_node, *parent = NULL;
345 	struct net_bridge_group_eht_set *eht_set;
346 
347 	while (*link) {
348 		struct net_bridge_group_eht_set *this;
349 		int result;
350 
351 		this = rb_entry(*link, struct net_bridge_group_eht_set,
352 				rb_node);
353 		result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
354 		parent = *link;
355 		if (result < 0)
356 			link = &((*link)->rb_left);
357 		else if (result > 0)
358 			link = &((*link)->rb_right);
359 		else
360 			return this;
361 	}
362 
363 	eht_set = kzalloc(sizeof(*eht_set), GFP_ATOMIC);
364 	if (!eht_set)
365 		return NULL;
366 
367 	memcpy(&eht_set->src_addr, src_addr, sizeof(*src_addr));
368 	eht_set->mcast_gc.destroy = br_multicast_destroy_eht_set;
369 	eht_set->pg = pg;
370 	eht_set->br = pg->key.port->br;
371 	eht_set->entry_tree = RB_ROOT;
372 	timer_setup(&eht_set->timer, br_multicast_eht_set_expired, 0);
373 
374 	rb_link_node(&eht_set->rb_node, parent, link);
375 	rb_insert_color(&eht_set->rb_node, &pg->eht_set_tree);
376 
377 	return eht_set;
378 }
379 
380 static void br_multicast_ip_src_to_eht_addr(const struct br_ip *src,
381 					    union net_bridge_eht_addr *dest)
382 {
383 	switch (src->proto) {
384 	case htons(ETH_P_IP):
385 		dest->ip4 = src->src.ip4;
386 		break;
387 #if IS_ENABLED(CONFIG_IPV6)
388 	case htons(ETH_P_IPV6):
389 		memcpy(&dest->ip6, &src->src.ip6, sizeof(struct in6_addr));
390 		break;
391 #endif
392 	}
393 }
394 
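/* Switch a tracked host to @filter_mode. EXCLUDE mode is backed by an
 * auto-created zero-source set entry, which is added when moving to
 * EXCLUDE and removed when moving back to INCLUDE.
 */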
395 static void br_eht_convert_host_filter_mode(const struct net_bridge_mcast *brmctx,
396 					    struct net_bridge_port_group *pg,
397 					    union net_bridge_eht_addr *h_addr,
398 					    int filter_mode)
399 {
400 	struct net_bridge_group_eht_host *eht_host;
401 	union net_bridge_eht_addr zero_addr;
402 
403 	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
404 	if (eht_host)
405 		eht_host->filter_mode = filter_mode;
406 
407 	memset(&zero_addr, 0, sizeof(zero_addr));
408 	switch (filter_mode) {
409 	case MCAST_INCLUDE:
410 		br_multicast_del_eht_set_entry(pg, &zero_addr, h_addr);
411 		break;
412 	case MCAST_EXCLUDE:
413 		br_multicast_create_eht_set_entry(brmctx, pg, &zero_addr,
414 						  h_addr, MCAST_EXCLUDE,
415 						  true);
416 		break;
417 	}
418 }
419 
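/* Create (or refresh) the source set, host and set entry for a reported
 * source and arm both the entry and set timers to the group membership
 * interval. Newly created but empty structures are unwound on failure.
 */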
420 static void br_multicast_create_eht_set_entry(const struct net_bridge_mcast *brmctx,
421 					      struct net_bridge_port_group *pg,
422 					      union net_bridge_eht_addr *src_addr,
423 					      union net_bridge_eht_addr *h_addr,
424 					      int filter_mode,
425 					      bool allow_zero_src)
426 {
427 	struct net_bridge_group_eht_set_entry *set_h;
428 	struct net_bridge_group_eht_host *eht_host;
429 	struct net_bridge *br = pg->key.port->br;
430 	struct net_bridge_group_eht_set *eht_set;
431 	union net_bridge_eht_addr zero_addr;
432 
433 	memset(&zero_addr, 0, sizeof(zero_addr));
434 	if (!allow_zero_src && !memcmp(src_addr, &zero_addr, sizeof(zero_addr)))
435 		return;
436 
437 	eht_set = __eht_lookup_create_set(pg, src_addr);
438 	if (!eht_set)
439 		return;
440 
441 	eht_host = __eht_lookup_create_host(pg, h_addr, filter_mode);
442 	if (!eht_host)
443 		goto fail_host;
444 
445 	set_h = __eht_lookup_create_set_entry(br, eht_set, eht_host,
446 					      allow_zero_src);
447 	if (!set_h)
448 		goto fail_set_entry;
449 
450 	mod_timer(&set_h->timer, jiffies + br_multicast_gmi(brmctx));
451 	mod_timer(&eht_set->timer, jiffies + br_multicast_gmi(brmctx));
452 
453 	return;
454 
455 fail_set_entry:
456 	if (hlist_empty(&eht_host->set_entries))
457 		__eht_destroy_host(eht_host);
458 fail_host:
459 	if (RB_EMPTY_ROOT(&eht_set->entry_tree))
460 		br_multicast_del_eht_set(eht_set);
461 }
462 
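/* Delete the entry for @h_addr from the set keyed by @src_addr. Returns
 * true if this left the source set empty and the set itself was deleted.
 */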
463 static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
464 					   union net_bridge_eht_addr *src_addr,
465 					   union net_bridge_eht_addr *h_addr)
466 {
467 	struct net_bridge_group_eht_set_entry *set_h;
468 	struct net_bridge_group_eht_set *eht_set;
469 	bool set_deleted = false;
470 
471 	eht_set = br_multicast_eht_set_lookup(pg, src_addr);
472 	if (!eht_set)
473 		goto out;
474 
475 	set_h = br_multicast_eht_set_entry_lookup(eht_set, h_addr);
476 	if (!set_h)
477 		goto out;
478 
479 	__eht_del_set_entry(set_h);
480 
481 	if (RB_EMPTY_ROOT(&eht_set->entry_tree)) {
482 		br_multicast_del_eht_set(eht_set);
483 		set_deleted = true;
484 	}
485 
486 out:
487 	return set_deleted;
488 }
489 
490 static void br_multicast_del_eht_host(struct net_bridge_port_group *pg,
491 				      union net_bridge_eht_addr *h_addr)
492 {
493 	struct net_bridge_group_eht_set_entry *set_h;
494 	struct net_bridge_group_eht_host *eht_host;
495 	struct hlist_node *tmp;
496 
497 	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
498 	if (!eht_host)
499 		return;
500 
501 	hlist_for_each_entry_safe(set_h, tmp, &eht_host->set_entries, host_list)
502 		br_multicast_del_eht_set_entry(set_h->eht_set->pg,
503 					       &set_h->eht_set->src_addr,
504 					       &set_h->h_addr);
505 }
506 
507 /* create new set entries from reports */
508 static void __eht_create_set_entries(const struct net_bridge_mcast *brmctx,
509 				     struct net_bridge_port_group *pg,
510 				     union net_bridge_eht_addr *h_addr,
511 				     void *srcs,
512 				     u32 nsrcs,
513 				     size_t addr_size,
514 				     int filter_mode)
515 {
516 	union net_bridge_eht_addr eht_src_addr;
517 	u32 src_idx;
518 
519 	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
520 	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
521 		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
522 		br_multicast_create_eht_set_entry(brmctx, pg, &eht_src_addr,
523 						  h_addr, filter_mode,
524 						  false);
525 	}
526 }
527 
528 /* delete existing set entries and their (S,G) entries if they were the last */
529 static bool __eht_del_set_entries(struct net_bridge_port_group *pg,
530 				  union net_bridge_eht_addr *h_addr,
531 				  void *srcs,
532 				  u32 nsrcs,
533 				  size_t addr_size)
534 {
535 	union net_bridge_eht_addr eht_src_addr;
536 	struct net_bridge_group_src *src_ent;
537 	bool changed = false;
538 	struct br_ip src_ip;
539 	u32 src_idx;
540 
541 	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
542 	memset(&src_ip, 0, sizeof(src_ip));
543 	src_ip.proto = pg->key.addr.proto;
544 	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
545 		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
546 		if (!br_multicast_del_eht_set_entry(pg, &eht_src_addr, h_addr))
547 			continue;
548 		memcpy(&src_ip, srcs + (src_idx * addr_size), addr_size);
549 		src_ent = br_multicast_find_group_src(pg, &src_ip);
550 		if (!src_ent)
551 			continue;
552 		br_multicast_del_group_src(src_ent, true);
553 		changed = true;
554 	}
555 
556 	return changed;
557 }
558 
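/* ALLOW_NEW_SOURCES: for an INCLUDE host the reported sources are added
 * to its sets, for an EXCLUDE host they are removed from the excluded
 * sources (and their S,G entries deleted if they were the last).
 */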
559 static bool br_multicast_eht_allow(const struct net_bridge_mcast *brmctx,
560 				   struct net_bridge_port_group *pg,
561 				   union net_bridge_eht_addr *h_addr,
562 				   void *srcs,
563 				   u32 nsrcs,
564 				   size_t addr_size)
565 {
566 	bool changed = false;
567 
568 	switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
569 	case MCAST_INCLUDE:
570 		__eht_create_set_entries(brmctx, pg, h_addr, srcs, nsrcs,
571 					 addr_size, MCAST_INCLUDE);
572 		break;
573 	case MCAST_EXCLUDE:
574 		changed = __eht_del_set_entries(pg, h_addr, srcs, nsrcs,
575 						addr_size);
576 		break;
577 	}
578 
579 	return changed;
580 }
581 
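/* BLOCK_OLD_SOURCES is the inverse of allow: an INCLUDE host loses the
 * reported sources, while an EXCLUDE host gets them added as excluded
 * entries.
 */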
582 static bool br_multicast_eht_block(const struct net_bridge_mcast *brmctx,
583 				   struct net_bridge_port_group *pg,
584 				   union net_bridge_eht_addr *h_addr,
585 				   void *srcs,
586 				   u32 nsrcs,
587 				   size_t addr_size)
588 {
589 	bool changed = false;
590 
591 	switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
592 	case MCAST_INCLUDE:
593 		changed = __eht_del_set_entries(pg, h_addr, srcs, nsrcs,
594 						addr_size);
595 		break;
596 	case MCAST_EXCLUDE:
597 		__eht_create_set_entries(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
598 					 MCAST_EXCLUDE);
599 		break;
600 	}
601 
602 	return changed;
603 }
604 
605 /* flush_entries is true when changing mode */
606 static bool __eht_inc_exc(const struct net_bridge_mcast *brmctx,
607 			  struct net_bridge_port_group *pg,
608 			  union net_bridge_eht_addr *h_addr,
609 			  void *srcs,
610 			  u32 nsrcs,
611 			  size_t addr_size,
612 			  unsigned char filter_mode,
613 			  bool to_report)
614 {
615 	bool changed = false, flush_entries = to_report;
616 	union net_bridge_eht_addr eht_src_addr;
617 
618 	if (br_multicast_eht_host_filter_mode(pg, h_addr) != filter_mode)
619 		flush_entries = true;
620 
621 	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
622 	/* if we're changing mode del host and its entries */
623 	if (flush_entries)
624 		br_multicast_del_eht_host(pg, h_addr);
625 	__eht_create_set_entries(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
626 				 filter_mode);
627 	/* we can be missing sets only if we've deleted some entries */
628 	if (flush_entries) {
629 		struct net_bridge_group_eht_set *eht_set;
630 		struct net_bridge_group_src *src_ent;
631 		struct hlist_node *tmp;
632 
633 		hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
634 			br_multicast_ip_src_to_eht_addr(&src_ent->addr,
635 							&eht_src_addr);
636 			if (!br_multicast_eht_set_lookup(pg, &eht_src_addr)) {
637 				br_multicast_del_group_src(src_ent, true);
638 				changed = true;
639 				continue;
640 			}
641 			/* this is an optimization for TO_INCLUDE where we lower
642 			 * the set's timeout to LMQT to catch timeout hosts:
643 			 * - host A (timing out): set entries X, Y
644 			 * - host B: set entry Z (new from current TO_INCLUDE)
645 			 *           sends BLOCK Z after LMQT but host A's EHT
646 			 *           entries still exist (unless lowered to LMQT
647 			 *           so they can timeout with the S,Gs)
648 			 * => we wait another LMQT, when we can just delete the
649 			 *    group immediately
650 			 */
651 			if (!(src_ent->flags & BR_SGRP_F_SEND) ||
652 			    filter_mode != MCAST_INCLUDE ||
653 			    !to_report)
654 				continue;
655 			eht_set = br_multicast_eht_set_lookup(pg,
656 							      &eht_src_addr);
657 			if (!eht_set)
658 				continue;
659 			mod_timer(&eht_set->timer, jiffies + br_multicast_lmqt(brmctx));
660 		}
661 	}
662 
663 	return changed;
664 }
665 
666 static bool br_multicast_eht_inc(const struct net_bridge_mcast *brmctx,
667 				 struct net_bridge_port_group *pg,
668 				 union net_bridge_eht_addr *h_addr,
669 				 void *srcs,
670 				 u32 nsrcs,
671 				 size_t addr_size,
672 				 bool to_report)
673 {
674 	bool changed;
675 
676 	changed = __eht_inc_exc(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
677 				MCAST_INCLUDE, to_report);
678 	br_eht_convert_host_filter_mode(brmctx, pg, h_addr, MCAST_INCLUDE);
679 
680 	return changed;
681 }
682 
683 static bool br_multicast_eht_exc(const struct net_bridge_mcast *brmctx,
684 				 struct net_bridge_port_group *pg,
685 				 union net_bridge_eht_addr *h_addr,
686 				 void *srcs,
687 				 u32 nsrcs,
688 				 size_t addr_size,
689 				 bool to_report)
690 {
691 	bool changed;
692 
693 	changed = __eht_inc_exc(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
694 				MCAST_EXCLUDE, to_report);
695 	br_eht_convert_host_filter_mode(brmctx, pg, h_addr, MCAST_EXCLUDE);
696 
697 	return changed;
698 }
699 
700 static bool __eht_ip4_handle(const struct net_bridge_mcast *brmctx,
701 			     struct net_bridge_port_group *pg,
702 			     union net_bridge_eht_addr *h_addr,
703 			     void *srcs,
704 			     u32 nsrcs,
705 			     int grec_type)
706 {
707 	bool changed = false, to_report = false;
708 
709 	switch (grec_type) {
710 	case IGMPV3_ALLOW_NEW_SOURCES:
711 		br_multicast_eht_allow(brmctx, pg, h_addr, srcs, nsrcs,
712 				       sizeof(__be32));
713 		break;
714 	case IGMPV3_BLOCK_OLD_SOURCES:
715 		changed = br_multicast_eht_block(brmctx, pg, h_addr, srcs, nsrcs,
716 						 sizeof(__be32));
717 		break;
718 	case IGMPV3_CHANGE_TO_INCLUDE:
719 		to_report = true;
720 		fallthrough;
721 	case IGMPV3_MODE_IS_INCLUDE:
722 		changed = br_multicast_eht_inc(brmctx, pg, h_addr, srcs, nsrcs,
723 					       sizeof(__be32), to_report);
724 		break;
725 	case IGMPV3_CHANGE_TO_EXCLUDE:
726 		to_report = true;
727 		fallthrough;
728 	case IGMPV3_MODE_IS_EXCLUDE:
729 		changed = br_multicast_eht_exc(brmctx, pg, h_addr, srcs, nsrcs,
730 					       sizeof(__be32), to_report);
731 		break;
732 	}
733 
734 	return changed;
735 }
736 
737 #if IS_ENABLED(CONFIG_IPV6)
738 static bool __eht_ip6_handle(const struct net_bridge_mcast *brmctx,
739 			     struct net_bridge_port_group *pg,
740 			     union net_bridge_eht_addr *h_addr,
741 			     void *srcs,
742 			     u32 nsrcs,
743 			     int grec_type)
744 {
745 	bool changed = false, to_report = false;
746 
747 	switch (grec_type) {
748 	case MLD2_ALLOW_NEW_SOURCES:
749 		br_multicast_eht_allow(brmctx, pg, h_addr, srcs, nsrcs,
750 				       sizeof(struct in6_addr));
751 		break;
752 	case MLD2_BLOCK_OLD_SOURCES:
753 		changed = br_multicast_eht_block(brmctx, pg, h_addr, srcs, nsrcs,
754 						 sizeof(struct in6_addr));
755 		break;
756 	case MLD2_CHANGE_TO_INCLUDE:
757 		to_report = true;
758 		fallthrough;
759 	case MLD2_MODE_IS_INCLUDE:
760 		changed = br_multicast_eht_inc(brmctx, pg, h_addr, srcs, nsrcs,
761 					       sizeof(struct in6_addr),
762 					       to_report);
763 		break;
764 	case MLD2_CHANGE_TO_EXCLUDE:
765 		to_report = true;
766 		fallthrough;
767 	case MLD2_MODE_IS_EXCLUDE:
768 		changed = br_multicast_eht_exc(brmctx, pg, h_addr, srcs, nsrcs,
769 					       sizeof(struct in6_addr),
770 					       to_report);
771 		break;
772 	}
773 
774 	return changed;
775 }
776 #endif
777 
778 /* true means an entry was deleted */
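/* Per-host explicit tracking is only done when the receiving port has
 * multicast fast leave enabled (BR_MULTICAST_FAST_LEAVE).
 */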
779 bool br_multicast_eht_handle(const struct net_bridge_mcast *brmctx,
780 			     struct net_bridge_port_group *pg,
781 			     void *h_addr,
782 			     void *srcs,
783 			     u32 nsrcs,
784 			     size_t addr_size,
785 			     int grec_type)
786 {
787 	bool eht_enabled = !!(pg->key.port->flags & BR_MULTICAST_FAST_LEAVE);
788 	union net_bridge_eht_addr eht_host_addr;
789 	bool changed = false;
790 
791 	if (!eht_enabled)
792 		goto out;
793 
794 	memset(&eht_host_addr, 0, sizeof(eht_host_addr));
795 	memcpy(&eht_host_addr, h_addr, addr_size);
796 	if (addr_size == sizeof(__be32))
797 		changed = __eht_ip4_handle(brmctx, pg, &eht_host_addr, srcs,
798 					   nsrcs, grec_type);
799 #if IS_ENABLED(CONFIG_IPV6)
800 	else
801 		changed = __eht_ip6_handle(brmctx, pg, &eht_host_addr, srcs,
802 					   nsrcs, grec_type);
803 #endif
804 
805 out:
806 	return changed;
807 }
808 
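/* Update the per-port limit on the number of tracked EHT hosts; a limit
 * of zero is rejected.
 */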
809 int br_multicast_eht_set_hosts_limit(struct net_bridge_port *p,
810 				     u32 eht_hosts_limit)
811 {
812 	struct net_bridge *br = p->br;
813 
814 	if (!eht_hosts_limit)
815 		return -EINVAL;
816 
817 	spin_lock_bh(&br->multicast_lock);
818 	p->multicast_eht_hosts_limit = eht_hosts_limit;
819 	spin_unlock_bh(&br->multicast_lock);
820 
821 	return 0;
822 }
823