xref: /linux/drivers/net/amt.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com> */
3 
4 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 
6 #include <linux/module.h>
7 #include <linux/skbuff.h>
8 #include <linux/udp.h>
9 #include <linux/jhash.h>
10 #include <linux/if_tunnel.h>
11 #include <linux/net.h>
12 #include <linux/igmp.h>
13 #include <linux/workqueue.h>
14 #include <net/pkt_sched.h>
15 #include <net/net_namespace.h>
16 #include <net/ip.h>
17 #include <net/udp.h>
18 #include <net/udp_tunnel.h>
19 #include <net/icmp.h>
20 #include <net/mld.h>
21 #include <net/amt.h>
22 #include <uapi/linux/amt.h>
23 #include <linux/security.h>
24 #include <net/gro_cells.h>
25 #include <net/ipv6.h>
26 #include <net/if_inet6.h>
27 #include <net/ndisc.h>
28 #include <net/addrconf.h>
29 #include <net/ip6_route.h>
30 #include <net/inet_common.h>
31 #include <net/ip6_checksum.h>
32 
33 static struct workqueue_struct *amt_wq;
34 
35 static HLIST_HEAD(source_gc_list);
36 /* Lock for source_gc_list */
37 static spinlock_t source_gc_lock;
38 static struct delayed_work source_gc_wq;
39 static char *status_str[] = {
40 	"AMT_STATUS_INIT",
41 	"AMT_STATUS_SENT_DISCOVERY",
42 	"AMT_STATUS_RECEIVED_DISCOVERY",
43 	"AMT_STATUS_SENT_ADVERTISEMENT",
44 	"AMT_STATUS_RECEIVED_ADVERTISEMENT",
45 	"AMT_STATUS_SENT_REQUEST",
46 	"AMT_STATUS_RECEIVED_REQUEST",
47 	"AMT_STATUS_SENT_QUERY",
48 	"AMT_STATUS_RECEIVED_QUERY",
49 	"AMT_STATUS_SENT_UPDATE",
50 	"AMT_STATUS_RECEIVED_UPDATE",
51 };
52 
53 static char *type_str[] = {
54 	"", /* Type 0 is not defined */
55 	"AMT_MSG_DISCOVERY",
56 	"AMT_MSG_ADVERTISEMENT",
57 	"AMT_MSG_REQUEST",
58 	"AMT_MSG_MEMBERSHIP_QUERY",
59 	"AMT_MSG_MEMBERSHIP_UPDATE",
60 	"AMT_MSG_MULTICAST_DATA",
61 	"AMT_MSG_TEARDOWN",
62 };
63 
64 static char *action_str[] = {
65 	"AMT_ACT_GMI",
66 	"AMT_ACT_GMI_ZERO",
67 	"AMT_ACT_GT",
68 	"AMT_ACT_STATUS_FWD_NEW",
69 	"AMT_ACT_STATUS_D_FWD_NEW",
70 	"AMT_ACT_STATUS_NONE_NEW",
71 };
72 
73 static struct igmpv3_grec igmpv3_zero_grec;
74 
75 #if IS_ENABLED(CONFIG_IPV6)
76 #define MLD2_ALL_NODE_INIT { { { 0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01 } } }
77 static struct in6_addr mld2_all_node = MLD2_ALL_NODE_INIT;
78 static struct mld2_grec mldv2_zero_grec;
79 #endif
80 
81 static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb)
82 {
83 	BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct tc_skb_cb) >
84 		     sizeof_field(struct sk_buff, cb));
85 
86 	return (struct amt_skb_cb *)((void *)skb->cb +
87 		sizeof(struct tc_skb_cb));
88 }
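/* Layout sketch (illustrative, derived from the BUILD_BUG_ON above): the AMT
 * private data sits behind the tc control block inside skb->cb, so both users
 * can share the same skb's cb area:
 *
 *   skb->cb:  [ struct tc_skb_cb ][ struct amt_skb_cb ][ remaining cb space ]
 *
 * Callers stash the originating tunnel there, e.g.
 *   amt_skb_cb(skb)->tunnel = tunnel;
 * before handing the skb to dev_queue_xmit().
 */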
89 
90 static void __amt_source_gc_work(void)
91 {
92 	struct amt_source_node *snode;
93 	struct hlist_head gc_list;
94 	struct hlist_node *t;
95 
96 	spin_lock_bh(&source_gc_lock);
97 	hlist_move_list(&source_gc_list, &gc_list);
98 	spin_unlock_bh(&source_gc_lock);
99 
100 	hlist_for_each_entry_safe(snode, t, &gc_list, node) {
101 		hlist_del_rcu(&snode->node);
102 		kfree_rcu(snode, rcu);
103 	}
104 }
105 
106 static void amt_source_gc_work(struct work_struct *work)
107 {
108 	__amt_source_gc_work();
109 
110 	spin_lock_bh(&source_gc_lock);
111 	mod_delayed_work(amt_wq, &source_gc_wq,
112 			 msecs_to_jiffies(AMT_GC_INTERVAL));
113 	spin_unlock_bh(&source_gc_lock);
114 }
115 
116 static bool amt_addr_equal(union amt_addr *a, union amt_addr *b)
117 {
118 	return !memcmp(a, b, sizeof(union amt_addr));
119 }
120 
121 static u32 amt_source_hash(struct amt_tunnel_list *tunnel, union amt_addr *src)
122 {
123 	u32 hash = jhash(src, sizeof(*src), tunnel->amt->hash_seed);
124 
125 	return reciprocal_scale(hash, tunnel->amt->hash_buckets);
126 }
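/* Illustrative note: reciprocal_scale() maps the 32-bit jhash value into the
 * bucket range [0, hash_buckets) without a division, roughly:
 *
 *   u32 hash   = jhash(src, sizeof(*src), amt->hash_seed);
 *   u32 bucket = (u32)(((u64)hash * amt->hash_buckets) >> 32);
 *
 * amt_group_hash() below applies the same scheme to group addresses.
 */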
127 
128 static bool amt_status_filter(struct amt_source_node *snode,
129 			      enum amt_filter filter)
130 {
131 	bool rc = false;
132 
133 	switch (filter) {
134 	case AMT_FILTER_FWD:
135 		if (snode->status == AMT_SOURCE_STATUS_FWD &&
136 		    snode->flags == AMT_SOURCE_OLD)
137 			rc = true;
138 		break;
139 	case AMT_FILTER_D_FWD:
140 		if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
141 		    snode->flags == AMT_SOURCE_OLD)
142 			rc = true;
143 		break;
144 	case AMT_FILTER_FWD_NEW:
145 		if (snode->status == AMT_SOURCE_STATUS_FWD &&
146 		    snode->flags == AMT_SOURCE_NEW)
147 			rc = true;
148 		break;
149 	case AMT_FILTER_D_FWD_NEW:
150 		if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
151 		    snode->flags == AMT_SOURCE_NEW)
152 			rc = true;
153 		break;
154 	case AMT_FILTER_ALL:
155 		rc = true;
156 		break;
157 	case AMT_FILTER_NONE_NEW:
158 		if (snode->status == AMT_SOURCE_STATUS_NONE &&
159 		    snode->flags == AMT_SOURCE_NEW)
160 			rc = true;
161 		break;
162 	case AMT_FILTER_BOTH:
163 		if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
164 		     snode->status == AMT_SOURCE_STATUS_FWD) &&
165 		    snode->flags == AMT_SOURCE_OLD)
166 			rc = true;
167 		break;
168 	case AMT_FILTER_BOTH_NEW:
169 		if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
170 		     snode->status == AMT_SOURCE_STATUS_FWD) &&
171 		    snode->flags == AMT_SOURCE_NEW)
172 			rc = true;
173 		break;
174 	default:
175 		WARN_ON_ONCE(1);
176 		break;
177 	}
178 
179 	return rc;
180 }
181 
182 static struct amt_source_node *amt_lookup_src(struct amt_tunnel_list *tunnel,
183 					      struct amt_group_node *gnode,
184 					      enum amt_filter filter,
185 					      union amt_addr *src)
186 {
187 	u32 hash = amt_source_hash(tunnel, src);
188 	struct amt_source_node *snode;
189 
190 	hlist_for_each_entry_rcu(snode, &gnode->sources[hash], node)
191 		if (amt_status_filter(snode, filter) &&
192 		    amt_addr_equal(&snode->source_addr, src))
193 			return snode;
194 
195 	return NULL;
196 }
197 
198 static u32 amt_group_hash(struct amt_tunnel_list *tunnel, union amt_addr *group)
199 {
200 	u32 hash = jhash(group, sizeof(*group), tunnel->amt->hash_seed);
201 
202 	return reciprocal_scale(hash, tunnel->amt->hash_buckets);
203 }
204 
205 static struct amt_group_node *amt_lookup_group(struct amt_tunnel_list *tunnel,
206 					       union amt_addr *group,
207 					       union amt_addr *host,
208 					       bool v6)
209 {
210 	u32 hash = amt_group_hash(tunnel, group);
211 	struct amt_group_node *gnode;
212 
213 	hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash], node) {
214 		if (amt_addr_equal(&gnode->group_addr, group) &&
215 		    amt_addr_equal(&gnode->host_addr, host) &&
216 		    gnode->v6 == v6)
217 			return gnode;
218 	}
219 
220 	return NULL;
221 }
222 
223 static void amt_destroy_source(struct amt_source_node *snode)
224 {
225 	struct amt_group_node *gnode = snode->gnode;
226 	struct amt_tunnel_list *tunnel;
227 
228 	tunnel = gnode->tunnel_list;
229 
230 	if (!gnode->v6) {
231 		netdev_dbg(snode->gnode->amt->dev,
232 			   "Delete source %pI4 from %pI4\n",
233 			   &snode->source_addr.ip4,
234 			   &gnode->group_addr.ip4);
235 #if IS_ENABLED(CONFIG_IPV6)
236 	} else {
237 		netdev_dbg(snode->gnode->amt->dev,
238 			   "Delete source %pI6 from %pI6\n",
239 			   &snode->source_addr.ip6,
240 			   &gnode->group_addr.ip6);
241 #endif
242 	}
243 
244 	cancel_delayed_work(&snode->source_timer);
245 	hlist_del_init_rcu(&snode->node);
246 	tunnel->nr_sources--;
247 	gnode->nr_sources--;
248 	spin_lock_bh(&source_gc_lock);
249 	hlist_add_head_rcu(&snode->node, &source_gc_list);
250 	spin_unlock_bh(&source_gc_lock);
251 }
252 
253 static void amt_del_group(struct amt_dev *amt, struct amt_group_node *gnode)
254 {
255 	struct amt_source_node *snode;
256 	struct hlist_node *t;
257 	int i;
258 
259 	if (cancel_delayed_work(&gnode->group_timer))
260 		dev_put(amt->dev);
261 	hlist_del_rcu(&gnode->node);
262 	gnode->tunnel_list->nr_groups--;
263 
264 	if (!gnode->v6)
265 		netdev_dbg(amt->dev, "Leave group %pI4\n",
266 			   &gnode->group_addr.ip4);
267 #if IS_ENABLED(CONFIG_IPV6)
268 	else
269 		netdev_dbg(amt->dev, "Leave group %pI6\n",
270 			   &gnode->group_addr.ip6);
271 #endif
272 	for (i = 0; i < amt->hash_buckets; i++)
273 		hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node)
274 			amt_destroy_source(snode);
275 
276 	/* tunnel->lock was acquired outside of amt_del_group(),
277 	 * but rcu_read_lock() was acquired too, so it's safe.
278 	 */
279 	kfree_rcu(gnode, rcu);
280 }
281 
282 /* If a source timer expires with a router filter-mode for the group of
283  * INCLUDE, the router concludes that traffic from this particular
284  * source is no longer desired on the attached network, and deletes the
285  * associated source record.
286  */
287 static void amt_source_work(struct work_struct *work)
288 {
289 	struct amt_source_node *snode = container_of(to_delayed_work(work),
290 						     struct amt_source_node,
291 						     source_timer);
292 	struct amt_group_node *gnode = snode->gnode;
293 	struct amt_dev *amt = gnode->amt;
294 	struct amt_tunnel_list *tunnel;
295 
296 	tunnel = gnode->tunnel_list;
297 	spin_lock_bh(&tunnel->lock);
298 	rcu_read_lock();
299 	if (gnode->filter_mode == MCAST_INCLUDE) {
300 		amt_destroy_source(snode);
301 		if (!gnode->nr_sources)
302 			amt_del_group(amt, gnode);
303 	} else {
304 		/* When a router filter-mode for a group is EXCLUDE,
305 		 * source records are only deleted when the group timer expires
306 		 */
307 		snode->status = AMT_SOURCE_STATUS_D_FWD;
308 	}
309 	rcu_read_unlock();
310 	spin_unlock_bh(&tunnel->lock);
311 }
312 
313 static void amt_act_src(struct amt_tunnel_list *tunnel,
314 			struct amt_group_node *gnode,
315 			struct amt_source_node *snode,
316 			enum amt_act act)
317 {
318 	struct amt_dev *amt = tunnel->amt;
319 
320 	switch (act) {
321 	case AMT_ACT_GMI:
322 		mod_delayed_work(amt_wq, &snode->source_timer,
323 				 msecs_to_jiffies(amt_gmi(amt)));
324 		break;
325 	case AMT_ACT_GMI_ZERO:
326 		cancel_delayed_work(&snode->source_timer);
327 		break;
328 	case AMT_ACT_GT:
329 		mod_delayed_work(amt_wq, &snode->source_timer,
330 				 gnode->group_timer.timer.expires);
331 		break;
332 	case AMT_ACT_STATUS_FWD_NEW:
333 		snode->status = AMT_SOURCE_STATUS_FWD;
334 		snode->flags = AMT_SOURCE_NEW;
335 		break;
336 	case AMT_ACT_STATUS_D_FWD_NEW:
337 		snode->status = AMT_SOURCE_STATUS_D_FWD;
338 		snode->flags = AMT_SOURCE_NEW;
339 		break;
340 	case AMT_ACT_STATUS_NONE_NEW:
341 		cancel_delayed_work(&snode->source_timer);
342 		snode->status = AMT_SOURCE_STATUS_NONE;
343 		snode->flags = AMT_SOURCE_NEW;
344 		break;
345 	default:
346 		WARN_ON_ONCE(1);
347 		return;
348 	}
349 
350 	if (!gnode->v6)
351 		netdev_dbg(amt->dev, "Source %pI4 from %pI4 Acted %s\n",
352 			   &snode->source_addr.ip4,
353 			   &gnode->group_addr.ip4,
354 			   action_str[act]);
355 #if IS_ENABLED(CONFIG_IPV6)
356 	else
357 		netdev_dbg(amt->dev, "Source %pI6 from %pI6 Acted %s\n",
358 			   &snode->source_addr.ip6,
359 			   &gnode->group_addr.ip6,
360 			   action_str[act]);
361 #endif
362 }
363 
364 static struct amt_source_node *amt_alloc_snode(struct amt_group_node *gnode,
365 					       union amt_addr *src)
366 {
367 	struct amt_source_node *snode;
368 
369 	snode = kzalloc(sizeof(*snode), GFP_ATOMIC);
370 	if (!snode)
371 		return NULL;
372 
373 	memcpy(&snode->source_addr, src, sizeof(union amt_addr));
374 	snode->gnode = gnode;
375 	snode->status = AMT_SOURCE_STATUS_NONE;
376 	snode->flags = AMT_SOURCE_NEW;
377 	INIT_HLIST_NODE(&snode->node);
378 	INIT_DELAYED_WORK(&snode->source_timer, amt_source_work);
379 
380 	return snode;
381 }
382 
383 /* RFC 3810 - 7.2.2.  Definition of Filter Timers
384  *
385  *  Router Mode          Filter Timer         Actions/Comments
386  *  -----------       -----------------       ----------------
387  *
388  *    INCLUDE             Not Used            All listeners in
389  *                                            INCLUDE mode.
390  *
391  *    EXCLUDE             Timer > 0           At least one listener
392  *                                            in EXCLUDE mode.
393  *
394  *    EXCLUDE             Timer == 0          No more listeners in
395  *                                            EXCLUDE mode for the
396  *                                            multicast address.
397  *                                            If the Requested List
398  *                                            is empty, delete
399  *                                            Multicast Address
400  *                                            Record.  If not, switch
401  *                                            to INCLUDE filter mode;
402  *                                            the sources in the
403  *                                            Requested List are
404  *                                            moved to the Include
405  *                                            List, and the Exclude
406  *                                            List is deleted.
407  */
408 static void amt_group_work(struct work_struct *work)
409 {
410 	struct amt_group_node *gnode = container_of(to_delayed_work(work),
411 						    struct amt_group_node,
412 						    group_timer);
413 	struct amt_tunnel_list *tunnel = gnode->tunnel_list;
414 	struct amt_dev *amt = gnode->amt;
415 	struct amt_source_node *snode;
416 	bool delete_group = true;
417 	struct hlist_node *t;
418 	int i, buckets;
419 
420 	buckets = amt->hash_buckets;
421 
422 	spin_lock_bh(&tunnel->lock);
423 	if (gnode->filter_mode == MCAST_INCLUDE) {
424 		/* Not Used */
425 		spin_unlock_bh(&tunnel->lock);
426 		goto out;
427 	}
428 
429 	rcu_read_lock();
430 	for (i = 0; i < buckets; i++) {
431 		hlist_for_each_entry_safe(snode, t,
432 					  &gnode->sources[i], node) {
433 			if (!delayed_work_pending(&snode->source_timer) ||
434 			    snode->status == AMT_SOURCE_STATUS_D_FWD) {
435 				amt_destroy_source(snode);
436 			} else {
437 				delete_group = false;
438 				snode->status = AMT_SOURCE_STATUS_FWD;
439 			}
440 		}
441 	}
442 	if (delete_group)
443 		amt_del_group(amt, gnode);
444 	else
445 		gnode->filter_mode = MCAST_INCLUDE;
446 	rcu_read_unlock();
447 	spin_unlock_bh(&tunnel->lock);
448 out:
449 	dev_put(amt->dev);
450 }
451 
452 /* Non-existent group is created as INCLUDE {empty}:
453  *
454  * RFC 3376 - 5.1. Action on Change of Interface State
455  *
456  * If no interface state existed for that multicast address before
457  * the change (i.e., the change consisted of creating a new
458  * per-interface record), or if no state exists after the change
459  * (i.e., the change consisted of deleting a per-interface record),
460  * then the "non-existent" state is considered to have a filter mode
461  * of INCLUDE and an empty source list.
462  */
463 static struct amt_group_node *amt_add_group(struct amt_dev *amt,
464 					    struct amt_tunnel_list *tunnel,
465 					    union amt_addr *group,
466 					    union amt_addr *host,
467 					    bool v6)
468 {
469 	struct amt_group_node *gnode;
470 	u32 hash;
471 	int i;
472 
473 	if (tunnel->nr_groups >= amt->max_groups)
474 		return ERR_PTR(-ENOSPC);
475 
476 	gnode = kzalloc(sizeof(*gnode) +
477 			(sizeof(struct hlist_head) * amt->hash_buckets),
478 			GFP_ATOMIC);
479 	if (unlikely(!gnode))
480 		return ERR_PTR(-ENOMEM);
481 
482 	gnode->amt = amt;
483 	gnode->group_addr = *group;
484 	gnode->host_addr = *host;
485 	gnode->v6 = v6;
486 	gnode->tunnel_list = tunnel;
487 	gnode->filter_mode = MCAST_INCLUDE;
488 	INIT_HLIST_NODE(&gnode->node);
489 	INIT_DELAYED_WORK(&gnode->group_timer, amt_group_work);
490 	for (i = 0; i < amt->hash_buckets; i++)
491 		INIT_HLIST_HEAD(&gnode->sources[i]);
492 
493 	hash = amt_group_hash(tunnel, group);
494 	hlist_add_head_rcu(&gnode->node, &tunnel->groups[hash]);
495 	tunnel->nr_groups++;
496 
497 	if (!gnode->v6)
498 		netdev_dbg(amt->dev, "Join group %pI4\n",
499 			   &gnode->group_addr.ip4);
500 #if IS_ENABLED(CONFIG_IPV6)
501 	else
502 		netdev_dbg(amt->dev, "Join group %pI6\n",
503 			   &gnode->group_addr.ip6);
504 #endif
505 
506 	return gnode;
507 }
508 
509 static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
510 {
511 	u8 ra[AMT_IPHDR_OPTS] = { IPOPT_RA, 4, 0, 0 };
512 	int hlen = LL_RESERVED_SPACE(amt->dev);
513 	int tlen = amt->dev->needed_tailroom;
514 	struct igmpv3_query *ihv3;
515 	void *csum_start = NULL;
516 	__sum16 *csum = NULL;
517 	struct sk_buff *skb;
518 	struct ethhdr *eth;
519 	struct iphdr *iph;
520 	unsigned int len;
521 	int offset;
522 
523 	len = hlen + tlen + sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3);
524 	skb = netdev_alloc_skb_ip_align(amt->dev, len);
525 	if (!skb)
526 		return NULL;
527 
528 	skb_reserve(skb, hlen);
529 	skb_push(skb, sizeof(*eth));
530 	skb->protocol = htons(ETH_P_IP);
531 	skb_reset_mac_header(skb);
532 	skb->priority = TC_PRIO_CONTROL;
533 	skb_put(skb, sizeof(*iph));
534 	skb_put_data(skb, ra, sizeof(ra));
535 	skb_put(skb, sizeof(*ihv3));
536 	skb_pull(skb, sizeof(*eth));
537 	skb_reset_network_header(skb);
538 
539 	iph		= ip_hdr(skb);
540 	iph->version	= 4;
541 	iph->ihl	= (sizeof(struct iphdr) + AMT_IPHDR_OPTS) >> 2;
542 	iph->tos	= AMT_TOS;
543 	iph->tot_len	= htons(sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3));
544 	iph->frag_off	= htons(IP_DF);
545 	iph->ttl	= 1;
546 	iph->id		= 0;
547 	iph->protocol	= IPPROTO_IGMP;
548 	iph->daddr	= htonl(INADDR_ALLHOSTS_GROUP);
549 	iph->saddr	= htonl(INADDR_ANY);
550 	ip_send_check(iph);
551 
552 	eth = eth_hdr(skb);
553 	ether_addr_copy(eth->h_source, amt->dev->dev_addr);
554 	ip_eth_mc_map(htonl(INADDR_ALLHOSTS_GROUP), eth->h_dest);
555 	eth->h_proto = htons(ETH_P_IP);
556 
557 	ihv3		= skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
558 	skb_reset_transport_header(skb);
559 	ihv3->type	= IGMP_HOST_MEMBERSHIP_QUERY;
560 	ihv3->code	= 1;
561 	ihv3->group	= 0;
562 	ihv3->qqic	= amt->qi;
563 	ihv3->nsrcs	= 0;
564 	ihv3->resv	= 0;
565 	ihv3->suppress	= false;
566 	ihv3->qrv	= READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
567 	ihv3->csum	= 0;
568 	csum		= &ihv3->csum;
569 	csum_start	= (void *)ihv3;
570 	*csum		= ip_compute_csum(csum_start, sizeof(*ihv3));
571 	offset		= skb_transport_offset(skb);
572 	skb->csum	= skb_checksum(skb, offset, skb->len - offset, 0);
573 	skb->ip_summed	= CHECKSUM_NONE;
574 
575 	skb_push(skb, sizeof(*eth) + sizeof(*iph) + AMT_IPHDR_OPTS);
576 
577 	return skb;
578 }
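/* Resulting frame layout (illustrative summary of the function above):
 *
 *   [ ethhdr ][ iphdr + Router Alert option ][ igmpv3_query ]
 *
 * The general query is addressed to 224.0.0.1 (INADDR_ALLHOSTS_GROUP) with
 * TTL 1, so it only solicits reports from directly attached listeners; it is
 * handed to dev_queue_xmit() by amt_send_igmp_gq() below.
 */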
579 
580 static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
581 				 bool validate)
582 {
583 	if (validate && amt->status >= status)
584 		return;
585 	netdev_dbg(amt->dev, "Update GW status %s -> %s",
586 		   status_str[amt->status], status_str[status]);
587 	WRITE_ONCE(amt->status, status);
588 }
589 
590 static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
591 				      enum amt_status status,
592 				      bool validate)
593 {
594 	if (validate && tunnel->status >= status)
595 		return;
596 	netdev_dbg(tunnel->amt->dev,
597 		   "Update Tunnel(IP = %pI4, PORT = %u) status %s -> %s",
598 		   &tunnel->ip4, ntohs(tunnel->source_port),
599 		   status_str[tunnel->status], status_str[status]);
600 	tunnel->status = status;
601 }
602 
603 static void amt_update_relay_status(struct amt_tunnel_list *tunnel,
604 				    enum amt_status status, bool validate)
605 {
606 	spin_lock_bh(&tunnel->lock);
607 	__amt_update_relay_status(tunnel, status, validate);
608 	spin_unlock_bh(&tunnel->lock);
609 }
610 
611 static void amt_send_discovery(struct amt_dev *amt)
612 {
613 	struct amt_header_discovery *amtd;
614 	int hlen, tlen, offset;
615 	struct socket *sock;
616 	struct udphdr *udph;
617 	struct sk_buff *skb;
618 	struct iphdr *iph;
619 	struct rtable *rt;
620 	struct flowi4 fl4;
621 	u32 len;
622 	int err;
623 
624 	rcu_read_lock();
625 	sock = rcu_dereference(amt->sock);
626 	if (!sock)
627 		goto out;
628 
629 	if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
630 		goto out;
631 
632 	rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
633 				   amt->discovery_ip, amt->local_ip,
634 				   amt->gw_port, amt->relay_port,
635 				   IPPROTO_UDP, 0,
636 				   amt->stream_dev->ifindex);
637 	if (IS_ERR(rt)) {
638 		amt->dev->stats.tx_errors++;
639 		goto out;
640 	}
641 
642 	hlen = LL_RESERVED_SPACE(amt->dev);
643 	tlen = amt->dev->needed_tailroom;
644 	len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
645 	skb = netdev_alloc_skb_ip_align(amt->dev, len);
646 	if (!skb) {
647 		ip_rt_put(rt);
648 		amt->dev->stats.tx_errors++;
649 		goto out;
650 	}
651 
652 	skb->priority = TC_PRIO_CONTROL;
653 	skb_dst_set(skb, &rt->dst);
654 
655 	len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
656 	skb_reset_network_header(skb);
657 	skb_put(skb, len);
658 	amtd = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
659 	amtd->version	= 0;
660 	amtd->type	= AMT_MSG_DISCOVERY;
661 	amtd->reserved	= 0;
662 	amtd->nonce	= amt->nonce;
663 	skb_push(skb, sizeof(*udph));
664 	skb_reset_transport_header(skb);
665 	udph		= udp_hdr(skb);
666 	udph->source	= amt->gw_port;
667 	udph->dest	= amt->relay_port;
668 	udph->len	= htons(sizeof(*udph) + sizeof(*amtd));
669 	udph->check	= 0;
670 	offset = skb_transport_offset(skb);
671 	skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
672 	udph->check = csum_tcpudp_magic(amt->local_ip, amt->discovery_ip,
673 					sizeof(*udph) + sizeof(*amtd),
674 					IPPROTO_UDP, skb->csum);
675 
676 	skb_push(skb, sizeof(*iph));
677 	iph		= ip_hdr(skb);
678 	iph->version	= 4;
679 	iph->ihl	= (sizeof(struct iphdr)) >> 2;
680 	iph->tos	= AMT_TOS;
681 	iph->frag_off	= 0;
682 	iph->ttl	= ip4_dst_hoplimit(&rt->dst);
683 	iph->daddr	= amt->discovery_ip;
684 	iph->saddr	= amt->local_ip;
685 	iph->protocol	= IPPROTO_UDP;
686 	iph->tot_len	= htons(len);
687 
688 	skb->ip_summed = CHECKSUM_NONE;
689 	ip_select_ident(amt->net, skb, NULL);
690 	ip_send_check(iph);
691 	err = ip_local_out(amt->net, sock->sk, skb);
692 	if (unlikely(net_xmit_eval(err)))
693 		amt->dev->stats.tx_errors++;
694 
695 	amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
696 out:
697 	rcu_read_unlock();
698 }
699 
700 static void amt_send_request(struct amt_dev *amt, bool v6)
701 {
702 	struct amt_header_request *amtrh;
703 	int hlen, tlen, offset;
704 	struct socket *sock;
705 	struct udphdr *udph;
706 	struct sk_buff *skb;
707 	struct iphdr *iph;
708 	struct rtable *rt;
709 	struct flowi4 fl4;
710 	u32 len;
711 	int err;
712 
713 	rcu_read_lock();
714 	sock = rcu_dereference(amt->sock);
715 	if (!sock)
716 		goto out;
717 
718 	if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
719 		goto out;
720 
721 	rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
722 				   amt->remote_ip, amt->local_ip,
723 				   amt->gw_port, amt->relay_port,
724 				   IPPROTO_UDP, 0,
725 				   amt->stream_dev->ifindex);
726 	if (IS_ERR(rt)) {
727 		amt->dev->stats.tx_errors++;
728 		goto out;
729 	}
730 
731 	hlen = LL_RESERVED_SPACE(amt->dev);
732 	tlen = amt->dev->needed_tailroom;
733 	len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
734 	skb = netdev_alloc_skb_ip_align(amt->dev, len);
735 	if (!skb) {
736 		ip_rt_put(rt);
737 		amt->dev->stats.tx_errors++;
738 		goto out;
739 	}
740 
741 	skb->priority = TC_PRIO_CONTROL;
742 	skb_dst_set(skb, &rt->dst);
743 
744 	len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
745 	skb_reset_network_header(skb);
746 	skb_put(skb, len);
747 	amtrh = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
748 	amtrh->version	 = 0;
749 	amtrh->type	 = AMT_MSG_REQUEST;
750 	amtrh->reserved1 = 0;
751 	amtrh->p	 = v6;
752 	amtrh->reserved2 = 0;
753 	amtrh->nonce	 = amt->nonce;
754 	skb_push(skb, sizeof(*udph));
755 	skb_reset_transport_header(skb);
756 	udph		= udp_hdr(skb);
757 	udph->source	= amt->gw_port;
758 	udph->dest	= amt->relay_port;
759 	udph->len	= htons(sizeof(*amtrh) + sizeof(*udph));
760 	udph->check	= 0;
761 	offset = skb_transport_offset(skb);
762 	skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
763 	udph->check = csum_tcpudp_magic(amt->local_ip, amt->remote_ip,
764 					sizeof(*udph) + sizeof(*amtrh),
765 					IPPROTO_UDP, skb->csum);
766 
767 	skb_push(skb, sizeof(*iph));
768 	iph		= ip_hdr(skb);
769 	iph->version	= 4;
770 	iph->ihl	= (sizeof(struct iphdr)) >> 2;
771 	iph->tos	= AMT_TOS;
772 	iph->frag_off	= 0;
773 	iph->ttl	= ip4_dst_hoplimit(&rt->dst);
774 	iph->daddr	= amt->remote_ip;
775 	iph->saddr	= amt->local_ip;
776 	iph->protocol	= IPPROTO_UDP;
777 	iph->tot_len	= htons(len);
778 
779 	skb->ip_summed = CHECKSUM_NONE;
780 	ip_select_ident(amt->net, skb, NULL);
781 	ip_send_check(iph);
782 	err = ip_local_out(amt->net, sock->sk, skb);
783 	if (unlikely(net_xmit_eval(err)))
784 		amt->dev->stats.tx_errors++;
785 
786 out:
787 	rcu_read_unlock();
788 }
789 
790 static void amt_send_igmp_gq(struct amt_dev *amt,
791 			     struct amt_tunnel_list *tunnel)
792 {
793 	struct sk_buff *skb;
794 
795 	skb = amt_build_igmp_gq(amt);
796 	if (!skb)
797 		return;
798 
799 	amt_skb_cb(skb)->tunnel = tunnel;
800 	dev_queue_xmit(skb);
801 }
802 
803 #if IS_ENABLED(CONFIG_IPV6)
804 static struct sk_buff *amt_build_mld_gq(struct amt_dev *amt)
805 {
806 	u8 ra[AMT_IP6HDR_OPTS] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
807 				   2, 0, 0, IPV6_TLV_PAD1, IPV6_TLV_PAD1 };
808 	int hlen = LL_RESERVED_SPACE(amt->dev);
809 	int tlen = amt->dev->needed_tailroom;
810 	struct mld2_query *mld2q;
811 	void *csum_start = NULL;
812 	struct ipv6hdr *ip6h;
813 	struct sk_buff *skb;
814 	struct ethhdr *eth;
815 	u32 len;
816 
817 	len = hlen + tlen + sizeof(*ip6h) + sizeof(ra) + sizeof(*mld2q);
818 	skb = netdev_alloc_skb_ip_align(amt->dev, len);
819 	if (!skb)
820 		return NULL;
821 
822 	skb_reserve(skb, hlen);
823 	skb_push(skb, sizeof(*eth));
824 	skb_reset_mac_header(skb);
825 	eth = eth_hdr(skb);
826 	skb->priority = TC_PRIO_CONTROL;
827 	skb->protocol = htons(ETH_P_IPV6);
828 	skb_put_zero(skb, sizeof(*ip6h));
829 	skb_put_data(skb, ra, sizeof(ra));
830 	skb_put_zero(skb, sizeof(*mld2q));
831 	skb_pull(skb, sizeof(*eth));
832 	skb_reset_network_header(skb);
833 	ip6h			= ipv6_hdr(skb);
834 	ip6h->payload_len	= htons(sizeof(ra) + sizeof(*mld2q));
835 	ip6h->nexthdr		= NEXTHDR_HOP;
836 	ip6h->hop_limit		= 1;
837 	ip6h->daddr		= mld2_all_node;
838 	ip6_flow_hdr(ip6h, 0, 0);
839 
840 	if (ipv6_dev_get_saddr(amt->net, amt->dev, &ip6h->daddr, 0,
841 			       &ip6h->saddr)) {
842 		amt->dev->stats.tx_errors++;
843 		kfree_skb(skb);
844 		return NULL;
845 	}
846 
847 	eth->h_proto = htons(ETH_P_IPV6);
848 	ether_addr_copy(eth->h_source, amt->dev->dev_addr);
849 	ipv6_eth_mc_map(&mld2_all_node, eth->h_dest);
850 
851 	skb_pull(skb, sizeof(*ip6h) + sizeof(ra));
852 	skb_reset_transport_header(skb);
853 	mld2q			= (struct mld2_query *)icmp6_hdr(skb);
854 	mld2q->mld2q_mrc	= htons(1);
855 	mld2q->mld2q_type	= ICMPV6_MGM_QUERY;
856 	mld2q->mld2q_code	= 0;
857 	mld2q->mld2q_cksum	= 0;
858 	mld2q->mld2q_resv1	= 0;
859 	mld2q->mld2q_resv2	= 0;
860 	mld2q->mld2q_suppress	= 0;
861 	mld2q->mld2q_qrv	= amt->qrv;
862 	mld2q->mld2q_nsrcs	= 0;
863 	mld2q->mld2q_qqic	= amt->qi;
864 	csum_start		= (void *)mld2q;
865 	mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
866 					     sizeof(*mld2q),
867 					     IPPROTO_ICMPV6,
868 					     csum_partial(csum_start,
869 							  sizeof(*mld2q), 0));
870 
871 	skb->ip_summed = CHECKSUM_NONE;
872 	skb_push(skb, sizeof(*eth) + sizeof(*ip6h) + sizeof(ra));
873 	return skb;
874 }
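/* Resulting frame layout (illustrative, IPv6 counterpart of the above):
 *
 *   [ ethhdr ][ ipv6hdr ][ hop-by-hop Router Alert ][ mld2_query ]
 *
 * hop_limit is 1 and the destination is ff02::1 (mld2_all_node), mirroring
 * the IGMPv3 general query built in amt_build_igmp_gq().
 */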
875 
876 static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
877 {
878 	struct sk_buff *skb;
879 
880 	skb = amt_build_mld_gq(amt);
881 	if (!skb)
882 		return;
883 
884 	amt_skb_cb(skb)->tunnel = tunnel;
885 	dev_queue_xmit(skb);
886 }
887 #else
888 static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
889 {
890 }
891 #endif
892 
893 static bool amt_queue_event(struct amt_dev *amt, enum amt_event event,
894 			    struct sk_buff *skb)
895 {
896 	int index;
897 
898 	spin_lock_bh(&amt->lock);
899 	if (amt->nr_events >= AMT_MAX_EVENTS) {
900 		spin_unlock_bh(&amt->lock);
901 		return 1;
902 	}
903 
904 	index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS;
905 	amt->events[index].event = event;
906 	amt->events[index].skb = skb;
907 	amt->nr_events++;
908 	amt->event_idx %= AMT_MAX_EVENTS;
909 	queue_work(amt_wq, &amt->event_wq);
910 	spin_unlock_bh(&amt->lock);
911 
912 	return 0;
913 }
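/* Usage sketch: amt_queue_event() returns nonzero when the AMT_MAX_EVENTS
 * ring is full, so timer-driven callers simply re-arm and retry later, e.g.
 *
 *   if (amt_queue_event(amt, AMT_EVENT_SEND_DISCOVERY, NULL))
 *       mod_delayed_work(amt_wq, &amt->discovery_wq,
 *                        msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
 *
 * as done by amt_discovery_work() and amt_req_work() below.
 */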
914 
915 static void amt_secret_work(struct work_struct *work)
916 {
917 	struct amt_dev *amt = container_of(to_delayed_work(work),
918 					   struct amt_dev,
919 					   secret_wq);
920 
921 	spin_lock_bh(&amt->lock);
922 	get_random_bytes(&amt->key, sizeof(siphash_key_t));
923 	spin_unlock_bh(&amt->lock);
924 	mod_delayed_work(amt_wq, &amt->secret_wq,
925 			 msecs_to_jiffies(AMT_SECRET_TIMEOUT));
926 }
927 
928 static void amt_event_send_discovery(struct amt_dev *amt)
929 {
930 	if (amt->status > AMT_STATUS_SENT_DISCOVERY)
931 		goto out;
932 	get_random_bytes(&amt->nonce, sizeof(__be32));
933 
934 	amt_send_discovery(amt);
935 out:
936 	mod_delayed_work(amt_wq, &amt->discovery_wq,
937 			 msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
938 }
939 
940 static void amt_discovery_work(struct work_struct *work)
941 {
942 	struct amt_dev *amt = container_of(to_delayed_work(work),
943 					   struct amt_dev,
944 					   discovery_wq);
945 
946 	if (amt_queue_event(amt, AMT_EVENT_SEND_DISCOVERY, NULL))
947 		mod_delayed_work(amt_wq, &amt->discovery_wq,
948 				 msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
949 }
950 
951 static void amt_event_send_request(struct amt_dev *amt)
952 {
953 	u32 exp;
954 
955 	if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
956 		goto out;
957 
958 	if (amt->req_cnt > AMT_MAX_REQ_COUNT) {
959 		netdev_dbg(amt->dev, "Gateway is not ready");
960 		amt->qi = AMT_INIT_REQ_TIMEOUT;
961 		WRITE_ONCE(amt->ready4, false);
962 		WRITE_ONCE(amt->ready6, false);
963 		amt->remote_ip = 0;
964 		amt_update_gw_status(amt, AMT_STATUS_INIT, false);
965 		amt->req_cnt = 0;
966 		amt->nonce = 0;
967 		goto out;
968 	}
969 
970 	if (!amt->req_cnt) {
971 		WRITE_ONCE(amt->ready4, false);
972 		WRITE_ONCE(amt->ready6, false);
973 		get_random_bytes(&amt->nonce, sizeof(__be32));
974 	}
975 
976 	amt_send_request(amt, false);
977 	amt_send_request(amt, true);
978 	amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
979 	amt->req_cnt++;
980 out:
981 	exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
982 	mod_delayed_work(amt_wq, &amt->req_wq, secs_to_jiffies(exp));
983 }
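/* Backoff example (illustrative): the retry interval doubles with each
 * unanswered request, i.e. exp = min(1 << req_cnt, AMT_MAX_REQ_TIMEOUT),
 * giving 1s, 2s, 4s, 8s, ... until the cap is reached or a relay answers.
 */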
984 
985 static void amt_req_work(struct work_struct *work)
986 {
987 	struct amt_dev *amt = container_of(to_delayed_work(work),
988 					   struct amt_dev,
989 					   req_wq);
990 
991 	if (amt_queue_event(amt, AMT_EVENT_SEND_REQUEST, NULL))
992 		mod_delayed_work(amt_wq, &amt->req_wq,
993 				 msecs_to_jiffies(100));
994 }
995 
996 static bool amt_send_membership_update(struct amt_dev *amt,
997 				       struct sk_buff *skb,
998 				       bool v6)
999 {
1000 	struct amt_header_membership_update *amtmu;
1001 	struct socket *sock;
1002 	struct iphdr *iph;
1003 	struct flowi4 fl4;
1004 	struct rtable *rt;
1005 	int err;
1006 
1007 	sock = rcu_dereference_bh(amt->sock);
1008 	if (!sock)
1009 		return true;
1010 
1011 	err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmu) +
1012 			   sizeof(*iph) + sizeof(struct udphdr));
1013 	if (err)
1014 		return true;
1015 
1016 	skb_reset_inner_headers(skb);
1017 	memset(&fl4, 0, sizeof(struct flowi4));
1018 	fl4.flowi4_oif         = amt->stream_dev->ifindex;
1019 	fl4.daddr              = amt->remote_ip;
1020 	fl4.saddr              = amt->local_ip;
1021 	fl4.flowi4_tos         = AMT_TOS;
1022 	fl4.flowi4_proto       = IPPROTO_UDP;
1023 	rt = ip_route_output_key(amt->net, &fl4);
1024 	if (IS_ERR(rt)) {
1025 		netdev_dbg(amt->dev, "no route to %pI4\n", &amt->remote_ip);
1026 		return true;
1027 	}
1028 
1029 	amtmu			= skb_push(skb, sizeof(*amtmu));
1030 	amtmu->version		= 0;
1031 	amtmu->type		= AMT_MSG_MEMBERSHIP_UPDATE;
1032 	amtmu->reserved		= 0;
1033 	amtmu->nonce		= amt->nonce;
1034 	amtmu->response_mac	= amt->mac;
1035 
1036 	if (!v6)
1037 		skb_set_inner_protocol(skb, htons(ETH_P_IP));
1038 	else
1039 		skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
1040 	udp_tunnel_xmit_skb(rt, sock->sk, skb,
1041 			    fl4.saddr,
1042 			    fl4.daddr,
1043 			    AMT_TOS,
1044 			    ip4_dst_hoplimit(&rt->dst),
1045 			    0,
1046 			    amt->gw_port,
1047 			    amt->relay_port,
1048 			    false,
1049 			    false,
1050 			    0);
1051 	amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true);
1052 	return false;
1053 }
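/* Encapsulation sketch (gateway -> relay, illustrative): the locally built
 * IGMP/MLD report is wrapped as
 *
 *   [ iphdr ][ udphdr ][ amt_header_membership_update ][ original report ]
 *
 * where nonce and response_mac echo the values learned from the relay's
 * membership query, allowing the relay to validate the update.
 */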
1054 
1055 static void amt_send_multicast_data(struct amt_dev *amt,
1056 				    const struct sk_buff *oskb,
1057 				    struct amt_tunnel_list *tunnel,
1058 				    bool v6)
1059 {
1060 	struct amt_header_mcast_data *amtmd;
1061 	struct socket *sock;
1062 	struct sk_buff *skb;
1063 	struct iphdr *iph;
1064 	struct flowi4 fl4;
1065 	struct rtable *rt;
1066 
1067 	sock = rcu_dereference_bh(amt->sock);
1068 	if (!sock)
1069 		return;
1070 
1071 	skb = skb_copy_expand(oskb, sizeof(*amtmd) + sizeof(*iph) +
1072 			      sizeof(struct udphdr), 0, GFP_ATOMIC);
1073 	if (!skb)
1074 		return;
1075 
1076 	skb_reset_inner_headers(skb);
1077 	memset(&fl4, 0, sizeof(struct flowi4));
1078 	fl4.flowi4_oif         = amt->stream_dev->ifindex;
1079 	fl4.daddr              = tunnel->ip4;
1080 	fl4.saddr              = amt->local_ip;
1081 	fl4.flowi4_proto       = IPPROTO_UDP;
1082 	rt = ip_route_output_key(amt->net, &fl4);
1083 	if (IS_ERR(rt)) {
1084 		netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
1085 		kfree_skb(skb);
1086 		return;
1087 	}
1088 
1089 	amtmd = skb_push(skb, sizeof(*amtmd));
1090 	amtmd->version = 0;
1091 	amtmd->reserved = 0;
1092 	amtmd->type = AMT_MSG_MULTICAST_DATA;
1093 
1094 	if (!v6)
1095 		skb_set_inner_protocol(skb, htons(ETH_P_IP));
1096 	else
1097 		skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
1098 	udp_tunnel_xmit_skb(rt, sock->sk, skb,
1099 			    fl4.saddr,
1100 			    fl4.daddr,
1101 			    AMT_TOS,
1102 			    ip4_dst_hoplimit(&rt->dst),
1103 			    0,
1104 			    amt->relay_port,
1105 			    tunnel->source_port,
1106 			    false,
1107 			    false,
1108 			    0);
1109 }
1110 
1111 static bool amt_send_membership_query(struct amt_dev *amt,
1112 				      struct sk_buff *skb,
1113 				      struct amt_tunnel_list *tunnel,
1114 				      bool v6)
1115 {
1116 	struct amt_header_membership_query *amtmq;
1117 	struct socket *sock;
1118 	struct rtable *rt;
1119 	struct flowi4 fl4;
1120 	int err;
1121 
1122 	sock = rcu_dereference_bh(amt->sock);
1123 	if (!sock)
1124 		return true;
1125 
1126 	err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmq) +
1127 			   sizeof(struct iphdr) + sizeof(struct udphdr));
1128 	if (err)
1129 		return true;
1130 
1131 	skb_reset_inner_headers(skb);
1132 	memset(&fl4, 0, sizeof(struct flowi4));
1133 	fl4.flowi4_oif         = amt->stream_dev->ifindex;
1134 	fl4.daddr              = tunnel->ip4;
1135 	fl4.saddr              = amt->local_ip;
1136 	fl4.flowi4_tos         = AMT_TOS;
1137 	fl4.flowi4_proto       = IPPROTO_UDP;
1138 	rt = ip_route_output_key(amt->net, &fl4);
1139 	if (IS_ERR(rt)) {
1140 		netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
1141 		return true;
1142 	}
1143 
1144 	amtmq		= skb_push(skb, sizeof(*amtmq));
1145 	amtmq->version	= 0;
1146 	amtmq->type	= AMT_MSG_MEMBERSHIP_QUERY;
1147 	amtmq->reserved = 0;
1148 	amtmq->l	= 0;
1149 	amtmq->g	= 0;
1150 	amtmq->nonce	= tunnel->nonce;
1151 	amtmq->response_mac = tunnel->mac;
1152 
1153 	if (!v6)
1154 		skb_set_inner_protocol(skb, htons(ETH_P_IP));
1155 	else
1156 		skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
1157 	udp_tunnel_xmit_skb(rt, sock->sk, skb,
1158 			    fl4.saddr,
1159 			    fl4.daddr,
1160 			    AMT_TOS,
1161 			    ip4_dst_hoplimit(&rt->dst),
1162 			    0,
1163 			    amt->relay_port,
1164 			    tunnel->source_port,
1165 			    false,
1166 			    false,
1167 			    0);
1168 	amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true);
1169 	return false;
1170 }
1171 
1172 static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
1173 {
1174 	struct amt_dev *amt = netdev_priv(dev);
1175 	struct amt_tunnel_list *tunnel;
1176 	struct amt_group_node *gnode;
1177 	union amt_addr group = {0,};
1178 #if IS_ENABLED(CONFIG_IPV6)
1179 	struct ipv6hdr *ip6h;
1180 	struct mld_msg *mld;
1181 #endif
1182 	bool report = false;
1183 	struct igmphdr *ih;
1184 	bool query = false;
1185 	struct iphdr *iph;
1186 	bool data = false;
1187 	bool v6 = false;
1188 	u32 hash;
1189 
1190 	iph = ip_hdr(skb);
1191 	if (iph->version == 4) {
1192 		if (!ipv4_is_multicast(iph->daddr))
1193 			goto free;
1194 
1195 		if (!ip_mc_check_igmp(skb)) {
1196 			ih = igmp_hdr(skb);
1197 			switch (ih->type) {
1198 			case IGMPV3_HOST_MEMBERSHIP_REPORT:
1199 			case IGMP_HOST_MEMBERSHIP_REPORT:
1200 				report = true;
1201 				break;
1202 			case IGMP_HOST_MEMBERSHIP_QUERY:
1203 				query = true;
1204 				break;
1205 			default:
1206 				goto free;
1207 			}
1208 		} else {
1209 			data = true;
1210 		}
1211 		v6 = false;
1212 		group.ip4 = iph->daddr;
1213 #if IS_ENABLED(CONFIG_IPV6)
1214 	} else if (iph->version == 6) {
1215 		ip6h = ipv6_hdr(skb);
1216 		if (!ipv6_addr_is_multicast(&ip6h->daddr))
1217 			goto free;
1218 
1219 		if (!ipv6_mc_check_mld(skb)) {
1220 			mld = (struct mld_msg *)skb_transport_header(skb);
1221 			switch (mld->mld_type) {
1222 			case ICMPV6_MGM_REPORT:
1223 			case ICMPV6_MLD2_REPORT:
1224 				report = true;
1225 				break;
1226 			case ICMPV6_MGM_QUERY:
1227 				query = true;
1228 				break;
1229 			default:
1230 				goto free;
1231 			}
1232 		} else {
1233 			data = true;
1234 		}
1235 		v6 = true;
1236 		group.ip6 = ip6h->daddr;
1237 #endif
1238 	} else {
1239 		dev->stats.tx_errors++;
1240 		goto free;
1241 	}
1242 
1243 	if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
1244 		goto free;
1245 
1246 	skb_pull(skb, sizeof(struct ethhdr));
1247 
1248 	if (amt->mode == AMT_MODE_GATEWAY) {
1249 		/* Gateway only passes IGMP/MLD packets */
1250 		if (!report)
1251 			goto free;
1252 		if ((!v6 && !READ_ONCE(amt->ready4)) ||
1253 		    (v6 && !READ_ONCE(amt->ready6)))
1254 			goto free;
1255 		if (amt_send_membership_update(amt, skb,  v6))
1256 			goto free;
1257 		goto unlock;
1258 	} else if (amt->mode == AMT_MODE_RELAY) {
1259 		if (query) {
1260 			tunnel = amt_skb_cb(skb)->tunnel;
1261 			if (!tunnel) {
1262 				WARN_ON(1);
1263 				goto free;
1264 			}
1265 
1266 			/* Do not forward unexpected query */
1267 			if (amt_send_membership_query(amt, skb, tunnel, v6))
1268 				goto free;
1269 			goto unlock;
1270 		}
1271 
1272 		if (!data)
1273 			goto free;
1274 		list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
1275 			hash = amt_group_hash(tunnel, &group);
1276 			hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash],
1277 						 node) {
1278 				if (!v6) {
1279 					if (gnode->group_addr.ip4 == iph->daddr)
1280 						goto found;
1281 #if IS_ENABLED(CONFIG_IPV6)
1282 				} else {
1283 					if (ipv6_addr_equal(&gnode->group_addr.ip6,
1284 							    &ip6h->daddr))
1285 						goto found;
1286 #endif
1287 				}
1288 			}
1289 			continue;
1290 found:
1291 			amt_send_multicast_data(amt, skb, tunnel, v6);
1292 		}
1293 	}
1294 
1295 	dev_kfree_skb(skb);
1296 	return NETDEV_TX_OK;
1297 free:
1298 	dev_kfree_skb(skb);
1299 unlock:
1300 	dev->stats.tx_dropped++;
1301 	return NETDEV_TX_OK;
1302 }
1303 
1304 static int amt_parse_type(struct sk_buff *skb)
1305 {
1306 	struct amt_header *amth;
1307 
1308 	if (!pskb_may_pull(skb, sizeof(struct udphdr) +
1309 			   sizeof(struct amt_header)))
1310 		return -1;
1311 
1312 	amth = (struct amt_header *)(udp_hdr(skb) + 1);
1313 
1314 	if (amth->version != 0)
1315 		return -1;
1316 
1317 	if (amth->type >= __AMT_MSG_MAX || !amth->type)
1318 		return -1;
1319 	return amth->type;
1320 }
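/* Header layout assumed by amt_parse_type() (illustrative):
 *
 *   [ struct udphdr ][ struct amt_header: version | type | ... ]
 *
 * Only version 0 is accepted, and the type must be one of the non-zero
 * message types listed in type_str[] above.
 */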
1321 
1322 static void amt_clear_groups(struct amt_tunnel_list *tunnel)
1323 {
1324 	struct amt_dev *amt = tunnel->amt;
1325 	struct amt_group_node *gnode;
1326 	struct hlist_node *t;
1327 	int i;
1328 
1329 	spin_lock_bh(&tunnel->lock);
1330 	rcu_read_lock();
1331 	for (i = 0; i < amt->hash_buckets; i++)
1332 		hlist_for_each_entry_safe(gnode, t, &tunnel->groups[i], node)
1333 			amt_del_group(amt, gnode);
1334 	rcu_read_unlock();
1335 	spin_unlock_bh(&tunnel->lock);
1336 }
1337 
1338 static void amt_tunnel_expire(struct work_struct *work)
1339 {
1340 	struct amt_tunnel_list *tunnel = container_of(to_delayed_work(work),
1341 						      struct amt_tunnel_list,
1342 						      gc_wq);
1343 	struct amt_dev *amt = tunnel->amt;
1344 
1345 	spin_lock_bh(&amt->lock);
1346 	rcu_read_lock();
1347 	list_del_rcu(&tunnel->list);
1348 	amt->nr_tunnels--;
1349 	amt_clear_groups(tunnel);
1350 	rcu_read_unlock();
1351 	spin_unlock_bh(&amt->lock);
1352 	kfree_rcu(tunnel, rcu);
1353 }
1354 
1355 static void amt_cleanup_srcs(struct amt_dev *amt,
1356 			     struct amt_tunnel_list *tunnel,
1357 			     struct amt_group_node *gnode)
1358 {
1359 	struct amt_source_node *snode;
1360 	struct hlist_node *t;
1361 	int i;
1362 
1363 	/* Delete old sources */
1364 	for (i = 0; i < amt->hash_buckets; i++) {
1365 		hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) {
1366 			if (snode->flags == AMT_SOURCE_OLD)
1367 				amt_destroy_source(snode);
1368 		}
1369 	}
1370 
1371 	/* switch from new to old */
1372 	for (i = 0; i < amt->hash_buckets; i++)  {
1373 		hlist_for_each_entry_rcu(snode, &gnode->sources[i], node) {
1374 			snode->flags = AMT_SOURCE_OLD;
1375 			if (!gnode->v6)
1376 				netdev_dbg(snode->gnode->amt->dev,
1377 					   "Add source as OLD %pI4 from %pI4\n",
1378 					   &snode->source_addr.ip4,
1379 					   &gnode->group_addr.ip4);
1380 #if IS_ENABLED(CONFIG_IPV6)
1381 			else
1382 				netdev_dbg(snode->gnode->amt->dev,
1383 					   "Add source as OLD %pI6 from %pI6\n",
1384 					   &snode->source_addr.ip6,
1385 					   &gnode->group_addr.ip6);
1386 #endif
1387 		}
1388 	}
1389 }
1390 
1391 static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel,
1392 			 struct amt_group_node *gnode, void *grec,
1393 			 bool v6)
1394 {
1395 	struct igmpv3_grec *igmp_grec;
1396 	struct amt_source_node *snode;
1397 #if IS_ENABLED(CONFIG_IPV6)
1398 	struct mld2_grec *mld_grec;
1399 #endif
1400 	union amt_addr src = {0,};
1401 	u16 nsrcs;
1402 	u32 hash;
1403 	int i;
1404 
1405 	if (!v6) {
1406 		igmp_grec = grec;
1407 		nsrcs = ntohs(igmp_grec->grec_nsrcs);
1408 	} else {
1409 #if IS_ENABLED(CONFIG_IPV6)
1410 		mld_grec = grec;
1411 		nsrcs = ntohs(mld_grec->grec_nsrcs);
1412 #else
1413 	return;
1414 #endif
1415 	}
1416 	for (i = 0; i < nsrcs; i++) {
1417 		if (tunnel->nr_sources >= amt->max_sources)
1418 			return;
1419 		if (!v6)
1420 			src.ip4 = igmp_grec->grec_src[i];
1421 #if IS_ENABLED(CONFIG_IPV6)
1422 		else
1423 			memcpy(&src.ip6, &mld_grec->grec_src[i],
1424 			       sizeof(struct in6_addr));
1425 #endif
1426 		if (amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL, &src))
1427 			continue;
1428 
1429 		snode = amt_alloc_snode(gnode, &src);
1430 		if (snode) {
1431 			hash = amt_source_hash(tunnel, &snode->source_addr);
1432 			hlist_add_head_rcu(&snode->node, &gnode->sources[hash]);
1433 			tunnel->nr_sources++;
1434 			gnode->nr_sources++;
1435 
1436 			if (!gnode->v6)
1437 				netdev_dbg(snode->gnode->amt->dev,
1438 					   "Add source as NEW %pI4 from %pI4\n",
1439 					   &snode->source_addr.ip4,
1440 					   &gnode->group_addr.ip4);
1441 #if IS_ENABLED(CONFIG_IPV6)
1442 			else
1443 				netdev_dbg(snode->gnode->amt->dev,
1444 					   "Add source as NEW %pI6 from %pI6\n",
1445 					   &snode->source_addr.ip6,
1446 					   &gnode->group_addr.ip6);
1447 #endif
1448 		}
1449 	}
1450 }
1451 
1452 /* Router State   Report Rec'd New Router State
1453  * ------------   ------------ ----------------
1454  * EXCLUDE (X,Y)  IS_IN (A)    EXCLUDE (X+A,Y-A)
1455  *
1456  * -----------+-----------+-----------+
1457  *            |    OLD    |    NEW    |
1458  * -----------+-----------+-----------+
1459  *    FWD     |     X     |    X+A    |
1460  * -----------+-----------+-----------+
1461  *    D_FWD   |     Y     |    Y-A    |
1462  * -----------+-----------+-----------+
1463  *    NONE    |           |     A     |
1464  * -----------+-----------+-----------+
1465  *
1466  * a) Received sources are NONE/NEW
1467  * b) All NONE will be deleted by amt_cleanup_srcs().
1468  * c) All OLD will be deleted by amt_cleanup_srcs().
1469  * d) After delete, NEW source will be switched to OLD.
1470  */
1471 static void amt_lookup_act_srcs(struct amt_tunnel_list *tunnel,
1472 				struct amt_group_node *gnode,
1473 				void *grec,
1474 				enum amt_ops ops,
1475 				enum amt_filter filter,
1476 				enum amt_act act,
1477 				bool v6)
1478 {
1479 	struct amt_dev *amt = tunnel->amt;
1480 	struct amt_source_node *snode;
1481 	struct igmpv3_grec *igmp_grec;
1482 #if IS_ENABLED(CONFIG_IPV6)
1483 	struct mld2_grec *mld_grec;
1484 #endif
1485 	union amt_addr src = {0,};
1486 	struct hlist_node *t;
1487 	u16 nsrcs;
1488 	int i, j;
1489 
1490 	if (!v6) {
1491 		igmp_grec = grec;
1492 		nsrcs = ntohs(igmp_grec->grec_nsrcs);
1493 	} else {
1494 #if IS_ENABLED(CONFIG_IPV6)
1495 		mld_grec = grec;
1496 		nsrcs = ntohs(mld_grec->grec_nsrcs);
1497 #else
1498 	return;
1499 #endif
1500 	}
1501 
1502 	memset(&src, 0, sizeof(union amt_addr));
1503 	switch (ops) {
1504 	case AMT_OPS_INT:
1505 		/* A*B */
1506 		for (i = 0; i < nsrcs; i++) {
1507 			if (!v6)
1508 				src.ip4 = igmp_grec->grec_src[i];
1509 #if IS_ENABLED(CONFIG_IPV6)
1510 			else
1511 				memcpy(&src.ip6, &mld_grec->grec_src[i],
1512 				       sizeof(struct in6_addr));
1513 #endif
1514 			snode = amt_lookup_src(tunnel, gnode, filter, &src);
1515 			if (!snode)
1516 				continue;
1517 			amt_act_src(tunnel, gnode, snode, act);
1518 		}
1519 		break;
1520 	case AMT_OPS_UNI:
1521 		/* A+B */
1522 		for (i = 0; i < amt->hash_buckets; i++) {
1523 			hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
1524 						  node) {
1525 				if (amt_status_filter(snode, filter))
1526 					amt_act_src(tunnel, gnode, snode, act);
1527 			}
1528 		}
1529 		for (i = 0; i < nsrcs; i++) {
1530 			if (!v6)
1531 				src.ip4 = igmp_grec->grec_src[i];
1532 #if IS_ENABLED(CONFIG_IPV6)
1533 			else
1534 				memcpy(&src.ip6, &mld_grec->grec_src[i],
1535 				       sizeof(struct in6_addr));
1536 #endif
1537 			snode = amt_lookup_src(tunnel, gnode, filter, &src);
1538 			if (!snode)
1539 				continue;
1540 			amt_act_src(tunnel, gnode, snode, act);
1541 		}
1542 		break;
1543 	case AMT_OPS_SUB:
1544 		/* A-B */
1545 		for (i = 0; i < amt->hash_buckets; i++) {
1546 			hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
1547 						  node) {
1548 				if (!amt_status_filter(snode, filter))
1549 					continue;
1550 				for (j = 0; j < nsrcs; j++) {
1551 					if (!v6)
1552 						src.ip4 = igmp_grec->grec_src[j];
1553 #if IS_ENABLED(CONFIG_IPV6)
1554 					else
1555 						memcpy(&src.ip6,
1556 						       &mld_grec->grec_src[j],
1557 						       sizeof(struct in6_addr));
1558 #endif
1559 					if (amt_addr_equal(&snode->source_addr,
1560 							   &src))
1561 						goto out_sub;
1562 				}
1563 				amt_act_src(tunnel, gnode, snode, act);
1564 				continue;
1565 out_sub:;
1566 			}
1567 		}
1568 		break;
1569 	case AMT_OPS_SUB_REV:
1570 		/* B-A */
1571 		for (i = 0; i < nsrcs; i++) {
1572 			if (!v6)
1573 				src.ip4 = igmp_grec->grec_src[i];
1574 #if IS_ENABLED(CONFIG_IPV6)
1575 			else
1576 				memcpy(&src.ip6, &mld_grec->grec_src[i],
1577 				       sizeof(struct in6_addr));
1578 #endif
1579 			snode = amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL,
1580 					       &src);
1581 			if (!snode) {
1582 				snode = amt_lookup_src(tunnel, gnode,
1583 						       filter, &src);
1584 				if (snode)
1585 					amt_act_src(tunnel, gnode, snode, act);
1586 			}
1587 		}
1588 		break;
1589 	default:
1590 		netdev_dbg(amt->dev, "Invalid type\n");
1591 		return;
1592 	}
1593 }
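/* Worked example (hypothetical addresses): let the group currently track
 * sources A = {s1, s2} matching 'filter', and let the report record carry
 * B = {s2, s3}.  Then, per the A*B / A+B / A-B / B-A comments above:
 *
 *   AMT_OPS_INT      acts on s2          (A*B)
 *   AMT_OPS_UNI      acts on s1 and s2   (A+B, limited to tracked entries)
 *   AMT_OPS_SUB      acts on s1          (A-B)
 *   AMT_OPS_SUB_REV  targets the B-A part (record sources handled analogously)
 *
 * "acts on" means amt_act_src() is called with the requested 'act'.
 */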
1594 
1595 static void amt_mcast_is_in_handler(struct amt_dev *amt,
1596 				    struct amt_tunnel_list *tunnel,
1597 				    struct amt_group_node *gnode,
1598 				    void *grec, void *zero_grec, bool v6)
1599 {
1600 	if (gnode->filter_mode == MCAST_INCLUDE) {
1601 /* Router State   Report Rec'd New Router State        Actions
1602  * ------------   ------------ ----------------        -------
1603  * INCLUDE (A)    IS_IN (B)    INCLUDE (A+B)           (B)=GMI
1604  */
1605 		/* Update IS_IN (B) as FWD/NEW */
1606 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1607 				    AMT_FILTER_NONE_NEW,
1608 				    AMT_ACT_STATUS_FWD_NEW,
1609 				    v6);
1610 		/* Update INCLUDE (A) as NEW */
1611 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1612 				    AMT_FILTER_FWD,
1613 				    AMT_ACT_STATUS_FWD_NEW,
1614 				    v6);
1615 		/* (B)=GMI */
1616 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1617 				    AMT_FILTER_FWD_NEW,
1618 				    AMT_ACT_GMI,
1619 				    v6);
1620 	} else {
1621 /* Router State   Report Rec'd New Router State        Actions
1622  * ------------   ------------ ----------------        -------
1623  * EXCLUDE (X,Y)  IS_IN (A)    EXCLUDE (X+A,Y-A)       (A)=GMI
1624  */
1625 		/* Update (A) in (X, Y) as NONE/NEW */
1626 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1627 				    AMT_FILTER_BOTH,
1628 				    AMT_ACT_STATUS_NONE_NEW,
1629 				    v6);
1630 		/* Update FWD/OLD as FWD/NEW */
1631 		amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1632 				    AMT_FILTER_FWD,
1633 				    AMT_ACT_STATUS_FWD_NEW,
1634 				    v6);
1635 		/* Update IS_IN (A) as FWD/NEW */
1636 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1637 				    AMT_FILTER_NONE_NEW,
1638 				    AMT_ACT_STATUS_FWD_NEW,
1639 				    v6);
1640 		/* Update EXCLUDE (, Y-A) as D_FWD_NEW */
1641 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
1642 				    AMT_FILTER_D_FWD,
1643 				    AMT_ACT_STATUS_D_FWD_NEW,
1644 				    v6);
1645 	}
1646 }
1647 
1648 static void amt_mcast_is_ex_handler(struct amt_dev *amt,
1649 				    struct amt_tunnel_list *tunnel,
1650 				    struct amt_group_node *gnode,
1651 				    void *grec, void *zero_grec, bool v6)
1652 {
1653 	if (gnode->filter_mode == MCAST_INCLUDE) {
1654 /* Router State   Report Rec'd  New Router State         Actions
1655  * ------------   ------------  ----------------         -------
1656  * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A)        (B-A)=0
1657  *                                                       Delete (A-B)
1658  *                                                       Group Timer=GMI
1659  */
1660 		/* EXCLUDE(A*B, ) */
1661 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1662 				    AMT_FILTER_FWD,
1663 				    AMT_ACT_STATUS_FWD_NEW,
1664 				    v6);
1665 		/* EXCLUDE(, B-A) */
1666 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1667 				    AMT_FILTER_FWD,
1668 				    AMT_ACT_STATUS_D_FWD_NEW,
1669 				    v6);
1670 		/* (B-A)=0 */
1671 		amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1672 				    AMT_FILTER_D_FWD_NEW,
1673 				    AMT_ACT_GMI_ZERO,
1674 				    v6);
1675 		/* Group Timer=GMI */
1676 		if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1677 				      msecs_to_jiffies(amt_gmi(amt))))
1678 			dev_hold(amt->dev);
1679 		gnode->filter_mode = MCAST_EXCLUDE;
1680 		/* Delete (A-B) will be worked by amt_cleanup_srcs(). */
1681 	} else {
1682 /* Router State   Report Rec'd  New Router State	Actions
1683  * ------------   ------------  ----------------	-------
1684  * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A)	(A-X-Y)=GMI
1685  *							Delete (X-A)
1686  *							Delete (Y-A)
1687  *							Group Timer=GMI
1688  */
1689 		/* EXCLUDE (A-Y, ) */
1690 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1691 				    AMT_FILTER_D_FWD,
1692 				    AMT_ACT_STATUS_FWD_NEW,
1693 				    v6);
1694 		/* EXCLUDE (, Y*A ) */
1695 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1696 				    AMT_FILTER_D_FWD,
1697 				    AMT_ACT_STATUS_D_FWD_NEW,
1698 				    v6);
1699 		/* (A-X-Y)=GMI */
1700 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1701 				    AMT_FILTER_BOTH_NEW,
1702 				    AMT_ACT_GMI,
1703 				    v6);
1704 		/* Group Timer=GMI */
1705 		if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1706 				      msecs_to_jiffies(amt_gmi(amt))))
1707 			dev_hold(amt->dev);
1708 		/* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */
1709 	}
1710 }
1711 
1712 static void amt_mcast_to_in_handler(struct amt_dev *amt,
1713 				    struct amt_tunnel_list *tunnel,
1714 				    struct amt_group_node *gnode,
1715 				    void *grec, void *zero_grec, bool v6)
1716 {
1717 	if (gnode->filter_mode == MCAST_INCLUDE) {
1718 /* Router State   Report Rec'd New Router State        Actions
1719  * ------------   ------------ ----------------        -------
1720  * INCLUDE (A)    TO_IN (B)    INCLUDE (A+B)           (B)=GMI
1721  *						       Send Q(G,A-B)
1722  */
1723 		/* Update TO_IN (B) sources as FWD/NEW */
1724 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1725 				    AMT_FILTER_NONE_NEW,
1726 				    AMT_ACT_STATUS_FWD_NEW,
1727 				    v6);
1728 		/* Update INCLUDE (A) sources as NEW */
1729 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1730 				    AMT_FILTER_FWD,
1731 				    AMT_ACT_STATUS_FWD_NEW,
1732 				    v6);
1733 		/* (B)=GMI */
1734 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1735 				    AMT_FILTER_FWD_NEW,
1736 				    AMT_ACT_GMI,
1737 				    v6);
1738 	} else {
1739 /* Router State   Report Rec'd New Router State        Actions
1740  * ------------   ------------ ----------------        -------
1741  * EXCLUDE (X,Y)  TO_IN (A)    EXCLUDE (X+A,Y-A)       (A)=GMI
1742  *						       Send Q(G,X-A)
1743  *						       Send Q(G)
1744  */
1745 		/* Update TO_IN (A) sources as FWD/NEW */
1746 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1747 				    AMT_FILTER_NONE_NEW,
1748 				    AMT_ACT_STATUS_FWD_NEW,
1749 				    v6);
1750 		/* Update EXCLUDE(X,) sources as FWD/NEW */
1751 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1752 				    AMT_FILTER_FWD,
1753 				    AMT_ACT_STATUS_FWD_NEW,
1754 				    v6);
1755 		/* EXCLUDE (, Y-A)
1756 		 * (A) sources are already switched to FWD_NEW,
1757 		 * so D_FWD/OLD -> D_FWD/NEW is okay.
1758 		 */
1759 		amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1760 				    AMT_FILTER_D_FWD,
1761 				    AMT_ACT_STATUS_D_FWD_NEW,
1762 				    v6);
1763 		/* (A)=GMI
1764 		 * Only FWD_NEW will have (A) sources.
1765 		 */
1766 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1767 				    AMT_FILTER_FWD_NEW,
1768 				    AMT_ACT_GMI,
1769 				    v6);
1770 	}
1771 }
1772 
1773 static void amt_mcast_to_ex_handler(struct amt_dev *amt,
1774 				    struct amt_tunnel_list *tunnel,
1775 				    struct amt_group_node *gnode,
1776 				    void *grec, void *zero_grec, bool v6)
1777 {
1778 	if (gnode->filter_mode == MCAST_INCLUDE) {
1779 /* Router State   Report Rec'd New Router State        Actions
1780  * ------------   ------------ ----------------        -------
1781  * INCLUDE (A)    TO_EX (B)    EXCLUDE (A*B,B-A)       (B-A)=0
1782  *						       Delete (A-B)
1783  *						       Send Q(G,A*B)
1784  *						       Group Timer=GMI
1785  */
1786 		/* EXCLUDE (A*B, ) */
1787 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1788 				    AMT_FILTER_FWD,
1789 				    AMT_ACT_STATUS_FWD_NEW,
1790 				    v6);
1791 		/* EXCLUDE (, B-A) */
1792 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1793 				    AMT_FILTER_FWD,
1794 				    AMT_ACT_STATUS_D_FWD_NEW,
1795 				    v6);
1796 		/* (B-A)=0 */
1797 		amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1798 				    AMT_FILTER_D_FWD_NEW,
1799 				    AMT_ACT_GMI_ZERO,
1800 				    v6);
1801 		/* Group Timer=GMI */
1802 		if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1803 				      msecs_to_jiffies(amt_gmi(amt))))
1804 			dev_hold(amt->dev);
1805 		gnode->filter_mode = MCAST_EXCLUDE;
1806 		/* Delete (A-B) will be worked by amt_cleanup_srcs(). */
1807 	} else {
1808 /* Router State   Report Rec'd New Router State        Actions
1809  * ------------   ------------ ----------------        -------
1810  * EXCLUDE (X,Y)  TO_EX (A)    EXCLUDE (A-Y,Y*A)       (A-X-Y)=Group Timer
1811  *						       Delete (X-A)
1812  *						       Delete (Y-A)
1813  *						       Send Q(G,A-Y)
1814  *						       Group Timer=GMI
1815  */
1816 		/* Update (A-X-Y) as NONE/OLD */
1817 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1818 				    AMT_FILTER_BOTH,
1819 				    AMT_ACT_GT,
1820 				    v6);
1821 		/* EXCLUDE (A-Y, ) */
1822 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1823 				    AMT_FILTER_D_FWD,
1824 				    AMT_ACT_STATUS_FWD_NEW,
1825 				    v6);
1826 		/* EXCLUDE (, Y*A) */
1827 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1828 				    AMT_FILTER_D_FWD,
1829 				    AMT_ACT_STATUS_D_FWD_NEW,
1830 				    v6);
1831 		/* Group Timer=GMI */
1832 		if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1833 				      msecs_to_jiffies(amt_gmi(amt))))
1834 			dev_hold(amt->dev);
1835 		/* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */
1836 	}
1837 }
1838 
1839 static void amt_mcast_allow_handler(struct amt_dev *amt,
1840 				    struct amt_tunnel_list *tunnel,
1841 				    struct amt_group_node *gnode,
1842 				    void *grec, void *zero_grec, bool v6)
1843 {
1844 	if (gnode->filter_mode == MCAST_INCLUDE) {
1845 /* Router State   Report Rec'd New Router State        Actions
1846  * ------------   ------------ ----------------        -------
1847  * INCLUDE (A)    ALLOW (B)    INCLUDE (A+B)	       (B)=GMI
1848  */
1849 		/* INCLUDE (A+B) */
1850 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1851 				    AMT_FILTER_FWD,
1852 				    AMT_ACT_STATUS_FWD_NEW,
1853 				    v6);
1854 		/* (B)=GMI */
1855 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1856 				    AMT_FILTER_FWD_NEW,
1857 				    AMT_ACT_GMI,
1858 				    v6);
1859 	} else {
1860 /* Router State   Report Rec'd New Router State        Actions
1861  * ------------   ------------ ----------------        -------
1862  * EXCLUDE (X,Y)  ALLOW (A)    EXCLUDE (X+A,Y-A)       (A)=GMI
1863  */
1864 		/* EXCLUDE (X+A, ) */
1865 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1866 				    AMT_FILTER_FWD,
1867 				    AMT_ACT_STATUS_FWD_NEW,
1868 				    v6);
1869 		/* EXCLUDE (, Y-A) */
1870 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
1871 				    AMT_FILTER_D_FWD,
1872 				    AMT_ACT_STATUS_D_FWD_NEW,
1873 				    v6);
1874 		/* (A)=GMI
1875 		 * All (A) sources are now in FWD/NEW status.
1876 		 */
1877 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1878 				    AMT_FILTER_FWD_NEW,
1879 				    AMT_ACT_GMI,
1880 				    v6);
1881 	}
1882 }
1883 
1884 static void amt_mcast_block_handler(struct amt_dev *amt,
1885 				    struct amt_tunnel_list *tunnel,
1886 				    struct amt_group_node *gnode,
1887 				    void *grec, void *zero_grec, bool v6)
1888 {
1889 	if (gnode->filter_mode == MCAST_INCLUDE) {
1890 /* Router State   Report Rec'd New Router State        Actions
1891  * ------------   ------------ ----------------        -------
1892  * INCLUDE (A)    BLOCK (B)    INCLUDE (A)             Send Q(G,A*B)
1893  */
1894 		/* INCLUDE (A) */
1895 		amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1896 				    AMT_FILTER_FWD,
1897 				    AMT_ACT_STATUS_FWD_NEW,
1898 				    v6);
1899 	} else {
1900 /* Router State   Report Rec'd New Router State        Actions
1901  * ------------   ------------ ----------------        -------
1902  * EXCLUDE (X,Y)  BLOCK (A)    EXCLUDE (X+(A-Y),Y)     (A-X-Y)=Group Timer
1903  *						       Send Q(G,A-Y)
1904  */
1905 		/* (A-X-Y)=Group Timer */
1906 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1907 				    AMT_FILTER_BOTH,
1908 				    AMT_ACT_GT,
1909 				    v6);
1910 		/* EXCLUDE (X, ) */
1911 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1912 				    AMT_FILTER_FWD,
1913 				    AMT_ACT_STATUS_FWD_NEW,
1914 				    v6);
1915 		/* EXCLUDE (X+(A-Y), ) */
1916 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1917 				    AMT_FILTER_D_FWD,
1918 				    AMT_ACT_STATUS_FWD_NEW,
1919 				    v6);
1920 		/* EXCLUDE (, Y) */
1921 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1922 				    AMT_FILTER_D_FWD,
1923 				    AMT_ACT_STATUS_D_FWD_NEW,
1924 				    v6);
1925 	}
1926 }
1927 
1928 /* RFC 3376
1929  * 7.3.2. In the Presence of Older Version Group Members
1930  *
1931  * When Group Compatibility Mode is IGMPv2, a router internally
1932  * translates the following IGMPv2 messages for that group to their
1933  * IGMPv3 equivalents:
1934  *
1935  * IGMPv2 Message                IGMPv3 Equivalent
1936  * --------------                -----------------
1937  * Report                        IS_EX( {} )
1938  * Leave                         TO_IN( {} )
1939  */
1940 static void amt_igmpv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
1941 				      struct amt_tunnel_list *tunnel)
1942 {
1943 	struct igmphdr *ih = igmp_hdr(skb);
1944 	struct iphdr *iph = ip_hdr(skb);
1945 	struct amt_group_node *gnode;
1946 	union amt_addr group, host;
1947 
1948 	memset(&group, 0, sizeof(union amt_addr));
1949 	group.ip4 = ih->group;
1950 	memset(&host, 0, sizeof(union amt_addr));
1951 	host.ip4 = iph->saddr;
1952 
1953 	gnode = amt_lookup_group(tunnel, &group, &host, false);
1954 	if (!gnode) {
1955 		gnode = amt_add_group(amt, tunnel, &group, &host, false);
1956 		if (!IS_ERR(gnode)) {
1957 			gnode->filter_mode = MCAST_EXCLUDE;
1958 			if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1959 					      msecs_to_jiffies(amt_gmi(amt))))
1960 				dev_hold(amt->dev);
1961 		}
1962 	}
1963 }
1964 
1965 /* RFC 3376
1966  * 7.3.2. In the Presence of Older Version Group Members
1967  *
1968  * When Group Compatibility Mode is IGMPv2, a router internally
1969  * translates the following IGMPv2 messages for that group to their
1970  * IGMPv3 equivalents:
1971  *
1972  * IGMPv2 Message                IGMPv3 Equivalent
1973  * --------------                -----------------
1974  * Report                        IS_EX( {} )
1975  * Leave                         TO_IN( {} )
1976  */
1977 static void amt_igmpv2_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
1978 				     struct amt_tunnel_list *tunnel)
1979 {
1980 	struct igmphdr *ih = igmp_hdr(skb);
1981 	struct iphdr *iph = ip_hdr(skb);
1982 	struct amt_group_node *gnode;
1983 	union amt_addr group, host;
1984 
1985 	memset(&group, 0, sizeof(union amt_addr));
1986 	group.ip4 = ih->group;
1987 	memset(&host, 0, sizeof(union amt_addr));
1988 	host.ip4 = iph->saddr;
1989 
1990 	gnode = amt_lookup_group(tunnel, &group, &host, false);
1991 	if (gnode)
1992 		amt_del_group(amt, gnode);
1993 }
1994 
1995 static void amt_igmpv3_report_handler(struct amt_dev *amt, struct sk_buff *skb,
1996 				      struct amt_tunnel_list *tunnel)
1997 {
1998 	struct igmpv3_report *ihrv3 = igmpv3_report_hdr(skb);
1999 	int len = skb_transport_offset(skb) + sizeof(*ihrv3);
2000 	void *zero_grec = (void *)&igmpv3_zero_grec;
2001 	struct iphdr *iph = ip_hdr(skb);
2002 	struct amt_group_node *gnode;
2003 	union amt_addr group, host;
2004 	struct igmpv3_grec *grec;
2005 	u16 nsrcs;
2006 	int i;
2007 
2008 	for (i = 0; i < ntohs(ihrv3->ngrec); i++) {
2009 		len += sizeof(*grec);
2010 		if (!ip_mc_may_pull(skb, len))
2011 			break;
2012 
2013 		grec = (void *)(skb->data + len - sizeof(*grec));
2014 		nsrcs = ntohs(grec->grec_nsrcs);
2015 
2016 		len += nsrcs * sizeof(__be32);
2017 		if (!ip_mc_may_pull(skb, len))
2018 			break;
2019 
2020 		memset(&group, 0, sizeof(union amt_addr));
2021 		group.ip4 = grec->grec_mca;
2022 		memset(&host, 0, sizeof(union amt_addr));
2023 		host.ip4 = iph->saddr;
2024 		gnode = amt_lookup_group(tunnel, &group, &host, false);
2025 		if (!gnode) {
2026 			gnode = amt_add_group(amt, tunnel, &group, &host,
2027 					      false);
2028 			if (IS_ERR(gnode))
2029 				continue;
2030 		}
2031 
2032 		amt_add_srcs(amt, tunnel, gnode, grec, false);
2033 		switch (grec->grec_type) {
2034 		case IGMPV3_MODE_IS_INCLUDE:
2035 			amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
2036 						zero_grec, false);
2037 			break;
2038 		case IGMPV3_MODE_IS_EXCLUDE:
2039 			amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
2040 						zero_grec, false);
2041 			break;
2042 		case IGMPV3_CHANGE_TO_INCLUDE:
2043 			amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
2044 						zero_grec, false);
2045 			break;
2046 		case IGMPV3_CHANGE_TO_EXCLUDE:
2047 			amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
2048 						zero_grec, false);
2049 			break;
2050 		case IGMPV3_ALLOW_NEW_SOURCES:
2051 			amt_mcast_allow_handler(amt, tunnel, gnode, grec,
2052 						zero_grec, false);
2053 			break;
2054 		case IGMPV3_BLOCK_OLD_SOURCES:
2055 			amt_mcast_block_handler(amt, tunnel, gnode, grec,
2056 						zero_grec, false);
2057 			break;
2058 		default:
2059 			break;
2060 		}
2061 		amt_cleanup_srcs(amt, tunnel, gnode);
2062 	}
2063 }
2064 
2065 /* Caller must hold tunnel->lock. */
2066 static void amt_igmp_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2067 				    struct amt_tunnel_list *tunnel)
2068 {
2069 	struct igmphdr *ih = igmp_hdr(skb);
2070 
2071 	switch (ih->type) {
2072 	case IGMPV3_HOST_MEMBERSHIP_REPORT:
2073 		amt_igmpv3_report_handler(amt, skb, tunnel);
2074 		break;
2075 	case IGMPV2_HOST_MEMBERSHIP_REPORT:
2076 		amt_igmpv2_report_handler(amt, skb, tunnel);
2077 		break;
2078 	case IGMP_HOST_LEAVE_MESSAGE:
2079 		amt_igmpv2_leave_handler(amt, skb, tunnel);
2080 		break;
2081 	default:
2082 		break;
2083 	}
2084 }
2085 
2086 #if IS_ENABLED(CONFIG_IPV6)
2087 /* RFC 3810
2088  * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
2089  *
2090  * When Multicast Address Compatibility Mode is MLDv2, a router acts
2091  * using the MLDv2 protocol for that multicast address.  When Multicast
2092  * Address Compatibility Mode is MLDv1, a router internally translates
2093  * the following MLDv1 messages for that multicast address to their
2094  * MLDv2 equivalents:
2095  *
2096  * MLDv1 Message                 MLDv2 Equivalent
2097  * --------------                -----------------
2098  * Report                        IS_EX( {} )
2099  * Done                          TO_IN( {} )
2100  */
2101 static void amt_mldv1_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2102 				     struct amt_tunnel_list *tunnel)
2103 {
2104 	struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
2105 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
2106 	struct amt_group_node *gnode;
2107 	union amt_addr group, host;
2108 
2109 	memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
2110 	memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));
2111 
2112 	gnode = amt_lookup_group(tunnel, &group, &host, true);
2113 	if (!gnode) {
2114 		gnode = amt_add_group(amt, tunnel, &group, &host, true);
2115 		if (!IS_ERR(gnode)) {
2116 			gnode->filter_mode = MCAST_EXCLUDE;
2117 			if (!mod_delayed_work(amt_wq, &gnode->group_timer,
2118 					      msecs_to_jiffies(amt_gmi(amt))))
2119 				dev_hold(amt->dev);
2120 		}
2121 	}
2122 }
2123 
2124 /* RFC 3810
2125  * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
2126  *
2127  * When Multicast Address Compatibility Mode is MLDv2, a router acts
2128  * using the MLDv2 protocol for that multicast address.  When Multicast
2129  * Address Compatibility Mode is MLDv1, a router internally translates
2130  * the following MLDv1 messages for that multicast address to their
2131  * MLDv2 equivalents:
2132  *
2133  * MLDv1 Message                 MLDv2 Equivalent
2134  * --------------                -----------------
2135  * Report                        IS_EX( {} )
2136  * Done                          TO_IN( {} )
2137  */
2138 static void amt_mldv1_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
2139 				    struct amt_tunnel_list *tunnel)
2140 {
2141 	struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
2142 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
2143 	struct amt_group_node *gnode;
2144 	union amt_addr group, host;
2145 
2146 	memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
2147 	memset(&host, 0, sizeof(union amt_addr));
2148 	memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));
2149 
2150 	gnode = amt_lookup_group(tunnel, &group, &host, true);
2151 	if (gnode) {
2152 		amt_del_group(amt, gnode);
2153 		return;
2154 	}
2155 }
2156 
2157 static void amt_mldv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2158 				     struct amt_tunnel_list *tunnel)
2159 {
2160 	struct mld2_report *mld2r = (struct mld2_report *)icmp6_hdr(skb);
2161 	int len = skb_transport_offset(skb) + sizeof(*mld2r);
2162 	void *zero_grec = (void *)&mldv2_zero_grec;
2163 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
2164 	struct amt_group_node *gnode;
2165 	union amt_addr group, host;
2166 	struct mld2_grec *grec;
2167 	u16 nsrcs;
2168 	int i;
2169 
2170 	for (i = 0; i < ntohs(mld2r->mld2r_ngrec); i++) {
2171 		len += sizeof(*grec);
2172 		if (!ipv6_mc_may_pull(skb, len))
2173 			break;
2174 
2175 		grec = (void *)(skb->data + len - sizeof(*grec));
2176 		nsrcs = ntohs(grec->grec_nsrcs);
2177 
2178 		len += nsrcs * sizeof(struct in6_addr);
2179 		if (!ipv6_mc_may_pull(skb, len))
2180 			break;
2181 
2182 		memset(&group, 0, sizeof(union amt_addr));
2183 		group.ip6 = grec->grec_mca;
2184 		memset(&host, 0, sizeof(union amt_addr));
2185 		host.ip6 = ip6h->saddr;
2186 		gnode = amt_lookup_group(tunnel, &group, &host, true);
2187 		if (!gnode) {
2188 			gnode = amt_add_group(amt, tunnel, &group, &host,
2189 					      true);
2190 			if (IS_ERR(gnode))
2191 				continue;
2192 		}
2193 
2194 		amt_add_srcs(amt, tunnel, gnode, grec, true);
2195 		switch (grec->grec_type) {
2196 		case MLD2_MODE_IS_INCLUDE:
2197 			amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
2198 						zero_grec, true);
2199 			break;
2200 		case MLD2_MODE_IS_EXCLUDE:
2201 			amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
2202 						zero_grec, true);
2203 			break;
2204 		case MLD2_CHANGE_TO_INCLUDE:
2205 			amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
2206 						zero_grec, true);
2207 			break;
2208 		case MLD2_CHANGE_TO_EXCLUDE:
2209 			amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
2210 						zero_grec, true);
2211 			break;
2212 		case MLD2_ALLOW_NEW_SOURCES:
2213 			amt_mcast_allow_handler(amt, tunnel, gnode, grec,
2214 						zero_grec, true);
2215 			break;
2216 		case MLD2_BLOCK_OLD_SOURCES:
2217 			amt_mcast_block_handler(amt, tunnel, gnode, grec,
2218 						zero_grec, true);
2219 			break;
2220 		default:
2221 			break;
2222 		}
2223 		amt_cleanup_srcs(amt, tunnel, gnode);
2224 	}
2225 }
2226 
2227 /* Caller must hold tunnel->lock. */
2228 static void amt_mld_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2229 				   struct amt_tunnel_list *tunnel)
2230 {
2231 	struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
2232 
2233 	switch (mld->mld_type) {
2234 	case ICMPV6_MGM_REPORT:
2235 		amt_mldv1_report_handler(amt, skb, tunnel);
2236 		break;
2237 	case ICMPV6_MLD2_REPORT:
2238 		amt_mldv2_report_handler(amt, skb, tunnel);
2239 		break;
2240 	case ICMPV6_MGM_REDUCTION:
2241 		amt_mldv1_leave_handler(amt, skb, tunnel);
2242 		break;
2243 	default:
2244 		break;
2245 	}
2246 }
2247 #endif
2248 
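/* Gateway-side handling of an AMT Advertisement (type 2).  The header,
 * nonce and advertised relay address are validated, the relay address is
 * recorded and the Request worker is scheduled immediately.  As with the
 * other rx handlers below, returning true tells the caller to drop the
 * skb; false means it was accepted.
 */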
2249 static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
2250 {
2251 	struct amt_header_advertisement *amta;
2252 	int hdr_size;
2253 
2254 	hdr_size = sizeof(*amta) + sizeof(struct udphdr);
2255 	if (!pskb_may_pull(skb, hdr_size))
2256 		return true;
2257 
2258 	amta = (struct amt_header_advertisement *)(udp_hdr(skb) + 1);
2259 	if (!amta->ip4)
2260 		return true;
2261 
2262 	if (amta->reserved || amta->version)
2263 		return true;
2264 
2265 	if (ipv4_is_loopback(amta->ip4) || ipv4_is_multicast(amta->ip4) ||
2266 	    ipv4_is_zeronet(amta->ip4))
2267 		return true;
2268 
2269 	if (amt->status != AMT_STATUS_SENT_DISCOVERY ||
2270 	    amt->nonce != amta->nonce)
2271 		return true;
2272 
2273 	amt->remote_ip = amta->ip4;
2274 	netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
2275 	mod_delayed_work(amt_wq, &amt->req_wq, 0);
2276 
2277 	amt_update_gw_status(amt, AMT_STATUS_RECEIVED_ADVERTISEMENT, true);
2278 	return false;
2279 }
2280 
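/* Gateway-side handling of AMT Multicast Data (type 6), accepted only
 * while the gateway is in SENT_UPDATE state.  The outer UDP/AMT
 * encapsulation is stripped, the inner IPv4/IPv6 destination is mapped
 * to an Ethernet multicast address, and the frame is fed to the GRO
 * cells of the AMT device.  Returns true when the caller must drop.
 */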
2281 static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
2282 {
2283 	struct amt_header_mcast_data *amtmd;
2284 	int hdr_size, len, err;
2285 	struct ethhdr *eth;
2286 	struct iphdr *iph;
2287 
2288 	if (READ_ONCE(amt->status) != AMT_STATUS_SENT_UPDATE)
2289 		return true;
2290 
2291 	hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
2292 	if (!pskb_may_pull(skb, hdr_size))
2293 		return true;
2294 
2295 	amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1);
2296 	if (amtmd->reserved || amtmd->version)
2297 		return true;
2298 
2299 	if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false))
2300 		return true;
2301 
2302 	skb_reset_network_header(skb);
2303 	skb_push(skb, sizeof(*eth));
2304 	skb_reset_mac_header(skb);
2305 	skb_pull(skb, sizeof(*eth));
2306 	eth = eth_hdr(skb);
2307 
2308 	if (!pskb_may_pull(skb, sizeof(*iph)))
2309 		return true;
2310 	iph = ip_hdr(skb);
2311 
2312 	if (iph->version == 4) {
2313 		if (!ipv4_is_multicast(iph->daddr))
2314 			return true;
2315 		skb->protocol = htons(ETH_P_IP);
2316 		eth->h_proto = htons(ETH_P_IP);
2317 		ip_eth_mc_map(iph->daddr, eth->h_dest);
2318 #if IS_ENABLED(CONFIG_IPV6)
2319 	} else if (iph->version == 6) {
2320 		struct ipv6hdr *ip6h;
2321 
2322 		if (!pskb_may_pull(skb, sizeof(*ip6h)))
2323 			return true;
2324 
2325 		ip6h = ipv6_hdr(skb);
2326 		if (!ipv6_addr_is_multicast(&ip6h->daddr))
2327 			return true;
2328 		skb->protocol = htons(ETH_P_IPV6);
2329 		eth->h_proto = htons(ETH_P_IPV6);
2330 		ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
2331 #endif
2332 	} else {
2333 		return true;
2334 	}
2335 
2336 	skb->pkt_type = PACKET_MULTICAST;
2337 	skb->ip_summed = CHECKSUM_NONE;
2338 	len = skb->len;
2339 	err = gro_cells_receive(&amt->gro_cells, skb);
2340 	if (likely(err == NET_RX_SUCCESS))
2341 		dev_sw_netstats_rx_add(amt->dev, len);
2342 	else
2343 		amt->dev->stats.rx_dropped++;
2344 
2345 	return false;
2346 }
2347 
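/* Gateway-side handling of an AMT Membership Query (type 4).  After the
 * nonce check, the response MAC and QQIC advertised by the relay are
 * cached for later Membership Updates and the inner IGMPv3/MLDv2 general
 * query is passed to the local stack via __netif_rx() so that the kernel
 * generates the corresponding reports on the AMT device.
 */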
2348 static bool amt_membership_query_handler(struct amt_dev *amt,
2349 					 struct sk_buff *skb)
2350 {
2351 	struct amt_header_membership_query *amtmq;
2352 	struct igmpv3_query *ihv3;
2353 	struct ethhdr *eth, *oeth;
2354 	struct iphdr *iph;
2355 	int hdr_size, len;
2356 
2357 	hdr_size = sizeof(*amtmq) + sizeof(struct udphdr);
2358 	if (!pskb_may_pull(skb, hdr_size))
2359 		return true;
2360 
2361 	amtmq = (struct amt_header_membership_query *)(udp_hdr(skb) + 1);
2362 	if (amtmq->reserved || amtmq->version)
2363 		return true;
2364 
2365 	if (amtmq->nonce != amt->nonce)
2366 		return true;
2367 
2368 	hdr_size -= sizeof(*eth);
2369 	if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
2370 		return true;
2371 
2372 	oeth = eth_hdr(skb);
2373 	skb_reset_mac_header(skb);
2374 	skb_pull(skb, sizeof(*eth));
2375 	skb_reset_network_header(skb);
2376 	eth = eth_hdr(skb);
2377 	if (!pskb_may_pull(skb, sizeof(*iph)))
2378 		return true;
2379 
2380 	iph = ip_hdr(skb);
2381 	if (iph->version == 4) {
2382 		if (READ_ONCE(amt->ready4))
2383 			return true;
2384 
2385 		if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
2386 				   sizeof(*ihv3)))
2387 			return true;
2388 
2389 		if (!ipv4_is_multicast(iph->daddr))
2390 			return true;
2391 
2392 		ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
2393 		skb_reset_transport_header(skb);
2394 		skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
2395 		WRITE_ONCE(amt->ready4, true);
2396 		amt->mac = amtmq->response_mac;
2397 		amt->req_cnt = 0;
2398 		amt->qi = ihv3->qqic;
2399 		skb->protocol = htons(ETH_P_IP);
2400 		eth->h_proto = htons(ETH_P_IP);
2401 		ip_eth_mc_map(iph->daddr, eth->h_dest);
2402 #if IS_ENABLED(CONFIG_IPV6)
2403 	} else if (iph->version == 6) {
2404 		struct mld2_query *mld2q;
2405 		struct ipv6hdr *ip6h;
2406 
2407 		if (READ_ONCE(amt->ready6))
2408 			return true;
2409 
2410 		if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
2411 				   sizeof(*mld2q)))
2412 			return true;
2413 
2414 		ip6h = ipv6_hdr(skb);
2415 		if (!ipv6_addr_is_multicast(&ip6h->daddr))
2416 			return true;
2417 
2418 		mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
2419 		skb_reset_transport_header(skb);
2420 		skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
2421 		WRITE_ONCE(amt->ready6, true);
2422 		amt->mac = amtmq->response_mac;
2423 		amt->req_cnt = 0;
2424 		amt->qi = mld2q->mld2q_qqic;
2425 		skb->protocol = htons(ETH_P_IPV6);
2426 		eth->h_proto = htons(ETH_P_IPV6);
2427 		ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
2428 #endif
2429 	} else {
2430 		return true;
2431 	}
2432 
2433 	ether_addr_copy(eth->h_source, oeth->h_source);
2434 	skb->pkt_type = PACKET_MULTICAST;
2435 	skb->ip_summed = CHECKSUM_NONE;
2436 	len = skb->len;
2437 	local_bh_disable();
2438 	if (__netif_rx(skb) == NET_RX_SUCCESS) {
2439 		amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
2440 		dev_sw_netstats_rx_add(amt->dev, len);
2441 	} else {
2442 		amt->dev->stats.rx_dropped++;
2443 	}
2444 	local_bh_enable();
2445 
2446 	return false;
2447 }
2448 
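/* Relay-side handling of an AMT Membership Update (type 5).  The source
 * must match a known tunnel and carry the nonce and response MAC handed
 * out with the Query; on success the tunnel GC timer is re-armed, the
 * inner IGMP/MLD report is run through the report handlers above, and
 * the report is then looped back to the local stack.
 */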
2449 static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
2450 {
2451 	struct amt_header_membership_update *amtmu;
2452 	struct amt_tunnel_list *tunnel;
2453 	struct ethhdr *eth;
2454 	struct iphdr *iph;
2455 	int len, hdr_size;
2456 
2457 	iph = ip_hdr(skb);
2458 
2459 	hdr_size = sizeof(*amtmu) + sizeof(struct udphdr);
2460 	if (!pskb_may_pull(skb, hdr_size))
2461 		return true;
2462 
2463 	amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1);
2464 	if (amtmu->reserved || amtmu->version)
2465 		return true;
2466 
2467 	if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false))
2468 		return true;
2469 
2470 	skb_reset_network_header(skb);
2471 
2472 	list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
2473 		if (tunnel->ip4 == iph->saddr) {
2474 			if (amtmu->nonce == tunnel->nonce &&
2475 			    amtmu->response_mac == tunnel->mac) {
2476 				mod_delayed_work(amt_wq, &tunnel->gc_wq,
2477 						 msecs_to_jiffies(amt_gmi(amt))
2478 								  * 3);
2479 				goto report;
2480 			} else {
2481 				netdev_dbg(amt->dev, "Invalid MAC\n");
2482 				return true;
2483 			}
2484 		}
2485 	}
2486 
2487 	return true;
2488 
2489 report:
2490 	if (!pskb_may_pull(skb, sizeof(*iph)))
2491 		return true;
2492 
2493 	iph = ip_hdr(skb);
2494 	if (iph->version == 4) {
2495 		if (ip_mc_check_igmp(skb)) {
2496 			netdev_dbg(amt->dev, "Invalid IGMP\n");
2497 			return true;
2498 		}
2499 
2500 		spin_lock_bh(&tunnel->lock);
2501 		amt_igmp_report_handler(amt, skb, tunnel);
2502 		spin_unlock_bh(&tunnel->lock);
2503 
2504 		skb_push(skb, sizeof(struct ethhdr));
2505 		skb_reset_mac_header(skb);
2506 		eth = eth_hdr(skb);
2507 		skb->protocol = htons(ETH_P_IP);
2508 		eth->h_proto = htons(ETH_P_IP);
2509 		ip_eth_mc_map(iph->daddr, eth->h_dest);
2510 #if IS_ENABLED(CONFIG_IPV6)
2511 	} else if (iph->version == 6) {
2512 		struct ipv6hdr *ip6h = ipv6_hdr(skb);
2513 
2514 		if (ipv6_mc_check_mld(skb)) {
2515 			netdev_dbg(amt->dev, "Invalid MLD\n");
2516 			return true;
2517 		}
2518 
2519 		spin_lock_bh(&tunnel->lock);
2520 		amt_mld_report_handler(amt, skb, tunnel);
2521 		spin_unlock_bh(&tunnel->lock);
2522 
2523 		skb_push(skb, sizeof(struct ethhdr));
2524 		skb_reset_mac_header(skb);
2525 		eth = eth_hdr(skb);
2526 		skb->protocol = htons(ETH_P_IPV6);
2527 		eth->h_proto = htons(ETH_P_IPV6);
2528 		ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
2529 #endif
2530 	} else {
2531 		netdev_dbg(amt->dev, "Unsupported Protocol\n");
2532 		return true;
2533 	}
2534 
2535 	skb_pull(skb, sizeof(struct ethhdr));
2536 	skb->pkt_type = PACKET_MULTICAST;
2537 	skb->ip_summed = CHECKSUM_NONE;
2538 	len = skb->len;
2539 	if (__netif_rx(skb) == NET_RX_SUCCESS) {
2540 		amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
2541 					true);
2542 		dev_sw_netstats_rx_add(amt->dev, len);
2543 	} else {
2544 		amt->dev->stats.rx_dropped++;
2545 	}
2546 
2547 	return false;
2548 }
2549 
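/* Relay-side reply to an AMT Discovery: build and send an Advertisement
 * carrying the requester's nonce and our local unicast address.  The
 * packet built below is laid out as IP | UDP | struct
 * amt_header_advertisement, with the UDP checksum computed manually.
 */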
2550 static void amt_send_advertisement(struct amt_dev *amt, __be32 nonce,
2551 				   __be32 daddr, __be16 dport)
2552 {
2553 	struct amt_header_advertisement *amta;
2554 	int hlen, tlen, offset;
2555 	struct socket *sock;
2556 	struct udphdr *udph;
2557 	struct sk_buff *skb;
2558 	struct iphdr *iph;
2559 	struct rtable *rt;
2560 	struct flowi4 fl4;
2561 	u32 len;
2562 	int err;
2563 
2564 	rcu_read_lock();
2565 	sock = rcu_dereference(amt->sock);
2566 	if (!sock)
2567 		goto out;
2568 
2569 	if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
2570 		goto out;
2571 
2572 	rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
2573 				   daddr, amt->local_ip,
2574 				   dport, amt->relay_port,
2575 				   IPPROTO_UDP, 0,
2576 				   amt->stream_dev->ifindex);
2577 	if (IS_ERR(rt)) {
2578 		amt->dev->stats.tx_errors++;
2579 		goto out;
2580 	}
2581 
2582 	hlen = LL_RESERVED_SPACE(amt->dev);
2583 	tlen = amt->dev->needed_tailroom;
2584 	len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
2585 	skb = netdev_alloc_skb_ip_align(amt->dev, len);
2586 	if (!skb) {
2587 		ip_rt_put(rt);
2588 		amt->dev->stats.tx_errors++;
2589 		goto out;
2590 	}
2591 
2592 	skb->priority = TC_PRIO_CONTROL;
2593 	skb_dst_set(skb, &rt->dst);
2594 
2595 	len = sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
2596 	skb_reset_network_header(skb);
2597 	skb_put(skb, len);
2598 	amta = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
2599 	amta->version	= 0;
2600 	amta->type	= AMT_MSG_ADVERTISEMENT;
2601 	amta->reserved	= 0;
2602 	amta->nonce	= nonce;
2603 	amta->ip4	= amt->local_ip;
2604 	skb_push(skb, sizeof(*udph));
2605 	skb_reset_transport_header(skb);
2606 	udph		= udp_hdr(skb);
2607 	udph->source	= amt->relay_port;
2608 	udph->dest	= dport;
2609 	udph->len	= htons(sizeof(*amta) + sizeof(*udph));
2610 	udph->check	= 0;
2611 	offset = skb_transport_offset(skb);
2612 	skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
2613 	udph->check = csum_tcpudp_magic(amt->local_ip, daddr,
2614 					sizeof(*udph) + sizeof(*amta),
2615 					IPPROTO_UDP, skb->csum);
2616 
2617 	skb_push(skb, sizeof(*iph));
2618 	iph		= ip_hdr(skb);
2619 	iph->version	= 4;
2620 	iph->ihl	= (sizeof(struct iphdr)) >> 2;
2621 	iph->tos	= AMT_TOS;
2622 	iph->frag_off	= 0;
2623 	iph->ttl	= ip4_dst_hoplimit(&rt->dst);
2624 	iph->daddr	= daddr;
2625 	iph->saddr	= amt->local_ip;
2626 	iph->protocol	= IPPROTO_UDP;
2627 	iph->tot_len	= htons(len);
2628 
2629 	skb->ip_summed = CHECKSUM_NONE;
2630 	ip_select_ident(amt->net, skb, NULL);
2631 	ip_send_check(iph);
2632 	err = ip_local_out(amt->net, sock->sk, skb);
2633 	if (unlikely(net_xmit_eval(err)))
2634 		amt->dev->stats.tx_errors++;
2635 
2636 out:
2637 	rcu_read_unlock();
2638 }
2639 
2640 static bool amt_discovery_handler(struct amt_dev *amt, struct sk_buff *skb)
2641 {
2642 	struct amt_header_discovery *amtd;
2643 	struct udphdr *udph;
2644 	struct iphdr *iph;
2645 
2646 	if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtd)))
2647 		return true;
2648 
2649 	iph = ip_hdr(skb);
2650 	udph = udp_hdr(skb);
2651 	amtd = (struct amt_header_discovery *)(udp_hdr(skb) + 1);
2652 
2653 	if (amtd->reserved || amtd->version)
2654 		return true;
2655 
2656 	amt_send_advertisement(amt, amtd->nonce, iph->saddr, udph->source);
2657 
2658 	return false;
2659 }
2660 
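/* Relay-side handling of an AMT Request (type 3).  A tunnel entry is
 * created (or an existing one reused) for the requesting gateway, a
 * response MAC is derived from the tunnel address, source port and nonce
 * via siphash, and a general query (IGMP or MLD, depending on the P flag
 * in the Request header) is sent back.
 */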
2661 static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
2662 {
2663 	struct amt_header_request *amtrh;
2664 	struct amt_tunnel_list *tunnel;
2665 	unsigned long long key;
2666 	struct udphdr *udph;
2667 	struct iphdr *iph;
2668 	u64 mac;
2669 	int i;
2670 
2671 	if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtrh)))
2672 		return true;
2673 
2674 	iph = ip_hdr(skb);
2675 	udph = udp_hdr(skb);
2676 	amtrh = (struct amt_header_request *)(udp_hdr(skb) + 1);
2677 
2678 	if (amtrh->reserved1 || amtrh->reserved2 || amtrh->version)
2679 		return true;
2680 
2681 	list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list)
2682 		if (tunnel->ip4 == iph->saddr)
2683 			goto send;
2684 
2685 	spin_lock_bh(&amt->lock);
2686 	if (amt->nr_tunnels >= amt->max_tunnels) {
2687 		spin_unlock_bh(&amt->lock);
2688 		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
2689 		return true;
2690 	}
2691 
2692 	tunnel = kzalloc(sizeof(*tunnel) +
2693 			 (sizeof(struct hlist_head) * amt->hash_buckets),
2694 			 GFP_ATOMIC);
2695 	if (!tunnel) {
2696 		spin_unlock_bh(&amt->lock);
2697 		return true;
2698 	}
2699 
2700 	tunnel->source_port = udph->source;
2701 	tunnel->ip4 = iph->saddr;
2702 
2703 	memcpy(&key, &tunnel->key, sizeof(unsigned long long));
2704 	tunnel->amt = amt;
2705 	spin_lock_init(&tunnel->lock);
2706 	for (i = 0; i < amt->hash_buckets; i++)
2707 		INIT_HLIST_HEAD(&tunnel->groups[i]);
2708 
2709 	INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);
2710 
2711 	list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
2712 	tunnel->key = amt->key;
2713 	__amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
2714 	amt->nr_tunnels++;
2715 	mod_delayed_work(amt_wq, &tunnel->gc_wq,
2716 			 msecs_to_jiffies(amt_gmi(amt)));
2717 	spin_unlock_bh(&amt->lock);
2718 
2719 send:
2720 	tunnel->nonce = amtrh->nonce;
2721 	mac = siphash_3u32((__force u32)tunnel->ip4,
2722 			   (__force u32)tunnel->source_port,
2723 			   (__force u32)tunnel->nonce,
2724 			   &tunnel->key);
2725 	tunnel->mac = mac >> 16;
2726 
2727 	if (!netif_running(amt->dev) || !netif_running(amt->stream_dev))
2728 		return true;
2729 
2730 	if (!amtrh->p)
2731 		amt_send_igmp_gq(amt, tunnel);
2732 	else
2733 		amt_send_mld_gq(amt, tunnel);
2734 
2735 	return false;
2736 }
2737 
2738 static void amt_gw_rcv(struct amt_dev *amt, struct sk_buff *skb)
2739 {
2740 	int type = amt_parse_type(skb);
2741 	int err = 1;
2742 
2743 	if (type == -1)
2744 		goto drop;
2745 
2746 	if (amt->mode == AMT_MODE_GATEWAY) {
2747 		switch (type) {
2748 		case AMT_MSG_ADVERTISEMENT:
2749 			err = amt_advertisement_handler(amt, skb);
2750 			break;
2751 		case AMT_MSG_MEMBERSHIP_QUERY:
2752 			err = amt_membership_query_handler(amt, skb);
2753 			if (!err)
2754 				return;
2755 			break;
2756 		default:
2757 			netdev_dbg(amt->dev, "Invalid type of Gateway\n");
2758 			break;
2759 		}
2760 	}
2761 drop:
2762 	if (err) {
2763 		amt->dev->stats.rx_dropped++;
2764 		kfree_skb(skb);
2765 	} else {
2766 		consume_skb(skb);
2767 	}
2768 }
2769 
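/* UDP tunnel encap_rcv callback.  In gateway mode the source address is
 * checked against the expected discovery/relay address and control
 * messages are deferred to the event workqueue, while multicast data is
 * handled inline; in relay mode Discovery, Request and Membership Update
 * are handled inline.  Always returns 0: the skb is queued, consumed or
 * freed here.
 */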
2770 static int amt_rcv(struct sock *sk, struct sk_buff *skb)
2771 {
2772 	struct amt_dev *amt;
2773 	struct iphdr *iph;
2774 	int type;
2775 	bool err;
2776 
2777 	rcu_read_lock_bh();
2778 	amt = rcu_dereference_sk_user_data(sk);
2779 	if (!amt) {
2780 		err = true;
2781 		kfree_skb(skb);
2782 		goto out;
2783 	}
2784 
2785 	skb->dev = amt->dev;
2786 	iph = ip_hdr(skb);
2787 	type = amt_parse_type(skb);
2788 	if (type == -1) {
2789 		err = true;
2790 		goto drop;
2791 	}
2792 
2793 	if (amt->mode == AMT_MODE_GATEWAY) {
2794 		switch (type) {
2795 		case AMT_MSG_ADVERTISEMENT:
2796 			if (iph->saddr != amt->discovery_ip) {
2797 				netdev_dbg(amt->dev, "Invalid Relay IP\n");
2798 				err = true;
2799 				goto drop;
2800 			}
2801 			if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
2802 				netdev_dbg(amt->dev, "AMT Event queue full\n");
2803 				err = true;
2804 				goto drop;
2805 			}
2806 			goto out;
2807 		case AMT_MSG_MULTICAST_DATA:
2808 			if (iph->saddr != amt->remote_ip) {
2809 				netdev_dbg(amt->dev, "Invalid Relay IP\n");
2810 				err = true;
2811 				goto drop;
2812 			}
2813 			err = amt_multicast_data_handler(amt, skb);
2814 			if (err)
2815 				goto drop;
2816 			else
2817 				goto out;
2818 		case AMT_MSG_MEMBERSHIP_QUERY:
2819 			if (iph->saddr != amt->remote_ip) {
2820 				netdev_dbg(amt->dev, "Invalid Relay IP\n");
2821 				err = true;
2822 				goto drop;
2823 			}
2824 			if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
2825 				netdev_dbg(amt->dev, "AMT Event queue full\n");
2826 				err = true;
2827 				goto drop;
2828 			}
2829 			goto out;
2830 		default:
2831 			err = true;
2832 			netdev_dbg(amt->dev, "Invalid type of Gateway\n");
2833 			break;
2834 		}
2835 	} else {
2836 		switch (type) {
2837 		case AMT_MSG_DISCOVERY:
2838 			err = amt_discovery_handler(amt, skb);
2839 			break;
2840 		case AMT_MSG_REQUEST:
2841 			err = amt_request_handler(amt, skb);
2842 			break;
2843 		case AMT_MSG_MEMBERSHIP_UPDATE:
2844 			err = amt_update_handler(amt, skb);
2845 			if (err)
2846 				goto drop;
2847 			else
2848 				goto out;
2849 		default:
2850 			err = true;
2851 			netdev_dbg(amt->dev, "Invalid type of relay\n");
2852 			break;
2853 		}
2854 	}
2855 drop:
2856 	if (err) {
2857 		amt->dev->stats.rx_dropped++;
2858 		kfree_skb(skb);
2859 	} else {
2860 		consume_skb(skb);
2861 	}
2862 out:
2863 	rcu_read_unlock_bh();
2864 	return 0;
2865 }
2866 
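/* Drain the per-device event ring filled by amt_queue_event().  Entries
 * are consumed under amt->lock, at most AMT_MAX_EVENTS per invocation,
 * and dispatched to the receive, discovery and request helpers.
 */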
2867 static void amt_event_work(struct work_struct *work)
2868 {
2869 	struct amt_dev *amt = container_of(work, struct amt_dev, event_wq);
2870 	struct sk_buff *skb;
2871 	u8 event;
2872 	int i;
2873 
2874 	for (i = 0; i < AMT_MAX_EVENTS; i++) {
2875 		spin_lock_bh(&amt->lock);
2876 		if (amt->nr_events == 0) {
2877 			spin_unlock_bh(&amt->lock);
2878 			return;
2879 		}
2880 		event = amt->events[amt->event_idx].event;
2881 		skb = amt->events[amt->event_idx].skb;
2882 		amt->events[amt->event_idx].event = AMT_EVENT_NONE;
2883 		amt->events[amt->event_idx].skb = NULL;
2884 		amt->nr_events--;
2885 		amt->event_idx++;
2886 		amt->event_idx %= AMT_MAX_EVENTS;
2887 		spin_unlock_bh(&amt->lock);
2888 
2889 		switch (event) {
2890 		case AMT_EVENT_RECEIVE:
2891 			amt_gw_rcv(amt, skb);
2892 			break;
2893 		case AMT_EVENT_SEND_DISCOVERY:
2894 			amt_event_send_discovery(amt);
2895 			break;
2896 		case AMT_EVENT_SEND_REQUEST:
2897 			amt_event_send_request(amt);
2898 			break;
2899 		default:
2900 			kfree_skb(skb);
2901 			break;
2902 		}
2903 	}
2904 }
2905 
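/* UDP tunnel encap_err_lookup callback, invoked for ICMP errors received
 * on the AMT socket.  In gateway mode, an unreachable Request or
 * Membership Update restarts the request work so the relay handshake is
 * retried.
 */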
2906 static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
2907 {
2908 	struct amt_dev *amt;
2909 	int type;
2910 
2911 	rcu_read_lock_bh();
2912 	amt = rcu_dereference_sk_user_data(sk);
2913 	if (!amt)
2914 		goto out;
2915 
2916 	if (amt->mode != AMT_MODE_GATEWAY)
2917 		goto drop;
2918 
2919 	type = amt_parse_type(skb);
2920 	if (type == -1)
2921 		goto drop;
2922 
2923 	netdev_dbg(amt->dev, "Received ICMP Unreachable of %s\n",
2924 		   type_str[type]);
2925 	switch (type) {
2926 	case AMT_MSG_DISCOVERY:
2927 		break;
2928 	case AMT_MSG_REQUEST:
2929 	case AMT_MSG_MEMBERSHIP_UPDATE:
2930 		if (READ_ONCE(amt->status) >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
2931 			mod_delayed_work(amt_wq, &amt->req_wq, 0);
2932 		break;
2933 	default:
2934 		goto drop;
2935 	}
2936 out:
2937 	rcu_read_unlock_bh();
2938 	return 0;
2939 drop:
2940 	rcu_read_unlock_bh();
2941 	amt->dev->stats.rx_dropped++;
2942 	return 0;
2943 }
2944 
2945 static struct socket *amt_create_sock(struct net *net, __be16 port)
2946 {
2947 	struct udp_port_cfg udp_conf;
2948 	struct socket *sock;
2949 	int err;
2950 
2951 	memset(&udp_conf, 0, sizeof(udp_conf));
2952 	udp_conf.family = AF_INET;
2953 	udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
2954 
2955 	udp_conf.local_udp_port = port;
2956 
2957 	err = udp_sock_create(net, &udp_conf, &sock);
2958 	if (err < 0)
2959 		return ERR_PTR(err);
2960 
2961 	return sock;
2962 }
2963 
2964 static int amt_socket_create(struct amt_dev *amt)
2965 {
2966 	struct udp_tunnel_sock_cfg tunnel_cfg;
2967 	struct socket *sock;
2968 
2969 	sock = amt_create_sock(amt->net, amt->relay_port);
2970 	if (IS_ERR(sock))
2971 		return PTR_ERR(sock);
2972 
2973 	/* Mark socket as an encapsulation socket */
2974 	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
2975 	tunnel_cfg.sk_user_data = amt;
2976 	tunnel_cfg.encap_type = 1;
2977 	tunnel_cfg.encap_rcv = amt_rcv;
2978 	tunnel_cfg.encap_err_lookup = amt_err_lookup;
2979 	tunnel_cfg.encap_destroy = NULL;
2980 	setup_udp_tunnel_sock(amt->net, sock, &tunnel_cfg);
2981 
2982 	rcu_assign_pointer(amt->sock, sock);
2983 	return 0;
2984 }
2985 
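/* ndo_open: create the UDP encap socket and start the per-mode work,
 * i.e. discovery and request work in gateway mode, or periodic secret
 * (SipHash key) rotation in relay mode.
 */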
2986 static int amt_dev_open(struct net_device *dev)
2987 {
2988 	struct amt_dev *amt = netdev_priv(dev);
2989 	int err;
2990 
2991 	amt->ready4 = false;
2992 	amt->ready6 = false;
2993 	amt->event_idx = 0;
2994 	amt->nr_events = 0;
2995 
2996 	err = amt_socket_create(amt);
2997 	if (err)
2998 		return err;
2999 
3000 	amt->req_cnt = 0;
3001 	amt->remote_ip = 0;
3002 	amt->nonce = 0;
3003 	get_random_bytes(&amt->key, sizeof(siphash_key_t));
3004 
3005 	amt->status = AMT_STATUS_INIT;
3006 	if (amt->mode == AMT_MODE_GATEWAY) {
3007 		mod_delayed_work(amt_wq, &amt->discovery_wq, 0);
3008 		mod_delayed_work(amt_wq, &amt->req_wq, 0);
3009 	} else if (amt->mode == AMT_MODE_RELAY) {
3010 		mod_delayed_work(amt_wq, &amt->secret_wq,
3011 				 msecs_to_jiffies(AMT_SECRET_TIMEOUT));
3012 	}
3013 	return err;
3014 }
3015 
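/* ndo_stop: cancel the delayed works, detach and release the UDP socket,
 * flush any queued events and tear down all relay tunnels together with
 * their group state.
 */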
3016 static int amt_dev_stop(struct net_device *dev)
3017 {
3018 	struct amt_dev *amt = netdev_priv(dev);
3019 	struct amt_tunnel_list *tunnel, *tmp;
3020 	struct socket *sock;
3021 	struct sk_buff *skb;
3022 	int i;
3023 
3024 	cancel_delayed_work_sync(&amt->req_wq);
3025 	cancel_delayed_work_sync(&amt->discovery_wq);
3026 	cancel_delayed_work_sync(&amt->secret_wq);
3027 
3028 	/* shutdown */
3029 	sock = rtnl_dereference(amt->sock);
3030 	RCU_INIT_POINTER(amt->sock, NULL);
3031 	synchronize_net();
3032 	if (sock)
3033 		udp_tunnel_sock_release(sock);
3034 
3035 	cancel_work_sync(&amt->event_wq);
3036 	for (i = 0; i < AMT_MAX_EVENTS; i++) {
3037 		skb = amt->events[i].skb;
3038 		kfree_skb(skb);
3039 		amt->events[i].event = AMT_EVENT_NONE;
3040 		amt->events[i].skb = NULL;
3041 	}
3042 
3043 	amt->ready4 = false;
3044 	amt->ready6 = false;
3045 	amt->req_cnt = 0;
3046 	amt->remote_ip = 0;
3047 
3048 	list_for_each_entry_safe(tunnel, tmp, &amt->tunnel_list, list) {
3049 		list_del_rcu(&tunnel->list);
3050 		amt->nr_tunnels--;
3051 		cancel_delayed_work_sync(&tunnel->gc_wq);
3052 		amt_clear_groups(tunnel);
3053 		kfree_rcu(tunnel, rcu);
3054 	}
3055 
3056 	return 0;
3057 }
3058 
3059 static const struct device_type amt_type = {
3060 	.name = "amt",
3061 };
3062 
3063 static int amt_dev_init(struct net_device *dev)
3064 {
3065 	struct amt_dev *amt = netdev_priv(dev);
3066 	int err;
3067 
3068 	amt->dev = dev;
3069 
3070 	err = gro_cells_init(&amt->gro_cells, dev);
3071 	if (err)
3072 		return err;
3073 
3074 	return 0;
3075 }
3076 
3077 static void amt_dev_uninit(struct net_device *dev)
3078 {
3079 	struct amt_dev *amt = netdev_priv(dev);
3080 
3081 	gro_cells_destroy(&amt->gro_cells);
3082 }
3083 
3084 static const struct net_device_ops amt_netdev_ops = {
3085 	.ndo_init               = amt_dev_init,
3086 	.ndo_uninit             = amt_dev_uninit,
3087 	.ndo_open		= amt_dev_open,
3088 	.ndo_stop		= amt_dev_stop,
3089 	.ndo_start_xmit         = amt_dev_xmit,
3090 };
3091 
3092 static void amt_link_setup(struct net_device *dev)
3093 {
3094 	dev->netdev_ops         = &amt_netdev_ops;
3095 	dev->needs_free_netdev  = true;
3096 	SET_NETDEV_DEVTYPE(dev, &amt_type);
3097 	dev->min_mtu		= ETH_MIN_MTU;
3098 	dev->max_mtu		= ETH_MAX_MTU;
3099 	dev->type		= ARPHRD_NONE;
3100 	dev->flags		= IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
3101 	dev->hard_header_len	= 0;
3102 	dev->addr_len		= 0;
3103 	dev->priv_flags		|= IFF_NO_QUEUE;
3104 	dev->lltx		= true;
3105 	dev->netns_immutable	= true;
3106 	dev->features		|= NETIF_F_GSO_SOFTWARE;
3107 	dev->hw_features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
3108 	dev->hw_features	|= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
3109 	dev->hw_features	|= NETIF_F_GSO_SOFTWARE;
3110 	dev->pcpu_stat_type	= NETDEV_PCPU_STAT_TSTATS;
3111 	eth_hw_addr_random(dev);
3112 	eth_zero_addr(dev->broadcast);
3113 	ether_setup(dev);
3114 }
3115 
3116 static const struct nla_policy amt_policy[IFLA_AMT_MAX + 1] = {
3117 	[IFLA_AMT_MODE]		= { .type = NLA_U32 },
3118 	[IFLA_AMT_RELAY_PORT]	= { .type = NLA_U16 },
3119 	[IFLA_AMT_GATEWAY_PORT]	= { .type = NLA_U16 },
3120 	[IFLA_AMT_LINK]		= { .type = NLA_U32 },
3121 	[IFLA_AMT_LOCAL_IP]	= { .len = sizeof_field(struct iphdr, daddr) },
3122 	[IFLA_AMT_REMOTE_IP]	= { .len = sizeof_field(struct iphdr, daddr) },
3123 	[IFLA_AMT_DISCOVERY_IP]	= { .len = sizeof_field(struct iphdr, daddr) },
3124 	[IFLA_AMT_MAX_TUNNELS]	= { .type = NLA_U32 },
3125 };
3126 
3127 static int amt_validate(struct nlattr *tb[], struct nlattr *data[],
3128 			struct netlink_ext_ack *extack)
3129 {
3130 	if (!data)
3131 		return -EINVAL;
3132 
3133 	if (!data[IFLA_AMT_LINK]) {
3134 		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LINK],
3135 				    "Link attribute is required");
3136 		return -EINVAL;
3137 	}
3138 
3139 	if (!data[IFLA_AMT_MODE]) {
3140 		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
3141 				    "Mode attribute is required");
3142 		return -EINVAL;
3143 	}
3144 
3145 	if (nla_get_u32(data[IFLA_AMT_MODE]) > AMT_MODE_MAX) {
3146 		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
3147 				    "Mode attribute is not valid");
3148 		return -EINVAL;
3149 	}
3150 
3151 	if (!data[IFLA_AMT_LOCAL_IP]) {
3152 		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LOCAL_IP],
3153 				    "Local attribute is required");
3154 		return -EINVAL;
3155 	}
3156 
3157 	if (!data[IFLA_AMT_DISCOVERY_IP] &&
3158 	    nla_get_u32(data[IFLA_AMT_MODE]) == AMT_MODE_GATEWAY) {
3159 		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_DISCOVERY_IP],
3160 				    "Discovery attribute is required");
3161 		return -EINVAL;
3162 	}
3163 
3164 	return 0;
3165 }
3166 
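/* Create an AMT device on top of the underlay (stream) device given by
 * IFLA_AMT_LINK and configure it for gateway or relay operation.
 * Illustrative iproute2 usage (assumes an iproute2 build with AMT
 * support; keyword spellings may differ between versions):
 *
 *   ip link add amt0 type amt mode gateway local 192.0.2.1 \
 *           discovery 198.51.100.1 dev eth0
 *   ip link add amt0 type amt mode relay local 198.51.100.1 \
 *           dev eth0 max_tunnels 16
 */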
3167 static int amt_newlink(struct net_device *dev,
3168 		       struct rtnl_newlink_params *params,
3169 		       struct netlink_ext_ack *extack)
3170 {
3171 	struct net *link_net = rtnl_newlink_link_net(params);
3172 	struct amt_dev *amt = netdev_priv(dev);
3173 	struct nlattr **data = params->data;
3174 	struct nlattr **tb = params->tb;
3175 	int err = -EINVAL;
3176 
3177 	amt->net = link_net;
3178 	amt->mode = nla_get_u32(data[IFLA_AMT_MODE]);
3179 
3180 	if (data[IFLA_AMT_MAX_TUNNELS] &&
3181 	    nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]))
3182 		amt->max_tunnels = nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]);
3183 	else
3184 		amt->max_tunnels = AMT_MAX_TUNNELS;
3185 
3186 	spin_lock_init(&amt->lock);
3187 	amt->max_groups = AMT_MAX_GROUP;
3188 	amt->max_sources = AMT_MAX_SOURCE;
3189 	amt->hash_buckets = AMT_HSIZE;
3190 	amt->nr_tunnels = 0;
3191 	get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed));
3192 	amt->stream_dev = dev_get_by_index(link_net,
3193 					   nla_get_u32(data[IFLA_AMT_LINK]));
3194 	if (!amt->stream_dev) {
3195 		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
3196 				    "Can't find stream device");
3197 		return -ENODEV;
3198 	}
3199 
3200 	if (amt->stream_dev->type != ARPHRD_ETHER) {
3201 		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
3202 				    "Invalid stream device type");
3203 		goto err;
3204 	}
3205 
3206 	amt->local_ip = nla_get_in_addr(data[IFLA_AMT_LOCAL_IP]);
3207 	if (ipv4_is_loopback(amt->local_ip) ||
3208 	    ipv4_is_zeronet(amt->local_ip) ||
3209 	    ipv4_is_multicast(amt->local_ip)) {
3210 		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LOCAL_IP],
3211 				    "Invalid Local address");
3212 		goto err;
3213 	}
3214 
3215 	amt->relay_port = nla_get_be16_default(data[IFLA_AMT_RELAY_PORT],
3216 					       htons(IANA_AMT_UDP_PORT));
3217 
3218 	amt->gw_port = nla_get_be16_default(data[IFLA_AMT_GATEWAY_PORT],
3219 					    htons(IANA_AMT_UDP_PORT));
3220 
3221 	if (!amt->relay_port) {
3222 		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_RELAY_PORT],
3223 				    "relay port must not be 0");
3224 		goto err;
3225 	}
3226 	if (amt->mode == AMT_MODE_RELAY) {
3227 		amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
3228 		amt->qri = 10;
3229 		dev->needed_headroom = amt->stream_dev->needed_headroom +
3230 				       AMT_RELAY_HLEN;
3231 		dev->mtu = amt->stream_dev->mtu - AMT_RELAY_HLEN;
3232 		dev->max_mtu = dev->mtu;
3233 		dev->min_mtu = ETH_MIN_MTU + AMT_RELAY_HLEN;
3234 	} else {
3235 		if (!data[IFLA_AMT_DISCOVERY_IP]) {
3236 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
3237 					    "discovery must be set in gateway mode");
3238 			goto err;
3239 		}
3240 		if (!amt->gw_port) {
3241 			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_GATEWAY_PORT],
3242 					    "gateway port must not be 0");
3243 			goto err;
3244 		}
3245 		amt->remote_ip = 0;
3246 		amt->discovery_ip = nla_get_in_addr(data[IFLA_AMT_DISCOVERY_IP]);
3247 		if (ipv4_is_loopback(amt->discovery_ip) ||
3248 		    ipv4_is_zeronet(amt->discovery_ip) ||
3249 		    ipv4_is_multicast(amt->discovery_ip)) {
3250 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
3251 					    "discovery must be unicast");
3252 			goto err;
3253 		}
3254 
3255 		dev->needed_headroom = amt->stream_dev->needed_headroom +
3256 				       AMT_GW_HLEN;
3257 		dev->mtu = amt->stream_dev->mtu - AMT_GW_HLEN;
3258 		dev->max_mtu = dev->mtu;
3259 		dev->min_mtu = ETH_MIN_MTU + AMT_GW_HLEN;
3260 	}
3261 	amt->qi = AMT_INIT_QUERY_INTERVAL;
3262 
3263 	err = register_netdevice(dev);
3264 	if (err < 0) {
3265 		netdev_dbg(dev, "failed to register new netdev %d\n", err);
3266 		goto err;
3267 	}
3268 
3269 	err = netdev_upper_dev_link(amt->stream_dev, dev, extack);
3270 	if (err < 0) {
3271 		unregister_netdevice(dev);
3272 		goto err;
3273 	}
3274 
3275 	INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
3276 	INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
3277 	INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
3278 	INIT_WORK(&amt->event_wq, amt_event_work);
3279 	INIT_LIST_HEAD(&amt->tunnel_list);
3280 	return 0;
3281 err:
3282 	dev_put(amt->stream_dev);
3283 	return err;
3284 }
3285 
3286 static void amt_dellink(struct net_device *dev, struct list_head *head)
3287 {
3288 	struct amt_dev *amt = netdev_priv(dev);
3289 
3290 	unregister_netdevice_queue(dev, head);
3291 	netdev_upper_dev_unlink(amt->stream_dev, dev);
3292 	dev_put(amt->stream_dev);
3293 }
3294 
3295 static size_t amt_get_size(const struct net_device *dev)
3296 {
3297 	return nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MODE */
3298 	       nla_total_size(sizeof(__u16)) + /* IFLA_AMT_RELAY_PORT */
3299 	       nla_total_size(sizeof(__u16)) + /* IFLA_AMT_GATEWAY_PORT */
3300 	       nla_total_size(sizeof(__u32)) + /* IFLA_AMT_LINK */
3301 	       nla_total_size(sizeof(__u32)) + /* IFLA_MAX_TUNNELS */
3302 	       nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_DISCOVERY_IP */
3303 	       nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_REMOTE_IP */
3304 	       nla_total_size(sizeof(struct iphdr)); /* IFLA_AMT_LOCAL_IP */
3305 }
3306 
3307 static int amt_fill_info(struct sk_buff *skb, const struct net_device *dev)
3308 {
3309 	struct amt_dev *amt = netdev_priv(dev);
3310 
3311 	if (nla_put_u32(skb, IFLA_AMT_MODE, amt->mode))
3312 		goto nla_put_failure;
3313 	if (nla_put_be16(skb, IFLA_AMT_RELAY_PORT, amt->relay_port))
3314 		goto nla_put_failure;
3315 	if (nla_put_be16(skb, IFLA_AMT_GATEWAY_PORT, amt->gw_port))
3316 		goto nla_put_failure;
3317 	if (nla_put_u32(skb, IFLA_AMT_LINK, amt->stream_dev->ifindex))
3318 		goto nla_put_failure;
3319 	if (nla_put_in_addr(skb, IFLA_AMT_LOCAL_IP, amt->local_ip))
3320 		goto nla_put_failure;
3321 	if (nla_put_in_addr(skb, IFLA_AMT_DISCOVERY_IP, amt->discovery_ip))
3322 		goto nla_put_failure;
3323 	if (amt->remote_ip)
3324 		if (nla_put_in_addr(skb, IFLA_AMT_REMOTE_IP, amt->remote_ip))
3325 			goto nla_put_failure;
3326 	if (nla_put_u32(skb, IFLA_AMT_MAX_TUNNELS, amt->max_tunnels))
3327 		goto nla_put_failure;
3328 
3329 	return 0;
3330 
3331 nla_put_failure:
3332 	return -EMSGSIZE;
3333 }
3334 
3335 static struct rtnl_link_ops amt_link_ops __read_mostly = {
3336 	.kind		= "amt",
3337 	.maxtype	= IFLA_AMT_MAX,
3338 	.policy		= amt_policy,
3339 	.priv_size	= sizeof(struct amt_dev),
3340 	.setup		= amt_link_setup,
3341 	.validate	= amt_validate,
3342 	.newlink	= amt_newlink,
3343 	.dellink	= amt_dellink,
3344 	.get_size       = amt_get_size,
3345 	.fill_info      = amt_fill_info,
3346 };
3347 
3348 static struct net_device *amt_lookup_upper_dev(struct net_device *dev)
3349 {
3350 	struct net_device *upper_dev;
3351 	struct amt_dev *amt;
3352 
3353 	for_each_netdev(dev_net(dev), upper_dev) {
3354 		if (netif_is_amt(upper_dev)) {
3355 			amt = netdev_priv(upper_dev);
3356 			if (amt->stream_dev == dev)
3357 				return upper_dev;
3358 		}
3359 	}
3360 
3361 	return NULL;
3362 }
3363 
3364 static int amt_device_event(struct notifier_block *unused,
3365 			    unsigned long event, void *ptr)
3366 {
3367 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3368 	struct net_device *upper_dev;
3369 	struct amt_dev *amt;
3370 	LIST_HEAD(list);
3371 	int new_mtu;
3372 
3373 	upper_dev = amt_lookup_upper_dev(dev);
3374 	if (!upper_dev)
3375 		return NOTIFY_DONE;
3376 	amt = netdev_priv(upper_dev);
3377 
3378 	switch (event) {
3379 	case NETDEV_UNREGISTER:
3380 		amt_dellink(amt->dev, &list);
3381 		unregister_netdevice_many(&list);
3382 		break;
3383 	case NETDEV_CHANGEMTU:
3384 		if (amt->mode == AMT_MODE_RELAY)
3385 			new_mtu = dev->mtu - AMT_RELAY_HLEN;
3386 		else
3387 			new_mtu = dev->mtu - AMT_GW_HLEN;
3388 
3389 		dev_set_mtu(amt->dev, new_mtu);
3390 		break;
3391 	}
3392 
3393 	return NOTIFY_DONE;
3394 }
3395 
3396 static struct notifier_block amt_notifier_block __read_mostly = {
3397 	.notifier_call = amt_device_event,
3398 };
3399 
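/* Module init: register the netdevice notifier and rtnl link ops, create
 * the unbound "amt" workqueue and arm the periodic source GC work.
 * Registered via late_initcall().
 */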
3400 static int __init amt_init(void)
3401 {
3402 	int err;
3403 
3404 	err = register_netdevice_notifier(&amt_notifier_block);
3405 	if (err < 0)
3406 		goto err;
3407 
3408 	err = rtnl_link_register(&amt_link_ops);
3409 	if (err < 0)
3410 		goto unregister_notifier;
3411 
3412 	amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 0);
3413 	if (!amt_wq) {
3414 		err = -ENOMEM;
3415 		goto rtnl_unregister;
3416 	}
3417 
3418 	spin_lock_init(&source_gc_lock);
3419 	spin_lock_bh(&source_gc_lock);
3420 	INIT_DELAYED_WORK(&source_gc_wq, amt_source_gc_work);
3421 	mod_delayed_work(amt_wq, &source_gc_wq,
3422 			 msecs_to_jiffies(AMT_GC_INTERVAL));
3423 	spin_unlock_bh(&source_gc_lock);
3424 
3425 	return 0;
3426 
3427 rtnl_unregister:
3428 	rtnl_link_unregister(&amt_link_ops);
3429 unregister_notifier:
3430 	unregister_netdevice_notifier(&amt_notifier_block);
3431 err:
3432 	pr_err("error loading AMT module\n");
3433 	return err;
3434 }
3435 late_initcall(amt_init);
3436 
3437 static void __exit amt_fini(void)
3438 {
3439 	rtnl_link_unregister(&amt_link_ops);
3440 	unregister_netdevice_notifier(&amt_notifier_block);
3441 	cancel_delayed_work_sync(&source_gc_wq);
3442 	__amt_source_gc_work();
3443 	destroy_workqueue(amt_wq);
3444 }
3445 module_exit(amt_fini);
3446 
3447 MODULE_LICENSE("GPL");
3448 MODULE_DESCRIPTION("Driver for Automatic Multicast Tunneling (AMT)");
3449 MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
3450 MODULE_ALIAS_RTNL_LINK("amt");
3451