// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 */

#include "soft-interface.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "bat_algo.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "multicast.h"
#include "network-coding.h"
#include "send.h"
#include "translation-table.h"

/**
 * batadv_skb_head_push() - Increase header size and move (push) head pointer
 * @skb: packet buffer which should be modified
 * @len: number of bytes to add
 *
 * Return: 0 on success or negative error number in case of failure
 */
int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
{
	int result;

	/* TODO: We must check if we can release all references to non-payload
	 * data using __skb_header_release in our skbs to allow skb_cow_header
	 * to work optimally. This means that those skbs are not allowed to read
	 * or write any data which is before the current position of skb->data
	 * after that call and thus allow other skbs with the same data buffer
	 * to write freely in that area.
	 */
	result = skb_cow_head(skb, len);
	if (result < 0)
		return result;

	skb_push(skb, len);
	return 0;
}

static int batadv_interface_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int batadv_interface_release(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

/**
 * batadv_sum_counter() - Sum the cpu-local counters for index 'idx'
 * @bat_priv: the bat priv with all the soft interface information
 * @idx: index of counter to sum up
 *
 * Return: sum of all cpu-local counters
 */
static u64 batadv_sum_counter(struct batadv_priv *bat_priv, size_t idx)
{
	u64 *counters, sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
		sum += counters[idx];
	}

	return sum;
}

static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;

	stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
	stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
	stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
	stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
	stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
	return stats;
}

static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct batadv_softif_vlan *vlan;
	struct sockaddr *addr = p;
	u8 old_addr[ETH_ALEN];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(old_addr, dev->dev_addr);
	eth_hw_addr_set(dev, addr->sa_data);

	/* only modify transtable if it has been initialized before */
	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		return 0;

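	/* the soft interface address is announced in the translation table of
	 * every existing VLAN: drop the entry for the old address and add one
	 * for the new address
	 */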
	rcu_read_lock();
	hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
		batadv_tt_local_remove(bat_priv, old_addr, vlan->vid,
				       "mac address changed", false);
		batadv_tt_local_add(dev, addr->sa_data, vlan->vid,
				    BATADV_NULL_IFINDEX, BATADV_NO_MARK);
	}
	rcu_read_unlock();

	return 0;
}

static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
{
	/* check ranges */
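	/* 68 bytes is the minimum MTU required to transport IPv4 */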
	if (new_mtu < 68 || new_mtu > batadv_hardif_min_mtu(dev))
		return -EINVAL;

	dev->mtu = new_mtu;

	return 0;
}

/**
 * batadv_interface_set_rx_mode() - set the rx mode of a device
 * @dev: registered network device to modify
 *
 * We do not actually need to set any rx filters for the virtual batman
 * soft interface. However, a dummy handler enables a user to set static
 * multicast listeners, for instance.
 */
static void batadv_interface_set_rx_mode(struct net_device *dev)
{
}

static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
				       struct net_device *soft_iface)
{
	struct ethhdr *ethhdr;
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_bcast_packet *bcast_packet;
	static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
					      0x00, 0x00};
	static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
					       0x00, 0x00};
	enum batadv_dhcp_recipient dhcp_rcp = BATADV_DHCP_NO;
	u8 *dst_hint = NULL, chaddr[ETH_ALEN];
	struct vlan_ethhdr *vhdr;
	unsigned int header_len = 0;
	int data_len = skb->len, ret;
	unsigned long brd_delay = 0;
	bool do_bcast = false, client_added;
	unsigned short vid;
	u32 seqno;
	int gw_mode;
	enum batadv_forw_mode forw_mode = BATADV_FORW_BCAST;
	int mcast_is_routable = 0;
	int network_offset = ETH_HLEN;
	__be16 proto;

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto dropped;

	/* reset control block to avoid left overs from previous users */
	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));

	netif_trans_update(soft_iface);
	vid = batadv_get_vid(skb, 0);

	skb_reset_mac_header(skb);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;

	switch (ntohs(proto)) {
	case ETH_P_8021Q:
		if (!pskb_may_pull(skb, sizeof(*vhdr)))
			goto dropped;
		vhdr = vlan_eth_hdr(skb);
		proto = vhdr->h_vlan_encapsulated_proto;

		/* drop batman-in-batman packets to prevent loops */
		if (proto != htons(ETH_P_BATMAN)) {
			network_offset += VLAN_HLEN;
			break;
		}

		fallthrough;
	case ETH_P_BATMAN:
		goto dropped;
	}

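	/* the network header starts behind the (possibly VLAN tagged) ethernet
	 * header
	 */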
	skb_set_network_header(skb, network_offset);

	if (batadv_bla_tx(bat_priv, skb, vid))
		goto dropped;

	/* skb->data might have been reallocated by batadv_bla_tx() */
	ethhdr = eth_hdr(skb);

	/* Register the client MAC in the transtable */
	if (!is_multicast_ether_addr(ethhdr->h_source) &&
	    !batadv_bla_is_loopdetect_mac(ethhdr->h_source)) {
		client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source,
						   vid, skb->skb_iif,
						   skb->mark);
		if (!client_added)
			goto dropped;
	}

	/* Snoop address candidates from DHCPACKs for early DAT filling */
	batadv_dat_snoop_outgoing_dhcp_ack(bat_priv, skb, proto, vid);

	/* don't accept stp packets. STP does not help in meshes.
	 * better use the bridge loop avoidance ...
	 *
	 * The same goes for ECTP sent at least by some Cisco Switches,
	 * it might confuse the mesh when used with bridge loop avoidance.
	 */
	if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
		goto dropped;

	if (batadv_compare_eth(ethhdr->h_dest, ectp_addr))
		goto dropped;

	gw_mode = atomic_read(&bat_priv->gw.mode);
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* if gw mode is off, broadcast every packet */
		if (gw_mode == BATADV_GW_MODE_OFF) {
			do_bcast = true;
			goto send;
		}

		dhcp_rcp = batadv_gw_dhcp_recipient_get(skb, &header_len,
							chaddr);
		/* skb->data may have been modified by
		 * batadv_gw_dhcp_recipient_get()
		 */
		ethhdr = eth_hdr(skb);
		/* if gw_mode is on, broadcast any non-DHCP message.
		 * All the DHCP packets are going to be sent as unicast
		 */
		if (dhcp_rcp == BATADV_DHCP_NO) {
			do_bcast = true;
			goto send;
		}

		if (dhcp_rcp == BATADV_DHCP_TO_CLIENT)
			dst_hint = chaddr;
		else if ((gw_mode == BATADV_GW_MODE_SERVER) &&
			 (dhcp_rcp == BATADV_DHCP_TO_SERVER))
			/* gateways should not forward any DHCP message if
			 * directed to a DHCP server
			 */
			goto dropped;

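		/* multicast frames are either flooded as classic broadcasts
		 * or, if the multicast optimizations permit it, sent as
		 * individual unicasts to all interested listeners
		 */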
send:
		if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
			forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
							   &mcast_is_routable);
			switch (forw_mode) {
			case BATADV_FORW_BCAST:
				break;
			case BATADV_FORW_UCASTS:
				do_bcast = false;
				break;
			case BATADV_FORW_NONE:
				fallthrough;
			default:
				goto dropped;
			}
		}
	}

	batadv_skb_set_priority(skb, 0);

	/* ethernet packet should be broadcast */
	if (do_bcast) {
		primary_if = batadv_primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto dropped;

		/* in case of ARP request, we do not immediately broadcast the
		 * packet, instead we first wait for DAT to try to retrieve the
		 * correct ARP entry
		 */
		if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
			brd_delay = msecs_to_jiffies(ARP_REQ_DELAY);

		if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
			goto dropped;

		bcast_packet = (struct batadv_bcast_packet *)skb->data;
		bcast_packet->version = BATADV_COMPAT_VERSION;
		bcast_packet->ttl = BATADV_TTL - 1;

		/* batman packet type: broadcast */
		bcast_packet->packet_type = BATADV_BCAST;
		bcast_packet->reserved = 0;

		/* hw address of first interface is the orig mac because only
		 * this mac is known throughout the mesh
		 */
		ether_addr_copy(bcast_packet->orig,
				primary_if->net_dev->dev_addr);

		/* set broadcast sequence number */
		seqno = atomic_inc_return(&bat_priv->bcast_seqno);
		bcast_packet->seqno = htonl(seqno);

		batadv_send_bcast_packet(bat_priv, skb, brd_delay, true);
	/* unicast packet */
	} else {
		/* DHCP packets going to a server will use the GW feature */
		if (dhcp_rcp == BATADV_DHCP_TO_SERVER) {
			ret = batadv_gw_out_of_range(bat_priv, skb);
			if (ret)
				goto dropped;
			ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
		} else if (forw_mode == BATADV_FORW_UCASTS) {
			ret = batadv_mcast_forw_send(bat_priv, skb, vid,
						     mcast_is_routable);
		} else {
			if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
								  skb))
				goto dropped;

			batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);

			ret = batadv_send_skb_via_tt(bat_priv, skb, dst_hint,
						     vid);
		}
		if (ret != NET_XMIT_SUCCESS)
			goto dropped_freed;
	}

	batadv_inc_counter(bat_priv, BATADV_CNT_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
	goto end;

dropped:
	kfree_skb(skb);
dropped_freed:
	batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
end:
	batadv_hardif_put(primary_if);
	return NETDEV_TX_OK;
}

/**
 * batadv_interface_rx() - receive ethernet frame on local batman-adv interface
 * @soft_iface: local interface which will receive the ethernet frame
 * @skb: ethernet frame for @soft_iface
 * @hdr_size: size of already parsed batman-adv header
 * @orig_node: originator from which the batman-adv packet was sent
 *
 * Sends an ethernet frame to the receive path of the local @soft_iface.
 * skb->data still points to the batman-adv header with the size @hdr_size. The
 * caller has to have parsed this header already and made sure that at least
 * @hdr_size bytes are still available for pull in @skb.
 *
 * The packet may still get dropped. This can happen when the encapsulated
 * ethernet frame is invalid or again contains a batman-adv packet. Also,
 * unicast packets are dropped directly when they were sent between two
 * isolated clients.
 */
void batadv_interface_rx(struct net_device *soft_iface,
			 struct sk_buff *skb, int hdr_size,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_bcast_packet *batadv_bcast_packet;
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct vlan_ethhdr *vhdr;
	struct ethhdr *ethhdr;
	unsigned short vid;
	int packet_type;

	batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
	packet_type = batadv_bcast_packet->packet_type;
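	/* the packet type is remembered for the bridge loop avoidance check
	 * further below; it would no longer be accessible once the batman-adv
	 * header has been pulled off
	 */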

	skb_pull_rcsum(skb, hdr_size);
	skb_reset_mac_header(skb);

	/* clean the netfilter state now that the batman-adv header has been
	 * removed
	 */
	nf_reset_ct(skb);

	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
		goto dropped;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_8021Q:
		if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
			goto dropped;

		vhdr = (struct vlan_ethhdr *)skb->data;

		/* drop batman-in-batman packets to prevent loops */
		if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN))
			break;

		fallthrough;
	case ETH_P_BATMAN:
		goto dropped;
	}

	/* skb->dev & skb->pkt_type are set here */
	skb->protocol = eth_type_trans(skb, soft_iface);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);

	/* Let the bridge loop avoidance check the packet. If it will
	 * not handle it, we can safely push it up.
	 */
	if (batadv_bla_rx(bat_priv, skb, vid, packet_type))
		goto out;

	if (orig_node)
		batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
						     ethhdr->h_source, vid);

	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* set the mark on broadcast packets if AP isolation is ON and
		 * the packet is coming from an "isolated" client
		 */
		if (batadv_vlan_ap_isola_get(bat_priv, vid) &&
		    batadv_tt_global_is_isolated(bat_priv, ethhdr->h_source,
						 vid)) {
			/* save bits in skb->mark not covered by the mask and
			 * apply the mark on the rest
			 */
			skb->mark &= ~bat_priv->isolation_mark_mask;
			skb->mark |= bat_priv->isolation_mark;
		}
	} else if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source,
					 ethhdr->h_dest, vid)) {
		goto dropped;
	}

	netif_rx(skb);
	goto out;

dropped:
	kfree_skb(skb);
out:
	return;
}

/**
 * batadv_softif_vlan_release() - release vlan from lists and queue for free
 *  after rcu grace period
 * @ref: kref pointer of the vlan object
 */
void batadv_softif_vlan_release(struct kref *ref)
{
	struct batadv_softif_vlan *vlan;

	vlan = container_of(ref, struct batadv_softif_vlan, refcount);

	spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
	hlist_del_rcu(&vlan->list);
	spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);

	kfree_rcu(vlan, rcu);
}

/**
 * batadv_softif_vlan_get() - get the vlan object for a specific vid
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the identifier of the vlan object to retrieve
 *
 * Return: the private data of the vlan matching the vid passed as argument or
 * NULL otherwise. The refcounter of the returned object is incremented by 1.
 */
struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
						  unsigned short vid)
{
	struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
		if (vlan_tmp->vid != vid)
			continue;

		if (!kref_get_unless_zero(&vlan_tmp->refcount))
			continue;

		vlan = vlan_tmp;
		break;
	}
	rcu_read_unlock();

	return vlan;
}

/**
 * batadv_softif_create_vlan() - allocate the needed resources for a new vlan
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier
 *
 * Return: 0 on success, a negative error otherwise.
 */
int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
{
	struct batadv_softif_vlan *vlan;

	spin_lock_bh(&bat_priv->softif_vlan_list_lock);

	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (vlan) {
		batadv_softif_vlan_put(vlan);
		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
		return -EEXIST;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan) {
		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
		return -ENOMEM;
	}

	vlan->bat_priv = bat_priv;
	vlan->vid = vid;
	kref_init(&vlan->refcount);

	atomic_set(&vlan->ap_isolation, 0);

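	/* the vlan list keeps its own reference to the object */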
	kref_get(&vlan->refcount);
	hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);

	/* add a new TT local entry. This one will be marked with the NOPURGE
	 * flag
	 */
	batadv_tt_local_add(bat_priv->soft_iface,
			    bat_priv->soft_iface->dev_addr, vid,
			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);

	/* don't return reference to new softif_vlan */
	batadv_softif_vlan_put(vlan);

	return 0;
}

/**
 * batadv_softif_destroy_vlan() - remove and destroy a softif_vlan object
 * @bat_priv: the bat priv with all the soft interface information
 * @vlan: the object to remove
 */
static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
				       struct batadv_softif_vlan *vlan)
{
	/* explicitly remove the associated TT local entry because it is marked
	 * with the NOPURGE flag
	 */
	batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
			       vlan->vid, "vlan interface destroyed", false);

	batadv_softif_vlan_put(vlan);
}

/**
 * batadv_interface_add_vid() - ndo_add_vid API implementation
 * @dev: the netdev of the mesh interface
 * @proto: protocol of the vlan id
 * @vid: identifier of the new vlan
 *
 * Set up all the internal structures for handling the new vlan on top of the
 * mesh interface
 *
 * Return: 0 on success or a negative error code in case of failure.
 */
static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
				    unsigned short vid)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct batadv_softif_vlan *vlan;

	/* only 802.1Q vlans are supported.
	 * batman-adv does not know how to handle other types
	 */
	if (proto != htons(ETH_P_8021Q))
		return -EINVAL;

	vid |= BATADV_VLAN_HAS_TAG;

	/* if a new vlan is getting created and it already exists, it means that
	 * it was not deleted yet. batadv_softif_vlan_get() increases the
	 * refcount in order to revive the object.
	 *
	 * if it does not exist then create it.
	 */
	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (!vlan)
		return batadv_softif_create_vlan(bat_priv, vid);

	/* add a new TT local entry. This one will be marked with the NOPURGE
	 * flag. This must be added again, even if the vlan object already
	 * exists, because the entry was deleted by kill_vid()
	 */
	batadv_tt_local_add(bat_priv->soft_iface,
			    bat_priv->soft_iface->dev_addr, vid,
			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);

	return 0;
}

/**
 * batadv_interface_kill_vid() - ndo_kill_vid API implementation
 * @dev: the netdev of the mesh interface
 * @proto: protocol of the vlan id
 * @vid: identifier of the deleted vlan
 *
 * Destroy all the internal structures used to handle the vlan identified by vid
 * on top of the mesh interface
 *
 * Return: 0 on success, -EINVAL if the specified prototype is not ETH_P_8021Q
 * or -ENOENT if the specified vlan id wasn't registered.
 */
static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
				     unsigned short vid)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct batadv_softif_vlan *vlan;

	/* only 802.1Q vlans are supported. batman-adv does not know how to
	 * handle other types
	 */
	if (proto != htons(ETH_P_8021Q))
		return -EINVAL;

	vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
	if (!vlan)
		return -ENOENT;

	batadv_softif_destroy_vlan(bat_priv, vlan);

	/* finally free the vlan object */
	batadv_softif_vlan_put(vlan);

	return 0;
}

/* batman-adv network devices have devices nesting below them and are a special
 * "super class" of normal network devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key batadv_netdev_xmit_lock_key;
static struct lock_class_key batadv_netdev_addr_lock_key;

/**
 * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
 * @dev: device which owns the tx queue
 * @txq: tx queue to modify
 * @_unused: always NULL
 */
static void batadv_set_lockdep_class_one(struct net_device *dev,
					 struct netdev_queue *txq,
					 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
}

/**
 * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
 * @dev: network device to modify
 */
static void batadv_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
}

/**
 * batadv_softif_init_late() - late stage initialization of soft interface
 * @dev: registered network device to modify
 *
 * Return: error code on failures
 */
static int batadv_softif_init_late(struct net_device *dev)
{
	struct batadv_priv *bat_priv;
	u32 random_seqno;
	int ret;
	size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;

	batadv_set_lockdep_class(dev);

	bat_priv = netdev_priv(dev);
	bat_priv->soft_iface = dev;

	/* batadv_interface_stats() needs to be available as soon as
	 * register_netdevice() has been called
	 */
	bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(u64));
	if (!bat_priv->bat_counters)
		return -ENOMEM;

	atomic_set(&bat_priv->aggregated_ogms, 1);
	atomic_set(&bat_priv->bonding, 0);
#ifdef CONFIG_BATMAN_ADV_BLA
	atomic_set(&bat_priv->bridge_loop_avoidance, 1);
#endif
#ifdef CONFIG_BATMAN_ADV_DAT
	atomic_set(&bat_priv->distributed_arp_table, 1);
#endif
#ifdef CONFIG_BATMAN_ADV_MCAST
	atomic_set(&bat_priv->multicast_mode, 1);
	atomic_set(&bat_priv->multicast_fanout, 16);
	atomic_set(&bat_priv->mcast.num_want_all_unsnoopables, 0);
	atomic_set(&bat_priv->mcast.num_want_all_ipv4, 0);
	atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
#endif
	atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
	atomic_set(&bat_priv->gw.bandwidth_down, 100);
	atomic_set(&bat_priv->gw.bandwidth_up, 20);
	atomic_set(&bat_priv->orig_interval, 1000);
	atomic_set(&bat_priv->hop_penalty, 30);
#ifdef CONFIG_BATMAN_ADV_DEBUG
	atomic_set(&bat_priv->log_level, 0);
#endif
	atomic_set(&bat_priv->fragmentation, 1);
	atomic_set(&bat_priv->packet_size_max, ETH_DATA_LEN);
	atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
	atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
	atomic_set(&bat_priv->bcast_seqno, 1);
	atomic_set(&bat_priv->tt.vn, 0);
	atomic_set(&bat_priv->tt.local_changes, 0);
	atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
#ifdef CONFIG_BATMAN_ADV_BLA
	atomic_set(&bat_priv->bla.num_requests, 0);
#endif
	atomic_set(&bat_priv->tp_num, 0);

	bat_priv->tt.last_changeset = NULL;
	bat_priv->tt.last_changeset_len = 0;
	bat_priv->isolation_mark = 0;
	bat_priv->isolation_mark_mask = 0;

	/* randomize initial seqno to avoid collision */
	get_random_bytes(&random_seqno, sizeof(random_seqno));
	atomic_set(&bat_priv->frag_seqno, random_seqno);

	bat_priv->primary_if = NULL;

	batadv_nc_init_bat_priv(bat_priv);

	if (!bat_priv->algo_ops) {
		ret = batadv_algo_select(bat_priv, batadv_routing_algo);
		if (ret < 0)
			goto free_bat_counters;
	}

	ret = batadv_mesh_init(dev);
	if (ret < 0)
		goto free_bat_counters;

	return 0;

free_bat_counters:
	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	return ret;
}

/**
 * batadv_softif_slave_add() - Add a slave interface to a batadv_soft_interface
 * @dev: batadv_soft_interface used as master interface
 * @slave_dev: net_device which should become the slave interface
 * @extack: extended ACK report struct
 *
 * Return: 0 if successful or error otherwise.
 */
static int batadv_softif_slave_add(struct net_device *dev,
				   struct net_device *slave_dev,
				   struct netlink_ext_ack *extack)
{
	struct batadv_hard_iface *hard_iface;
	int ret = -EINVAL;

	hard_iface = batadv_hardif_get_by_netdev(slave_dev);
	if (!hard_iface || hard_iface->soft_iface)
		goto out;

	ret = batadv_hardif_enable_interface(hard_iface, dev);

out:
	batadv_hardif_put(hard_iface);
	return ret;
}

/**
 * batadv_softif_slave_del() - Delete a slave iface from a batadv_soft_interface
 * @dev: batadv_soft_interface used as master interface
 * @slave_dev: net_device which should be removed from the master interface
 *
 * Return: 0 if successful or error otherwise.
 */
static int batadv_softif_slave_del(struct net_device *dev,
				   struct net_device *slave_dev)
{
	struct batadv_hard_iface *hard_iface;
	int ret = -EINVAL;

	hard_iface = batadv_hardif_get_by_netdev(slave_dev);

	if (!hard_iface || hard_iface->soft_iface != dev)
		goto out;

	batadv_hardif_disable_interface(hard_iface);
	ret = 0;

out:
	batadv_hardif_put(hard_iface);
	return ret;
}

static const struct net_device_ops batadv_netdev_ops = {
	.ndo_init = batadv_softif_init_late,
	.ndo_open = batadv_interface_open,
	.ndo_stop = batadv_interface_release,
	.ndo_get_stats = batadv_interface_stats,
	.ndo_vlan_rx_add_vid = batadv_interface_add_vid,
	.ndo_vlan_rx_kill_vid = batadv_interface_kill_vid,
	.ndo_set_mac_address = batadv_interface_set_mac_addr,
	.ndo_change_mtu = batadv_interface_change_mtu,
	.ndo_set_rx_mode = batadv_interface_set_rx_mode,
	.ndo_start_xmit = batadv_interface_tx,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_add_slave = batadv_softif_slave_add,
	.ndo_del_slave = batadv_softif_slave_del,
};

static void batadv_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "B.A.T.M.A.N. advanced", sizeof(info->driver));
	strscpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version));
	strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strscpy(info->bus_info, "batman", sizeof(info->bus_info));
}

/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
 * Declare each description string in struct.name[] to get fixed sized buffer
 * and compile time checking for strings longer than ETH_GSTRING_LEN.
 */
static const struct {
	const char name[ETH_GSTRING_LEN];
} batadv_counters_strings[] = {
	{ "tx" },
	{ "tx_bytes" },
	{ "tx_dropped" },
	{ "rx" },
	{ "rx_bytes" },
	{ "forward" },
	{ "forward_bytes" },
	{ "mgmt_tx" },
	{ "mgmt_tx_bytes" },
	{ "mgmt_rx" },
	{ "mgmt_rx_bytes" },
	{ "frag_tx" },
	{ "frag_tx_bytes" },
	{ "frag_rx" },
	{ "frag_rx_bytes" },
	{ "frag_fwd" },
	{ "frag_fwd_bytes" },
	{ "tt_request_tx" },
	{ "tt_request_rx" },
	{ "tt_response_tx" },
	{ "tt_response_rx" },
	{ "tt_roam_adv_tx" },
	{ "tt_roam_adv_rx" },
#ifdef CONFIG_BATMAN_ADV_DAT
	{ "dat_get_tx" },
	{ "dat_get_rx" },
	{ "dat_put_tx" },
	{ "dat_put_rx" },
	{ "dat_cached_reply_tx" },
#endif
#ifdef CONFIG_BATMAN_ADV_NC
	{ "nc_code" },
	{ "nc_code_bytes" },
	{ "nc_recode" },
	{ "nc_recode_bytes" },
	{ "nc_buffer" },
	{ "nc_decode" },
	{ "nc_decode_bytes" },
	{ "nc_decode_failed" },
	{ "nc_sniffed" },
#endif
};

static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, batadv_counters_strings,
		       sizeof(batadv_counters_strings));
}

static void batadv_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	int i;

	for (i = 0; i < BATADV_CNT_NUM; i++)
		data[i] = batadv_sum_counter(bat_priv, i);
}

static int batadv_get_sset_count(struct net_device *dev, int stringset)
{
	if (stringset == ETH_SS_STATS)
		return BATADV_CNT_NUM;

	return -EOPNOTSUPP;
}

static const struct ethtool_ops batadv_ethtool_ops = {
	.get_drvinfo = batadv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = batadv_get_strings,
	.get_ethtool_stats = batadv_get_ethtool_stats,
	.get_sset_count = batadv_get_sset_count,
};

/**
 * batadv_softif_free() - Deconstructor of batadv_soft_interface
 * @dev: Device to cleanup and remove
 */
static void batadv_softif_free(struct net_device *dev)
{
	batadv_mesh_free(dev);

	/* some scheduled RCU callbacks need the bat_priv struct to accomplish
	 * their tasks. Wait for them all to be finished before freeing the
	 * netdev and its private data (bat_priv)
	 */
	rcu_barrier();
}

/**
 * batadv_softif_init_early() - early stage initialization of soft interface
 * @dev: registered network device to modify
 */
static void batadv_softif_init_early(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &batadv_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = batadv_softif_free;
	dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
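	/* no qdisc is attached to the soft interface (IFF_NO_QUEUE) and the
	 * core does not need to take the xmit lock for it (NETIF_F_LLTX)
	 */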
	dev->features |= NETIF_F_LLTX;
	dev->priv_flags |= IFF_NO_QUEUE;

	/* can't call min_mtu, because the needed variables
	 * have not been initialized yet
	 */
	dev->mtu = ETH_DATA_LEN;

	/* generate random address */
	eth_hw_addr_random(dev);

	dev->ethtool_ops = &batadv_ethtool_ops;
}

/**
 * batadv_softif_validate() - validate configuration of new batadv link
 * @tb: IFLA_INFO_DATA netlink attributes
 * @data: enum batadv_ifla_attrs attributes
 * @extack: extended ACK report struct
 *
 * Return: 0 if successful or error otherwise.
 */
static int batadv_softif_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	struct batadv_algo_ops *algo_ops;

	if (!data)
		return 0;

	if (data[IFLA_BATADV_ALGO_NAME]) {
		algo_ops = batadv_algo_get(nla_data(data[IFLA_BATADV_ALGO_NAME]));
		if (!algo_ops)
			return -EINVAL;
	}

	return 0;
}

/**
 * batadv_softif_newlink() - pre-initialize and register new batadv link
 * @src_net: the applicable net namespace
 * @dev: network device to register
 * @tb: IFLA_INFO_DATA netlink attributes
 * @data: enum batadv_ifla_attrs attributes
 * @extack: extended ACK report struct
 *
 * Return: 0 if successful or error otherwise.
 */
static int batadv_softif_newlink(struct net *src_net, struct net_device *dev,
				 struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	const char *algo_name;
	int err;

	if (data && data[IFLA_BATADV_ALGO_NAME]) {
		algo_name = nla_data(data[IFLA_BATADV_ALGO_NAME]);
		err = batadv_algo_select(bat_priv, algo_name);
		if (err)
			return -EINVAL;
	}

	return register_netdevice(dev);
}

/**
 * batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via
 *  netlink
 * @soft_iface: the to-be-removed batman-adv interface
 * @head: list pointer
 */
static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
					  struct list_head *head)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct batadv_hard_iface *hard_iface;
	struct batadv_softif_vlan *vlan;

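	/* detach all slave hard interfaces before tearing down the soft
	 * interface itself
	 */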
	list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface == soft_iface)
			batadv_hardif_disable_interface(hard_iface);
	}

	/* destroy the "untagged" VLAN */
	vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
	if (vlan) {
		batadv_softif_destroy_vlan(bat_priv, vlan);
		batadv_softif_vlan_put(vlan);
	}

	unregister_netdevice_queue(soft_iface, head);
}

/**
 * batadv_softif_is_valid() - Check whether device is a batadv soft interface
 * @net_dev: device which should be checked
 *
 * Return: true when net_dev is a batman-adv interface, false otherwise
 */
bool batadv_softif_is_valid(const struct net_device *net_dev)
{
	if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
		return true;

	return false;
}

static const struct nla_policy batadv_ifla_policy[IFLA_BATADV_MAX + 1] = {
	[IFLA_BATADV_ALGO_NAME]	= { .type = NLA_NUL_STRING },
};

struct rtnl_link_ops batadv_link_ops __read_mostly = {
	.kind		= "batadv",
	.priv_size	= sizeof(struct batadv_priv),
	.setup		= batadv_softif_init_early,
	.maxtype	= IFLA_BATADV_MAX,
	.policy		= batadv_ifla_policy,
	.validate	= batadv_softif_validate,
	.newlink	= batadv_softif_newlink,
	.dellink	= batadv_softif_destroy_netlink,
};
1128