1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner, Simon Wunderlich
5 */
6
7 #include "soft-interface.h"
8 #include "main.h"
9
10 #include <linux/atomic.h>
11 #include <linux/byteorder/generic.h>
12 #include <linux/cache.h>
13 #include <linux/compiler.h>
14 #include <linux/container_of.h>
15 #include <linux/cpumask.h>
16 #include <linux/errno.h>
17 #include <linux/etherdevice.h>
18 #include <linux/ethtool.h>
19 #include <linux/gfp.h>
20 #include <linux/if_ether.h>
21 #include <linux/if_vlan.h>
22 #include <linux/jiffies.h>
23 #include <linux/kref.h>
24 #include <linux/list.h>
25 #include <linux/lockdep.h>
26 #include <linux/netdevice.h>
27 #include <linux/netlink.h>
28 #include <linux/percpu.h>
29 #include <linux/random.h>
30 #include <linux/rculist.h>
31 #include <linux/rcupdate.h>
32 #include <linux/skbuff.h>
33 #include <linux/slab.h>
34 #include <linux/socket.h>
35 #include <linux/spinlock.h>
36 #include <linux/stddef.h>
37 #include <linux/string.h>
38 #include <linux/types.h>
39 #include <net/net_namespace.h>
40 #include <net/netlink.h>
41 #include <uapi/linux/batadv_packet.h>
42 #include <uapi/linux/batman_adv.h>
43
44 #include "bat_algo.h"
45 #include "bridge_loop_avoidance.h"
46 #include "distributed-arp-table.h"
47 #include "gateway_client.h"
48 #include "hard-interface.h"
49 #include "multicast.h"
50 #include "network-coding.h"
51 #include "send.h"
52 #include "translation-table.h"
53
54 /**
55 * batadv_skb_head_push() - Increase header size and move (push) head pointer
56 * @skb: packet buffer which should be modified
57 * @len: number of bytes to add
58 *
59 * Return: 0 on success or negative error number in case of failure
60 */
61 int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
62 {
63 int result;
64
65 /* TODO: We must check if we can release all references to non-payload
66 * data using __skb_header_release in our skbs to allow skb_cow_header
67 * to work optimally. This means that those skbs are not allowed to read
68 * or write any data which is before the current position of skb->data
69 * after that call, thus allowing other skbs with the same data buffer
70 * to write freely in that area.
71 */
72 result = skb_cow_head(skb, len);
73 if (result < 0)
74 return result;
75
76 skb_push(skb, len);
77 return 0;
78 }
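
/* Illustrative sketch (not part of the original file): a typical caller
 * first reserves headroom with batadv_skb_head_push() and only then writes
 * the batman-adv header, mirroring what batadv_interface_tx() does further
 * below for broadcast packets. The helper name is hypothetical.
 */
static int __maybe_unused
batadv_example_push_bcast_header(struct sk_buff *skb, const u8 *orig_addr,
				 u32 seqno)
{
	struct batadv_bcast_packet *bcast_packet;
	int ret;

	/* make room for the broadcast header; this may move skb->data */
	ret = batadv_skb_head_push(skb, sizeof(*bcast_packet));
	if (ret < 0)
		return ret;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;
	bcast_packet->packet_type = BATADV_BCAST;
	bcast_packet->version = BATADV_COMPAT_VERSION;
	bcast_packet->ttl = BATADV_TTL - 1;
	bcast_packet->reserved = 0;
	bcast_packet->seqno = htonl(seqno);
	ether_addr_copy(bcast_packet->orig, orig_addr);

	return 0;
}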
79
80 static int batadv_interface_open(struct net_device *dev)
81 {
82 netif_start_queue(dev);
83 return 0;
84 }
85
86 static int batadv_interface_release(struct net_device *dev)
87 {
88 netif_stop_queue(dev);
89 return 0;
90 }
91
92 /**
93 * batadv_sum_counter() - Sum the cpu-local counters for index 'idx'
94 * @bat_priv: the bat priv with all the soft interface information
95 * @idx: index of counter to sum up
96 *
97 * Return: sum of all cpu-local counters
98 */
99 static u64 batadv_sum_counter(struct batadv_priv *bat_priv, size_t idx)
100 {
101 u64 *counters, sum = 0;
102 int cpu;
103
104 for_each_possible_cpu(cpu) {
105 counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
106 sum += counters[idx];
107 }
108
109 return sum;
110 }
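
/* Illustrative sketch (not part of the original file): the matching update
 * helpers batadv_inc_counter()/batadv_add_counter() live in main.h; an
 * equivalent open-coded form is shown here for clarity. Each CPU only
 * touches its own slot, so no locking is needed and the reader above simply
 * sums over all possible CPUs.
 */
static void __maybe_unused
batadv_example_add_counter(struct batadv_priv *bat_priv, size_t idx, u64 value)
{
	/* lockless update of the current CPU's counter slot */
	this_cpu_add(bat_priv->bat_counters[idx], value);
}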
111
112 static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
113 {
114 struct batadv_priv *bat_priv = netdev_priv(dev);
115 struct net_device_stats *stats = &dev->stats;
116
117 stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
118 stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
119 stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
120 stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
121 stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
122 return stats;
123 }
124
125 static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
126 {
127 struct batadv_priv *bat_priv = netdev_priv(dev);
128 struct batadv_softif_vlan *vlan;
129 struct sockaddr *addr = p;
130 u8 old_addr[ETH_ALEN];
131
132 if (!is_valid_ether_addr(addr->sa_data))
133 return -EADDRNOTAVAIL;
134
135 ether_addr_copy(old_addr, dev->dev_addr);
136 eth_hw_addr_set(dev, addr->sa_data);
137
138 /* only modify transtable if it has been initialized before */
139 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
140 return 0;
141
142 rcu_read_lock();
143 hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
144 batadv_tt_local_remove(bat_priv, old_addr, vlan->vid,
145 "mac address changed", false);
146 batadv_tt_local_add(dev, addr->sa_data, vlan->vid,
147 BATADV_NULL_IFINDEX, BATADV_NO_MARK);
148 }
149 rcu_read_unlock();
150
151 return 0;
152 }
153
154 static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
155 {
156 struct batadv_priv *bat_priv = netdev_priv(dev);
157
158 /* check ranges */
159 if (new_mtu < ETH_MIN_MTU || new_mtu > batadv_hardif_min_mtu(dev))
160 return -EINVAL;
161
162 WRITE_ONCE(dev->mtu, new_mtu);
163 bat_priv->mtu_set_by_user = new_mtu;
164
165 return 0;
166 }
167
168 /**
169 * batadv_interface_set_rx_mode() - set the rx mode of a device
170 * @dev: registered network device to modify
171 *
172 * We do not actually need to set any rx filters for the virtual batman
173 * soft interface. However, a dummy handler enables a user to set static
174 * multicast listeners, for instance.
175 */
176 static void batadv_interface_set_rx_mode(struct net_device *dev)
177 {
178 }
179
180 static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
181 struct net_device *soft_iface)
182 {
183 struct ethhdr *ethhdr;
184 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
185 struct batadv_hard_iface *primary_if = NULL;
186 struct batadv_bcast_packet *bcast_packet;
187 static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
188 0x00, 0x00};
189 static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
190 0x00, 0x00};
191 enum batadv_dhcp_recipient dhcp_rcp = BATADV_DHCP_NO;
192 u8 *dst_hint = NULL, chaddr[ETH_ALEN];
193 struct vlan_ethhdr *vhdr;
194 unsigned int header_len = 0;
195 int data_len = skb->len, ret;
196 unsigned long brd_delay = 0;
197 bool do_bcast = false, client_added;
198 unsigned short vid;
199 u32 seqno;
200 int gw_mode;
201 enum batadv_forw_mode forw_mode = BATADV_FORW_BCAST;
202 int mcast_is_routable = 0;
203 int network_offset = ETH_HLEN;
204 __be16 proto;
205
206 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
207 goto dropped;
208
209 /* reset control block to avoid left overs from previous users */
210 memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
211
212 netif_trans_update(soft_iface);
213 vid = batadv_get_vid(skb, 0);
214
215 skb_reset_mac_header(skb);
216 ethhdr = eth_hdr(skb);
217
218 proto = ethhdr->h_proto;
219
220 switch (ntohs(proto)) {
221 case ETH_P_8021Q:
222 if (!pskb_may_pull(skb, sizeof(*vhdr)))
223 goto dropped;
224 vhdr = vlan_eth_hdr(skb);
225 proto = vhdr->h_vlan_encapsulated_proto;
226
227 /* drop batman-in-batman packets to prevent loops */
228 if (proto != htons(ETH_P_BATMAN)) {
229 network_offset += VLAN_HLEN;
230 break;
231 }
232
233 fallthrough;
234 case ETH_P_BATMAN:
235 goto dropped;
236 }
237
238 skb_set_network_header(skb, network_offset);
239
240 if (batadv_bla_tx(bat_priv, skb, vid))
241 goto dropped;
242
243 /* skb->data might have been reallocated by batadv_bla_tx() */
244 ethhdr = eth_hdr(skb);
245
246 /* Register the client MAC in the transtable */
247 if (!is_multicast_ether_addr(ethhdr->h_source) &&
248 !batadv_bla_is_loopdetect_mac(ethhdr->h_source)) {
249 client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source,
250 vid, skb->skb_iif,
251 skb->mark);
252 if (!client_added)
253 goto dropped;
254 }
255
256 /* Snoop address candidates from DHCPACKs for early DAT filling */
257 batadv_dat_snoop_outgoing_dhcp_ack(bat_priv, skb, proto, vid);
258
259 /* don't accept STP packets. STP does not help in meshes,
260 * better use the bridge loop avoidance ...
261 *
262 * The same goes for ECTP sent at least by some Cisco Switches,
263 * it might confuse the mesh when used with bridge loop avoidance.
264 */
265 if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
266 goto dropped;
267
268 if (batadv_compare_eth(ethhdr->h_dest, ectp_addr))
269 goto dropped;
270
271 gw_mode = atomic_read(&bat_priv->gw.mode);
272 if (is_multicast_ether_addr(ethhdr->h_dest)) {
273 /* if gw mode is off, broadcast every packet */
274 if (gw_mode == BATADV_GW_MODE_OFF) {
275 do_bcast = true;
276 goto send;
277 }
278
279 dhcp_rcp = batadv_gw_dhcp_recipient_get(skb, &header_len,
280 chaddr);
281 /* skb->data may have been modified by
282 * batadv_gw_dhcp_recipient_get()
283 */
284 ethhdr = eth_hdr(skb);
285 /* if gw_mode is on, broadcast any non-DHCP message.
286 * All the DHCP packets are going to be sent as unicast
287 */
288 if (dhcp_rcp == BATADV_DHCP_NO) {
289 do_bcast = true;
290 goto send;
291 }
292
293 if (dhcp_rcp == BATADV_DHCP_TO_CLIENT)
294 dst_hint = chaddr;
295 else if ((gw_mode == BATADV_GW_MODE_SERVER) &&
296 (dhcp_rcp == BATADV_DHCP_TO_SERVER))
297 /* gateways should not forward any DHCP message if
298 * directed to a DHCP server
299 */
300 goto dropped;
301
302 send:
303 if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
304 forw_mode = batadv_mcast_forw_mode(bat_priv, skb, vid,
305 &mcast_is_routable);
306 switch (forw_mode) {
307 case BATADV_FORW_BCAST:
308 break;
309 case BATADV_FORW_UCASTS:
310 case BATADV_FORW_MCAST:
311 do_bcast = false;
312 break;
313 case BATADV_FORW_NONE:
314 fallthrough;
315 default:
316 goto dropped;
317 }
318 }
319 }
320
321 batadv_skb_set_priority(skb, 0);
322
323 /* ethernet packet should be broadcast */
324 if (do_bcast) {
325 primary_if = batadv_primary_if_get_selected(bat_priv);
326 if (!primary_if)
327 goto dropped;
328
329 /* in case of ARP request, we do not immediately broadcast the
330 * packet, instead we first wait for DAT to try to retrieve the
331 * correct ARP entry
332 */
333 if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
334 brd_delay = msecs_to_jiffies(ARP_REQ_DELAY);
335
336 if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
337 goto dropped;
338
339 bcast_packet = (struct batadv_bcast_packet *)skb->data;
340 bcast_packet->version = BATADV_COMPAT_VERSION;
341 bcast_packet->ttl = BATADV_TTL - 1;
342
343 /* batman packet type: broadcast */
344 bcast_packet->packet_type = BATADV_BCAST;
345 bcast_packet->reserved = 0;
346
347 /* hw address of first interface is the orig mac because only
348 * this mac is known throughout the mesh
349 */
350 ether_addr_copy(bcast_packet->orig,
351 primary_if->net_dev->dev_addr);
352
353 /* set broadcast sequence number */
354 seqno = atomic_inc_return(&bat_priv->bcast_seqno);
355 bcast_packet->seqno = htonl(seqno);
356
357 batadv_send_bcast_packet(bat_priv, skb, brd_delay, true);
358 /* unicast packet */
359 } else {
360 /* DHCP packets going to a server will use the GW feature */
361 if (dhcp_rcp == BATADV_DHCP_TO_SERVER) {
362 ret = batadv_gw_out_of_range(bat_priv, skb);
363 if (ret)
364 goto dropped;
365 ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
366 } else if (forw_mode == BATADV_FORW_UCASTS) {
367 ret = batadv_mcast_forw_send(bat_priv, skb, vid,
368 mcast_is_routable);
369 } else if (forw_mode == BATADV_FORW_MCAST) {
370 ret = batadv_mcast_forw_mcsend(bat_priv, skb);
371 } else {
372 if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
373 skb))
374 goto dropped;
375
376 batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);
377
378 ret = batadv_send_skb_via_tt(bat_priv, skb, dst_hint,
379 vid);
380 }
381 if (ret != NET_XMIT_SUCCESS)
382 goto dropped_freed;
383 }
384
385 batadv_inc_counter(bat_priv, BATADV_CNT_TX);
386 batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
387 goto end;
388
389 dropped:
390 kfree_skb(skb);
391 dropped_freed:
392 batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
393 end:
394 batadv_hardif_put(primary_if);
395 return NETDEV_TX_OK;
396 }
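
/* Illustrative sketch (not part of the original file): the transmit path
 * above and batadv_interface_rx() below run the same ethertype check to
 * refuse batman-in-batman encapsulation. A hypothetical helper factoring
 * it out could return the network header offset, or a negative value when
 * the frame has to be dropped. It assumes the caller already reset and
 * validated the mac header.
 */
static int __maybe_unused batadv_example_check_encap(struct sk_buff *skb)
{
	struct ethhdr *ethhdr = eth_hdr(skb);
	struct vlan_ethhdr *vhdr;

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_8021Q:
		if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
			return -EINVAL;

		vhdr = vlan_eth_hdr(skb);

		/* VLAN-tagged batman-adv frames would create loops */
		if (vhdr->h_vlan_encapsulated_proto == htons(ETH_P_BATMAN))
			return -EINVAL;

		return ETH_HLEN + VLAN_HLEN;
	case ETH_P_BATMAN:
		/* untagged batman-adv frames would create loops as well */
		return -EINVAL;
	default:
		return ETH_HLEN;
	}
}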
397
398 /**
399 * batadv_interface_rx() - receive ethernet frame on local batman-adv interface
400 * @soft_iface: local interface which will receive the ethernet frame
401 * @skb: ethernet frame for @soft_iface
402 * @hdr_size: size of already parsed batman-adv header
403 * @orig_node: originator from which the batman-adv packet was sent
404 *
405 * Sends an ethernet frame to the receive path of the local @soft_iface.
406 * skb->data still points to the batman-adv header of size @hdr_size.
407 * The caller has to have parsed this header already and made sure that at least
408 * @hdr_size bytes are still available for pull in @skb.
409 *
410 * The packet may still get dropped. This can happen when the encapsulated
411 * ethernet frame is invalid or contains another batman-adv packet. Also,
412 * unicast packets will be dropped directly when they were sent between two
413 * isolated clients.
414 */
415 void batadv_interface_rx(struct net_device *soft_iface,
416 struct sk_buff *skb, int hdr_size,
417 struct batadv_orig_node *orig_node)
418 {
419 struct batadv_bcast_packet *batadv_bcast_packet;
420 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
421 struct vlan_ethhdr *vhdr;
422 struct ethhdr *ethhdr;
423 unsigned short vid;
424 int packet_type;
425
426 batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
427 packet_type = batadv_bcast_packet->packet_type;
428
429 skb_pull_rcsum(skb, hdr_size);
430 skb_reset_mac_header(skb);
431
432 /* clean the netfilter state now that the batman-adv header has been
433 * removed
434 */
435 nf_reset_ct(skb);
436
437 if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
438 goto dropped;
439
440 vid = batadv_get_vid(skb, 0);
441 ethhdr = eth_hdr(skb);
442
443 switch (ntohs(ethhdr->h_proto)) {
444 case ETH_P_8021Q:
445 if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
446 goto dropped;
447
448 vhdr = skb_vlan_eth_hdr(skb);
449
450 /* drop batman-in-batman packets to prevent loops */
451 if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN))
452 break;
453
454 fallthrough;
455 case ETH_P_BATMAN:
456 goto dropped;
457 }
458
459 /* skb->dev & skb->pkt_type are set here */
460 skb->protocol = eth_type_trans(skb, soft_iface);
461 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
462
463 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
464 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
465 skb->len + ETH_HLEN);
466
467 /* Let the bridge loop avoidance check the packet. If it will
468 * not handle it, we can safely push it up.
469 */
470 if (batadv_bla_rx(bat_priv, skb, vid, packet_type))
471 goto out;
472
473 if (orig_node)
474 batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
475 ethhdr->h_source, vid);
476
477 if (is_multicast_ether_addr(ethhdr->h_dest)) {
478 /* set the mark on multicast packets if AP isolation is ON and
479 * the packet is coming from an "isolated" client
480 */
481 if (batadv_vlan_ap_isola_get(bat_priv, vid) &&
482 batadv_tt_global_is_isolated(bat_priv, ethhdr->h_source,
483 vid)) {
484 /* save bits in skb->mark not covered by the mask and
485 * apply the mark on the rest
486 */
487 skb->mark &= ~bat_priv->isolation_mark_mask;
488 skb->mark |= bat_priv->isolation_mark;
489 }
490 } else if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source,
491 ethhdr->h_dest, vid)) {
492 goto dropped;
493 }
494
495 netif_rx(skb);
496 goto out;
497
498 dropped:
499 kfree_skb(skb);
500 out:
501 return;
502 }
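
/* Illustrative sketch (not part of the original file): a minimal caller of
 * batadv_interface_rx() honouring the contract documented above - the
 * batman-adv header must already be parsed and at least @hdr_size bytes
 * must be available for pulling. All names here are hypothetical.
 */
static void __maybe_unused
batadv_example_deliver(struct net_device *soft_iface, struct sk_buff *skb,
		       struct batadv_orig_node *orig_node)
{
	int hdr_size = sizeof(struct batadv_unicast_packet);

	if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN))) {
		kfree_skb(skb);
		return;
	}

	/* consumes the skb in every case */
	batadv_interface_rx(soft_iface, skb, hdr_size, orig_node);
}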
503
504 /**
505 * batadv_softif_vlan_release() - release vlan from lists and queue for free
506 * after rcu grace period
507 * @ref: kref pointer of the vlan object
508 */
509 void batadv_softif_vlan_release(struct kref *ref)
510 {
511 struct batadv_softif_vlan *vlan;
512
513 vlan = container_of(ref, struct batadv_softif_vlan, refcount);
514
515 spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
516 hlist_del_rcu(&vlan->list);
517 spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
518
519 kfree_rcu(vlan, rcu);
520 }
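
/* Illustrative sketch (not part of the original file): the matching "put"
 * helper is a static inline in soft-interface.h; conceptually it just drops
 * the reference and lets kref call the release function above once the
 * refcount hits zero. An equivalent form:
 */
static void __maybe_unused
batadv_example_softif_vlan_put(struct batadv_softif_vlan *vlan)
{
	if (!vlan)
		return;

	kref_put(&vlan->refcount, batadv_softif_vlan_release);
}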
521
522 /**
523 * batadv_softif_vlan_get() - get the vlan object for a specific vid
524 * @bat_priv: the bat priv with all the soft interface information
525 * @vid: the identifier of the vlan object to retrieve
526 *
527 * Return: the private data of the vlan matching the vid passed as argument or
528 * NULL otherwise. The refcounter of the returned object is incremented by 1.
529 */
530 struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
531 unsigned short vid)
532 {
533 struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;
534
535 rcu_read_lock();
536 hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
537 if (vlan_tmp->vid != vid)
538 continue;
539
540 if (!kref_get_unless_zero(&vlan_tmp->refcount))
541 continue;
542
543 vlan = vlan_tmp;
544 break;
545 }
546 rcu_read_unlock();
547
548 return vlan;
549 }
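
/* Illustrative sketch (not part of the original file): callers of
 * batadv_softif_vlan_get() own a reference on success and must drop it
 * again, as batadv_softif_create_vlan() below does. A minimal get/use/put
 * pattern, here reading the per-vlan AP isolation flag:
 */
static bool __maybe_unused
batadv_example_vlan_ap_isolation(struct batadv_priv *bat_priv,
				 unsigned short vid)
{
	struct batadv_softif_vlan *vlan;
	bool enabled;

	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (!vlan)
		return false;

	enabled = atomic_read(&vlan->ap_isolation) != 0;

	/* drop the reference taken by batadv_softif_vlan_get() */
	batadv_softif_vlan_put(vlan);

	return enabled;
}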
550
551 /**
552 * batadv_softif_create_vlan() - allocate the needed resources for a new vlan
553 * @bat_priv: the bat priv with all the soft interface information
554 * @vid: the VLAN identifier
555 *
556 * Return: 0 on success, a negative error otherwise.
557 */
558 int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
559 {
560 struct batadv_softif_vlan *vlan;
561
562 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
563
564 vlan = batadv_softif_vlan_get(bat_priv, vid);
565 if (vlan) {
566 batadv_softif_vlan_put(vlan);
567 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
568 return -EEXIST;
569 }
570
571 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
572 if (!vlan) {
573 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
574 return -ENOMEM;
575 }
576
577 vlan->bat_priv = bat_priv;
578 vlan->vid = vid;
579 kref_init(&vlan->refcount);
580
581 atomic_set(&vlan->ap_isolation, 0);
582
583 kref_get(&vlan->refcount);
584 hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
585 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
586
587 /* add a new TT local entry. This one will be marked with the NOPURGE
588 * flag
589 */
590 batadv_tt_local_add(bat_priv->soft_iface,
591 bat_priv->soft_iface->dev_addr, vid,
592 BATADV_NULL_IFINDEX, BATADV_NO_MARK);
593
594 /* don't return reference to new softif_vlan */
595 batadv_softif_vlan_put(vlan);
596
597 return 0;
598 }
599
600 /**
601 * batadv_softif_destroy_vlan() - remove and destroy a softif_vlan object
602 * @bat_priv: the bat priv with all the soft interface information
603 * @vlan: the object to remove
604 */
605 static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
606 struct batadv_softif_vlan *vlan)
607 {
608 /* explicitly remove the associated TT local entry because it is marked
609 * with the NOPURGE flag
610 */
611 batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
612 vlan->vid, "vlan interface destroyed", false);
613
614 batadv_softif_vlan_put(vlan);
615 }
616
617 /**
618 * batadv_interface_add_vid() - ndo_add_vid API implementation
619 * @dev: the netdev of the mesh interface
620 * @proto: protocol of the vlan id
621 * @vid: identifier of the new vlan
622 *
623 * Set up all the internal structures for handling the new vlan on top of the
624 * mesh interface
625 *
626 * Return: 0 on success or a negative error code in case of failure.
627 */
628 static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
629 unsigned short vid)
630 {
631 struct batadv_priv *bat_priv = netdev_priv(dev);
632 struct batadv_softif_vlan *vlan;
633
634 /* only 802.1Q vlans are supported.
635 * batman-adv does not know how to handle other types
636 */
637 if (proto != htons(ETH_P_8021Q))
638 return -EINVAL;
639
640 vid |= BATADV_VLAN_HAS_TAG;
641
642 /* if a new vlan is getting created and it already exists, it means that
643 * it was not deleted yet. batadv_softif_vlan_get() increases the
644 * refcount in order to revive the object.
645 *
646 * if it does not exist then create it.
647 */
648 vlan = batadv_softif_vlan_get(bat_priv, vid);
649 if (!vlan)
650 return batadv_softif_create_vlan(bat_priv, vid);
651
652 /* add a new TT local entry. This one will be marked with the NOPURGE
653 * flag. This must be added again, even if the vlan object already
654 * exists, because the entry was deleted by kill_vid()
655 */
656 batadv_tt_local_add(bat_priv->soft_iface,
657 bat_priv->soft_iface->dev_addr, vid,
658 BATADV_NULL_IFINDEX, BATADV_NO_MARK);
659
660 return 0;
661 }
662
663 /**
664 * batadv_interface_kill_vid() - ndo_kill_vid API implementation
665 * @dev: the netdev of the mesh interface
666 * @proto: protocol of the vlan id
667 * @vid: identifier of the deleted vlan
668 *
669 * Destroy all the internal structures used to handle the vlan identified by vid
670 * on top of the mesh interface
671 *
672 * Return: 0 on success, -EINVAL if the specified prototype is not ETH_P_8021Q
673 * or -ENOENT if the specified vlan id wasn't registered.
674 */
675 static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
676 unsigned short vid)
677 {
678 struct batadv_priv *bat_priv = netdev_priv(dev);
679 struct batadv_softif_vlan *vlan;
680
681 /* only 802.1Q vlans are supported. batman-adv does not know how to
682 * handle other types
683 */
684 if (proto != htons(ETH_P_8021Q))
685 return -EINVAL;
686
687 vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
688 if (!vlan)
689 return -ENOENT;
690
691 batadv_softif_destroy_vlan(bat_priv, vlan);
692
693 /* finally free the vlan object */
694 batadv_softif_vlan_put(vlan);
695
696 return 0;
697 }
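
/* Illustrative sketch (not part of the original file): both ndo callbacks
 * above map the raw 802.1Q id to batman-adv's internal representation by
 * setting BATADV_VLAN_HAS_TAG, so tagged and untagged traffic can share
 * the same lookup helpers. A hypothetical wrapper:
 */
static __maybe_unused struct batadv_softif_vlan *
batadv_example_vlan_get_tagged(struct batadv_priv *bat_priv,
			       unsigned short vid)
{
	/* mark the id as carrying a real 802.1Q tag before the lookup */
	return batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
}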
698
699 /* batman-adv network devices have devices nesting below them and are a special
700 * "super class" of normal network devices; split their locks off into a
701 * separate class since they always nest.
702 */
703 static struct lock_class_key batadv_netdev_xmit_lock_key;
704 static struct lock_class_key batadv_netdev_addr_lock_key;
705
706 /**
707 * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
708 * @dev: device which owns the tx queue
709 * @txq: tx queue to modify
710 * @_unused: always NULL
711 */
712 static void batadv_set_lockdep_class_one(struct net_device *dev,
713 struct netdev_queue *txq,
714 void *_unused)
715 {
716 lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
717 }
718
719 /**
720 * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
721 * @dev: network device to modify
722 */
723 static void batadv_set_lockdep_class(struct net_device *dev)
724 {
725 lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
726 netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
727 }
728
729 /**
730 * batadv_softif_init_late() - late stage initialization of soft interface
731 * @dev: registered network device to modify
732 *
733 * Return: error code on failures
734 */
735 static int batadv_softif_init_late(struct net_device *dev)
736 {
737 struct batadv_priv *bat_priv;
738 u32 random_seqno;
739 int ret;
740 size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;
741
742 batadv_set_lockdep_class(dev);
743
744 bat_priv = netdev_priv(dev);
745 bat_priv->soft_iface = dev;
746
747 /* batadv_interface_stats() needs to be available as soon as
748 * register_netdevice() has been called
749 */
750 bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(u64));
751 if (!bat_priv->bat_counters)
752 return -ENOMEM;
753
754 atomic_set(&bat_priv->aggregated_ogms, 1);
755 atomic_set(&bat_priv->bonding, 0);
756 #ifdef CONFIG_BATMAN_ADV_BLA
757 atomic_set(&bat_priv->bridge_loop_avoidance, 1);
758 #endif
759 #ifdef CONFIG_BATMAN_ADV_DAT
760 atomic_set(&bat_priv->distributed_arp_table, 1);
761 #endif
762 #ifdef CONFIG_BATMAN_ADV_MCAST
763 atomic_set(&bat_priv->multicast_mode, 1);
764 atomic_set(&bat_priv->multicast_fanout, 16);
765 atomic_set(&bat_priv->mcast.num_want_all_unsnoopables, 0);
766 atomic_set(&bat_priv->mcast.num_want_all_ipv4, 0);
767 atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
768 atomic_set(&bat_priv->mcast.num_no_mc_ptype_capa, 0);
769 #endif
770 atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
771 atomic_set(&bat_priv->gw.bandwidth_down, 100);
772 atomic_set(&bat_priv->gw.bandwidth_up, 20);
773 atomic_set(&bat_priv->orig_interval, 1000);
774 atomic_set(&bat_priv->hop_penalty, 30);
775 #ifdef CONFIG_BATMAN_ADV_DEBUG
776 atomic_set(&bat_priv->log_level, 0);
777 #endif
778 atomic_set(&bat_priv->fragmentation, 1);
779 atomic_set(&bat_priv->packet_size_max, ETH_DATA_LEN);
780 atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
781 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
782
783 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
784 atomic_set(&bat_priv->bcast_seqno, 1);
785 atomic_set(&bat_priv->tt.vn, 0);
786 atomic_set(&bat_priv->tt.local_changes, 0);
787 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
788 #ifdef CONFIG_BATMAN_ADV_BLA
789 atomic_set(&bat_priv->bla.num_requests, 0);
790 #endif
791 atomic_set(&bat_priv->tp_num, 0);
792
793 bat_priv->tt.last_changeset = NULL;
794 bat_priv->tt.last_changeset_len = 0;
795 bat_priv->isolation_mark = 0;
796 bat_priv->isolation_mark_mask = 0;
797
798 /* randomize initial seqno to avoid collision */
799 get_random_bytes(&random_seqno, sizeof(random_seqno));
800 atomic_set(&bat_priv->frag_seqno, random_seqno);
801
802 bat_priv->primary_if = NULL;
803
804 batadv_nc_init_bat_priv(bat_priv);
805
806 if (!bat_priv->algo_ops) {
807 ret = batadv_algo_select(bat_priv, batadv_routing_algo);
808 if (ret < 0)
809 goto free_bat_counters;
810 }
811
812 ret = batadv_mesh_init(dev);
813 if (ret < 0)
814 goto free_bat_counters;
815
816 return 0;
817
818 free_bat_counters:
819 free_percpu(bat_priv->bat_counters);
820 bat_priv->bat_counters = NULL;
821
822 return ret;
823 }
824
825 /**
826 * batadv_softif_slave_add() - Add a slave interface to a batadv_soft_interface
827 * @dev: batadv_soft_interface used as master interface
828 * @slave_dev: net_device which should become the slave interface
829 * @extack: extended ACK report struct
830 *
831 * Return: 0 if successful or error otherwise.
832 */
833 static int batadv_softif_slave_add(struct net_device *dev,
834 struct net_device *slave_dev,
835 struct netlink_ext_ack *extack)
836 {
837 struct batadv_hard_iface *hard_iface;
838 int ret = -EINVAL;
839
840 hard_iface = batadv_hardif_get_by_netdev(slave_dev);
841 if (!hard_iface || hard_iface->soft_iface)
842 goto out;
843
844 ret = batadv_hardif_enable_interface(hard_iface, dev);
845
846 out:
847 batadv_hardif_put(hard_iface);
848 return ret;
849 }
850
851 /**
852 * batadv_softif_slave_del() - Delete a slave iface from a batadv_soft_interface
853 * @dev: batadv_soft_interface used as master interface
854 * @slave_dev: net_device which should be removed from the master interface
855 *
856 * Return: 0 if successful or error otherwise.
857 */
858 static int batadv_softif_slave_del(struct net_device *dev,
859 struct net_device *slave_dev)
860 {
861 struct batadv_hard_iface *hard_iface;
862 int ret = -EINVAL;
863
864 hard_iface = batadv_hardif_get_by_netdev(slave_dev);
865
866 if (!hard_iface || hard_iface->soft_iface != dev)
867 goto out;
868
869 batadv_hardif_disable_interface(hard_iface);
870 ret = 0;
871
872 out:
873 batadv_hardif_put(hard_iface);
874 return ret;
875 }
876
877 static const struct net_device_ops batadv_netdev_ops = {
878 .ndo_init = batadv_softif_init_late,
879 .ndo_open = batadv_interface_open,
880 .ndo_stop = batadv_interface_release,
881 .ndo_get_stats = batadv_interface_stats,
882 .ndo_vlan_rx_add_vid = batadv_interface_add_vid,
883 .ndo_vlan_rx_kill_vid = batadv_interface_kill_vid,
884 .ndo_set_mac_address = batadv_interface_set_mac_addr,
885 .ndo_change_mtu = batadv_interface_change_mtu,
886 .ndo_set_rx_mode = batadv_interface_set_rx_mode,
887 .ndo_start_xmit = batadv_interface_tx,
888 .ndo_validate_addr = eth_validate_addr,
889 .ndo_add_slave = batadv_softif_slave_add,
890 .ndo_del_slave = batadv_softif_slave_del,
891 };
892
893 static void batadv_get_drvinfo(struct net_device *dev,
894 struct ethtool_drvinfo *info)
895 {
896 strscpy(info->driver, "B.A.T.M.A.N. advanced", sizeof(info->driver));
897 strscpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version));
898 strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
899 strscpy(info->bus_info, "batman", sizeof(info->bus_info));
900 }
901
902 /* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
903 * Declare each description string in struct.name[] to get a fixed-sized
904 * buffer and compile-time checking for strings longer than ETH_GSTRING_LEN.
905 */
906 static const struct {
907 const char name[ETH_GSTRING_LEN];
908 } batadv_counters_strings[] = {
909 { "tx" },
910 { "tx_bytes" },
911 { "tx_dropped" },
912 { "rx" },
913 { "rx_bytes" },
914 { "forward" },
915 { "forward_bytes" },
916 { "mgmt_tx" },
917 { "mgmt_tx_bytes" },
918 { "mgmt_rx" },
919 { "mgmt_rx_bytes" },
920 { "frag_tx" },
921 { "frag_tx_bytes" },
922 { "frag_rx" },
923 { "frag_rx_bytes" },
924 { "frag_fwd" },
925 { "frag_fwd_bytes" },
926 { "tt_request_tx" },
927 { "tt_request_rx" },
928 { "tt_response_tx" },
929 { "tt_response_rx" },
930 { "tt_roam_adv_tx" },
931 { "tt_roam_adv_rx" },
932 #ifdef CONFIG_BATMAN_ADV_MCAST
933 { "mcast_tx" },
934 { "mcast_tx_bytes" },
935 { "mcast_tx_local" },
936 { "mcast_tx_local_bytes" },
937 { "mcast_rx" },
938 { "mcast_rx_bytes" },
939 { "mcast_rx_local" },
940 { "mcast_rx_local_bytes" },
941 { "mcast_fwd" },
942 { "mcast_fwd_bytes" },
943 #endif
944 #ifdef CONFIG_BATMAN_ADV_DAT
945 { "dat_get_tx" },
946 { "dat_get_rx" },
947 { "dat_put_tx" },
948 { "dat_put_rx" },
949 { "dat_cached_reply_tx" },
950 #endif
951 #ifdef CONFIG_BATMAN_ADV_NC
952 { "nc_code" },
953 { "nc_code_bytes" },
954 { "nc_recode" },
955 { "nc_recode_bytes" },
956 { "nc_buffer" },
957 { "nc_decode" },
958 { "nc_decode_bytes" },
959 { "nc_decode_failed" },
960 { "nc_sniffed" },
961 #endif
962 };
963
964 static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data)
965 {
966 if (stringset == ETH_SS_STATS)
967 memcpy(data, batadv_counters_strings,
968 sizeof(batadv_counters_strings));
969 }
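
/* Illustrative sketch (not part of the original file): the memcpy() above
 * relies on the string table staying in sync with the BATADV_CNT_* enum.
 * A hypothetical compile-time check for that assumption (it would need
 * linux/build_bug.h and linux/kernel.h, which this file only picks up
 * indirectly) could look like this:
 */
static void __maybe_unused batadv_example_check_counter_strings(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(batadv_counters_strings) != BATADV_CNT_NUM);
}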
970
971 static void batadv_get_ethtool_stats(struct net_device *dev,
972 struct ethtool_stats *stats, u64 *data)
973 {
974 struct batadv_priv *bat_priv = netdev_priv(dev);
975 int i;
976
977 for (i = 0; i < BATADV_CNT_NUM; i++)
978 data[i] = batadv_sum_counter(bat_priv, i);
979 }
980
981 static int batadv_get_sset_count(struct net_device *dev, int stringset)
982 {
983 if (stringset == ETH_SS_STATS)
984 return BATADV_CNT_NUM;
985
986 return -EOPNOTSUPP;
987 }
988
989 static const struct ethtool_ops batadv_ethtool_ops = {
990 .get_drvinfo = batadv_get_drvinfo,
991 .get_link = ethtool_op_get_link,
992 .get_strings = batadv_get_strings,
993 .get_ethtool_stats = batadv_get_ethtool_stats,
994 .get_sset_count = batadv_get_sset_count,
995 };
996
997 /**
998 * batadv_softif_free() - Deconstructor of batadv_soft_interface
999 * @dev: Device to cleanup and remove
1000 */
1001 static void batadv_softif_free(struct net_device *dev)
1002 {
1003 batadv_mesh_free(dev);
1004
1005 /* some scheduled RCU callbacks need the bat_priv struct to accomplish
1006 * their tasks. Wait for them all to be finished before freeing the
1007 * netdev and its private data (bat_priv)
1008 */
1009 rcu_barrier();
1010 }
1011
1012 /**
1013 * batadv_softif_init_early() - early stage initialization of soft interface
1014 * @dev: registered network device to modify
1015 */
1016 static void batadv_softif_init_early(struct net_device *dev)
1017 {
1018 ether_setup(dev);
1019
1020 dev->netdev_ops = &batadv_netdev_ops;
1021 dev->needs_free_netdev = true;
1022 dev->priv_destructor = batadv_softif_free;
1023 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1024 dev->priv_flags |= IFF_NO_QUEUE;
1025 dev->lltx = true;
1026 dev->netns_local = true;
1027
1028 /* can't call min_mtu, because the needed variables
1029 * have not been initialized yet
1030 */
1031 dev->mtu = ETH_DATA_LEN;
1032
1033 /* generate random address */
1034 eth_hw_addr_random(dev);
1035
1036 dev->ethtool_ops = &batadv_ethtool_ops;
1037 }
1038
1039 /**
1040 * batadv_softif_validate() - validate configuration of new batadv link
1041 * @tb: IFLA_INFO_DATA netlink attributes
1042 * @data: enum batadv_ifla_attrs attributes
1043 * @extack: extended ACK report struct
1044 *
1045 * Return: 0 if successful or error otherwise.
1046 */
1047 static int batadv_softif_validate(struct nlattr *tb[], struct nlattr *data[],
1048 struct netlink_ext_ack *extack)
1049 {
1050 struct batadv_algo_ops *algo_ops;
1051
1052 if (!data)
1053 return 0;
1054
1055 if (data[IFLA_BATADV_ALGO_NAME]) {
1056 algo_ops = batadv_algo_get(nla_data(data[IFLA_BATADV_ALGO_NAME]));
1057 if (!algo_ops)
1058 return -EINVAL;
1059 }
1060
1061 return 0;
1062 }
1063
1064 /**
1065 * batadv_softif_newlink() - pre-initialize and register new batadv link
1066 * @src_net: the applicable net namespace
1067 * @dev: network device to register
1068 * @tb: IFLA_INFO_DATA netlink attributes
1069 * @data: enum batadv_ifla_attrs attributes
1070 * @extack: extended ACK report struct
1071 *
1072 * Return: 0 if successful or error otherwise.
1073 */
1074 static int batadv_softif_newlink(struct net *src_net, struct net_device *dev,
1075 struct nlattr *tb[], struct nlattr *data[],
1076 struct netlink_ext_ack *extack)
1077 {
1078 struct batadv_priv *bat_priv = netdev_priv(dev);
1079 const char *algo_name;
1080 int err;
1081
1082 if (data && data[IFLA_BATADV_ALGO_NAME]) {
1083 algo_name = nla_data(data[IFLA_BATADV_ALGO_NAME]);
1084 err = batadv_algo_select(bat_priv, algo_name);
1085 if (err)
1086 return -EINVAL;
1087 }
1088
1089 return register_netdevice(dev);
1090 }
1091
1092 /**
1093 * batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via
1094 * netlink
1095 * @soft_iface: the to-be-removed batman-adv interface
1096 * @head: list pointer
1097 */
1098 static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
1099 struct list_head *head)
1100 {
1101 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
1102 struct batadv_hard_iface *hard_iface;
1103 struct batadv_softif_vlan *vlan;
1104
1105 list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
1106 if (hard_iface->soft_iface == soft_iface)
1107 batadv_hardif_disable_interface(hard_iface);
1108 }
1109
1110 /* destroy the "untagged" VLAN */
1111 vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
1112 if (vlan) {
1113 batadv_softif_destroy_vlan(bat_priv, vlan);
1114 batadv_softif_vlan_put(vlan);
1115 }
1116
1117 unregister_netdevice_queue(soft_iface, head);
1118 }
1119
1120 /**
1121 * batadv_softif_is_valid() - Check whether device is a batadv soft interface
1122 * @net_dev: device which should be checked
1123 *
1124 * Return: true when net_dev is a batman-adv interface, false otherwise
1125 */
1126 bool batadv_softif_is_valid(const struct net_device *net_dev)
1127 {
1128 if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
1129 return true;
1130
1131 return false;
1132 }
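
/* Illustrative sketch (not part of the original file): code that receives
 * an arbitrary net_device should verify it really is a batman-adv soft
 * interface before touching its private area. Hypothetical example:
 */
static __maybe_unused struct batadv_priv *
batadv_example_priv_get(struct net_device *net_dev)
{
	if (!batadv_softif_is_valid(net_dev))
		return NULL;

	/* safe now: netdev_priv() of a soft interface is a batadv_priv */
	return netdev_priv(net_dev);
}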
1133
1134 static const struct nla_policy batadv_ifla_policy[IFLA_BATADV_MAX + 1] = {
1135 [IFLA_BATADV_ALGO_NAME] = { .type = NLA_NUL_STRING },
1136 };
1137
1138 struct rtnl_link_ops batadv_link_ops __read_mostly = {
1139 .kind = "batadv",
1140 .priv_size = sizeof(struct batadv_priv),
1141 .setup = batadv_softif_init_early,
1142 .maxtype = IFLA_BATADV_MAX,
1143 .policy = batadv_ifla_policy,
1144 .validate = batadv_softif_validate,
1145 .newlink = batadv_softif_newlink,
1146 .dellink = batadv_softif_destroy_netlink,
1147 };
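
/* Illustrative sketch (not part of the original file): batadv_link_ops is
 * presumably registered during module init elsewhere (e.g. in main.c) via
 * the standard rtnetlink API, roughly as below. The declaration of
 * rtnl_link_register() comes from net/rtnetlink.h, which this file only
 * picks up indirectly.
 */
static int __maybe_unused batadv_example_register_link_ops(void)
{
	/* lets "ip link add ... type batadv" create soft interfaces */
	return rtnl_link_register(&batadv_link_ops);
}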
1148