xref: /linux/net/batman-adv/originator.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
2  *
3  * Marek Lindner, Simon Wunderlich
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of version 2 of the GNU General Public
7  * License as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 
18 #include "originator.h"
19 #include "main.h"
20 
21 #include <linux/errno.h>
22 #include <linux/etherdevice.h>
23 #include <linux/fs.h>
24 #include <linux/jiffies.h>
25 #include <linux/kernel.h>
26 #include <linux/list.h>
27 #include <linux/lockdep.h>
28 #include <linux/netdevice.h>
29 #include <linux/rculist.h>
30 #include <linux/seq_file.h>
31 #include <linux/slab.h>
32 #include <linux/spinlock.h>
33 #include <linux/workqueue.h>
34 
35 #include "distributed-arp-table.h"
36 #include "fragmentation.h"
37 #include "gateway_client.h"
38 #include "hard-interface.h"
39 #include "hash.h"
40 #include "multicast.h"
41 #include "network-coding.h"
42 #include "routing.h"
43 #include "translation-table.h"
44 
45 /* hash class keys */
46 static struct lock_class_key batadv_orig_hash_lock_class_key;
47 
48 static void batadv_purge_orig(struct work_struct *work);
49 
50 /* returns 1 if they are the same originator */
51 int batadv_compare_orig(const struct hlist_node *node, const void *data2)
52 {
53 	const void *data1 = container_of(node, struct batadv_orig_node,
54 					 hash_entry);
55 
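	/* data1 points at the orig_node, whose originator MAC address ('orig')
	 * is expected to be the first member of struct batadv_orig_node;
	 * batadv_compare_eth() only compares the first ETH_ALEN bytes
	 */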
56 	return batadv_compare_eth(data1, data2);
57 }
58 
59 /**
60  * batadv_orig_node_vlan_get - get an orig_node_vlan object
61  * @orig_node: the originator serving the VLAN
62  * @vid: the VLAN identifier
63  *
64  * Returns the vlan object identified by vid and belonging to orig_node or NULL
65  * if it does not exist. The object is returned with refcount increased by 1.
66  */
67 struct batadv_orig_node_vlan *
68 batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
69 			  unsigned short vid)
70 {
71 	struct batadv_orig_node_vlan *vlan = NULL, *tmp;
72 
73 	rcu_read_lock();
74 	hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
75 		if (tmp->vid != vid)
76 			continue;
77 
78 		if (!atomic_inc_not_zero(&tmp->refcount))
79 			continue;
80 
81 		vlan = tmp;
82 
83 		break;
84 	}
85 	rcu_read_unlock();
86 
87 	return vlan;
88 }
89 
90 /**
91  * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
92  *  object
93  * @orig_node: the originator serving the VLAN
94  * @vid: the VLAN identifier
95  *
96  * Returns NULL in case of failure or the vlan object identified by vid and
97  * belonging to orig_node otherwise. The object is created and added to the list
98  * if it does not exist.
99  *
100  * The object is returned with refcounter increased by 1.
101  */
102 struct batadv_orig_node_vlan *
103 batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
104 			  unsigned short vid)
105 {
106 	struct batadv_orig_node_vlan *vlan;
107 
108 	spin_lock_bh(&orig_node->vlan_list_lock);
109 
110 	/* first look if an object for this vid already exists */
111 	vlan = batadv_orig_node_vlan_get(orig_node, vid);
112 	if (vlan)
113 		goto out;
114 
115 	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
116 	if (!vlan)
117 		goto out;
118 
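	/* refcount of 2: one reference for the vlan_list, one for the caller */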
119 	atomic_set(&vlan->refcount, 2);
120 	vlan->vid = vid;
121 
122 	hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
123 
124 out:
125 	spin_unlock_bh(&orig_node->vlan_list_lock);
126 
127 	return vlan;
128 }
129 
130 /**
131  * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
132  *  the originator-vlan object
133  * @orig_vlan: the originator-vlan object to release
134  */
135 void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
136 {
137 	if (atomic_dec_and_test(&orig_vlan->refcount))
138 		kfree_rcu(orig_vlan, rcu);
139 }
140 
141 int batadv_originator_init(struct batadv_priv *bat_priv)
142 {
143 	if (bat_priv->orig_hash)
144 		return 0;
145 
146 	bat_priv->orig_hash = batadv_hash_new(1024);
147 
148 	if (!bat_priv->orig_hash)
149 		goto err;
150 
151 	batadv_hash_set_lock_class(bat_priv->orig_hash,
152 				   &batadv_orig_hash_lock_class_key);
153 
154 	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
155 	queue_delayed_work(batadv_event_workqueue,
156 			   &bat_priv->orig_work,
157 			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
158 
159 	return 0;
160 
161 err:
162 	return -ENOMEM;
163 }
164 
165 /**
166  * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
167  * @rcu: rcu pointer of the neigh_ifinfo object
168  */
169 static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
170 {
171 	struct batadv_neigh_ifinfo *neigh_ifinfo;
172 
173 	neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
174 
175 	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
176 		batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
177 
178 	kfree(neigh_ifinfo);
179 }
180 
181 /**
182  * batadv_neigh_ifinfo_free_ref_now - decrement the refcounter and possibly
183  *  free the neigh_ifinfo (without rcu callback)
184  * @neigh_ifinfo: the neigh_ifinfo object to release
185  */
186 static void
187 batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
188 {
189 	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
190 		batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
191 }
192 
193 /**
194  * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
195  *  the neigh_ifinfo
196  * @neigh_ifinfo: the neigh_ifinfo object to release
197  */
198 void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
199 {
200 	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
201 		call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
202 }
203 
204 /**
205  * batadv_neigh_node_free_rcu - free the neigh_node
206  * @rcu: rcu pointer of the neigh_node
207  */
208 static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
209 {
210 	struct hlist_node *node_tmp;
211 	struct batadv_neigh_node *neigh_node;
212 	struct batadv_neigh_ifinfo *neigh_ifinfo;
213 	struct batadv_algo_ops *bao;
214 
215 	neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
216 	bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
217 
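	/* release the attached ifinfo entries synchronously; the _now variant
	 * skips scheduling a further RCU callback
	 */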
218 	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
219 				  &neigh_node->ifinfo_list, list) {
220 		batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
221 	}
222 
223 	if (bao->bat_neigh_free)
224 		bao->bat_neigh_free(neigh_node);
225 
226 	batadv_hardif_free_ref_now(neigh_node->if_incoming);
227 
228 	kfree(neigh_node);
229 }
230 
231 /**
232  * batadv_neigh_node_free_ref_now - decrement the neighbor's refcounter
233  *  and possibly free it (without rcu callback)
234  * @neigh_node: the neighbor node to free
235  */
236 static void
237 batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
238 {
239 	if (atomic_dec_and_test(&neigh_node->refcount))
240 		batadv_neigh_node_free_rcu(&neigh_node->rcu);
241 }
242 
243 /**
244  * batadv_neigh_node_free_ref - decrement the neighbor's refcounter
245  *  and possibly free it
246  * @neigh_node: the neighbor node to free
247  */
248 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
249 {
250 	if (atomic_dec_and_test(&neigh_node->refcount))
251 		call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
252 }
253 
254 /**
255  * batadv_orig_router_get - router to the originator depending on iface
256  * @orig_node: the orig node for the router
257  * @if_outgoing: the interface where the payload packet has been received or
258  *  the OGM should be sent to
259  *
260  * Returns the neighbor which should be router for this orig_node/iface.
261  *
262  * The object is returned with refcounter increased by 1.
263  */
264 struct batadv_neigh_node *
265 batadv_orig_router_get(struct batadv_orig_node *orig_node,
266 		       const struct batadv_hard_iface *if_outgoing)
267 {
268 	struct batadv_orig_ifinfo *orig_ifinfo;
269 	struct batadv_neigh_node *router = NULL;
270 
271 	rcu_read_lock();
272 	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
273 		if (orig_ifinfo->if_outgoing != if_outgoing)
274 			continue;
275 
276 		router = rcu_dereference(orig_ifinfo->router);
277 		break;
278 	}
279 
280 	if (router && !atomic_inc_not_zero(&router->refcount))
281 		router = NULL;
282 
283 	rcu_read_unlock();
284 	return router;
285 }
286 
287 /**
288  * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
289  * @orig_node: the orig node to be queried
290  * @if_outgoing: the interface for which the ifinfo should be acquired
291  *
292  * Returns the requested orig_ifinfo or NULL if not found.
293  *
294  * The object is returned with refcounter increased by 1.
295  */
296 struct batadv_orig_ifinfo *
297 batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
298 		       struct batadv_hard_iface *if_outgoing)
299 {
300 	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;
301 
302 	rcu_read_lock();
303 	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
304 				 list) {
305 		if (tmp->if_outgoing != if_outgoing)
306 			continue;
307 
308 		if (!atomic_inc_not_zero(&tmp->refcount))
309 			continue;
310 
311 		orig_ifinfo = tmp;
312 		break;
313 	}
314 	rcu_read_unlock();
315 
316 	return orig_ifinfo;
317 }
318 
319 /**
320  * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
321  * @orig_node: the orig node to be queried
322  * @if_outgoing: the interface for which the ifinfo should be acquired
323  *
324  * Returns NULL in case of failure or the orig_ifinfo object for the if_outgoing
325  * interface otherwise. The object is created and added to the list
326  * if it does not exist.
327  *
328  * The object is returned with refcounter increased by 1.
329  */
330 struct batadv_orig_ifinfo *
331 batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
332 		       struct batadv_hard_iface *if_outgoing)
333 {
334 	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
335 	unsigned long reset_time;
336 
337 	spin_lock_bh(&orig_node->neigh_list_lock);
338 
339 	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
340 	if (orig_ifinfo)
341 		goto out;
342 
343 	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
344 	if (!orig_ifinfo)
345 		goto out;
346 
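	/* hold a reference on the outgoing interface; BATADV_IF_DEFAULT is a
	 * NULL placeholder for the default entry and needs no reference
	 */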
347 	if (if_outgoing != BATADV_IF_DEFAULT &&
348 	    !atomic_inc_not_zero(&if_outgoing->refcount)) {
349 		kfree(orig_ifinfo);
350 		orig_ifinfo = NULL;
351 		goto out;
352 	}
353 
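	/* place the last seqno reset far enough in the past that the reset
	 * protection window (BATADV_RESET_PROTECTION_MS) is already expired
	 */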
354 	reset_time = jiffies - 1;
355 	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
356 	orig_ifinfo->batman_seqno_reset = reset_time;
357 	orig_ifinfo->if_outgoing = if_outgoing;
358 	INIT_HLIST_NODE(&orig_ifinfo->list);
359 	atomic_set(&orig_ifinfo->refcount, 2);
360 	hlist_add_head_rcu(&orig_ifinfo->list,
361 			   &orig_node->ifinfo_list);
362 out:
363 	spin_unlock_bh(&orig_node->neigh_list_lock);
364 	return orig_ifinfo;
365 }
366 
367 /**
368  * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
369  * @neigh: the neigh node to be queried
370  * @if_outgoing: the interface for which the ifinfo should be acquired
371  *
372  * The object is returned with refcounter increased by 1.
373  *
374  * Returns the requested neigh_ifinfo or NULL if not found
375  */
376 struct batadv_neigh_ifinfo *
377 batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
378 			struct batadv_hard_iface *if_outgoing)
379 {
380 	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
381 				   *tmp_neigh_ifinfo;
382 
383 	rcu_read_lock();
384 	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
385 				 list) {
386 		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
387 			continue;
388 
389 		if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
390 			continue;
391 
392 		neigh_ifinfo = tmp_neigh_ifinfo;
393 		break;
394 	}
395 	rcu_read_unlock();
396 
397 	return neigh_ifinfo;
398 }
399 
400 /**
401  * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
402  * @neigh: the neigh node to be queried
403  * @if_outgoing: the interface for which the ifinfo should be acquired
404  *
405  * Returns NULL in case of failure or the neigh_ifinfo object for the
406  * if_outgoing interface otherwise. The object is created and added to the list
407  * if it does not exist.
408  *
409  * The object is returned with refcounter increased by 1.
410  */
411 struct batadv_neigh_ifinfo *
412 batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
413 			struct batadv_hard_iface *if_outgoing)
414 {
415 	struct batadv_neigh_ifinfo *neigh_ifinfo;
416 
417 	spin_lock_bh(&neigh->ifinfo_lock);
418 
419 	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
420 	if (neigh_ifinfo)
421 		goto out;
422 
423 	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
424 	if (!neigh_ifinfo)
425 		goto out;
426 
427 	if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
428 		kfree(neigh_ifinfo);
429 		neigh_ifinfo = NULL;
430 		goto out;
431 	}
432 
433 	INIT_HLIST_NODE(&neigh_ifinfo->list);
434 	atomic_set(&neigh_ifinfo->refcount, 2);
435 	neigh_ifinfo->if_outgoing = if_outgoing;
436 
437 	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
438 
439 out:
440 	spin_unlock_bh(&neigh->ifinfo_lock);
441 
442 	return neigh_ifinfo;
443 }
444 
445 /**
446  * batadv_neigh_node_get - retrieve a neighbour from the list
447  * @orig_node: originator which the neighbour belongs to
448  * @hard_iface: the interface to which this neighbour is connected
449  * @addr: the address of the neighbour
450  *
451  * Looks for a neighbour of this originator which is connected through the
452  * provided hard interface.
453  * Returns the neighbour (with refcount increased by 1) or NULL if not found.
454  */
455 static struct batadv_neigh_node *
456 batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
457 		      const struct batadv_hard_iface *hard_iface,
458 		      const u8 *addr)
459 {
460 	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
461 
462 	rcu_read_lock();
463 	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
464 		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
465 			continue;
466 
467 		if (tmp_neigh_node->if_incoming != hard_iface)
468 			continue;
469 
470 		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
471 			continue;
472 
473 		res = tmp_neigh_node;
474 		break;
475 	}
476 	rcu_read_unlock();
477 
478 	return res;
479 }
480 
481 /**
482  * batadv_neigh_node_new - create and init a new neigh_node object
483  * @orig_node: originator object representing the neighbour
484  * @hard_iface: the interface to which the neighbour is connected
485  * @neigh_addr: the mac address of the neighbour interface
486  *
487  * Allocates a new neigh_node object and initialises all the generic fields.
488  * Returns the new (or already existing) object, or NULL on failure.
489  */
490 struct batadv_neigh_node *
491 batadv_neigh_node_new(struct batadv_orig_node *orig_node,
492 		      struct batadv_hard_iface *hard_iface,
493 		      const u8 *neigh_addr)
494 {
495 	struct batadv_neigh_node *neigh_node;
496 
497 	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
498 	if (neigh_node)
499 		goto out;
500 
501 	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
502 	if (!neigh_node)
503 		goto out;
504 
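	/* take a reference on the incoming interface; it is kept in
	 * neigh_node->if_incoming and dropped again when the neighbor is freed
	 */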
505 	if (!atomic_inc_not_zero(&hard_iface->refcount)) {
506 		kfree(neigh_node);
507 		neigh_node = NULL;
508 		goto out;
509 	}
510 
511 	INIT_HLIST_NODE(&neigh_node->list);
512 	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
513 	spin_lock_init(&neigh_node->ifinfo_lock);
514 
515 	ether_addr_copy(neigh_node->addr, neigh_addr);
516 	neigh_node->if_incoming = hard_iface;
517 	neigh_node->orig_node = orig_node;
518 
519 	/* extra reference for return */
520 	atomic_set(&neigh_node->refcount, 2);
521 
522 	spin_lock_bh(&orig_node->neigh_list_lock);
523 	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
524 	spin_unlock_bh(&orig_node->neigh_list_lock);
525 
526 	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
527 		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
528 		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);
529 
530 out:
531 	return neigh_node;
532 }
533 
534 /**
535  * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
536  * @rcu: rcu pointer of the orig_ifinfo object
537  */
538 static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
539 {
540 	struct batadv_orig_ifinfo *orig_ifinfo;
541 	struct batadv_neigh_node *router;
542 
543 	orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
544 
545 	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
546 		batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
547 
548 	/* this is the last reference to this object */
549 	router = rcu_dereference_protected(orig_ifinfo->router, true);
550 	if (router)
551 		batadv_neigh_node_free_ref_now(router);
552 	kfree(orig_ifinfo);
553 }
554 
555 /**
556  * batadv_orig_ifinfo_free_ref_now - decrement the refcounter and possibly
557  *  free the orig_ifinfo (without rcu callback)
558  * @orig_ifinfo: the orig_ifinfo object to release
559  */
560 static void
561 batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
562 {
563 	if (atomic_dec_and_test(&orig_ifinfo->refcount))
564 		batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
565 }
566 
567 /**
568  * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
569  *  the orig_ifinfo
570  * @orig_ifinfo: the orig_ifinfo object to release
571  */
572 void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
573 {
574 	if (atomic_dec_and_test(&orig_ifinfo->refcount))
575 		call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
576 }
577 
578 static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
579 {
580 	struct hlist_node *node_tmp;
581 	struct batadv_neigh_node *neigh_node;
582 	struct batadv_orig_node *orig_node;
583 	struct batadv_orig_ifinfo *orig_ifinfo;
584 
585 	orig_node = container_of(rcu, struct batadv_orig_node, rcu);
586 
587 	spin_lock_bh(&orig_node->neigh_list_lock);
588 
589 	/* for all neighbors towards this originator ... */
590 	hlist_for_each_entry_safe(neigh_node, node_tmp,
591 				  &orig_node->neigh_list, list) {
592 		hlist_del_rcu(&neigh_node->list);
593 		batadv_neigh_node_free_ref_now(neigh_node);
594 	}
595 
596 	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
597 				  &orig_node->ifinfo_list, list) {
598 		hlist_del_rcu(&orig_ifinfo->list);
599 		batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
600 	}
601 	spin_unlock_bh(&orig_node->neigh_list_lock);
602 
603 	batadv_mcast_purge_orig(orig_node);
604 
605 	/* Free nc_nodes */
606 	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
607 
608 	batadv_frag_purge_orig(orig_node, NULL);
609 
610 	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
611 		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
612 
613 	kfree(orig_node->tt_buff);
614 	kfree(orig_node);
615 }
616 
617 /**
618  * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
619  * schedule an rcu callback for freeing it
620  * @orig_node: the orig node to free
621  */
622 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
623 {
624 	if (atomic_dec_and_test(&orig_node->refcount))
625 		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
626 }
627 
628 /**
629  * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
630  * possibly free it (without rcu callback)
631  * @orig_node: the orig node to free
632  */
633 void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
634 {
635 	if (atomic_dec_and_test(&orig_node->refcount))
636 		batadv_orig_node_free_rcu(&orig_node->rcu);
637 }
638 
639 void batadv_originator_free(struct batadv_priv *bat_priv)
640 {
641 	struct batadv_hashtable *hash = bat_priv->orig_hash;
642 	struct hlist_node *node_tmp;
643 	struct hlist_head *head;
644 	spinlock_t *list_lock; /* spinlock to protect write access */
645 	struct batadv_orig_node *orig_node;
646 	u32 i;
647 
648 	if (!hash)
649 		return;
650 
651 	cancel_delayed_work_sync(&bat_priv->orig_work);
652 
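	/* detach the hash so other code paths can no longer reach it */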
653 	bat_priv->orig_hash = NULL;
654 
655 	for (i = 0; i < hash->size; i++) {
656 		head = &hash->table[i];
657 		list_lock = &hash->list_locks[i];
658 
659 		spin_lock_bh(list_lock);
660 		hlist_for_each_entry_safe(orig_node, node_tmp,
661 					  head, hash_entry) {
662 			hlist_del_rcu(&orig_node->hash_entry);
663 			batadv_orig_node_free_ref(orig_node);
664 		}
665 		spin_unlock_bh(list_lock);
666 	}
667 
668 	batadv_hash_destroy(hash);
669 }
670 
671 /**
672  * batadv_orig_node_new - creates a new orig_node
673  * @bat_priv: the bat priv with all the soft interface information
674  * @addr: the mac address of the originator
675  *
676  * Creates a new originator object and initialise all the generic fields.
677  * The new object is not added to the originator list.
678  * Returns the newly created object or NULL on failure.
679  */
680 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
681 					      const u8 *addr)
682 {
683 	struct batadv_orig_node *orig_node;
684 	struct batadv_orig_node_vlan *vlan;
685 	unsigned long reset_time;
686 	int i;
687 
688 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
689 		   "Creating new originator: %pM\n", addr);
690 
691 	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
692 	if (!orig_node)
693 		return NULL;
694 
695 	INIT_HLIST_HEAD(&orig_node->neigh_list);
696 	INIT_HLIST_HEAD(&orig_node->vlan_list);
697 	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
698 	spin_lock_init(&orig_node->bcast_seqno_lock);
699 	spin_lock_init(&orig_node->neigh_list_lock);
700 	spin_lock_init(&orig_node->tt_buff_lock);
701 	spin_lock_init(&orig_node->tt_lock);
702 	spin_lock_init(&orig_node->vlan_list_lock);
703 
704 	batadv_nc_init_orig(orig_node);
705 
706 	/* extra reference for return */
707 	atomic_set(&orig_node->refcount, 2);
708 
709 	orig_node->bat_priv = bat_priv;
710 	ether_addr_copy(orig_node->orig, addr);
711 	batadv_dat_init_orig_node_addr(orig_node);
712 	atomic_set(&orig_node->last_ttvn, 0);
713 	orig_node->tt_buff = NULL;
714 	orig_node->tt_buff_len = 0;
715 	orig_node->last_seen = jiffies;
716 	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
717 	orig_node->bcast_seqno_reset = reset_time;
718 
719 #ifdef CONFIG_BATMAN_ADV_MCAST
720 	orig_node->mcast_flags = BATADV_NO_FLAGS;
721 	INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
722 	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
723 	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
724 	spin_lock_init(&orig_node->mcast_handler_lock);
725 #endif
726 
727 	/* create a vlan object for the "untagged" LAN */
728 	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
729 	if (!vlan)
730 		goto free_orig_node;
731 	/* batadv_orig_node_vlan_new() increases the refcounter.
732 	 * Immediately release vlan since it is not needed anymore in this
733 	 * context
734 	 */
735 	batadv_orig_node_vlan_free_ref(vlan);
736 
737 	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
738 		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
739 		spin_lock_init(&orig_node->fragments[i].lock);
740 		orig_node->fragments[i].size = 0;
741 	}
742 
743 	return orig_node;
744 free_orig_node:
745 	kfree(orig_node);
746 	return NULL;
747 }
748 
749 /**
750  * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
751  * @bat_priv: the bat priv with all the soft interface information
752  * @neigh: neigh node which is to be checked
753  */
754 static void
755 batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
756 			  struct batadv_neigh_node *neigh)
757 {
758 	struct batadv_neigh_ifinfo *neigh_ifinfo;
759 	struct batadv_hard_iface *if_outgoing;
760 	struct hlist_node *node_tmp;
761 
762 	spin_lock_bh(&neigh->ifinfo_lock);
763 
764 	/* for all ifinfo objects of this neighbor */
765 	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
766 				  &neigh->ifinfo_list, list) {
767 		if_outgoing = neigh_ifinfo->if_outgoing;
768 
769 		/* always keep the default interface */
770 		if (if_outgoing == BATADV_IF_DEFAULT)
771 			continue;
772 
773 		/* don't purge if the interface is not (going) down */
774 		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
775 		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
776 		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
777 			continue;
778 
779 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
780 			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
781 			   neigh->addr, if_outgoing->net_dev->name);
782 
783 		hlist_del_rcu(&neigh_ifinfo->list);
784 		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
785 	}
786 
787 	spin_unlock_bh(&neigh->ifinfo_lock);
788 }
789 
790 /**
791  * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
792  * @bat_priv: the bat priv with all the soft interface information
793  * @orig_node: orig node which is to be checked
794  *
795  * Returns true if any ifinfo entry was purged, false otherwise.
796  */
797 static bool
798 batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
799 			 struct batadv_orig_node *orig_node)
800 {
801 	struct batadv_orig_ifinfo *orig_ifinfo;
802 	struct batadv_hard_iface *if_outgoing;
803 	struct hlist_node *node_tmp;
804 	bool ifinfo_purged = false;
805 
806 	spin_lock_bh(&orig_node->neigh_list_lock);
807 
808 	/* for all ifinfo objects for this originator */
809 	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
810 				  &orig_node->ifinfo_list, list) {
811 		if_outgoing = orig_ifinfo->if_outgoing;
812 
813 		/* always keep the default interface */
814 		if (if_outgoing == BATADV_IF_DEFAULT)
815 			continue;
816 
817 		/* don't purge if the interface is not (going) down */
818 		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
819 		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
820 		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
821 			continue;
822 
823 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
824 			   "router/ifinfo purge: originator %pM, iface: %s\n",
825 			   orig_node->orig, if_outgoing->net_dev->name);
826 
827 		ifinfo_purged = true;
828 
829 		hlist_del_rcu(&orig_ifinfo->list);
830 		batadv_orig_ifinfo_free_ref(orig_ifinfo);
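		/* last_bonding_candidate holds its own reference to the
		 * orig_ifinfo, so that reference has to be dropped as well
		 */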
831 		if (orig_node->last_bonding_candidate == orig_ifinfo) {
832 			orig_node->last_bonding_candidate = NULL;
833 			batadv_orig_ifinfo_free_ref(orig_ifinfo);
834 		}
835 	}
836 
837 	spin_unlock_bh(&orig_node->neigh_list_lock);
838 
839 	return ifinfo_purged;
840 }
841 
842 /**
843  * batadv_purge_orig_neighbors - purges neighbors from originator
844  * @bat_priv: the bat priv with all the soft interface information
845  * @orig_node: orig node which is to be checked
846  *
847  * Returns true if any neighbor was purged, false otherwise
848  */
849 static bool
850 batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
851 			    struct batadv_orig_node *orig_node)
852 {
853 	struct hlist_node *node_tmp;
854 	struct batadv_neigh_node *neigh_node;
855 	bool neigh_purged = false;
856 	unsigned long last_seen;
857 	struct batadv_hard_iface *if_incoming;
858 
859 	spin_lock_bh(&orig_node->neigh_list_lock);
860 
861 	/* for all neighbors towards this originator ... */
862 	hlist_for_each_entry_safe(neigh_node, node_tmp,
863 				  &orig_node->neigh_list, list) {
864 		last_seen = neigh_node->last_seen;
865 		if_incoming = neigh_node->if_incoming;
866 
867 		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
868 		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
869 		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
870 		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
871 			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
872 			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
873 			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
874 				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
875 					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
876 					   orig_node->orig, neigh_node->addr,
877 					   if_incoming->net_dev->name);
878 			else
879 				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
880 					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
881 					   orig_node->orig, neigh_node->addr,
882 					   jiffies_to_msecs(last_seen));
883 
884 			neigh_purged = true;
885 
886 			hlist_del_rcu(&neigh_node->list);
887 			batadv_neigh_node_free_ref(neigh_node);
888 		} else {
889 			/* only necessary if the neighbor itself is kept but
890 			 * one of its interfaces has been removed
891 			 */
892 			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
893 		}
894 	}
895 
896 	spin_unlock_bh(&orig_node->neigh_list_lock);
897 	return neigh_purged;
898 }
899 
900 /**
901  * batadv_find_best_neighbor - finds the best neighbor after purging
902  * @bat_priv: the bat priv with all the soft interface information
903  * @orig_node: orig node which is to be checked
904  * @if_outgoing: the interface for which the metric should be compared
905  *
906  * Returns the current best neighbor, with refcount increased.
907  */
908 static struct batadv_neigh_node *
909 batadv_find_best_neighbor(struct batadv_priv *bat_priv,
910 			  struct batadv_orig_node *orig_node,
911 			  struct batadv_hard_iface *if_outgoing)
912 {
913 	struct batadv_neigh_node *best = NULL, *neigh;
914 	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
915 
916 	rcu_read_lock();
917 	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
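		/* bat_neigh_cmp() returns a value greater than 0 when the
		 * first neighbor offers the better metric on if_outgoing
		 */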
918 		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
919 						best, if_outgoing) <= 0))
920 			continue;
921 
922 		if (!atomic_inc_not_zero(&neigh->refcount))
923 			continue;
924 
925 		if (best)
926 			batadv_neigh_node_free_ref(best);
927 
928 		best = neigh;
929 	}
930 	rcu_read_unlock();
931 
932 	return best;
933 }
934 
935 /**
936  * batadv_purge_orig_node - purges obsolete information from an orig_node
937  * @bat_priv: the bat priv with all the soft interface information
938  * @orig_node: orig node which is to be checked
939  *
940  * This function checks if the orig_node or substructures of it have become
941  * obsolete, and purges this information if that's the case.
942  *
943  * Returns true if the orig_node is to be removed, false otherwise.
944  */
945 static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
946 				   struct batadv_orig_node *orig_node)
947 {
948 	struct batadv_neigh_node *best_neigh_node;
949 	struct batadv_hard_iface *hard_iface;
950 	bool changed_ifinfo, changed_neigh;
951 
952 	if (batadv_has_timed_out(orig_node->last_seen,
953 				 2 * BATADV_PURGE_TIMEOUT)) {
954 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
955 			   "Originator timeout: originator %pM, last_seen %u\n",
956 			   orig_node->orig,
957 			   jiffies_to_msecs(orig_node->last_seen));
958 		return true;
959 	}
960 	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
961 	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
962 
963 	if (!changed_ifinfo && !changed_neigh)
964 		return false;
965 
966 	/* first update the route for the default interface (BATADV_IF_DEFAULT) ... */
967 	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
968 						    BATADV_IF_DEFAULT);
969 	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
970 			    best_neigh_node);
971 	if (best_neigh_node)
972 		batadv_neigh_node_free_ref(best_neigh_node);
973 
974 	/* ... then for all other interfaces. */
975 	rcu_read_lock();
976 	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
977 		if (hard_iface->if_status != BATADV_IF_ACTIVE)
978 			continue;
979 
980 		if (hard_iface->soft_iface != bat_priv->soft_iface)
981 			continue;
982 
983 		best_neigh_node = batadv_find_best_neighbor(bat_priv,
984 							    orig_node,
985 							    hard_iface);
986 		batadv_update_route(bat_priv, orig_node, hard_iface,
987 				    best_neigh_node);
988 		if (best_neigh_node)
989 			batadv_neigh_node_free_ref(best_neigh_node);
990 	}
991 	rcu_read_unlock();
992 
993 	return false;
994 }
995 
996 static void _batadv_purge_orig(struct batadv_priv *bat_priv)
997 {
998 	struct batadv_hashtable *hash = bat_priv->orig_hash;
999 	struct hlist_node *node_tmp;
1000 	struct hlist_head *head;
1001 	spinlock_t *list_lock; /* spinlock to protect write access */
1002 	struct batadv_orig_node *orig_node;
1003 	u32 i;
1004 
1005 	if (!hash)
1006 		return;
1007 
1008 	/* for all origins... */
1009 	for (i = 0; i < hash->size; i++) {
1010 		head = &hash->table[i];
1011 		list_lock = &hash->list_locks[i];
1012 
1013 		spin_lock_bh(list_lock);
1014 		hlist_for_each_entry_safe(orig_node, node_tmp,
1015 					  head, hash_entry) {
1016 			if (batadv_purge_orig_node(bat_priv, orig_node)) {
1017 				batadv_gw_node_delete(bat_priv, orig_node);
1018 				hlist_del_rcu(&orig_node->hash_entry);
1019 				batadv_tt_global_del_orig(orig_node->bat_priv,
1020 							  orig_node, -1,
1021 							  "originator timed out");
1022 				batadv_orig_node_free_ref(orig_node);
1023 				continue;
1024 			}
1025 
1026 			batadv_frag_purge_orig(orig_node,
1027 					       batadv_frag_check_entry);
1028 		}
1029 		spin_unlock_bh(list_lock);
1030 	}
1031 
1032 	batadv_gw_election(bat_priv);
1033 }
1034 
1035 static void batadv_purge_orig(struct work_struct *work)
1036 {
1037 	struct delayed_work *delayed_work;
1038 	struct batadv_priv *bat_priv;
1039 
1040 	delayed_work = container_of(work, struct delayed_work, work);
1041 	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
1042 	_batadv_purge_orig(bat_priv);
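	/* re-arm the periodic originator purge work */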
1043 	queue_delayed_work(batadv_event_workqueue,
1044 			   &bat_priv->orig_work,
1045 			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
1046 }
1047 
1048 void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
1049 {
1050 	_batadv_purge_orig(bat_priv);
1051 }
1052 
1053 int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
1054 {
1055 	struct net_device *net_dev = (struct net_device *)seq->private;
1056 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
1057 	struct batadv_hard_iface *primary_if;
1058 
1059 	primary_if = batadv_seq_print_text_primary_if_get(seq);
1060 	if (!primary_if)
1061 		return 0;
1062 
1063 	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
1064 		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
1065 		   primary_if->net_dev->dev_addr, net_dev->name,
1066 		   bat_priv->bat_algo_ops->name);
1067 
1068 	batadv_hardif_free_ref(primary_if);
1069 
1070 	if (!bat_priv->bat_algo_ops->bat_orig_print) {
1071 		seq_puts(seq,
1072 			 "No printing function for this routing protocol\n");
1073 		return 0;
1074 	}
1075 
1076 	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
1077 					       BATADV_IF_DEFAULT);
1078 
1079 	return 0;
1080 }
1081 
1082 /**
1083  * batadv_orig_hardif_seq_print_text - writes originator info for a specific
1084  *  outgoing interface
1085  * @seq: debugfs table seq_file struct
1086  * @offset: not used
1087  *
1088  * Returns 0
1089  */
1090 int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
1091 {
1092 	struct net_device *net_dev = (struct net_device *)seq->private;
1093 	struct batadv_hard_iface *hard_iface;
1094 	struct batadv_priv *bat_priv;
1095 
1096 	hard_iface = batadv_hardif_get_by_netdev(net_dev);
1097 
1098 	if (!hard_iface || !hard_iface->soft_iface) {
1099 		seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
1100 		goto out;
1101 	}
1102 
1103 	bat_priv = netdev_priv(hard_iface->soft_iface);
1104 	if (!bat_priv->bat_algo_ops->bat_orig_print) {
1105 		seq_puts(seq,
1106 			 "No printing function for this routing protocol\n");
1107 		goto out;
1108 	}
1109 
1110 	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
1111 		seq_puts(seq, "Interface not active\n");
1112 		goto out;
1113 	}
1114 
1115 	seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
1116 		   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
1117 		   hard_iface->net_dev->dev_addr,
1118 		   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);
1119 
1120 	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
1121 
1122 out:
1123 	if (hard_iface)
1124 		batadv_hardif_free_ref(hard_iface);
1125 	return 0;
1126 }
1127 
1128 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
1129 			    int max_if_num)
1130 {
1131 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
1132 	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1133 	struct batadv_hashtable *hash = bat_priv->orig_hash;
1134 	struct hlist_head *head;
1135 	struct batadv_orig_node *orig_node;
1136 	u32 i;
1137 	int ret;
1138 
1139 	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
1140 	 * if_num
1141 	 */
1142 	for (i = 0; i < hash->size; i++) {
1143 		head = &hash->table[i];
1144 
1145 		rcu_read_lock();
1146 		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1147 			ret = 0;
1148 			if (bao->bat_orig_add_if)
1149 				ret = bao->bat_orig_add_if(orig_node,
1150 							   max_if_num);
1151 			if (ret == -ENOMEM)
1152 				goto err;
1153 		}
1154 		rcu_read_unlock();
1155 	}
1156 
1157 	return 0;
1158 
1159 err:
1160 	rcu_read_unlock();
1161 	return -ENOMEM;
1162 }
1163 
1164 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
1165 			    int max_if_num)
1166 {
1167 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
1168 	struct batadv_hashtable *hash = bat_priv->orig_hash;
1169 	struct hlist_head *head;
1170 	struct batadv_hard_iface *hard_iface_tmp;
1171 	struct batadv_orig_node *orig_node;
1172 	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1173 	u32 i;
1174 	int ret;
1175 
1176 	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
1177 	 * if_num
1178 	 */
1179 	for (i = 0; i < hash->size; i++) {
1180 		head = &hash->table[i];
1181 
1182 		rcu_read_lock();
1183 		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1184 			ret = 0;
1185 			if (bao->bat_orig_del_if)
1186 				ret = bao->bat_orig_del_if(orig_node,
1187 							   max_if_num,
1188 							   hard_iface->if_num);
1189 			if (ret == -ENOMEM)
1190 				goto err;
1191 		}
1192 		rcu_read_unlock();
1193 	}
1194 
1195 	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
1196 	rcu_read_lock();
1197 	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
1198 		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
1199 			continue;
1200 
1201 		if (hard_iface == hard_iface_tmp)
1202 			continue;
1203 
1204 		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
1205 			continue;
1206 
1207 		if (hard_iface_tmp->if_num > hard_iface->if_num)
1208 			hard_iface_tmp->if_num--;
1209 	}
1210 	rcu_read_unlock();
1211 
1212 	hard_iface->if_num = -1;
1213 	return 0;
1214 
1215 err:
1216 	rcu_read_unlock();
1217 	return -ENOMEM;
1218 }
1219