xref: /linux/net/batman-adv/originator.c (revision a8fe58cec351c25e09c393bf46117c0c47b5a17c)
1 /* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
2  *
3  * Marek Lindner, Simon Wunderlich
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of version 2 of the GNU General Public
7  * License as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 
18 #include "originator.h"
19 #include "main.h"
20 
21 #include <linux/errno.h>
22 #include <linux/etherdevice.h>
23 #include <linux/fs.h>
24 #include <linux/jiffies.h>
25 #include <linux/kernel.h>
26 #include <linux/list.h>
27 #include <linux/lockdep.h>
28 #include <linux/netdevice.h>
29 #include <linux/rculist.h>
30 #include <linux/seq_file.h>
31 #include <linux/slab.h>
32 #include <linux/spinlock.h>
33 #include <linux/workqueue.h>
34 
35 #include "distributed-arp-table.h"
36 #include "fragmentation.h"
37 #include "gateway_client.h"
38 #include "hard-interface.h"
39 #include "hash.h"
40 #include "multicast.h"
41 #include "network-coding.h"
42 #include "routing.h"
43 #include "translation-table.h"
44 
45 /* hash class keys */
46 static struct lock_class_key batadv_orig_hash_lock_class_key;
47 
48 static void batadv_purge_orig(struct work_struct *work);
49 
50 /**
 * batadv_compare_orig - comparing function used in the originator hash table
 * @node: hash table node to compare
 * @data2: address of the second originator
 *
 * Returns 1 if they are the same originator
 */
51 int batadv_compare_orig(const struct hlist_node *node, const void *data2)
52 {
53 	const void *data1 = container_of(node, struct batadv_orig_node,
54 					 hash_entry);
55 
56 	return batadv_compare_eth(data1, data2);
57 }
58 
59 /**
60  * batadv_orig_node_vlan_get - get an orig_node_vlan object
61  * @orig_node: the originator serving the VLAN
62  * @vid: the VLAN identifier
63  *
64  * Returns the vlan object identified by vid and belonging to orig_node or NULL
65  * if it does not exist.
66  */
67 struct batadv_orig_node_vlan *
68 batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
69 			  unsigned short vid)
70 {
71 	struct batadv_orig_node_vlan *vlan = NULL, *tmp;
72 
73 	rcu_read_lock();
74 	hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
75 		if (tmp->vid != vid)
76 			continue;
77 
78 		if (!atomic_inc_not_zero(&tmp->refcount))
79 			continue;
80 
81 		vlan = tmp;
82 
83 		break;
84 	}
85 	rcu_read_unlock();
86 
87 	return vlan;
88 }
89 
90 /**
91  * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
92  *  object
93  * @orig_node: the originator serving the VLAN
94  * @vid: the VLAN identifier
95  *
96  * Returns NULL in case of failure or the vlan object identified by vid and
97  * belonging to orig_node otherwise. The object is created and added to the list
98  * if it does not exist.
99  *
100  * The object is returned with refcounter increased by 1.
101  */
102 struct batadv_orig_node_vlan *
103 batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
104 			  unsigned short vid)
105 {
106 	struct batadv_orig_node_vlan *vlan;
107 
108 	spin_lock_bh(&orig_node->vlan_list_lock);
109 
110 	/* first look if an object for this vid already exists */
111 	vlan = batadv_orig_node_vlan_get(orig_node, vid);
112 	if (vlan)
113 		goto out;
114 
115 	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
116 	if (!vlan)
117 		goto out;
118 
119 	atomic_set(&vlan->refcount, 2);
120 	vlan->vid = vid;
121 
122 	hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
123 
124 out:
125 	spin_unlock_bh(&orig_node->vlan_list_lock);
126 
127 	return vlan;
128 }
129 
130 /**
131  * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
132  *  the originator-vlan object
133  * @orig_vlan: the originator-vlan object to release
134  */
135 void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
136 {
137 	if (atomic_dec_and_test(&orig_vlan->refcount))
138 		kfree_rcu(orig_vlan, rcu);
139 }
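/* Editorial note: illustrative sketch, not part of the original file. It
 * shows the refcounting contract of the orig_node_vlan helpers above; the
 * function name and the vid parameter are made up for this example only.
 */
#if 0
static void batadv_orig_node_vlan_usage_sketch(struct batadv_orig_node *orig_node,
					       unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	/* look up the per-VLAN object, creating it if it does not exist yet */
	vlan = batadv_orig_node_vlan_new(orig_node, vid);
	if (!vlan)
		return;

	/* ... read or update the per-VLAN state here ... */

	/* balance the reference taken by batadv_orig_node_vlan_new() */
	batadv_orig_node_vlan_free_ref(vlan);
}
#endif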
140 
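/**
 * batadv_originator_init - initialise the originator hash and purge worker
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Returns 0 on success or -ENOMEM if the originator hash could not be
 * allocated.
 */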
141 int batadv_originator_init(struct batadv_priv *bat_priv)
142 {
143 	if (bat_priv->orig_hash)
144 		return 0;
145 
146 	bat_priv->orig_hash = batadv_hash_new(1024);
147 
148 	if (!bat_priv->orig_hash)
149 		goto err;
150 
151 	batadv_hash_set_lock_class(bat_priv->orig_hash,
152 				   &batadv_orig_hash_lock_class_key);
153 
154 	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
155 	queue_delayed_work(batadv_event_workqueue,
156 			   &bat_priv->orig_work,
157 			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
158 
159 	return 0;
160 
161 err:
162 	return -ENOMEM;
163 }
164 
165 /**
166  * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
167  *  free after rcu grace period
168  * @neigh_ifinfo: the neigh_ifinfo object to release
169  */
170 static void
171 batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
172 {
173 	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
174 		batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
175 
176 	kfree_rcu(neigh_ifinfo, rcu);
177 }
178 
179 /**
180  * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
181  *  the neigh_ifinfo
182  * @neigh_ifinfo: the neigh_ifinfo object to release
183  */
184 void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
185 {
186 	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
187 		batadv_neigh_ifinfo_release(neigh_ifinfo);
188 }
189 
190 /**
191  * batadv_hardif_neigh_release - release hardif neigh node from lists and
192  *  queue for free after rcu grace period
193  * @hardif_neigh: hardif neighbor to free
194  */
195 static void
196 batadv_hardif_neigh_release(struct batadv_hardif_neigh_node *hardif_neigh)
197 {
198 	spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
199 	hlist_del_init_rcu(&hardif_neigh->list);
200 	spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
201 
202 	batadv_hardif_free_ref(hardif_neigh->if_incoming);
203 	kfree_rcu(hardif_neigh, rcu);
204 }
205 
206 /**
207  * batadv_hardif_neigh_free_ref - decrement the hardif neighbors refcounter
208  *  and possibly release it
209  * @hardif_neigh: hardif neighbor to free
210  */
211 void batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh)
212 {
213 	if (atomic_dec_and_test(&hardif_neigh->refcount))
214 		batadv_hardif_neigh_release(hardif_neigh);
215 }
216 
217 /**
218  * batadv_neigh_node_release - release neigh_node from lists and queue for
219  *  free after rcu grace period
220  * @neigh_node: neighbor node to free
221  */
222 static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
223 {
224 	struct hlist_node *node_tmp;
225 	struct batadv_hardif_neigh_node *hardif_neigh;
226 	struct batadv_neigh_ifinfo *neigh_ifinfo;
227 	struct batadv_algo_ops *bao;
228 
229 	bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
230 
231 	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
232 				  &neigh_node->ifinfo_list, list) {
233 		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
234 	}
235 
236 	hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
237 					       neigh_node->addr);
238 	if (hardif_neigh) {
239 		/* batadv_hardif_neigh_get() increases refcount too */
240 		batadv_hardif_neigh_free_ref(hardif_neigh);
241 		batadv_hardif_neigh_free_ref(hardif_neigh);
242 	}
243 
244 	if (bao->bat_neigh_free)
245 		bao->bat_neigh_free(neigh_node);
246 
247 	batadv_hardif_free_ref(neigh_node->if_incoming);
248 
249 	kfree_rcu(neigh_node, rcu);
250 }
251 
252 /**
253  * batadv_neigh_node_free_ref - decrement the neighbors refcounter
254  *  and possibly release it
255  * @neigh_node: neighbor node to free
256  */
257 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
258 {
259 	if (atomic_dec_and_test(&neigh_node->refcount))
260 		batadv_neigh_node_release(neigh_node);
261 }
262 
263 /**
264  * batadv_orig_router_get - router to the originator depending on iface
265  * @orig_node: the orig node for the router
266  * @if_outgoing: the interface on which the payload packet has been received
267  *  or on which the OGM should be sent
268  *
269  * Returns the neighbor which should be router for this orig_node/iface.
270  *
271  * The object is returned with refcounter increased by 1.
272  */
273 struct batadv_neigh_node *
274 batadv_orig_router_get(struct batadv_orig_node *orig_node,
275 		       const struct batadv_hard_iface *if_outgoing)
276 {
277 	struct batadv_orig_ifinfo *orig_ifinfo;
278 	struct batadv_neigh_node *router = NULL;
279 
280 	rcu_read_lock();
281 	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
282 		if (orig_ifinfo->if_outgoing != if_outgoing)
283 			continue;
284 
285 		router = rcu_dereference(orig_ifinfo->router);
286 		break;
287 	}
288 
289 	if (router && !atomic_inc_not_zero(&router->refcount))
290 		router = NULL;
291 
292 	rcu_read_unlock();
293 	return router;
294 }
295 
296 /**
297  * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
298  * @orig_node: the orig node to be queried
299  * @if_outgoing: the interface for which the ifinfo should be acquired
300  *
301  * Returns the requested orig_ifinfo or NULL if not found.
302  *
303  * The object is returned with refcounter increased by 1.
304  */
305 struct batadv_orig_ifinfo *
306 batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
307 		       struct batadv_hard_iface *if_outgoing)
308 {
309 	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;
310 
311 	rcu_read_lock();
312 	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
313 				 list) {
314 		if (tmp->if_outgoing != if_outgoing)
315 			continue;
316 
317 		if (!atomic_inc_not_zero(&tmp->refcount))
318 			continue;
319 
320 		orig_ifinfo = tmp;
321 		break;
322 	}
323 	rcu_read_unlock();
324 
325 	return orig_ifinfo;
326 }
327 
328 /**
329  * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
330  * @orig_node: the orig node to be queried
331  * @if_outgoing: the interface for which the ifinfo should be acquired
332  *
333  * Returns NULL in case of failure or the orig_ifinfo object for the if_outgoing
334  * interface otherwise. The object is created and added to the list
335  * if it does not exist.
336  *
337  * The object is returned with refcounter increased by 1.
338  */
339 struct batadv_orig_ifinfo *
340 batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
341 		       struct batadv_hard_iface *if_outgoing)
342 {
343 	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
344 	unsigned long reset_time;
345 
346 	spin_lock_bh(&orig_node->neigh_list_lock);
347 
348 	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
349 	if (orig_ifinfo)
350 		goto out;
351 
352 	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
353 	if (!orig_ifinfo)
354 		goto out;
355 
356 	if (if_outgoing != BATADV_IF_DEFAULT &&
357 	    !atomic_inc_not_zero(&if_outgoing->refcount)) {
358 		kfree(orig_ifinfo);
359 		orig_ifinfo = NULL;
360 		goto out;
361 	}
362 
363 	reset_time = jiffies - 1;
364 	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
365 	orig_ifinfo->batman_seqno_reset = reset_time;
366 	orig_ifinfo->if_outgoing = if_outgoing;
367 	INIT_HLIST_NODE(&orig_ifinfo->list);
368 	atomic_set(&orig_ifinfo->refcount, 2);
369 	hlist_add_head_rcu(&orig_ifinfo->list,
370 			   &orig_node->ifinfo_list);
371 out:
372 	spin_unlock_bh(&orig_node->neigh_list_lock);
373 	return orig_ifinfo;
374 }
375 
376 /**
377  * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
378  * @neigh: the neigh node to be queried
379  * @if_outgoing: the interface for which the ifinfo should be acquired
380  *
381  * The object is returned with refcounter increased by 1.
382  *
383  * Returns the requested neigh_ifinfo or NULL if not found
384  */
385 struct batadv_neigh_ifinfo *
386 batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
387 			struct batadv_hard_iface *if_outgoing)
388 {
389 	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
390 				   *tmp_neigh_ifinfo;
391 
392 	rcu_read_lock();
393 	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
394 				 list) {
395 		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
396 			continue;
397 
398 		if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
399 			continue;
400 
401 		neigh_ifinfo = tmp_neigh_ifinfo;
402 		break;
403 	}
404 	rcu_read_unlock();
405 
406 	return neigh_ifinfo;
407 }
408 
409 /**
410  * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
411  * @neigh: the neigh node to be queried
412  * @if_outgoing: the interface for which the ifinfo should be acquired
413  *
414  * Returns NULL in case of failure or the neigh_ifinfo object for the
415  * if_outgoing interface otherwise. The object is created and added to the list
416  * if it does not exist.
417  *
418  * The object is returned with refcounter increased by 1.
419  */
420 struct batadv_neigh_ifinfo *
421 batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
422 			struct batadv_hard_iface *if_outgoing)
423 {
424 	struct batadv_neigh_ifinfo *neigh_ifinfo;
425 
426 	spin_lock_bh(&neigh->ifinfo_lock);
427 
428 	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
429 	if (neigh_ifinfo)
430 		goto out;
431 
432 	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
433 	if (!neigh_ifinfo)
434 		goto out;
435 
436 	if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
437 		kfree(neigh_ifinfo);
438 		neigh_ifinfo = NULL;
439 		goto out;
440 	}
441 
442 	INIT_HLIST_NODE(&neigh_ifinfo->list);
443 	atomic_set(&neigh_ifinfo->refcount, 2);
444 	neigh_ifinfo->if_outgoing = if_outgoing;
445 
446 	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
447 
448 out:
449 	spin_unlock_bh(&neigh->ifinfo_lock);
450 
451 	return neigh_ifinfo;
452 }
453 
454 /**
455  * batadv_neigh_node_get - retrieve a neighbour from the list
456  * @orig_node: originator which the neighbour belongs to
457  * @hard_iface: the interface to which this neighbour is connected
458  * @addr: the address of the neighbour
459  *
460  * Looks for and possibly returns a neighbour belonging to this originator list
461  * which is connected through the provided hard interface.
462  * Returns NULL if the neighbour is not found.
463  */
464 static struct batadv_neigh_node *
465 batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
466 		      const struct batadv_hard_iface *hard_iface,
467 		      const u8 *addr)
468 {
469 	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
470 
471 	rcu_read_lock();
472 	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
473 		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
474 			continue;
475 
476 		if (tmp_neigh_node->if_incoming != hard_iface)
477 			continue;
478 
479 		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
480 			continue;
481 
482 		res = tmp_neigh_node;
483 		break;
484 	}
485 	rcu_read_unlock();
486 
487 	return res;
488 }
489 
490 /**
491  * batadv_hardif_neigh_create - create a hardif neighbour node
492  * @hard_iface: the interface this neighbour is connected to
493  * @neigh_addr: the interface address of the neighbour to retrieve
494  *
495  * Returns the hardif neighbour node if found or created or NULL otherwise.
496  */
497 static struct batadv_hardif_neigh_node *
498 batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
499 			   const u8 *neigh_addr)
500 {
501 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
502 	struct batadv_hardif_neigh_node *hardif_neigh = NULL;
503 
504 	spin_lock_bh(&hard_iface->neigh_list_lock);
505 
506 	/* check if neighbor hasn't been added in the meantime */
507 	hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
508 	if (hardif_neigh)
509 		goto out;
510 
511 	if (!atomic_inc_not_zero(&hard_iface->refcount))
512 		goto out;
513 
514 	hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
515 	if (!hardif_neigh) {
516 		batadv_hardif_free_ref(hard_iface);
517 		goto out;
518 	}
519 
520 	INIT_HLIST_NODE(&hardif_neigh->list);
521 	ether_addr_copy(hardif_neigh->addr, neigh_addr);
522 	hardif_neigh->if_incoming = hard_iface;
523 	hardif_neigh->last_seen = jiffies;
524 
525 	atomic_set(&hardif_neigh->refcount, 1);
526 
527 	if (bat_priv->bat_algo_ops->bat_hardif_neigh_init)
528 		bat_priv->bat_algo_ops->bat_hardif_neigh_init(hardif_neigh);
529 
530 	hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
531 
532 out:
533 	spin_unlock_bh(&hard_iface->neigh_list_lock);
534 	return hardif_neigh;
535 }
536 
537 /**
538  * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
539  *  node
540  * @hard_iface: the interface this neighbour is connected to
541  * @neigh_addr: the interface address of the neighbour to retrieve
542  *
543  * Returns the hardif neighbour node if found or created or NULL otherwise.
544  */
545 static struct batadv_hardif_neigh_node *
546 batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
547 				  const u8 *neigh_addr)
548 {
549 	struct batadv_hardif_neigh_node *hardif_neigh = NULL;
550 
551 	/* first check without locking to avoid the overhead */
552 	hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
553 	if (hardif_neigh)
554 		return hardif_neigh;
555 
556 	return batadv_hardif_neigh_create(hard_iface, neigh_addr);
557 }
558 
559 /**
560  * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
561  * @hard_iface: the interface to which this neighbour is connected
562  * @neigh_addr: the address of the neighbour
563  *
564  * Looks for and possibly returns a neighbour belonging to this hard interface.
565  * Returns NULL if the neighbour is not found.
566  */
567 struct batadv_hardif_neigh_node *
568 batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
569 			const u8 *neigh_addr)
570 {
571 	struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;
572 
573 	rcu_read_lock();
574 	hlist_for_each_entry_rcu(tmp_hardif_neigh,
575 				 &hard_iface->neigh_list, list) {
576 		if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
577 			continue;
578 
579 		if (!atomic_inc_not_zero(&tmp_hardif_neigh->refcount))
580 			continue;
581 
582 		hardif_neigh = tmp_hardif_neigh;
583 		break;
584 	}
585 	rcu_read_unlock();
586 
587 	return hardif_neigh;
588 }
589 
590 /**
591  * batadv_neigh_node_new - create and init a new neigh_node object
592  * @orig_node: originator object representing the neighbour
593  * @hard_iface: the interface to which the neighbour is connected
594  * @neigh_addr: the mac address of the neighbour interface
595  *
596  * Allocates a new neigh_node object and initialises all the generic fields.
597  * Returns the new object or NULL on failure.
598  */
599 struct batadv_neigh_node *
600 batadv_neigh_node_new(struct batadv_orig_node *orig_node,
601 		      struct batadv_hard_iface *hard_iface,
602 		      const u8 *neigh_addr)
603 {
604 	struct batadv_neigh_node *neigh_node;
605 	struct batadv_hardif_neigh_node *hardif_neigh = NULL;
606 
607 	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
608 	if (neigh_node)
609 		goto out;
610 
611 	hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
612 							 neigh_addr);
613 	if (!hardif_neigh)
614 		goto out;
615 
616 	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
617 	if (!neigh_node)
618 		goto out;
619 
620 	if (!atomic_inc_not_zero(&hard_iface->refcount)) {
621 		kfree(neigh_node);
622 		neigh_node = NULL;
623 		goto out;
624 	}
625 
626 	INIT_HLIST_NODE(&neigh_node->list);
627 	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
628 	spin_lock_init(&neigh_node->ifinfo_lock);
629 
630 	ether_addr_copy(neigh_node->addr, neigh_addr);
631 	neigh_node->if_incoming = hard_iface;
632 	neigh_node->orig_node = orig_node;
633 
634 	/* extra reference for return */
635 	atomic_set(&neigh_node->refcount, 2);
636 
637 	spin_lock_bh(&orig_node->neigh_list_lock);
638 	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
639 	spin_unlock_bh(&orig_node->neigh_list_lock);
640 
641 	/* increment unique neighbor refcount */
642 	atomic_inc(&hardif_neigh->refcount);
643 
644 	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
645 		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
646 		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);
647 
648 out:
649 	if (hardif_neigh)
650 		batadv_hardif_neigh_free_ref(hardif_neigh);
651 	return neigh_node;
652 }
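/* Editorial note: illustrative sketch, not part of the original file.
 * batadv_neigh_node_new() hands out an extra reference to the caller which
 * has to be dropped once the neighbor has been processed; the function name
 * is made up for this example.
 */
#if 0
static void batadv_neigh_node_usage_sketch(struct batadv_orig_node *orig_node,
					   struct batadv_hard_iface *hard_iface,
					   const u8 *neigh_addr)
{
	struct batadv_neigh_node *neigh_node;

	/* returns an existing neighbor or creates a new one */
	neigh_node = batadv_neigh_node_new(orig_node, hard_iface, neigh_addr);
	if (!neigh_node)
		return;

	/* ... update the routing metrics stored for this neighbor ... */

	batadv_neigh_node_free_ref(neigh_node);
}
#endif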
653 
654 /**
655  * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
656  * @seq: neighbour table seq_file struct
657  * @offset: not used
658  *
659  * Always returns 0.
660  */
661 int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
662 {
663 	struct net_device *net_dev = (struct net_device *)seq->private;
664 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
665 	struct batadv_hard_iface *primary_if;
666 
667 	primary_if = batadv_seq_print_text_primary_if_get(seq);
668 	if (!primary_if)
669 		return 0;
670 
671 	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
672 		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
673 		   primary_if->net_dev->dev_addr, net_dev->name,
674 		   bat_priv->bat_algo_ops->name);
675 
676 	batadv_hardif_free_ref(primary_if);
677 
678 	if (!bat_priv->bat_algo_ops->bat_neigh_print) {
679 		seq_puts(seq,
680 			 "No printing function for this routing protocol\n");
681 		return 0;
682 	}
683 
684 	bat_priv->bat_algo_ops->bat_neigh_print(bat_priv, seq);
685 	return 0;
686 }
687 
688 /**
689  * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
690  *  free after rcu grace period
691  * @orig_ifinfo: the orig_ifinfo object to release
692  */
693 static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
694 {
695 	struct batadv_neigh_node *router;
696 
697 	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
698 		batadv_hardif_free_ref(orig_ifinfo->if_outgoing);
699 
700 	/* this is the last reference to this object */
701 	router = rcu_dereference_protected(orig_ifinfo->router, true);
702 	if (router)
703 		batadv_neigh_node_free_ref(router);
704 
705 	kfree_rcu(orig_ifinfo, rcu);
706 }
707 
708 /**
709  * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
710  *  the orig_ifinfo
711  * @orig_ifinfo: the orig_ifinfo object to release
712  */
713 void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
714 {
715 	if (atomic_dec_and_test(&orig_ifinfo->refcount))
716 		batadv_orig_ifinfo_release(orig_ifinfo);
717 }
718 
719 /**
720  * batadv_orig_node_free_rcu - free the orig_node
721  * @rcu: rcu pointer of the orig_node
722  */
723 static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
724 {
725 	struct batadv_orig_node *orig_node;
726 
727 	orig_node = container_of(rcu, struct batadv_orig_node, rcu);
728 
729 	batadv_mcast_purge_orig(orig_node);
730 
731 	batadv_frag_purge_orig(orig_node, NULL);
732 
733 	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
734 		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
735 
736 	kfree(orig_node->tt_buff);
737 	kfree(orig_node);
738 }
739 
740 /**
741  * batadv_orig_node_release - release orig_node from lists and queue for
742  *  free after rcu grace period
743  * @orig_node: the orig node to free
744  */
745 static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
746 {
747 	struct hlist_node *node_tmp;
748 	struct batadv_neigh_node *neigh_node;
749 	struct batadv_orig_ifinfo *orig_ifinfo;
750 
751 	spin_lock_bh(&orig_node->neigh_list_lock);
752 
753 	/* for all neighbors towards this originator ... */
754 	hlist_for_each_entry_safe(neigh_node, node_tmp,
755 				  &orig_node->neigh_list, list) {
756 		hlist_del_rcu(&neigh_node->list);
757 		batadv_neigh_node_free_ref(neigh_node);
758 	}
759 
760 	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
761 				  &orig_node->ifinfo_list, list) {
762 		hlist_del_rcu(&orig_ifinfo->list);
763 		batadv_orig_ifinfo_free_ref(orig_ifinfo);
764 	}
765 	spin_unlock_bh(&orig_node->neigh_list_lock);
766 
767 	/* Free nc_nodes */
768 	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
769 
770 	call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
771 }
772 
773 /**
774  * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
775  *  release it
776  * @orig_node: the orig node to free
777  */
778 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
779 {
780 	if (atomic_dec_and_test(&orig_node->refcount))
781 		batadv_orig_node_release(orig_node);
782 }
783 
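/**
 * batadv_originator_free - free all originators and the originator hash
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Cancels the periodic purge worker, releases the reference of every
 * originator still in the hash and destroys the hash itself.
 */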
784 void batadv_originator_free(struct batadv_priv *bat_priv)
785 {
786 	struct batadv_hashtable *hash = bat_priv->orig_hash;
787 	struct hlist_node *node_tmp;
788 	struct hlist_head *head;
789 	spinlock_t *list_lock; /* spinlock to protect write access */
790 	struct batadv_orig_node *orig_node;
791 	u32 i;
792 
793 	if (!hash)
794 		return;
795 
796 	cancel_delayed_work_sync(&bat_priv->orig_work);
797 
798 	bat_priv->orig_hash = NULL;
799 
800 	for (i = 0; i < hash->size; i++) {
801 		head = &hash->table[i];
802 		list_lock = &hash->list_locks[i];
803 
804 		spin_lock_bh(list_lock);
805 		hlist_for_each_entry_safe(orig_node, node_tmp,
806 					  head, hash_entry) {
807 			hlist_del_rcu(&orig_node->hash_entry);
808 			batadv_orig_node_free_ref(orig_node);
809 		}
810 		spin_unlock_bh(list_lock);
811 	}
812 
813 	batadv_hash_destroy(hash);
814 }
815 
816 /**
817  * batadv_orig_node_new - creates a new orig_node
818  * @bat_priv: the bat priv with all the soft interface information
819  * @addr: the mac address of the originator
820  *
821  * Creates a new originator object and initialises all the generic fields.
822  * The new object is not added to the originator list.
823  * Returns the newly created object or NULL on failure.
824  */
825 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
826 					      const u8 *addr)
827 {
828 	struct batadv_orig_node *orig_node;
829 	struct batadv_orig_node_vlan *vlan;
830 	unsigned long reset_time;
831 	int i;
832 
833 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
834 		   "Creating new originator: %pM\n", addr);
835 
836 	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
837 	if (!orig_node)
838 		return NULL;
839 
840 	INIT_HLIST_HEAD(&orig_node->neigh_list);
841 	INIT_HLIST_HEAD(&orig_node->vlan_list);
842 	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
843 	spin_lock_init(&orig_node->bcast_seqno_lock);
844 	spin_lock_init(&orig_node->neigh_list_lock);
845 	spin_lock_init(&orig_node->tt_buff_lock);
846 	spin_lock_init(&orig_node->tt_lock);
847 	spin_lock_init(&orig_node->vlan_list_lock);
848 
849 	batadv_nc_init_orig(orig_node);
850 
851 	/* extra reference for return */
852 	atomic_set(&orig_node->refcount, 2);
853 
854 	orig_node->bat_priv = bat_priv;
855 	ether_addr_copy(orig_node->orig, addr);
856 	batadv_dat_init_orig_node_addr(orig_node);
857 	atomic_set(&orig_node->last_ttvn, 0);
858 	orig_node->tt_buff = NULL;
859 	orig_node->tt_buff_len = 0;
860 	orig_node->last_seen = jiffies;
861 	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
862 	orig_node->bcast_seqno_reset = reset_time;
863 
864 #ifdef CONFIG_BATMAN_ADV_MCAST
865 	orig_node->mcast_flags = BATADV_NO_FLAGS;
866 	INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
867 	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
868 	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
869 	spin_lock_init(&orig_node->mcast_handler_lock);
870 #endif
871 
872 	/* create a vlan object for the "untagged" LAN */
873 	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
874 	if (!vlan)
875 		goto free_orig_node;
876 	/* batadv_orig_node_vlan_new() increases the refcounter.
877 	 * Immediately release vlan since it is not needed anymore in this
878 	 * context
879 	 */
880 	batadv_orig_node_vlan_free_ref(vlan);
881 
882 	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
883 		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
884 		spin_lock_init(&orig_node->fragments[i].lock);
885 		orig_node->fragments[i].size = 0;
886 	}
887 
888 	return orig_node;
889 free_orig_node:
890 	kfree(orig_node);
891 	return NULL;
892 }
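/* Editorial note: illustrative sketch, not part of the original file.
 * batadv_orig_node_new() does not insert the object into the originator
 * hash; the routing algorithm does that itself and releases the returned
 * reference with batadv_orig_node_free_ref() when it is done. The function
 * name is made up for this example.
 */
#if 0
static void batadv_orig_node_usage_sketch(struct batadv_priv *bat_priv,
					  const u8 *addr)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_orig_node_new(bat_priv, addr);
	if (!orig_node)
		return;

	/* ... add orig_node to bat_priv->orig_hash here ... */

	batadv_orig_node_free_ref(orig_node);
}
#endif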
893 
894 /**
895  * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
896  * @bat_priv: the bat priv with all the soft interface information
897  * @neigh: the neighbor whose ifinfo entries are to be checked
898  */
899 static void
900 batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
901 			  struct batadv_neigh_node *neigh)
902 {
903 	struct batadv_neigh_ifinfo *neigh_ifinfo;
904 	struct batadv_hard_iface *if_outgoing;
905 	struct hlist_node *node_tmp;
906 
907 	spin_lock_bh(&neigh->ifinfo_lock);
908 
909 	/* for all ifinfo objects of this neighbor */
910 	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
911 				  &neigh->ifinfo_list, list) {
912 		if_outgoing = neigh_ifinfo->if_outgoing;
913 
914 		/* always keep the default interface */
915 		if (if_outgoing == BATADV_IF_DEFAULT)
916 			continue;
917 
918 		/* don't purge if the interface is not (going) down */
919 		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
920 		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
921 		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
922 			continue;
923 
924 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
925 			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
926 			   neigh->addr, if_outgoing->net_dev->name);
927 
928 		hlist_del_rcu(&neigh_ifinfo->list);
929 		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
930 	}
931 
932 	spin_unlock_bh(&neigh->ifinfo_lock);
933 }
934 
935 /**
936  * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
937  * @bat_priv: the bat priv with all the soft interface information
938  * @orig_node: orig node which is to be checked
939  *
940  * Returns true if any ifinfo entry was purged, false otherwise.
941  */
942 static bool
943 batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
944 			 struct batadv_orig_node *orig_node)
945 {
946 	struct batadv_orig_ifinfo *orig_ifinfo;
947 	struct batadv_hard_iface *if_outgoing;
948 	struct hlist_node *node_tmp;
949 	bool ifinfo_purged = false;
950 
951 	spin_lock_bh(&orig_node->neigh_list_lock);
952 
953 	/* for all ifinfo objects for this originator */
954 	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
955 				  &orig_node->ifinfo_list, list) {
956 		if_outgoing = orig_ifinfo->if_outgoing;
957 
958 		/* always keep the default interface */
959 		if (if_outgoing == BATADV_IF_DEFAULT)
960 			continue;
961 
962 		/* don't purge if the interface is not (going) down */
963 		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
964 		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
965 		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
966 			continue;
967 
968 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
969 			   "router/ifinfo purge: originator %pM, iface: %s\n",
970 			   orig_node->orig, if_outgoing->net_dev->name);
971 
972 		ifinfo_purged = true;
973 
974 		hlist_del_rcu(&orig_ifinfo->list);
975 		batadv_orig_ifinfo_free_ref(orig_ifinfo);
976 		if (orig_node->last_bonding_candidate == orig_ifinfo) {
977 			orig_node->last_bonding_candidate = NULL;
978 			batadv_orig_ifinfo_free_ref(orig_ifinfo);
979 		}
980 	}
981 
982 	spin_unlock_bh(&orig_node->neigh_list_lock);
983 
984 	return ifinfo_purged;
985 }
986 
987 /**
988  * batadv_purge_orig_neighbors - purges neighbors from originator
989  * @bat_priv: the bat priv with all the soft interface information
990  * @orig_node: orig node which is to be checked
991  *
992  * Returns true if any neighbor was purged, false otherwise
993  */
994 static bool
995 batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
996 			    struct batadv_orig_node *orig_node)
997 {
998 	struct hlist_node *node_tmp;
999 	struct batadv_neigh_node *neigh_node;
1000 	bool neigh_purged = false;
1001 	unsigned long last_seen;
1002 	struct batadv_hard_iface *if_incoming;
1003 
1004 	spin_lock_bh(&orig_node->neigh_list_lock);
1005 
1006 	/* for all neighbors towards this originator ... */
1007 	hlist_for_each_entry_safe(neigh_node, node_tmp,
1008 				  &orig_node->neigh_list, list) {
1009 		last_seen = neigh_node->last_seen;
1010 		if_incoming = neigh_node->if_incoming;
1011 
1012 		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
1013 		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
1014 		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
1015 		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
1016 			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
1017 			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
1018 			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
1019 				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1020 					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
1021 					   orig_node->orig, neigh_node->addr,
1022 					   if_incoming->net_dev->name);
1023 			else
1024 				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1025 					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
1026 					   orig_node->orig, neigh_node->addr,
1027 					   jiffies_to_msecs(last_seen));
1028 
1029 			neigh_purged = true;
1030 
1031 			hlist_del_rcu(&neigh_node->list);
1032 			batadv_neigh_node_free_ref(neigh_node);
1033 		} else {
1034 			/* only necessary if not the whole neighbor is to be
1035 			 * deleted, but some interface has been removed.
1036 			 */
1037 			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
1038 		}
1039 	}
1040 
1041 	spin_unlock_bh(&orig_node->neigh_list_lock);
1042 	return neigh_purged;
1043 }
1044 
1045 /**
1046  * batadv_find_best_neighbor - finds the best neighbor after purging
1047  * @bat_priv: the bat priv with all the soft interface information
1048  * @orig_node: orig node which is to be checked
1049  * @if_outgoing: the interface for which the metric should be compared
1050  *
1051  * Returns the current best neighbor, with refcount increased.
1052  */
1053 static struct batadv_neigh_node *
1054 batadv_find_best_neighbor(struct batadv_priv *bat_priv,
1055 			  struct batadv_orig_node *orig_node,
1056 			  struct batadv_hard_iface *if_outgoing)
1057 {
1058 	struct batadv_neigh_node *best = NULL, *neigh;
1059 	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1060 
1061 	rcu_read_lock();
1062 	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
1063 		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
1064 						best, if_outgoing) <= 0))
1065 			continue;
1066 
1067 		if (!atomic_inc_not_zero(&neigh->refcount))
1068 			continue;
1069 
1070 		if (best)
1071 			batadv_neigh_node_free_ref(best);
1072 
1073 		best = neigh;
1074 	}
1075 	rcu_read_unlock();
1076 
1077 	return best;
1078 }
1079 
1080 /**
1081  * batadv_purge_orig_node - purges obsolete information from an orig_node
1082  * @bat_priv: the bat priv with all the soft interface information
1083  * @orig_node: orig node which is to be checked
1084  *
1085  * This function checks if the orig_node or substructures of it have become
1086  * obsolete, and purges this information if that's the case.
1087  *
1088  * Returns true if the orig_node is to be removed, false otherwise.
1089  */
1090 static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
1091 				   struct batadv_orig_node *orig_node)
1092 {
1093 	struct batadv_neigh_node *best_neigh_node;
1094 	struct batadv_hard_iface *hard_iface;
1095 	bool changed_ifinfo, changed_neigh;
1096 
1097 	if (batadv_has_timed_out(orig_node->last_seen,
1098 				 2 * BATADV_PURGE_TIMEOUT)) {
1099 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1100 			   "Originator timeout: originator %pM, last_seen %u\n",
1101 			   orig_node->orig,
1102 			   jiffies_to_msecs(orig_node->last_seen));
1103 		return true;
1104 	}
1105 	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
1106 	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
1107 
1108 	if (!changed_ifinfo && !changed_neigh)
1109 		return false;
1110 
1111 	/* first for the default interface (BATADV_IF_DEFAULT) ... */
1112 	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
1113 						    BATADV_IF_DEFAULT);
1114 	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
1115 			    best_neigh_node);
1116 	if (best_neigh_node)
1117 		batadv_neigh_node_free_ref(best_neigh_node);
1118 
1119 	/* ... then for all other interfaces. */
1120 	rcu_read_lock();
1121 	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
1122 		if (hard_iface->if_status != BATADV_IF_ACTIVE)
1123 			continue;
1124 
1125 		if (hard_iface->soft_iface != bat_priv->soft_iface)
1126 			continue;
1127 
1128 		best_neigh_node = batadv_find_best_neighbor(bat_priv,
1129 							    orig_node,
1130 							    hard_iface);
1131 		batadv_update_route(bat_priv, orig_node, hard_iface,
1132 				    best_neigh_node);
1133 		if (best_neigh_node)
1134 			batadv_neigh_node_free_ref(best_neigh_node);
1135 	}
1136 	rcu_read_unlock();
1137 
1138 	return false;
1139 }
1140 
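/**
 * _batadv_purge_orig - remove obsolete originators from the originator hash
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Deletes originators which have timed out (including their gateway and
 * global translation table entries), purges stale fragment buffers and
 * triggers a new gateway election afterwards.
 */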
1141 static void _batadv_purge_orig(struct batadv_priv *bat_priv)
1142 {
1143 	struct batadv_hashtable *hash = bat_priv->orig_hash;
1144 	struct hlist_node *node_tmp;
1145 	struct hlist_head *head;
1146 	spinlock_t *list_lock; /* spinlock to protect write access */
1147 	struct batadv_orig_node *orig_node;
1148 	u32 i;
1149 
1150 	if (!hash)
1151 		return;
1152 
1153 	/* for all origins... */
1154 	for (i = 0; i < hash->size; i++) {
1155 		head = &hash->table[i];
1156 		list_lock = &hash->list_locks[i];
1157 
1158 		spin_lock_bh(list_lock);
1159 		hlist_for_each_entry_safe(orig_node, node_tmp,
1160 					  head, hash_entry) {
1161 			if (batadv_purge_orig_node(bat_priv, orig_node)) {
1162 				batadv_gw_node_delete(bat_priv, orig_node);
1163 				hlist_del_rcu(&orig_node->hash_entry);
1164 				batadv_tt_global_del_orig(orig_node->bat_priv,
1165 							  orig_node, -1,
1166 							  "originator timed out");
1167 				batadv_orig_node_free_ref(orig_node);
1168 				continue;
1169 			}
1170 
1171 			batadv_frag_purge_orig(orig_node,
1172 					       batadv_frag_check_entry);
1173 		}
1174 		spin_unlock_bh(list_lock);
1175 	}
1176 
1177 	batadv_gw_election(bat_priv);
1178 }
1179 
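/**
 * batadv_purge_orig - periodic originator purge worker
 * @work: the work item of the periodic purge worker
 *
 * Purges obsolete originators and re-arms itself to run again after
 * BATADV_ORIG_WORK_PERIOD milliseconds.
 */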
1180 static void batadv_purge_orig(struct work_struct *work)
1181 {
1182 	struct delayed_work *delayed_work;
1183 	struct batadv_priv *bat_priv;
1184 
1185 	delayed_work = container_of(work, struct delayed_work, work);
1186 	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
1187 	_batadv_purge_orig(bat_priv);
1188 	queue_delayed_work(batadv_event_workqueue,
1189 			   &bat_priv->orig_work,
1190 			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
1191 }
1192 
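/**
 * batadv_purge_orig_ref - purge obsolete originators immediately
 * @bat_priv: the bat priv with all the soft interface information
 */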
1193 void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
1194 {
1195 	_batadv_purge_orig(bat_priv);
1196 }
1197 
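/**
 * batadv_orig_seq_print_text - print the originator table of the default
 *  outgoing interface
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Always returns 0.
 */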
1198 int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
1199 {
1200 	struct net_device *net_dev = (struct net_device *)seq->private;
1201 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
1202 	struct batadv_hard_iface *primary_if;
1203 
1204 	primary_if = batadv_seq_print_text_primary_if_get(seq);
1205 	if (!primary_if)
1206 		return 0;
1207 
1208 	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
1209 		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
1210 		   primary_if->net_dev->dev_addr, net_dev->name,
1211 		   bat_priv->bat_algo_ops->name);
1212 
1213 	batadv_hardif_free_ref(primary_if);
1214 
1215 	if (!bat_priv->bat_algo_ops->bat_orig_print) {
1216 		seq_puts(seq,
1217 			 "No printing function for this routing protocol\n");
1218 		return 0;
1219 	}
1220 
1221 	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
1222 					       BATADV_IF_DEFAULT);
1223 
1224 	return 0;
1225 }
1226 
1227 /**
1228  * batadv_orig_hardif_seq_print_text - print originator info for a specific
1229  *  outgoing interface
1230  * @seq: debugfs table seq_file struct
1231  * @offset: not used
1232  *
1233  * Always returns 0.
1234  */
1235 int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
1236 {
1237 	struct net_device *net_dev = (struct net_device *)seq->private;
1238 	struct batadv_hard_iface *hard_iface;
1239 	struct batadv_priv *bat_priv;
1240 
1241 	hard_iface = batadv_hardif_get_by_netdev(net_dev);
1242 
1243 	if (!hard_iface || !hard_iface->soft_iface) {
1244 		seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
1245 		goto out;
1246 	}
1247 
1248 	bat_priv = netdev_priv(hard_iface->soft_iface);
1249 	if (!bat_priv->bat_algo_ops->bat_orig_print) {
1250 		seq_puts(seq,
1251 			 "No printing function for this routing protocol\n");
1252 		goto out;
1253 	}
1254 
1255 	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
1256 		seq_puts(seq, "Interface not active\n");
1257 		goto out;
1258 	}
1259 
1260 	seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
1261 		   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
1262 		   hard_iface->net_dev->dev_addr,
1263 		   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);
1264 
1265 	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
1266 
1267 out:
1268 	if (hard_iface)
1269 		batadv_hardif_free_ref(hard_iface);
1270 	return 0;
1271 }
1272 
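/**
 * batadv_orig_hash_add_if - resize per-originator data for a new interface
 * @hard_iface: the hard interface which was added
 * @max_if_num: the new number of interfaces
 *
 * Lets the routing algorithm resize its per-originator structures (e.g.
 * orig_node->bcast_own and bcast_own_sum) to make room for the new interface.
 *
 * Returns 0 on success or -ENOMEM otherwise.
 */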
1273 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
1274 			    int max_if_num)
1275 {
1276 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
1277 	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1278 	struct batadv_hashtable *hash = bat_priv->orig_hash;
1279 	struct hlist_head *head;
1280 	struct batadv_orig_node *orig_node;
1281 	u32 i;
1282 	int ret;
1283 
1284 	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
1285 	 * if_num
1286 	 */
1287 	for (i = 0; i < hash->size; i++) {
1288 		head = &hash->table[i];
1289 
1290 		rcu_read_lock();
1291 		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1292 			ret = 0;
1293 			if (bao->bat_orig_add_if)
1294 				ret = bao->bat_orig_add_if(orig_node,
1295 							   max_if_num);
1296 			if (ret == -ENOMEM)
1297 				goto err;
1298 		}
1299 		rcu_read_unlock();
1300 	}
1301 
1302 	return 0;
1303 
1304 err:
1305 	rcu_read_unlock();
1306 	return -ENOMEM;
1307 }
1308 
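/**
 * batadv_orig_hash_del_if - resize per-originator data when an interface is
 *  removed
 * @hard_iface: the hard interface which is being removed
 * @max_if_num: the new number of interfaces
 *
 * Lets the routing algorithm shrink its per-originator structures and
 * renumbers the remaining interfaces.
 *
 * Returns 0 on success or -ENOMEM otherwise.
 */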
1309 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
1310 			    int max_if_num)
1311 {
1312 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
1313 	struct batadv_hashtable *hash = bat_priv->orig_hash;
1314 	struct hlist_head *head;
1315 	struct batadv_hard_iface *hard_iface_tmp;
1316 	struct batadv_orig_node *orig_node;
1317 	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1318 	u32 i;
1319 	int ret;
1320 
1321 	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
1322 	 * if_num
1323 	 */
1324 	for (i = 0; i < hash->size; i++) {
1325 		head = &hash->table[i];
1326 
1327 		rcu_read_lock();
1328 		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1329 			ret = 0;
1330 			if (bao->bat_orig_del_if)
1331 				ret = bao->bat_orig_del_if(orig_node,
1332 							   max_if_num,
1333 							   hard_iface->if_num);
1334 			if (ret == -ENOMEM)
1335 				goto err;
1336 		}
1337 		rcu_read_unlock();
1338 	}
1339 
1340 	/* renumber the remaining batman interfaces */
1341 	rcu_read_lock();
1342 	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
1343 		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
1344 			continue;
1345 
1346 		if (hard_iface == hard_iface_tmp)
1347 			continue;
1348 
1349 		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
1350 			continue;
1351 
1352 		if (hard_iface_tmp->if_num > hard_iface->if_num)
1353 			hard_iface_tmp->if_num--;
1354 	}
1355 	rcu_read_unlock();
1356 
1357 	hard_iface->if_num = -1;
1358 	return 0;
1359 
1360 err:
1361 	rcu_read_unlock();
1362 	return -ENOMEM;
1363 }
1364