xref: /linux/net/batman-adv/translation-table.c (revision a508da6cc0093171833efb8376b00473f24221b9)
1 /*
2  * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3  *
4  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of version 2 of the GNU General Public
8  * License as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18  * 02110-1301, USA
19  *
20  */
21 
22 #include "main.h"
23 #include "translation-table.h"
24 #include "soft-interface.h"
25 #include "hard-interface.h"
26 #include "send.h"
27 #include "hash.h"
28 #include "originator.h"
29 #include "routing.h"
30 #include "bridge_loop_avoidance.h"
31 
32 #include <linux/crc16.h>
33 
34 static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
35 			  struct orig_node *orig_node);
36 static void tt_purge(struct work_struct *work);
37 static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
38 
39 /* returns 1 if they are the same mac addr */
40 static int compare_tt(const struct hlist_node *node, const void *data2)
41 {
42 	const void *data1 = container_of(node, struct tt_common_entry,
43 					 hash_entry);
44 
45 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
46 }
47 
/* (re-)arm the periodic translation table maintenance work; tt_purge()
 * will run roughly 5 seconds from now on the batman event workqueue
 */
static void tt_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
			   msecs_to_jiffies(5000));
}
54 
/* Look up @data (a mac address) in @hash and return the matching
 * tt_common_entry with its refcount already incremented, or NULL when no
 * (live) entry exists. The caller must drop the acquired reference.
 */
static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
					    const void *data)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
	uint32_t index;

	if (!hash)
		return NULL;

	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
		if (!compare_eth(tt_common_entry, data))
			continue;

		/* skip entries whose refcount already dropped to zero -
		 * they are about to be freed
		 */
		if (!atomic_inc_not_zero(&tt_common_entry->refcount))
			continue;

		tt_common_entry_tmp = tt_common_entry;
		break;
	}
	rcu_read_unlock();

	return tt_common_entry_tmp;
}
84 
85 static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
86 						 const void *data)
87 {
88 	struct tt_common_entry *tt_common_entry;
89 	struct tt_local_entry *tt_local_entry = NULL;
90 
91 	tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
92 	if (tt_common_entry)
93 		tt_local_entry = container_of(tt_common_entry,
94 					      struct tt_local_entry, common);
95 	return tt_local_entry;
96 }
97 
98 static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
99 						   const void *data)
100 {
101 	struct tt_common_entry *tt_common_entry;
102 	struct tt_global_entry *tt_global_entry = NULL;
103 
104 	tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
105 	if (tt_common_entry)
106 		tt_global_entry = container_of(tt_common_entry,
107 					       struct tt_global_entry, common);
108 	return tt_global_entry;
109 
110 }
111 
112 static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
113 {
114 	if (atomic_dec_and_test(&tt_local_entry->common.refcount))
115 		kfree_rcu(tt_local_entry, common.rcu);
116 }
117 
118 static void tt_global_entry_free_rcu(struct rcu_head *rcu)
119 {
120 	struct tt_common_entry *tt_common_entry;
121 	struct tt_global_entry *tt_global_entry;
122 
123 	tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
124 	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
125 				       common);
126 
127 	kfree(tt_global_entry);
128 }
129 
/* Drop one reference to @tt_global_entry. On the last put the originator
 * list is torn down first and the entry itself is freed after an RCU grace
 * period.
 */
static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
	if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
		/* release all orig_node references before the entry
		 * disappears
		 */
		tt_global_del_orig_list(tt_global_entry);
		call_rcu(&tt_global_entry->common.rcu,
			 tt_global_entry_free_rcu);
	}
}
138 
/* RCU callback: undo the accounting done in tt_global_add_orig_entry()
 * (tt_size counter and orig_node reference) and free the list entry
 */
static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
{
	struct tt_orig_list_entry *orig_entry;

	orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
	atomic_dec(&orig_entry->orig_node->tt_size);
	orig_node_free_ref(orig_entry->orig_node);
	kfree(orig_entry);
}
148 
/* schedule @orig_entry for release once all current RCU readers are done */
static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
{
	call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
}
153 
/* Record a local tt change (@addr with @flags) so that it gets announced
 * with the next OGMs. Resetting tt_ogm_append_cnt makes sure the new event
 * is re-broadcast the full number of times. On allocation failure the
 * change is silently dropped.
 */
static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
			   uint8_t flags)
{
	struct tt_change_node *tt_change_node;

	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);

	if (!tt_change_node)
		return;

	tt_change_node->change.flags = flags;
	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	/* track the change in the OGMinterval list */
	list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
	atomic_inc(&bat_priv->tt_local_changes);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
}
175 
176 int tt_len(int changes_num)
177 {
178 	return changes_num * sizeof(struct tt_change);
179 }
180 
181 static int tt_local_init(struct bat_priv *bat_priv)
182 {
183 	if (bat_priv->tt_local_hash)
184 		return 1;
185 
186 	bat_priv->tt_local_hash = hash_new(1024);
187 
188 	if (!bat_priv->tt_local_hash)
189 		return 0;
190 
191 	return 1;
192 }
193 
/* Add the client @addr (seen on @ifindex) to the local translation table
 * and announce the change. If the client is already present in the global
 * table it roamed to us: the announcing originators are informed via
 * roaming advertisements and the global entry is marked TT_CLIENT_ROAM.
 */
void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
		  int ifindex)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	int hash_added;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);

	if (tt_local_entry) {
		/* client already known: just refresh its timeout */
		tt_local_entry->last_seen = jiffies;
		/* possibly unset the TT_CLIENT_PENDING flag */
		tt_local_entry->common.flags &= ~TT_CLIENT_PENDING;
		goto out;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		(uint8_t)atomic_read(&bat_priv->ttvn));

	memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
	tt_local_entry->common.flags = NO_FLAGS;
	if (is_wifi_iface(ifindex))
		tt_local_entry->common.flags |= TT_CLIENT_WIFI;
	/* refcount 2: one for the hash table, one for this function */
	atomic_set(&tt_local_entry->common.refcount, 2);
	tt_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;

	/* The local entry has to be marked as NEW to avoid to send it in
	 * a full table response going out before the next ttvn increment
	 * (consistency check) */
	tt_local_entry->common.flags |= TT_CLIENT_NEW;

	hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
			 &tt_local_entry->common,
			 &tt_local_entry->common.hash_entry);

	if (unlikely(hash_added != 0)) {
		/* remove the reference for the hash */
		tt_local_entry_free_ref(tt_local_entry);
		goto out;
	}

	tt_local_event(bat_priv, addr, tt_local_entry->common.flags);

	/* remove address from global hash if present */
	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	/* Check whether it is a roaming! */
	if (tt_global_entry) {
		/* These node are probably going to update their tt table */
		head = &tt_global_entry->orig_list;
		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_entry, node, head, list) {
			orig_entry->orig_node->tt_poss_change = true;

			send_roam_adv(bat_priv, tt_global_entry->common.addr,
				      orig_entry->orig_node);
		}
		rcu_read_unlock();
		/* The global entry has to be marked as ROAMING and
		 * has to be kept for consistency purpose
		 */
		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;
	}
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}
277 
/* Serialize as many pending local tt changes as fit into @buff
 * (@buff_len bytes) and flush the change list. A copy of the buffer is
 * kept in bat_priv->tt_buff to be able to answer later tt_requests.
 * Returns the number of change slots the buffer provides
 * (buff_len / change size).
 */
int tt_changes_fill_buffer(struct bat_priv *bat_priv,
			   unsigned char *buff, int buff_len)
{
	int count = 0, tot_changes = 0;
	struct tt_change_node *entry, *safe;

	if (buff_len > 0)
		tot_changes = buff_len / tt_len(1);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	atomic_set(&bat_priv->tt_local_changes, 0);

	/* copy what fits, but drain the whole list in any case */
	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		if (count < tot_changes) {
			memcpy(buff + tt_len(count),
			       &entry->change, sizeof(struct tt_change));
			count++;
		}
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* Keep the buffer for possible tt_request */
	spin_lock_bh(&bat_priv->tt_buff_lock);
	kfree(bat_priv->tt_buff);
	bat_priv->tt_buff_len = 0;
	bat_priv->tt_buff = NULL;
	/* We check whether this new OGM has no changes due to size
	 * problems */
	if (buff_len > 0) {
		/**
		 * if kmalloc() fails we will reply with the full table
		 * instead of providing the diff
		 */
		bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
		if (bat_priv->tt_buff) {
			memcpy(bat_priv->tt_buff, buff, buff_len);
			bat_priv->tt_buff_len = buff_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt_buff_lock);

	return tot_changes;
}
324 
/* debugfs dump of the local translation table (one line per client with
 * its flag characters); seq_file show callback
 */
int tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			/* flags: Roaming, noPurge, New, X=pending del, Wifi */
			seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
				   tt_common_entry->addr,
				   (tt_common_entry->flags &
				    TT_CLIENT_ROAM ? 'R' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NOPURGE ? 'P' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NEW ? 'N' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_PENDING ? 'X' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_WIFI ? 'W' : '.'));
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}
382 
/* Mark @tt_local_entry as "pending to be removed" (with the extra @flags,
 * e.g. TT_CLIENT_ROAM) and queue the corresponding tt change event.
 * @message is only used for debug logging.
 */
static void tt_local_set_pending(struct bat_priv *bat_priv,
				 struct tt_local_entry *tt_local_entry,
				 uint16_t flags, const char *message)
{
	tt_local_event(bat_priv, tt_local_entry->common.addr,
		       tt_local_entry->common.flags | flags);

	/* The local client has to be marked as "pending to be removed" but
	 * has to be kept in the table in order to send it in a full table
	 * response issued before the next ttvn increment (consistency
	 * check) */
	tt_local_entry->common.flags |= TT_CLIENT_PENDING;

	bat_dbg(DBG_TT, bat_priv,
		"Local tt entry (%pM) pending to be removed: %s\n",
		tt_local_entry->common.addr, message);
}
399 
400 void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
401 		     const char *message, bool roaming)
402 {
403 	struct tt_local_entry *tt_local_entry = NULL;
404 
405 	tt_local_entry = tt_local_hash_find(bat_priv, addr);
406 	if (!tt_local_entry)
407 		goto out;
408 
409 	tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
410 			     (roaming ? TT_CLIENT_ROAM : NO_FLAGS), message);
411 out:
412 	if (tt_local_entry)
413 		tt_local_entry_free_ref(tt_local_entry);
414 }
415 
/* Walk the local table and mark every client that has not been seen for
 * TT_LOCAL_TIMEOUT as pending for deletion. NOPURGE entries (e.g. the soft
 * interface address) and entries already pending are skipped.
 */
static void tt_local_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct tt_common_entry *tt_common_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
				continue;

			/* entry already marked for deletion */
			if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
				continue;

			if (!has_timed_out(tt_local_entry->last_seen,
					   TT_LOCAL_TIMEOUT))
				continue;

			tt_local_set_pending(bat_priv, tt_local_entry,
					     TT_CLIENT_DEL, "timed out");
		}
		spin_unlock_bh(list_lock);
	}

}
454 
/* release all local tt entries and destroy the local table hash
 * (called on shutdown)
 */
static void tt_local_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt_local_hash)
		return;

	hash = bat_priv->tt_local_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			/* drop the reference held by the hash table */
			hlist_del_rcu(node);
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);

	bat_priv->tt_local_hash = NULL;
}
490 
491 static int tt_global_init(struct bat_priv *bat_priv)
492 {
493 	if (bat_priv->tt_global_hash)
494 		return 1;
495 
496 	bat_priv->tt_global_hash = hash_new(1024);
497 
498 	if (!bat_priv->tt_global_hash)
499 		return 0;
500 
501 	return 1;
502 }
503 
/* discard all queued local tt change events and reset the change counter */
static void tt_changes_list_free(struct bat_priv *bat_priv)
{
	struct tt_change_node *entry, *safe;

	spin_lock_bh(&bat_priv->tt_changes_list_lock);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		list_del(&entry->list);
		kfree(entry);
	}

	atomic_set(&bat_priv->tt_local_changes, 0);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
}
519 
520 /* find out if an orig_node is already in the list of a tt_global_entry.
521  * returns 1 if found, 0 otherwise
522  */
523 static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
524 				     const struct orig_node *orig_node)
525 {
526 	struct tt_orig_list_entry *tmp_orig_entry;
527 	const struct hlist_head *head;
528 	struct hlist_node *node;
529 	bool found = false;
530 
531 	rcu_read_lock();
532 	head = &entry->orig_list;
533 	hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
534 		if (tmp_orig_entry->orig_node == orig_node) {
535 			found = true;
536 			break;
537 		}
538 	}
539 	rcu_read_unlock();
540 	return found;
541 }
542 
/* Attach @orig_node (announcing this client at @ttvn) to the originator
 * list of @tt_global_entry. Takes a reference on @orig_node and bumps its
 * tt_size; both are released in tt_orig_list_entry_free_rcu(). Allocation
 * failure is silently ignored.
 */
static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
				     struct orig_node *orig_node,
				     int ttvn)
{
	struct tt_orig_list_entry *orig_entry;

	orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
	if (!orig_entry)
		return;

	INIT_HLIST_NODE(&orig_entry->list);
	atomic_inc(&orig_node->refcount);
	atomic_inc(&orig_node->tt_size);
	orig_entry->orig_node = orig_node;
	orig_entry->ttvn = ttvn;

	spin_lock_bh(&tt_global_entry->list_lock);
	hlist_add_head_rcu(&orig_entry->list,
			   &tt_global_entry->orig_list);
	spin_unlock_bh(&tt_global_entry->list_lock);
}
564 
/* Add the client @tt_addr, announced by @orig_node at @ttvn, to the global
 * translation table (creating the entry on first sight, otherwise just
 * attaching the originator). A matching local entry, if any, is scheduled
 * for removal. Returns 1 on success, 0 on allocation failure.
 *
 * caller must hold orig_node refcount
 */
int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
		  bool wifi)
{
	struct tt_global_entry *tt_global_entry = NULL;
	int ret = 0;
	int hash_added;

	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);

	if (!tt_global_entry) {
		tt_global_entry = kzalloc(sizeof(*tt_global_entry),
					  GFP_ATOMIC);
		if (!tt_global_entry)
			goto out;

		memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);

		tt_global_entry->common.flags = NO_FLAGS;
		tt_global_entry->roam_at = 0;
		/* refcount 2: one for the hash, one for this function */
		atomic_set(&tt_global_entry->common.refcount, 2);

		INIT_HLIST_HEAD(&tt_global_entry->orig_list);
		spin_lock_init(&tt_global_entry->list_lock);

		hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
				 choose_orig, &tt_global_entry->common,
				 &tt_global_entry->common.hash_entry);

		if (unlikely(hash_added != 0)) {
			/* remove the reference for the hash */
			tt_global_entry_free_ref(tt_global_entry);
			goto out_remove;
		}

		tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
	} else {
		/* there is already a global entry, use this one. */

		/* If there is the TT_CLIENT_ROAM flag set, there is only one
		 * originator left in the list and we previously received a
		 * delete + roaming change for this originator.
		 *
		 * We should first delete the old originator before adding the
		 * new one.
		 */
		if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
			tt_global_del_orig_list(tt_global_entry);
			tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
			tt_global_entry->roam_at = 0;
		}

		if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
			tt_global_add_orig_entry(tt_global_entry, orig_node,
						 ttvn);
	}

	if (wifi)
		tt_global_entry->common.flags |= TT_CLIENT_WIFI;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new global tt entry: %pM (via %pM)\n",
		tt_global_entry->common.addr, orig_node->orig);

out_remove:
	/* remove address from local hash if present */
	tt_local_remove(bat_priv, tt_global_entry->common.addr,
			"global tt received", roaming);
	ret = 1;
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	return ret;
}
640 
/* print all orig nodes who announce the address for this global entry.
 * it is assumed that the caller holds rcu_read_lock();
 */
static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
				  struct seq_file *seq)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	struct tt_common_entry *tt_common_entry;
	uint16_t flags;
	uint8_t last_ttvn;

	tt_common_entry = &tt_global_entry->common;

	head = &tt_global_entry->orig_list;

	/* one line per announcing originator: client mac, announced ttvn,
	 * originator mac, originator's current ttvn, flag characters
	 */
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		flags = tt_common_entry->flags;
		last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
		seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
			   tt_global_entry->common.addr, orig_entry->ttvn,
			   orig_entry->orig_node->orig, last_ttvn,
			   (flags & TT_CLIENT_ROAM ? 'R' : '.'),
			   (flags & TT_CLIENT_WIFI ? 'W' : '.'));
	}
}
668 
/* debugfs dump of the global translation table; seq_file show callback */
int tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);
	seq_printf(seq, "       %-13s %s       %-15s %s %s\n",
		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			tt_global_print_entry(tt_global_entry, seq);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}
721 
/* deletes the whole originator list of a tt_global_entry; each entry's
 * orig_node reference and tt_size accounting are released via RCU
 */
static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
{
	struct hlist_head *head;
	struct hlist_node *node, *safe;
	struct tt_orig_list_entry *orig_entry;

	spin_lock_bh(&tt_global_entry->list_lock);
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
		hlist_del_rcu(node);
		tt_orig_list_entry_free_ref(orig_entry);
	}
	spin_unlock_bh(&tt_global_entry->list_lock);

}
738 
/* remove @orig_node from the originator list of @tt_global_entry;
 * @message is used for debug logging only
 */
static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
				     struct tt_global_entry *tt_global_entry,
				     struct orig_node *orig_node,
				     const char *message)
{
	struct hlist_head *head;
	struct hlist_node *node, *safe;
	struct tt_orig_list_entry *orig_entry;

	spin_lock_bh(&tt_global_entry->list_lock);
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
		if (orig_entry->orig_node == orig_node) {
			bat_dbg(DBG_TT, bat_priv,
				"Deleting %pM from global tt entry %pM: %s\n",
				orig_node->orig, tt_global_entry->common.addr,
				message);
			hlist_del_rcu(node);
			tt_orig_list_entry_free_ref(orig_entry);
		}
	}
	spin_unlock_bh(&tt_global_entry->list_lock);
}
762 
/* unlink @tt_global_entry from the global hash and drop the hash's
 * reference; the entry is freed once all other references are gone
 */
static void tt_global_del_struct(struct bat_priv *bat_priv,
				 struct tt_global_entry *tt_global_entry,
				 const char *message)
{
	bat_dbg(DBG_TT, bat_priv,
		"Deleting global tt entry %pM: %s\n",
		tt_global_entry->common.addr, message);

	hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
		    tt_global_entry->common.addr);
	tt_global_entry_free_ref(tt_global_entry);

}
776 
/* If the client is to be deleted, we check if it is the last originator
 * entry within tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and
 * the timer, otherwise we simply remove the originator scheduled for
 * deletion.
 */
static void tt_global_del_roaming(struct bat_priv *bat_priv,
				  struct tt_global_entry *tt_global_entry,
				  struct orig_node *orig_node,
				  const char *message)
{
	bool last_entry = true;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;

	/* no local entry exists, case 1:
	 * Check if this is the last one or if other entries exist.
	 */

	rcu_read_lock();
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		if (orig_entry->orig_node != orig_node) {
			last_entry = false;
			break;
		}
	}
	rcu_read_unlock();

	if (last_entry) {
		/* it's the last one, mark for roaming. */
		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;
	} else
		/* there is another entry, we can simply delete this
		 * one and can still use the other one.
		 */
		tt_global_del_orig_entry(bat_priv, tt_global_entry,
					 orig_node, message);
}
816 
817 
818 
/* Remove the client @addr, as announced by @orig_node, from the global
 * table. @roaming tells whether the deletion was triggered by a roaming
 * event (see the case analysis below); @message is for debug logging.
 */
static void tt_global_del(struct bat_priv *bat_priv,
			  struct orig_node *orig_node,
			  const unsigned char *addr,
			  const char *message, bool roaming)
{
	struct tt_global_entry *tt_global_entry = NULL;
	struct tt_local_entry *tt_local_entry = NULL;

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	if (!roaming) {
		/* plain deletion: drop this originator and, if it was the
		 * last one, the whole global entry
		 */
		tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
					 message);

		if (hlist_empty(&tt_global_entry->orig_list))
			tt_global_del_struct(bat_priv, tt_global_entry,
					     message);

		goto out;
	}

	/* if we are deleting a global entry due to a roam
	 * event, there are two possibilities:
	 * 1) the client roamed from node A to node B => if there
	 *    is only one originator left for this client, we mark
	 *    it with TT_CLIENT_ROAM, we start a timer and we
	 *    wait for node B to claim it. In case of timeout
	 *    the entry is purged.
	 *
	 *    If there are other originators left, we directly delete
	 *    the originator.
	 * 2) the client roamed to us => we can directly delete
	 *    the global entry, since it is useless now. */

	tt_local_entry = tt_local_hash_find(bat_priv,
					    tt_global_entry->common.addr);
	if (tt_local_entry) {
		/* local entry exists, case 2: client roamed to us. */
		tt_global_del_orig_list(tt_global_entry);
		tt_global_del_struct(bat_priv, tt_global_entry, message);
	} else
		/* no local entry exists, case 1: check for roaming */
		tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
				      message);


out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
}
873 
/* Remove all global entries announced by @orig_node (e.g. because the
 * originator went away) and reset its translation table accounting.
 */
void tt_global_del_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node, const char *message)
{
	struct tt_global_entry *tt_global_entry;
	struct tt_common_entry *tt_common_entry;
	uint32_t i;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct hlist_node *node, *safe;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, safe,
					  head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);

			tt_global_del_orig_entry(bat_priv, tt_global_entry,
						 orig_node, message);

			/* no originator left: drop the whole entry */
			if (hlist_empty(&tt_global_entry->orig_list)) {
				bat_dbg(DBG_TT, bat_priv,
					"Deleting global tt entry %pM: %s\n",
					tt_global_entry->common.addr,
					message);
				hlist_del_rcu(node);
				tt_global_entry_free_ref(tt_global_entry);
			}
		}
		spin_unlock_bh(list_lock);
	}
	atomic_set(&orig_node->tt_size, 0);
	orig_node->tt_initialised = false;
}
916 
/* Delete global entries whose TT_CLIENT_ROAM grace period expired without
 * the new originator claiming the client.
 */
static void tt_global_roam_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
				continue;
			if (!has_timed_out(tt_global_entry->roam_at,
					   TT_CLIENT_ROAM_TIMEOUT))
				continue;

			bat_dbg(DBG_TT, bat_priv,
				"Deleting global tt entry (%pM): Roaming timeout\n",
				tt_global_entry->common.addr);

			hlist_del_rcu(node);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}

}
954 
/* release all global tt entries and destroy the global table hash
 * (called on shutdown)
 */
static void tt_global_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt_global_hash)
		return;

	hash = bat_priv->tt_global_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			/* drop the reference held by the hash table */
			hlist_del_rcu(node);
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);

	bat_priv->tt_global_hash = NULL;
}
990 
991 static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
992 			    struct tt_global_entry *tt_global_entry)
993 {
994 	bool ret = false;
995 
996 	if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
997 	    tt_global_entry->common.flags & TT_CLIENT_WIFI)
998 		ret = true;
999 
1000 	return ret;
1001 }
1002 
/* Looks up the originator announcing the global client 'addr' that is best
 * reachable (highest TQ towards its router). If 'src' is non-NULL and AP
 * isolation is enabled, the lookup is aborted when src and addr must be
 * isolated from each other. Returns the orig_node with a reference held
 * (caller must release it), or NULL. */
struct orig_node *transtable_search(struct bat_priv *bat_priv,
				    const uint8_t *src, const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	int best_tq;

	/* the source entry is only needed to evaluate AP isolation below */
	if (src && atomic_read(&bat_priv->ap_isolation)) {
		tt_local_entry = tt_local_hash_find(bat_priv, src);
		if (!tt_local_entry)
			goto out;
	}

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	/* check whether the clients should not communicate due to AP
	 * isolation */
	if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	best_tq = 0;

	/* among all originators announcing this client, pick the one whose
	 * router currently has the best TQ */
	rcu_read_lock();
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		router = orig_node_get_router(orig_entry->orig_node);
		if (!router)
			continue;

		if (router->tq_avg > best_tq) {
			orig_node = orig_entry->orig_node;
			best_tq = router->tq_avg;
		}
		neigh_node_free_ref(router);
	}
	/* found anything? Grab a reference before leaving the RCU section;
	 * refuse the candidate if it is already being freed */
	if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;
	rcu_read_unlock();
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);

	return orig_node;
}
1057 
/* Calculates the checksum over the global table entries announced by the
 * given orig_node (the result is compared against the CRC that originator
 * advertises for its own local table) */
static uint16_t tt_global_crc(struct bat_priv *bat_priv,
			      struct orig_node *orig_node)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			/* Roaming clients are in the global table for
			 * consistency only. They don't have to be
			 * taken into account while computing the
			 * global crc
			 */
			if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
				continue;

			/* find out if this global entry is announced by this
			 * originator
			 */
			if (!tt_global_entry_has_orig(tt_global_entry,
						      orig_node))
				continue;

			/* CRC16 over the client's mac address; per-entry
			 * checksums are xored so the total is independent of
			 * iteration order */
			total_one = 0;
			for (j = 0; j < ETH_ALEN; j++)
				total_one = crc16_byte(total_one,
					tt_global_entry->common.addr[j]);
			total ^= total_one;
		}
		rcu_read_unlock();
	}

	return total;
}
1106 
1107 /* Calculates the checksum of the local table */
1108 uint16_t tt_local_crc(struct bat_priv *bat_priv)
1109 {
1110 	uint16_t total = 0, total_one;
1111 	struct hashtable_t *hash = bat_priv->tt_local_hash;
1112 	struct tt_common_entry *tt_common_entry;
1113 	struct hlist_node *node;
1114 	struct hlist_head *head;
1115 	uint32_t i;
1116 	int j;
1117 
1118 	for (i = 0; i < hash->size; i++) {
1119 		head = &hash->table[i];
1120 
1121 		rcu_read_lock();
1122 		hlist_for_each_entry_rcu(tt_common_entry, node,
1123 					 head, hash_entry) {
1124 			/* not yet committed clients have not to be taken into
1125 			 * account while computing the CRC */
1126 			if (tt_common_entry->flags & TT_CLIENT_NEW)
1127 				continue;
1128 			total_one = 0;
1129 			for (j = 0; j < ETH_ALEN; j++)
1130 				total_one = crc16_byte(total_one,
1131 						   tt_common_entry->addr[j]);
1132 			total ^= total_one;
1133 		}
1134 		rcu_read_unlock();
1135 	}
1136 
1137 	return total;
1138 }
1139 
1140 static void tt_req_list_free(struct bat_priv *bat_priv)
1141 {
1142 	struct tt_req_node *node, *safe;
1143 
1144 	spin_lock_bh(&bat_priv->tt_req_list_lock);
1145 
1146 	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1147 		list_del(&node->list);
1148 		kfree(node);
1149 	}
1150 
1151 	spin_unlock_bh(&bat_priv->tt_req_list_lock);
1152 }
1153 
1154 static void tt_save_orig_buffer(struct bat_priv *bat_priv,
1155 				struct orig_node *orig_node,
1156 				const unsigned char *tt_buff,
1157 				uint8_t tt_num_changes)
1158 {
1159 	uint16_t tt_buff_len = tt_len(tt_num_changes);
1160 
1161 	/* Replace the old buffer only if I received something in the
1162 	 * last OGM (the OGM could carry no changes) */
1163 	spin_lock_bh(&orig_node->tt_buff_lock);
1164 	if (tt_buff_len > 0) {
1165 		kfree(orig_node->tt_buff);
1166 		orig_node->tt_buff_len = 0;
1167 		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
1168 		if (orig_node->tt_buff) {
1169 			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
1170 			orig_node->tt_buff_len = tt_buff_len;
1171 		}
1172 	}
1173 	spin_unlock_bh(&orig_node->tt_buff_lock);
1174 }
1175 
1176 static void tt_req_purge(struct bat_priv *bat_priv)
1177 {
1178 	struct tt_req_node *node, *safe;
1179 
1180 	spin_lock_bh(&bat_priv->tt_req_list_lock);
1181 	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1182 		if (has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) {
1183 			list_del(&node->list);
1184 			kfree(node);
1185 		}
1186 	}
1187 	spin_unlock_bh(&bat_priv->tt_req_list_lock);
1188 }
1189 
1190 /* returns the pointer to the new tt_req_node struct if no request
1191  * has already been issued for this orig_node, NULL otherwise */
1192 static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
1193 					  struct orig_node *orig_node)
1194 {
1195 	struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
1196 
1197 	spin_lock_bh(&bat_priv->tt_req_list_lock);
1198 	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
1199 		if (compare_eth(tt_req_node_tmp, orig_node) &&
1200 		    !has_timed_out(tt_req_node_tmp->issued_at,
1201 				   TT_REQUEST_TIMEOUT))
1202 			goto unlock;
1203 	}
1204 
1205 	tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
1206 	if (!tt_req_node)
1207 		goto unlock;
1208 
1209 	memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
1210 	tt_req_node->issued_at = jiffies;
1211 
1212 	list_add(&tt_req_node->list, &bat_priv->tt_req_list);
1213 unlock:
1214 	spin_unlock_bh(&bat_priv->tt_req_list_lock);
1215 	return tt_req_node;
1216 }
1217 
1218 /* data_ptr is useless here, but has to be kept to respect the prototype */
1219 static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
1220 {
1221 	const struct tt_common_entry *tt_common_entry = entry_ptr;
1222 
1223 	if (tt_common_entry->flags & TT_CLIENT_NEW)
1224 		return 0;
1225 	return 1;
1226 }
1227 
1228 static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
1229 {
1230 	const struct tt_common_entry *tt_common_entry = entry_ptr;
1231 	const struct tt_global_entry *tt_global_entry;
1232 	const struct orig_node *orig_node = data_ptr;
1233 
1234 	if (tt_common_entry->flags & TT_CLIENT_ROAM)
1235 		return 0;
1236 
1237 	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
1238 				       common);
1239 
1240 	return tt_global_entry_has_orig(tt_global_entry, orig_node);
1241 }
1242 
/* Builds an skb carrying a tt_query packet header followed by up to tt_len
 * bytes of tt_change records taken from 'hash'. Entries are filtered
 * through 'valid_cb' (invoked with 'cb_data') when it is non-NULL. The
 * number of copied entries is stored in tt_data (network byte order).
 * Returns the skb (owned by the caller) or NULL on allocation failure. */
static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
					      struct hashtable_t *hash,
					      struct hard_iface *primary_if,
					      int (*valid_cb)(const void *,
							      const void *),
					      void *cb_data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);
	uint32_t i;

	/* shrink the payload to what fits into one MTU-sized frame, keeping
	 * it a whole number of tt_change records */
	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
						     tt_query_size + tt_len);
	tt_response->ttvn = ttvn;

	/* change records start right after the query header */
	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			/* stop once the reserved room is full */
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_common_entry->addr,
			       ETH_ALEN);
			tt_change->flags = NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();

	/* store in the message the number of entries we have successfully
	 * copied */
	tt_response->tt_data = htons(tt_count);

out:
	return skb;
}
1307 
/* Sends a TT_REQUEST for the table of dst_orig_node, asking either for the
 * diff since 'ttvn' or (full_table) the complete table. Only one request
 * per destination may be in flight at a time. Returns 0 on success, 1 on
 * failure (in which case the pending-request entry is removed again). */
static int send_tt_request(struct bat_priv *bat_priv,
			   struct orig_node *dst_orig_node,
			   uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;
	int ret = 1;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet));

	tt_request->header.packet_type = BAT_TT_QUERY;
	tt_request->header.version = COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->header.ttl = TTL;
	tt_request->ttvn = ttvn;
	/* carry the CRC we currently hold so the receiver can detect a
	 * mismatch */
	tt_request->tt_data = htons(tt_crc);
	tt_request->flags = TT_REQUEST;

	if (full_table)
		tt_request->flags |= TT_FULL_TABLE;

	neigh_node = orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_REQUEST to %pM via %pM [%c]\n",
		dst_orig_node->orig, neigh_node->addr,
		(full_table ? 'F' : '.'));

	/* skb ownership passes to the send path from here on */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (ret)
		kfree_skb(skb);
	/* on failure, unlink the pending-request entry so that a new
	 * request towards this destination can be issued later */
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}
1377 
/* Answers a TT_REQUEST addressed to another node on its behalf, using the
 * locally cached knowledge about that node's table (cached OGM diff buffer
 * or the global table). Returns true when a response was sent, false when
 * the caller should let the request be routed onwards. */
static bool send_other_tt_response(struct bat_priv *bat_priv,
				   struct tt_query_packet *tt_request)
{
	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t orig_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
		tt_request->src, tt_request->ttvn, tt_request->dst,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* Let's get the orig node of the REAL destination */
	req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
	if (!req_dst_orig_node)
		goto out;

	res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
	if (!res_dst_orig_node)
		goto out;

	neigh_node = orig_node_get_router(res_dst_orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
	req_ttvn = tt_request->ttvn;

	/* I don't have the requested data */
	/* NOTE(review): tt_data is compared in host byte order here — this
	 * relies on the receive path having already converted it; confirm
	 * against the caller in routing.c */
	if (orig_ttvn != req_ttvn ||
	    tt_request->tt_data != req_dst_orig_node->tt_crc)
		goto out;

	/* If the full table has been explicitly requested */
	if (tt_request->flags & TT_FULL_TABLE ||
	    !req_dst_orig_node->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can */
	if (!full_table) {
		/* diff answer: replay the cached OGM change buffer */
		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
		tt_len = req_dst_orig_node->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		/* Copy the last orig_node's OGM buffer */
		memcpy(tt_buff, req_dst_orig_node->tt_buff,
		       req_dst_orig_node->tt_buff_len);

		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
	} else {
		/* full answer: dump the global entries announced by the
		 * requested originator */
		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_global_hash,
					     primary_if, tt_global_valid_entry,
					     req_dst_orig_node);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->header.packet_type = BAT_TT_QUERY;
	tt_response->header.version = COMPAT_VERSION;
	tt_response->header.ttl = TTL;
	/* answer on behalf of the real destination */
	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
		res_dst_orig_node->orig, neigh_node->addr,
		req_dst_orig_node->orig, req_ttvn);

	/* skb ownership passes to the send path from here on */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);

out:
	if (res_dst_orig_node)
		orig_node_free_ref(res_dst_orig_node);
	if (req_dst_orig_node)
		orig_node_free_ref(req_dst_orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	return ret;

}
/* Answers a TT_REQUEST addressed to this node with either the cached local
 * diff buffer or the full local table. Always returns true: the request
 * was for us, so it must not be re-routed regardless of the outcome. */
static bool send_my_tt_response(struct bat_priv *bat_priv,
				struct tt_query_packet *tt_request)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	/* ret only tracks whether the skb was handed to the send path */
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
		tt_request->src, tt_request->ttvn,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));


	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	req_ttvn = tt_request->ttvn;

	orig_node = orig_hash_find(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table */
	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can */
	if (!full_table) {
		/* diff answer: replay the cached local change buffer */
		spin_lock_bh(&bat_priv->tt_buff_lock);
		tt_len = bat_priv->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		memcpy(tt_buff, bat_priv->tt_buff,
		       bat_priv->tt_buff_len);
		spin_unlock_bh(&bat_priv->tt_buff_lock);
	} else {
		/* full answer: dump every committed local entry */
		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_local_hash,
					     primary_if, tt_local_valid_entry,
					     NULL);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->header.packet_type = BAT_TT_QUERY;
	tt_response->header.version = COMPAT_VERSION;
	tt_response->header.ttl = TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE to %pM via %pM [%c]\n",
		orig_node->orig, neigh_node->addr,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* skb ownership passes to the send path from here on */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}
1617 
1618 bool send_tt_response(struct bat_priv *bat_priv,
1619 		      struct tt_query_packet *tt_request)
1620 {
1621 	if (is_my_mac(tt_request->dst)) {
1622 		/* don't answer backbone gws! */
1623 		if (bla_is_backbone_gw_orig(bat_priv, tt_request->src))
1624 			return true;
1625 
1626 		return send_my_tt_response(bat_priv, tt_request);
1627 	} else {
1628 		return send_other_tt_response(bat_priv, tt_request);
1629 	}
1630 }
1631 
1632 static void _tt_update_changes(struct bat_priv *bat_priv,
1633 			       struct orig_node *orig_node,
1634 			       struct tt_change *tt_change,
1635 			       uint16_t tt_num_changes, uint8_t ttvn)
1636 {
1637 	int i;
1638 
1639 	for (i = 0; i < tt_num_changes; i++) {
1640 		if ((tt_change + i)->flags & TT_CLIENT_DEL)
1641 			tt_global_del(bat_priv, orig_node,
1642 				      (tt_change + i)->addr,
1643 				      "tt removed by changes",
1644 				      (tt_change + i)->flags & TT_CLIENT_ROAM);
1645 		else
1646 			if (!tt_global_add(bat_priv, orig_node,
1647 					   (tt_change + i)->addr, ttvn, false,
1648 					   (tt_change + i)->flags &
1649 							TT_CLIENT_WIFI))
1650 				/* In case of problem while storing a
1651 				 * global_entry, we stop the updating
1652 				 * procedure without committing the
1653 				 * ttvn change. This will avoid to send
1654 				 * corrupted data on tt_request
1655 				 */
1656 				return;
1657 	}
1658 	orig_node->tt_initialised = true;
1659 }
1660 
1661 static void tt_fill_gtable(struct bat_priv *bat_priv,
1662 			   struct tt_query_packet *tt_response)
1663 {
1664 	struct orig_node *orig_node = NULL;
1665 
1666 	orig_node = orig_hash_find(bat_priv, tt_response->src);
1667 	if (!orig_node)
1668 		goto out;
1669 
1670 	/* Purge the old table first.. */
1671 	tt_global_del_orig(bat_priv, orig_node, "Received full table");
1672 
1673 	_tt_update_changes(bat_priv, orig_node,
1674 			   (struct tt_change *)(tt_response + 1),
1675 			   tt_response->tt_data, tt_response->ttvn);
1676 
1677 	spin_lock_bh(&orig_node->tt_buff_lock);
1678 	kfree(orig_node->tt_buff);
1679 	orig_node->tt_buff_len = 0;
1680 	orig_node->tt_buff = NULL;
1681 	spin_unlock_bh(&orig_node->tt_buff_lock);
1682 
1683 	atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
1684 
1685 out:
1686 	if (orig_node)
1687 		orig_node_free_ref(orig_node);
1688 }
1689 
/* Applies the given change list to the global table, caches the raw change
 * buffer for later (non-full) TT_RESPONSEs and commits the new ttvn for
 * this originator. */
static void tt_update_changes(struct bat_priv *bat_priv,
			      struct orig_node *orig_node,
			      uint16_t tt_num_changes, uint8_t ttvn,
			      struct tt_change *tt_change)
{
	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
			   ttvn);

	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
			    tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}
1702 
1703 bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
1704 {
1705 	struct tt_local_entry *tt_local_entry = NULL;
1706 	bool ret = false;
1707 
1708 	tt_local_entry = tt_local_hash_find(bat_priv, addr);
1709 	if (!tt_local_entry)
1710 		goto out;
1711 	/* Check if the client has been logically deleted (but is kept for
1712 	 * consistency purpose) */
1713 	if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
1714 		goto out;
1715 	ret = true;
1716 out:
1717 	if (tt_local_entry)
1718 		tt_local_entry_free_ref(tt_local_entry);
1719 	return ret;
1720 }
1721 
/* Processes an incoming TT_RESPONSE: applies the carried table (full or
 * diff), retires the matching pending request and refreshes the CRC held
 * for the answering originator. */
void handle_tt_response(struct bat_priv *bat_priv,
			struct tt_query_packet *tt_response)
{
	struct tt_req_node *node, *safe;
	struct orig_node *orig_node = NULL;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
		tt_response->src, tt_response->ttvn, tt_response->tt_data,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* we should have never asked a backbone gw */
	if (bla_is_backbone_gw_orig(bat_priv, tt_response->src))
		goto out;

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	/* full responses replace the table, diff responses patch it */
	if (tt_response->flags & TT_FULL_TABLE)
		tt_fill_gtable(bat_priv, tt_response);
	else
		tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
				  tt_response->ttvn,
				  (struct tt_change *)(tt_response + 1));

	/* Delete the tt_req_node from pending tt_requests list */
	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (!compare_eth(node->addr, tt_response->src))
			continue;
		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);

	/* Recalculate the CRC for this orig_node and store it */
	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
	/* Roaming phase is over: tables are in sync again. I can
	 * unset the flag */
	orig_node->tt_poss_change = false;
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}
1767 
int tt_init(struct bat_priv *bat_priv)
{
	/* both the local and the global table must come up, otherwise
	 * report failure to the caller */
	if (!tt_local_init(bat_priv) || !tt_global_init(bat_priv))
		return 0;

	/* arm the periodic purge worker */
	tt_start_timer(bat_priv);

	return 1;
}
1780 
1781 static void tt_roam_list_free(struct bat_priv *bat_priv)
1782 {
1783 	struct tt_roam_node *node, *safe;
1784 
1785 	spin_lock_bh(&bat_priv->tt_roam_list_lock);
1786 
1787 	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1788 		list_del(&node->list);
1789 		kfree(node);
1790 	}
1791 
1792 	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1793 }
1794 
1795 static void tt_roam_purge(struct bat_priv *bat_priv)
1796 {
1797 	struct tt_roam_node *node, *safe;
1798 
1799 	spin_lock_bh(&bat_priv->tt_roam_list_lock);
1800 	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1801 		if (!has_timed_out(node->first_time, ROAMING_MAX_TIME))
1802 			continue;
1803 
1804 		list_del(&node->list);
1805 		kfree(node);
1806 	}
1807 	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1808 }
1809 
/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise */
static bool tt_check_roam_count(struct bat_priv *bat_priv,
				uint8_t *client)
{
	struct tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
		if (!compare_eth(tt_roam_node->addr, client))
			continue;

		/* stale entries are ignored here; tt_roam_purge() will
		 * reclaim them later */
		if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME))
			continue;

		/* consume one unit of this client's roaming budget */
		if (!atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		break;
	}

	/* first (valid) roaming event for this client: start a fresh
	 * budget of ROAMING_MAX_COUNT - 1 further events */
	if (!ret) {
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);

		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
		ret = true;
	}

unlock:
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
	return ret;
}
1855 
/* Sends a ROAMING_ADV to orig_node (the node the client roamed away from)
 * announcing that 'client' is now reachable through us, unless the client
 * has exhausted its roaming budget. */
static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
			  struct orig_node *orig_node)
{
	struct neigh_node *neigh_node = NULL;
	struct sk_buff *skb = NULL;
	struct roam_adv_packet *roam_adv_packet;
	/* ret tracks whether the skb was handed to the send path */
	int ret = 1;
	struct hard_iface *primary_if;

	/* before going on we have to check whether the client has
	 * already roamed to us too many times */
	/* NOTE(review): tt_check_roam_count() consumes one unit of the
	 * roaming budget even if the advert below cannot be sent — confirm
	 * this is intended */
	if (!tt_check_roam_count(bat_priv, client))
		goto out;

	skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
					sizeof(struct roam_adv_packet));

	roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
	roam_adv_packet->header.version = COMPAT_VERSION;
	roam_adv_packet->header.ttl = TTL;
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	hardif_free_ref(primary_if);
	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
	memcpy(roam_adv_packet->client, client, ETH_ALEN);

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
		orig_node->orig, client, neigh_node->addr);

	/* skb ownership passes to the send path from here on */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (ret)
		kfree_skb(skb);
	return;
}
1908 
1909 static void tt_purge(struct work_struct *work)
1910 {
1911 	struct delayed_work *delayed_work =
1912 		container_of(work, struct delayed_work, work);
1913 	struct bat_priv *bat_priv =
1914 		container_of(delayed_work, struct bat_priv, tt_work);
1915 
1916 	tt_local_purge(bat_priv);
1917 	tt_global_roam_purge(bat_priv);
1918 	tt_req_purge(bat_priv);
1919 	tt_roam_purge(bat_priv);
1920 
1921 	tt_start_timer(bat_priv);
1922 }
1923 
/* Tears down all translation-table state: stops the purge worker, frees
 * both hash tables, all pending lists and the cached local change buffer. */
void tt_free(struct bat_priv *bat_priv)
{
	/* make sure the purge worker cannot run concurrently with teardown */
	cancel_delayed_work_sync(&bat_priv->tt_work);

	tt_local_table_free(bat_priv);
	tt_global_table_free(bat_priv);
	tt_req_list_free(bat_priv);
	tt_changes_list_free(bat_priv);
	tt_roam_list_free(bat_priv);

	kfree(bat_priv->tt_buff);
}
1936 
1937 /* This function will enable or disable the specified flags for all the entries
1938  * in the given hash table and returns the number of modified entries */
1939 static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
1940 			     bool enable)
1941 {
1942 	uint32_t i;
1943 	uint16_t changed_num = 0;
1944 	struct hlist_head *head;
1945 	struct hlist_node *node;
1946 	struct tt_common_entry *tt_common_entry;
1947 
1948 	if (!hash)
1949 		goto out;
1950 
1951 	for (i = 0; i < hash->size; i++) {
1952 		head = &hash->table[i];
1953 
1954 		rcu_read_lock();
1955 		hlist_for_each_entry_rcu(tt_common_entry, node,
1956 					 head, hash_entry) {
1957 			if (enable) {
1958 				if ((tt_common_entry->flags & flags) == flags)
1959 					continue;
1960 				tt_common_entry->flags |= flags;
1961 			} else {
1962 				if (!(tt_common_entry->flags & flags))
1963 					continue;
1964 				tt_common_entry->flags &= ~flags;
1965 			}
1966 			changed_num++;
1967 		}
1968 		rcu_read_unlock();
1969 	}
1970 out:
1971 	return changed_num;
1972 }
1973 
/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		/* the bucket lock serializes writers; readers still walk
		 * the list under RCU, hence hlist_del_rcu() below */
		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
				continue;

			bat_dbg(DBG_TT, bat_priv,
				"Deleting local tt entry (%pM): pending\n",
				tt_common_entry->addr);

			atomic_dec(&bat_priv->num_local_tt);
			hlist_del_rcu(node);
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}

}
2013 
2014 void tt_commit_changes(struct bat_priv *bat_priv)
2015 {
2016 	uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash,
2017 					    TT_CLIENT_NEW, false);
2018 	/* all the reset entries have now to be effectively counted as local
2019 	 * entries */
2020 	atomic_add(changed_num, &bat_priv->num_local_tt);
2021 	tt_local_purge_pending_clients(bat_priv);
2022 
2023 	/* Increment the TTVN only once per OGM interval */
2024 	atomic_inc(&bat_priv->ttvn);
2025 	bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
2026 		(uint8_t)atomic_read(&bat_priv->ttvn));
2027 	bat_priv->tt_poss_change = false;
2028 }
2029 
2030 bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
2031 {
2032 	struct tt_local_entry *tt_local_entry = NULL;
2033 	struct tt_global_entry *tt_global_entry = NULL;
2034 	bool ret = true;
2035 
2036 	if (!atomic_read(&bat_priv->ap_isolation))
2037 		return false;
2038 
2039 	tt_local_entry = tt_local_hash_find(bat_priv, dst);
2040 	if (!tt_local_entry)
2041 		goto out;
2042 
2043 	tt_global_entry = tt_global_hash_find(bat_priv, src);
2044 	if (!tt_global_entry)
2045 		goto out;
2046 
2047 	if (_is_ap_isolated(tt_local_entry, tt_global_entry))
2048 		goto out;
2049 
2050 	ret = false;
2051 
2052 out:
2053 	if (tt_global_entry)
2054 		tt_global_entry_free_ref(tt_global_entry);
2055 	if (tt_local_entry)
2056 		tt_local_entry_free_ref(tt_local_entry);
2057 	return ret;
2058 }
2059 
/* Update the local view of @orig_node's translation table using the TT
 * metadata carried by a received OGM.
 *
 * @tt_buff: the tt changes attached to the OGM (tt_num_changes entries)
 * @ttvn: the translation table version number announced by the originator
 * @tt_crc: the table CRC announced by the originator
 *
 * Either applies the attached incremental changes (when the announced ttvn
 * follows our recorded one) or, on any detected inconsistency, issues a
 * full/partial tt request to resynchronise. Note the goto below jumps from
 * the first branch into the request_table label inside the else branch.
 */
void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
		    const unsigned char *tt_buff, uint8_t tt_num_changes,
		    uint8_t ttvn, uint16_t tt_crc)
{
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	bool full_table = true;

	/* don't care about a backbone gateways updates. */
	if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
		return;

	/* orig table not initialised AND first diff is in the OGM OR the ttvn
	 * increased by one -> we can apply the attached changes */
	if ((!orig_node->tt_initialised && ttvn == 1) ||
	    ttvn - orig_ttvn == 1) {
		/* the OGM could not contain the changes due to their size or
		 * because they have already been sent TT_OGM_APPEND_MAX times.
		 * In this case send a tt request */
		if (!tt_num_changes) {
			full_table = false;
			goto request_table;
		}

		tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
				  (struct tt_change *)tt_buff);

		/* Even if we received the precomputed crc with the OGM, we
		 * prefer to recompute it to spot any possible inconsistency
		 * in the global table */
		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);

		/* The ttvn alone is not enough to guarantee consistency
		 * because a single value could represent different states
		 * (due to the wrap around). Thus a node has to check whether
		 * the resulting table (after applying the changes) is still
		 * consistent or not. E.g. a node could disconnect while its
		 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
		 * checking the CRC value is mandatory to detect the
		 * inconsistency */
		if (orig_node->tt_crc != tt_crc)
			goto request_table;

		/* Roaming phase is over: tables are in sync again. I can
		 * unset the flag */
		orig_node->tt_poss_change = false;
	} else {
		/* if we missed more than one change or our tables are not
		 * in sync anymore -> request fresh tt data */

		if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
		    orig_node->tt_crc != tt_crc) {
request_table:
			bat_dbg(DBG_TT, bat_priv,
				"TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
				orig_node->orig, ttvn, orig_ttvn, tt_crc,
				orig_node->tt_crc, tt_num_changes);
			send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
					full_table);
			return;
		}
	}
}
2122 
2123 /* returns true whether we know that the client has moved from its old
2124  * originator to another one. This entry is kept is still kept for consistency
2125  * purposes
2126  */
2127 bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr)
2128 {
2129 	struct tt_global_entry *tt_global_entry;
2130 	bool ret = false;
2131 
2132 	tt_global_entry = tt_global_hash_find(bat_priv, addr);
2133 	if (!tt_global_entry)
2134 		goto out;
2135 
2136 	ret = tt_global_entry->common.flags & TT_CLIENT_ROAM;
2137 	tt_global_entry_free_ref(tt_global_entry);
2138 out:
2139 	return ret;
2140 }
2141