xref: /linux/net/mac80211/mesh_pathtbl.c (revision 3e4cd0737d2e9c3dd52153a23aef1753e3a99fc4)
1 /*
2  * Copyright (c) 2008, 2009 open80211s Ltd.
3  * Author:     Luis Carlos Cobo <luisca@cozybit.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  */

#include <linux/etherdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2
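
/*
 * Worked example of the sizing constants above: with
 * INIT_PATHS_SIZE_ORDER = 2 the initial table has 1 << 2 = 4 buckets,
 * so mesh_path_add() requests a grow once the entry count reaches
 * MEAN_CHAIN_LEN * (hash_mask + 1) = 2 * 4 = 8. Each grow doubles the
 * bucket count, keeping the mean chain length below MEAN_CHAIN_LEN.
 */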

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing.
	 */
	struct mesh_path *mpath;
};

static struct mesh_table *mesh_paths;
static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock is taken for writing by the table-grow functions and for
 * reading by the node add/delete paths. Plain table reads (i.e. lookups)
 * need neither: they are protected by RCU.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

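/*
 * Sketch of the add/delete locking pattern (illustrative; it mirrors
 * what mesh_path_add() and mesh_path_del() below actually do):
 *
 *	read_lock_bh(&pathtbl_resize_lock);
 *	spin_lock_bh(&tbl->hashwlock[hash_idx]);
 *	... add or remove a node in one bucket ...
 *	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
 *	read_unlock_bh(&pathtbl_resize_lock);
 */
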
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_KERNEL);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_KERNEL);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, false);
	}
	return -ENOMEM;
}

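/*
 * Resize protocol, as used by mesh_mpath_table_grow() and
 * mesh_mpp_table_grow() below: copy the nodes into a larger table under
 * pathtbl_resize_lock, publish it with rcu_assign_pointer(), wait for
 * readers of the old table with synchronize_rcu(), then free the old
 * table with free_leafs == false, since the nodes of both tables point
 * at the same mesh_path leaves.
 */
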
static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}


/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}


/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read-side critical section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

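/*
 * Hypothetical caller sketch (the real callers live in mesh_hwmp.c;
 * the variable names here are placeholders):
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */
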
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mpp_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}


/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side critical section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	setup_timer(&new_mpath->timer, mesh_path_timer,
		    (unsigned long) new_mpath);

	hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mesh_paths->entries) >=
		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}

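/*
 * Hypothetical usage sketch (the real callers live in mesh_hwmp.c):
 * create the entry on demand and tolerate a concurrent creator winning
 * the race, which shows up here as -EEXIST:
 *
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (!mpath) {
 *		err = mesh_path_add(dst, sdata);
 *		if (err && err != -EEXIST)
 *			return err;
 *		mpath = mesh_path_lookup(dst, sdata);
 *	}
 */
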
void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	rcu_read_lock();
	newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1);
	if (!newtbl) {
		/* don't leak the RCU read-side critical section on failure */
		rcu_read_unlock();
		return;
	}
	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = mesh_paths;
	if (mesh_table_grow(mesh_paths, newtbl) < 0) {
		rcu_read_unlock();
		__mesh_table_free(newtbl);
		write_unlock_bh(&pathtbl_resize_lock);
		return;
	}
	rcu_read_unlock();
	rcu_assign_pointer(mesh_paths, newtbl);
	write_unlock_bh(&pathtbl_resize_lock);

	synchronize_rcu();
	mesh_table_free(oldtbl, false);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	rcu_read_lock();
	newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1);
	if (!newtbl) {
		/* don't leak the RCU read-side critical section on failure */
		rcu_read_unlock();
		return;
	}
	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = mpp_paths;
	if (mesh_table_grow(mpp_paths, newtbl) < 0) {
		rcu_read_unlock();
		__mesh_table_free(newtbl);
		write_unlock_bh(&pathtbl_resize_lock);
		return;
	}
	rcu_read_unlock();
	rcu_assign_pointer(mpp_paths, newtbl);
	write_unlock_bh(&pathtbl_resize_lock);

	synchronize_rcu();
	mesh_table_free(oldtbl, false);
}

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
	bucket = &mpp_paths->hash_buckets[hash_idx];

	spin_lock_bh(&mpp_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mpp_paths->entries) >=
		mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
		grow = 1;

	spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}


/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (mpath->next_hop == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					cpu_to_le16(PERR_RCODE_DEST_UNREACH),
					bcast, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}

/**
 * mesh_path_flush_by_nexthop - deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read-side critical
 * section will be protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->next_hop == sta)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}

void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata == sdata)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&mesh_paths->entries);
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.  The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within an RCU read-side critical
 * section.
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 sn = 0;

	if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr1;
		mpath = mesh_path_lookup(da, sdata);
		if (mpath)
			sn = ++mpath->sn;
		/* report the unreachable destination (da), not the start of
		 * the raw frame header, as the PERR target */
		mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, da,
				   cpu_to_le32(sn),
				   cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an RCU read-side critical
 * section.
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	/* drain and discard every queued frame; checking MESH_PATH_ACTIVE
	 * after a successful dequeue would leak the skb just removed */
	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

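/*
 * Sketch of the configuration-side caller (in the real tree this is
 * ieee80211_change_mpath() in cfg.c, reached from userspace via
 * nl80211; shown here only for illustration):
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath)
 *		mesh_path_fix_nexthop(mpath, sta);
 *	rcu_read_unlock();
 */
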
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs)
		kfree(mpath);
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mesh_paths)
		return -ENOMEM;
	mesh_paths->free_node = &mesh_path_node_free;
	mesh_paths->copy_node = &mesh_path_node_copy;
	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;

	mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mpp_paths) {
		mesh_table_free(mesh_paths, true);
		return -ENOMEM;
	}
	mpp_paths->free_node = &mesh_path_node_free;
	mpp_paths->copy_node = &mesh_path_node_copy;
	mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;

	return 0;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	read_lock_bh(&pathtbl_resize_lock);
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (!(mpath->flags & MESH_PATH_RESOLVING) &&
		    !(mpath->flags & MESH_PATH_FIXED) &&
		    time_after(jiffies,
			       mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	read_unlock_bh(&pathtbl_resize_lock);
}

void mesh_pathtbl_unregister(void)
{
	mesh_table_free(mesh_paths, true);
	mesh_table_free(mpp_paths, true);
}
797