1 /* 2 * Copyright (c) 2008, 2009 open80211s Ltd. 3 * Author: Luis Carlos Cobo <luisca@cozybit.com> 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License version 2 as 7 * published by the Free Software Foundation. 8 */ 9 10 #include <linux/etherdevice.h> 11 #include <linux/list.h> 12 #include <linux/random.h> 13 #include <linux/slab.h> 14 #include <linux/spinlock.h> 15 #include <linux/string.h> 16 #include <net/mac80211.h> 17 #include "wme.h" 18 #include "ieee80211_i.h" 19 #include "mesh.h" 20 21 #ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG 22 #define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args) 23 #else 24 #define mpath_dbg(fmt, args...) do { (void)(0); } while (0) 25 #endif 26 27 /* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */ 28 #define INIT_PATHS_SIZE_ORDER 2 29 30 /* Keep the mean chain length below this constant */ 31 #define MEAN_CHAIN_LEN 2 32 33 #define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \ 34 time_after(jiffies, mpath->exp_time) && \ 35 !(mpath->flags & MESH_PATH_FIXED)) 36 37 struct mpath_node { 38 struct hlist_node list; 39 struct rcu_head rcu; 40 /* This indirection allows two different tables to point to the same 41 * mesh_path structure, useful when resizing 42 */ 43 struct mesh_path *mpath; 44 }; 45 46 static struct mesh_table __rcu *mesh_paths; 47 static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */ 48 49 int mesh_paths_generation; 50 51 /* This lock will have the grow table function as writer and add / delete nodes 52 * as readers. RCU provides sufficient protection only when reading the table 53 * (i.e. doing lookups). Adding or adding or removing nodes requires we take 54 * the read lock or we risk operating on an old table. The write lock is only 55 * needed when modifying the number of buckets a table. 56 */ 57 static DEFINE_RWLOCK(pathtbl_resize_lock); 58 59 60 static inline struct mesh_table *resize_dereference_mesh_paths(void) 61 { 62 return rcu_dereference_protected(mesh_paths, 63 lockdep_is_held(&pathtbl_resize_lock)); 64 } 65 66 static inline struct mesh_table *resize_dereference_mpp_paths(void) 67 { 68 return rcu_dereference_protected(mpp_paths, 69 lockdep_is_held(&pathtbl_resize_lock)); 70 } 71 72 static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath); 73 74 /* 75 * CAREFUL -- "tbl" must not be an expression, 76 * in particular not an rcu_dereference(), since 77 * it's used twice. So it is illegal to do 78 * for_each_mesh_entry(rcu_dereference(...), ...) 79 */ 80 #define for_each_mesh_entry(tbl, p, node, i) \ 81 for (i = 0; i <= tbl->hash_mask; i++) \ 82 hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list) 83 84 85 static struct mesh_table *mesh_table_alloc(int size_order) 86 { 87 int i; 88 struct mesh_table *newtbl; 89 90 newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC); 91 if (!newtbl) 92 return NULL; 93 94 newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) * 95 (1 << size_order), GFP_ATOMIC); 96 97 if (!newtbl->hash_buckets) { 98 kfree(newtbl); 99 return NULL; 100 } 101 102 newtbl->hashwlock = kmalloc(sizeof(spinlock_t) * 103 (1 << size_order), GFP_ATOMIC); 104 if (!newtbl->hashwlock) { 105 kfree(newtbl->hash_buckets); 106 kfree(newtbl); 107 return NULL; 108 } 109 110 newtbl->size_order = size_order; 111 newtbl->hash_mask = (1 << size_order) - 1; 112 atomic_set(&newtbl->entries, 0); 113 get_random_bytes(&newtbl->hash_rnd, 114 sizeof(newtbl->hash_rnd)); 115 for (i = 0; i <= newtbl->hash_mask; i++) 116 spin_lock_init(&newtbl->hashwlock[i]); 117 spin_lock_init(&newtbl->gates_lock); 118 119 return newtbl; 120 } 121 122 static void __mesh_table_free(struct mesh_table *tbl) 123 { 124 kfree(tbl->hash_buckets); 125 kfree(tbl->hashwlock); 126 kfree(tbl); 127 } 128 129 static void mesh_table_free(struct mesh_table *tbl, bool free_leafs) 130 { 131 struct hlist_head *mesh_hash; 132 struct hlist_node *p, *q; 133 struct mpath_node *gate; 134 int i; 135 136 mesh_hash = tbl->hash_buckets; 137 for (i = 0; i <= tbl->hash_mask; i++) { 138 spin_lock_bh(&tbl->hashwlock[i]); 139 hlist_for_each_safe(p, q, &mesh_hash[i]) { 140 tbl->free_node(p, free_leafs); 141 atomic_dec(&tbl->entries); 142 } 143 spin_unlock_bh(&tbl->hashwlock[i]); 144 } 145 if (free_leafs) { 146 spin_lock_bh(&tbl->gates_lock); 147 hlist_for_each_entry_safe(gate, p, q, 148 tbl->known_gates, list) { 149 hlist_del(&gate->list); 150 kfree(gate); 151 } 152 kfree(tbl->known_gates); 153 spin_unlock_bh(&tbl->gates_lock); 154 } 155 156 __mesh_table_free(tbl); 157 } 158 159 static int mesh_table_grow(struct mesh_table *oldtbl, 160 struct mesh_table *newtbl) 161 { 162 struct hlist_head *oldhash; 163 struct hlist_node *p, *q; 164 int i; 165 166 if (atomic_read(&oldtbl->entries) 167 < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1)) 168 return -EAGAIN; 169 170 newtbl->free_node = oldtbl->free_node; 171 newtbl->mean_chain_len = oldtbl->mean_chain_len; 172 newtbl->copy_node = oldtbl->copy_node; 173 newtbl->known_gates = oldtbl->known_gates; 174 atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries)); 175 176 oldhash = oldtbl->hash_buckets; 177 for (i = 0; i <= oldtbl->hash_mask; i++) 178 hlist_for_each(p, &oldhash[i]) 179 if (oldtbl->copy_node(p, newtbl) < 0) 180 goto errcopy; 181 182 return 0; 183 184 errcopy: 185 for (i = 0; i <= newtbl->hash_mask; i++) { 186 hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) 187 oldtbl->free_node(p, 0); 188 } 189 return -ENOMEM; 190 } 191 192 static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, 193 struct mesh_table *tbl) 194 { 195 /* Use last four bytes of hw addr and interface index as hash index */ 196 return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd) 197 & tbl->hash_mask; 198 } 199 200 201 /** 202 * 203 * mesh_path_assign_nexthop - update mesh path next hop 204 * 205 * @mpath: mesh path to update 206 * @sta: next hop to assign 207 * 208 * Locking: mpath->state_lock must be held when calling this function 209 */ 210 void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) 211 { 212 struct sk_buff *skb; 213 struct ieee80211_hdr *hdr; 214 struct sk_buff_head tmpq; 215 unsigned long flags; 216 struct ieee80211_sub_if_data *sdata = mpath->sdata; 217 218 rcu_assign_pointer(mpath->next_hop, sta); 219 220 __skb_queue_head_init(&tmpq); 221 222 spin_lock_irqsave(&mpath->frame_queue.lock, flags); 223 224 while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) { 225 hdr = (struct ieee80211_hdr *) skb->data; 226 memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); 227 skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb)); 228 ieee80211_set_qos_hdr(sdata, skb); 229 __skb_queue_tail(&tmpq, skb); 230 } 231 232 skb_queue_splice(&tmpq, &mpath->frame_queue); 233 spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); 234 } 235 236 static void prepare_for_gate(struct sk_buff *skb, char *dst_addr, 237 struct mesh_path *gate_mpath) 238 { 239 struct ieee80211_hdr *hdr; 240 struct ieee80211s_hdr *mshdr; 241 int mesh_hdrlen, hdrlen; 242 char *next_hop; 243 244 hdr = (struct ieee80211_hdr *) skb->data; 245 hdrlen = ieee80211_hdrlen(hdr->frame_control); 246 mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 247 248 if (!(mshdr->flags & MESH_FLAGS_AE)) { 249 /* size of the fixed part of the mesh header */ 250 mesh_hdrlen = 6; 251 252 /* make room for the two extended addresses */ 253 skb_push(skb, 2 * ETH_ALEN); 254 memmove(skb->data, hdr, hdrlen + mesh_hdrlen); 255 256 hdr = (struct ieee80211_hdr *) skb->data; 257 258 /* we preserve the previous mesh header and only add 259 * the new addreses */ 260 mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 261 mshdr->flags = MESH_FLAGS_AE_A5_A6; 262 memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN); 263 memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN); 264 } 265 266 /* update next hop */ 267 hdr = (struct ieee80211_hdr *) skb->data; 268 rcu_read_lock(); 269 next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr; 270 memcpy(hdr->addr1, next_hop, ETH_ALEN); 271 rcu_read_unlock(); 272 memcpy(hdr->addr3, dst_addr, ETH_ALEN); 273 } 274 275 /** 276 * 277 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another 278 * 279 * This function is used to transfer or copy frames from an unresolved mpath to 280 * a gate mpath. The function also adds the Address Extension field and 281 * updates the next hop. 282 * 283 * If a frame already has an Address Extension field, only the next hop and 284 * destination addresses are updated. 285 * 286 * The gate mpath must be an active mpath with a valid mpath->next_hop. 287 * 288 * @mpath: An active mpath the frames will be sent to (i.e. the gate) 289 * @from_mpath: The failed mpath 290 * @copy: When true, copy all the frames to the new mpath queue. When false, 291 * move them. 292 */ 293 static void mesh_path_move_to_queue(struct mesh_path *gate_mpath, 294 struct mesh_path *from_mpath, 295 bool copy) 296 { 297 struct sk_buff *skb, *cp_skb = NULL; 298 struct sk_buff_head gateq, failq; 299 unsigned long flags; 300 int num_skbs; 301 302 BUG_ON(gate_mpath == from_mpath); 303 BUG_ON(!gate_mpath->next_hop); 304 305 __skb_queue_head_init(&gateq); 306 __skb_queue_head_init(&failq); 307 308 spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); 309 skb_queue_splice_init(&from_mpath->frame_queue, &failq); 310 spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); 311 312 num_skbs = skb_queue_len(&failq); 313 314 while (num_skbs--) { 315 skb = __skb_dequeue(&failq); 316 if (copy) { 317 cp_skb = skb_copy(skb, GFP_ATOMIC); 318 if (cp_skb) 319 __skb_queue_tail(&failq, cp_skb); 320 } 321 322 prepare_for_gate(skb, gate_mpath->dst, gate_mpath); 323 __skb_queue_tail(&gateq, skb); 324 } 325 326 spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags); 327 skb_queue_splice(&gateq, &gate_mpath->frame_queue); 328 mpath_dbg("Mpath queue for gate %pM has %d frames\n", 329 gate_mpath->dst, 330 skb_queue_len(&gate_mpath->frame_queue)); 331 spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags); 332 333 if (!copy) 334 return; 335 336 spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); 337 skb_queue_splice(&failq, &from_mpath->frame_queue); 338 spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); 339 } 340 341 342 static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst, 343 struct ieee80211_sub_if_data *sdata) 344 { 345 struct mesh_path *mpath; 346 struct hlist_node *n; 347 struct hlist_head *bucket; 348 struct mpath_node *node; 349 350 bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; 351 hlist_for_each_entry_rcu(node, n, bucket, list) { 352 mpath = node->mpath; 353 if (mpath->sdata == sdata && 354 memcmp(dst, mpath->dst, ETH_ALEN) == 0) { 355 if (MPATH_EXPIRED(mpath)) { 356 spin_lock_bh(&mpath->state_lock); 357 mpath->flags &= ~MESH_PATH_ACTIVE; 358 spin_unlock_bh(&mpath->state_lock); 359 } 360 return mpath; 361 } 362 } 363 return NULL; 364 } 365 366 /** 367 * mesh_path_lookup - look up a path in the mesh path table 368 * @dst: hardware address (ETH_ALEN length) of destination 369 * @sdata: local subif 370 * 371 * Returns: pointer to the mesh path structure, or NULL if not found 372 * 373 * Locking: must be called within a read rcu section. 374 */ 375 struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) 376 { 377 return path_lookup(rcu_dereference(mesh_paths), dst, sdata); 378 } 379 380 struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) 381 { 382 return path_lookup(rcu_dereference(mpp_paths), dst, sdata); 383 } 384 385 386 /** 387 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index 388 * @idx: index 389 * @sdata: local subif, or NULL for all entries 390 * 391 * Returns: pointer to the mesh path structure, or NULL if not found. 392 * 393 * Locking: must be called within a read rcu section. 394 */ 395 struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata) 396 { 397 struct mesh_table *tbl = rcu_dereference(mesh_paths); 398 struct mpath_node *node; 399 struct hlist_node *p; 400 int i; 401 int j = 0; 402 403 for_each_mesh_entry(tbl, p, node, i) { 404 if (sdata && node->mpath->sdata != sdata) 405 continue; 406 if (j++ == idx) { 407 if (MPATH_EXPIRED(node->mpath)) { 408 spin_lock_bh(&node->mpath->state_lock); 409 node->mpath->flags &= ~MESH_PATH_ACTIVE; 410 spin_unlock_bh(&node->mpath->state_lock); 411 } 412 return node->mpath; 413 } 414 } 415 416 return NULL; 417 } 418 419 static void mesh_gate_node_reclaim(struct rcu_head *rp) 420 { 421 struct mpath_node *node = container_of(rp, struct mpath_node, rcu); 422 kfree(node); 423 } 424 425 /** 426 * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates 427 * @mesh_tbl: table which contains known_gates list 428 * @mpath: mpath to known mesh gate 429 * 430 * Returns: 0 on success 431 * 432 */ 433 static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath) 434 { 435 struct mpath_node *gate, *new_gate; 436 struct hlist_node *n; 437 int err; 438 439 rcu_read_lock(); 440 tbl = rcu_dereference(tbl); 441 442 hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list) 443 if (gate->mpath == mpath) { 444 err = -EEXIST; 445 goto err_rcu; 446 } 447 448 new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC); 449 if (!new_gate) { 450 err = -ENOMEM; 451 goto err_rcu; 452 } 453 454 mpath->is_gate = true; 455 mpath->sdata->u.mesh.num_gates++; 456 new_gate->mpath = mpath; 457 spin_lock_bh(&tbl->gates_lock); 458 hlist_add_head_rcu(&new_gate->list, tbl->known_gates); 459 spin_unlock_bh(&tbl->gates_lock); 460 rcu_read_unlock(); 461 mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n", 462 mpath->sdata->name, mpath->dst, 463 mpath->sdata->u.mesh.num_gates); 464 return 0; 465 err_rcu: 466 rcu_read_unlock(); 467 return err; 468 } 469 470 /** 471 * mesh_gate_del - remove a mesh gate from the list of known gates 472 * @tbl: table which holds our list of known gates 473 * @mpath: gate mpath 474 * 475 * Returns: 0 on success 476 * 477 * Locking: must be called inside rcu_read_lock() section 478 */ 479 static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) 480 { 481 struct mpath_node *gate; 482 struct hlist_node *p, *q; 483 484 tbl = rcu_dereference(tbl); 485 486 hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) 487 if (gate->mpath == mpath) { 488 spin_lock_bh(&tbl->gates_lock); 489 hlist_del_rcu(&gate->list); 490 call_rcu(&gate->rcu, mesh_gate_node_reclaim); 491 spin_unlock_bh(&tbl->gates_lock); 492 mpath->sdata->u.mesh.num_gates--; 493 mpath->is_gate = false; 494 mpath_dbg("Mesh path (%s): Deleted gate: %pM. " 495 "%d known gates\n", mpath->sdata->name, 496 mpath->dst, mpath->sdata->u.mesh.num_gates); 497 break; 498 } 499 500 return 0; 501 } 502 503 /** 504 * 505 * mesh_path_add_gate - add the given mpath to a mesh gate to our path table 506 * @mpath: gate path to add to table 507 */ 508 int mesh_path_add_gate(struct mesh_path *mpath) 509 { 510 return mesh_gate_add(mesh_paths, mpath); 511 } 512 513 /** 514 * mesh_gate_num - number of gates known to this interface 515 * @sdata: subif data 516 */ 517 int mesh_gate_num(struct ieee80211_sub_if_data *sdata) 518 { 519 return sdata->u.mesh.num_gates; 520 } 521 522 /** 523 * mesh_path_add - allocate and add a new path to the mesh path table 524 * @addr: destination address of the path (ETH_ALEN length) 525 * @sdata: local subif 526 * 527 * Returns: 0 on success 528 * 529 * State: the initial state of the new path is set to 0 530 */ 531 int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) 532 { 533 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 534 struct ieee80211_local *local = sdata->local; 535 struct mesh_table *tbl; 536 struct mesh_path *mpath, *new_mpath; 537 struct mpath_node *node, *new_node; 538 struct hlist_head *bucket; 539 struct hlist_node *n; 540 int grow = 0; 541 int err = 0; 542 u32 hash_idx; 543 544 if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0) 545 /* never add ourselves as neighbours */ 546 return -ENOTSUPP; 547 548 if (is_multicast_ether_addr(dst)) 549 return -ENOTSUPP; 550 551 if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0) 552 return -ENOSPC; 553 554 err = -ENOMEM; 555 new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC); 556 if (!new_mpath) 557 goto err_path_alloc; 558 559 new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); 560 if (!new_node) 561 goto err_node_alloc; 562 563 read_lock_bh(&pathtbl_resize_lock); 564 memcpy(new_mpath->dst, dst, ETH_ALEN); 565 new_mpath->sdata = sdata; 566 new_mpath->flags = 0; 567 skb_queue_head_init(&new_mpath->frame_queue); 568 new_node->mpath = new_mpath; 569 new_mpath->timer.data = (unsigned long) new_mpath; 570 new_mpath->timer.function = mesh_path_timer; 571 new_mpath->exp_time = jiffies; 572 spin_lock_init(&new_mpath->state_lock); 573 init_timer(&new_mpath->timer); 574 575 tbl = resize_dereference_mesh_paths(); 576 577 hash_idx = mesh_table_hash(dst, sdata, tbl); 578 bucket = &tbl->hash_buckets[hash_idx]; 579 580 spin_lock_bh(&tbl->hashwlock[hash_idx]); 581 582 err = -EEXIST; 583 hlist_for_each_entry(node, n, bucket, list) { 584 mpath = node->mpath; 585 if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) 586 goto err_exists; 587 } 588 589 hlist_add_head_rcu(&new_node->list, bucket); 590 if (atomic_inc_return(&tbl->entries) >= 591 tbl->mean_chain_len * (tbl->hash_mask + 1)) 592 grow = 1; 593 594 mesh_paths_generation++; 595 596 spin_unlock_bh(&tbl->hashwlock[hash_idx]); 597 read_unlock_bh(&pathtbl_resize_lock); 598 if (grow) { 599 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); 600 ieee80211_queue_work(&local->hw, &sdata->work); 601 } 602 return 0; 603 604 err_exists: 605 spin_unlock_bh(&tbl->hashwlock[hash_idx]); 606 read_unlock_bh(&pathtbl_resize_lock); 607 kfree(new_node); 608 err_node_alloc: 609 kfree(new_mpath); 610 err_path_alloc: 611 atomic_dec(&sdata->u.mesh.mpaths); 612 return err; 613 } 614 615 static void mesh_table_free_rcu(struct rcu_head *rcu) 616 { 617 struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head); 618 619 mesh_table_free(tbl, false); 620 } 621 622 void mesh_mpath_table_grow(void) 623 { 624 struct mesh_table *oldtbl, *newtbl; 625 626 write_lock_bh(&pathtbl_resize_lock); 627 oldtbl = resize_dereference_mesh_paths(); 628 newtbl = mesh_table_alloc(oldtbl->size_order + 1); 629 if (!newtbl) 630 goto out; 631 if (mesh_table_grow(oldtbl, newtbl) < 0) { 632 __mesh_table_free(newtbl); 633 goto out; 634 } 635 rcu_assign_pointer(mesh_paths, newtbl); 636 637 call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); 638 639 out: 640 write_unlock_bh(&pathtbl_resize_lock); 641 } 642 643 void mesh_mpp_table_grow(void) 644 { 645 struct mesh_table *oldtbl, *newtbl; 646 647 write_lock_bh(&pathtbl_resize_lock); 648 oldtbl = resize_dereference_mpp_paths(); 649 newtbl = mesh_table_alloc(oldtbl->size_order + 1); 650 if (!newtbl) 651 goto out; 652 if (mesh_table_grow(oldtbl, newtbl) < 0) { 653 __mesh_table_free(newtbl); 654 goto out; 655 } 656 rcu_assign_pointer(mpp_paths, newtbl); 657 call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); 658 659 out: 660 write_unlock_bh(&pathtbl_resize_lock); 661 } 662 663 int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) 664 { 665 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 666 struct ieee80211_local *local = sdata->local; 667 struct mesh_table *tbl; 668 struct mesh_path *mpath, *new_mpath; 669 struct mpath_node *node, *new_node; 670 struct hlist_head *bucket; 671 struct hlist_node *n; 672 int grow = 0; 673 int err = 0; 674 u32 hash_idx; 675 676 if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0) 677 /* never add ourselves as neighbours */ 678 return -ENOTSUPP; 679 680 if (is_multicast_ether_addr(dst)) 681 return -ENOTSUPP; 682 683 err = -ENOMEM; 684 new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC); 685 if (!new_mpath) 686 goto err_path_alloc; 687 688 new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); 689 if (!new_node) 690 goto err_node_alloc; 691 692 read_lock_bh(&pathtbl_resize_lock); 693 memcpy(new_mpath->dst, dst, ETH_ALEN); 694 memcpy(new_mpath->mpp, mpp, ETH_ALEN); 695 new_mpath->sdata = sdata; 696 new_mpath->flags = 0; 697 skb_queue_head_init(&new_mpath->frame_queue); 698 new_node->mpath = new_mpath; 699 init_timer(&new_mpath->timer); 700 new_mpath->exp_time = jiffies; 701 spin_lock_init(&new_mpath->state_lock); 702 703 tbl = resize_dereference_mpp_paths(); 704 705 hash_idx = mesh_table_hash(dst, sdata, tbl); 706 bucket = &tbl->hash_buckets[hash_idx]; 707 708 spin_lock_bh(&tbl->hashwlock[hash_idx]); 709 710 err = -EEXIST; 711 hlist_for_each_entry(node, n, bucket, list) { 712 mpath = node->mpath; 713 if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) 714 goto err_exists; 715 } 716 717 hlist_add_head_rcu(&new_node->list, bucket); 718 if (atomic_inc_return(&tbl->entries) >= 719 tbl->mean_chain_len * (tbl->hash_mask + 1)) 720 grow = 1; 721 722 spin_unlock_bh(&tbl->hashwlock[hash_idx]); 723 read_unlock_bh(&pathtbl_resize_lock); 724 if (grow) { 725 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); 726 ieee80211_queue_work(&local->hw, &sdata->work); 727 } 728 return 0; 729 730 err_exists: 731 spin_unlock_bh(&tbl->hashwlock[hash_idx]); 732 read_unlock_bh(&pathtbl_resize_lock); 733 kfree(new_node); 734 err_node_alloc: 735 kfree(new_mpath); 736 err_path_alloc: 737 return err; 738 } 739 740 741 /** 742 * mesh_plink_broken - deactivates paths and sends perr when a link breaks 743 * 744 * @sta: broken peer link 745 * 746 * This function must be called from the rate control algorithm if enough 747 * delivery errors suggest that a peer link is no longer usable. 748 */ 749 void mesh_plink_broken(struct sta_info *sta) 750 { 751 struct mesh_table *tbl; 752 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 753 struct mesh_path *mpath; 754 struct mpath_node *node; 755 struct hlist_node *p; 756 struct ieee80211_sub_if_data *sdata = sta->sdata; 757 int i; 758 __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE); 759 760 rcu_read_lock(); 761 tbl = rcu_dereference(mesh_paths); 762 for_each_mesh_entry(tbl, p, node, i) { 763 mpath = node->mpath; 764 if (rcu_dereference(mpath->next_hop) == sta && 765 mpath->flags & MESH_PATH_ACTIVE && 766 !(mpath->flags & MESH_PATH_FIXED)) { 767 spin_lock_bh(&mpath->state_lock); 768 mpath->flags &= ~MESH_PATH_ACTIVE; 769 ++mpath->sn; 770 spin_unlock_bh(&mpath->state_lock); 771 mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, 772 mpath->dst, cpu_to_le32(mpath->sn), 773 reason, bcast, sdata); 774 } 775 } 776 rcu_read_unlock(); 777 } 778 779 static void mesh_path_node_reclaim(struct rcu_head *rp) 780 { 781 struct mpath_node *node = container_of(rp, struct mpath_node, rcu); 782 struct ieee80211_sub_if_data *sdata = node->mpath->sdata; 783 784 del_timer_sync(&node->mpath->timer); 785 atomic_dec(&sdata->u.mesh.mpaths); 786 kfree(node->mpath); 787 kfree(node); 788 } 789 790 /* needs to be called with the corresponding hashwlock taken */ 791 static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) 792 { 793 struct mesh_path *mpath; 794 mpath = node->mpath; 795 spin_lock(&mpath->state_lock); 796 mpath->flags |= MESH_PATH_RESOLVING; 797 if (mpath->is_gate) 798 mesh_gate_del(tbl, mpath); 799 hlist_del_rcu(&node->list); 800 call_rcu(&node->rcu, mesh_path_node_reclaim); 801 spin_unlock(&mpath->state_lock); 802 atomic_dec(&tbl->entries); 803 } 804 805 /** 806 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches 807 * 808 * @sta - mesh peer to match 809 * 810 * RCU notes: this function is called when a mesh plink transitions from 811 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that 812 * allows path creation. This will happen before the sta can be freed (because 813 * sta_info_destroy() calls this) so any reader in a rcu read block will be 814 * protected against the plink disappearing. 815 */ 816 void mesh_path_flush_by_nexthop(struct sta_info *sta) 817 { 818 struct mesh_table *tbl; 819 struct mesh_path *mpath; 820 struct mpath_node *node; 821 struct hlist_node *p; 822 int i; 823 824 rcu_read_lock(); 825 read_lock_bh(&pathtbl_resize_lock); 826 tbl = resize_dereference_mesh_paths(); 827 for_each_mesh_entry(tbl, p, node, i) { 828 mpath = node->mpath; 829 if (rcu_dereference(mpath->next_hop) == sta) { 830 spin_lock_bh(&tbl->hashwlock[i]); 831 __mesh_path_del(tbl, node); 832 spin_unlock_bh(&tbl->hashwlock[i]); 833 } 834 } 835 read_unlock_bh(&pathtbl_resize_lock); 836 rcu_read_unlock(); 837 } 838 839 static void table_flush_by_iface(struct mesh_table *tbl, 840 struct ieee80211_sub_if_data *sdata) 841 { 842 struct mesh_path *mpath; 843 struct mpath_node *node; 844 struct hlist_node *p; 845 int i; 846 847 WARN_ON(!rcu_read_lock_held()); 848 for_each_mesh_entry(tbl, p, node, i) { 849 mpath = node->mpath; 850 if (mpath->sdata != sdata) 851 continue; 852 spin_lock_bh(&tbl->hashwlock[i]); 853 __mesh_path_del(tbl, node); 854 spin_unlock_bh(&tbl->hashwlock[i]); 855 } 856 } 857 858 /** 859 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface 860 * 861 * This function deletes both mesh paths as well as mesh portal paths. 862 * 863 * @sdata - interface data to match 864 * 865 */ 866 void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) 867 { 868 struct mesh_table *tbl; 869 870 rcu_read_lock(); 871 read_lock_bh(&pathtbl_resize_lock); 872 tbl = resize_dereference_mesh_paths(); 873 table_flush_by_iface(tbl, sdata); 874 tbl = resize_dereference_mpp_paths(); 875 table_flush_by_iface(tbl, sdata); 876 read_unlock_bh(&pathtbl_resize_lock); 877 rcu_read_unlock(); 878 } 879 880 /** 881 * mesh_path_del - delete a mesh path from the table 882 * 883 * @addr: dst address (ETH_ALEN length) 884 * @sdata: local subif 885 * 886 * Returns: 0 if successful 887 */ 888 int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) 889 { 890 struct mesh_table *tbl; 891 struct mesh_path *mpath; 892 struct mpath_node *node; 893 struct hlist_head *bucket; 894 struct hlist_node *n; 895 int hash_idx; 896 int err = 0; 897 898 read_lock_bh(&pathtbl_resize_lock); 899 tbl = resize_dereference_mesh_paths(); 900 hash_idx = mesh_table_hash(addr, sdata, tbl); 901 bucket = &tbl->hash_buckets[hash_idx]; 902 903 spin_lock_bh(&tbl->hashwlock[hash_idx]); 904 hlist_for_each_entry(node, n, bucket, list) { 905 mpath = node->mpath; 906 if (mpath->sdata == sdata && 907 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 908 __mesh_path_del(tbl, node); 909 goto enddel; 910 } 911 } 912 913 err = -ENXIO; 914 enddel: 915 mesh_paths_generation++; 916 spin_unlock_bh(&tbl->hashwlock[hash_idx]); 917 read_unlock_bh(&pathtbl_resize_lock); 918 return err; 919 } 920 921 /** 922 * mesh_path_tx_pending - sends pending frames in a mesh path queue 923 * 924 * @mpath: mesh path to activate 925 * 926 * Locking: the state_lock of the mpath structure must NOT be held when calling 927 * this function. 928 */ 929 void mesh_path_tx_pending(struct mesh_path *mpath) 930 { 931 if (mpath->flags & MESH_PATH_ACTIVE) 932 ieee80211_add_pending_skbs(mpath->sdata->local, 933 &mpath->frame_queue); 934 } 935 936 /** 937 * mesh_path_send_to_gates - sends pending frames to all known mesh gates 938 * 939 * @mpath: mesh path whose queue will be emptied 940 * 941 * If there is only one gate, the frames are transferred from the failed mpath 942 * queue to that gate's queue. If there are more than one gates, the frames 943 * are copied from each gate to the next. After frames are copied, the 944 * mpath queues are emptied onto the transmission queue. 945 */ 946 int mesh_path_send_to_gates(struct mesh_path *mpath) 947 { 948 struct ieee80211_sub_if_data *sdata = mpath->sdata; 949 struct hlist_node *n; 950 struct mesh_table *tbl; 951 struct mesh_path *from_mpath = mpath; 952 struct mpath_node *gate = NULL; 953 bool copy = false; 954 struct hlist_head *known_gates; 955 956 rcu_read_lock(); 957 tbl = rcu_dereference(mesh_paths); 958 known_gates = tbl->known_gates; 959 rcu_read_unlock(); 960 961 if (!known_gates) 962 return -EHOSTUNREACH; 963 964 hlist_for_each_entry_rcu(gate, n, known_gates, list) { 965 if (gate->mpath->sdata != sdata) 966 continue; 967 968 if (gate->mpath->flags & MESH_PATH_ACTIVE) { 969 mpath_dbg("Forwarding to %pM\n", gate->mpath->dst); 970 mesh_path_move_to_queue(gate->mpath, from_mpath, copy); 971 from_mpath = gate->mpath; 972 copy = true; 973 } else { 974 mpath_dbg("Not forwarding %p\n", gate->mpath); 975 mpath_dbg("flags %x\n", gate->mpath->flags); 976 } 977 } 978 979 hlist_for_each_entry_rcu(gate, n, known_gates, list) 980 if (gate->mpath->sdata == sdata) { 981 mpath_dbg("Sending to %pM\n", gate->mpath->dst); 982 mesh_path_tx_pending(gate->mpath); 983 } 984 985 return (from_mpath == mpath) ? -EHOSTUNREACH : 0; 986 } 987 988 /** 989 * mesh_path_discard_frame - discard a frame whose path could not be resolved 990 * 991 * @skb: frame to discard 992 * @sdata: network subif the frame was to be sent through 993 * 994 * If the frame was being forwarded from another MP, a PERR frame will be sent 995 * to the precursor. The precursor's address (i.e. the previous hop) was saved 996 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once 997 * the destination is successfully resolved. 998 * 999 * Locking: the function must me called within a rcu_read_lock region 1000 */ 1001 void mesh_path_discard_frame(struct sk_buff *skb, 1002 struct ieee80211_sub_if_data *sdata) 1003 { 1004 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1005 struct mesh_path *mpath; 1006 u32 sn = 0; 1007 __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD); 1008 1009 if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) { 1010 u8 *ra, *da; 1011 1012 da = hdr->addr3; 1013 ra = hdr->addr1; 1014 rcu_read_lock(); 1015 mpath = mesh_path_lookup(da, sdata); 1016 if (mpath) { 1017 spin_lock_bh(&mpath->state_lock); 1018 sn = ++mpath->sn; 1019 spin_unlock_bh(&mpath->state_lock); 1020 } 1021 rcu_read_unlock(); 1022 mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data, 1023 cpu_to_le32(sn), reason, ra, sdata); 1024 } 1025 1026 kfree_skb(skb); 1027 sdata->u.mesh.mshstats.dropped_frames_no_route++; 1028 } 1029 1030 /** 1031 * mesh_path_flush_pending - free the pending queue of a mesh path 1032 * 1033 * @mpath: mesh path whose queue has to be freed 1034 * 1035 * Locking: the function must me called within a rcu_read_lock region 1036 */ 1037 void mesh_path_flush_pending(struct mesh_path *mpath) 1038 { 1039 struct sk_buff *skb; 1040 1041 while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL) 1042 mesh_path_discard_frame(skb, mpath->sdata); 1043 } 1044 1045 /** 1046 * mesh_path_fix_nexthop - force a specific next hop for a mesh path 1047 * 1048 * @mpath: the mesh path to modify 1049 * @next_hop: the next hop to force 1050 * 1051 * Locking: this function must be called holding mpath->state_lock 1052 */ 1053 void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop) 1054 { 1055 spin_lock_bh(&mpath->state_lock); 1056 mesh_path_assign_nexthop(mpath, next_hop); 1057 mpath->sn = 0xffff; 1058 mpath->metric = 0; 1059 mpath->hop_count = 0; 1060 mpath->exp_time = 0; 1061 mpath->flags |= MESH_PATH_FIXED; 1062 mesh_path_activate(mpath); 1063 spin_unlock_bh(&mpath->state_lock); 1064 mesh_path_tx_pending(mpath); 1065 } 1066 1067 static void mesh_path_node_free(struct hlist_node *p, bool free_leafs) 1068 { 1069 struct mesh_path *mpath; 1070 struct mpath_node *node = hlist_entry(p, struct mpath_node, list); 1071 mpath = node->mpath; 1072 hlist_del_rcu(p); 1073 if (free_leafs) { 1074 del_timer_sync(&mpath->timer); 1075 kfree(mpath); 1076 } 1077 kfree(node); 1078 } 1079 1080 static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) 1081 { 1082 struct mesh_path *mpath; 1083 struct mpath_node *node, *new_node; 1084 u32 hash_idx; 1085 1086 new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); 1087 if (new_node == NULL) 1088 return -ENOMEM; 1089 1090 node = hlist_entry(p, struct mpath_node, list); 1091 mpath = node->mpath; 1092 new_node->mpath = mpath; 1093 hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl); 1094 hlist_add_head(&new_node->list, 1095 &newtbl->hash_buckets[hash_idx]); 1096 return 0; 1097 } 1098 1099 int mesh_pathtbl_init(void) 1100 { 1101 struct mesh_table *tbl_path, *tbl_mpp; 1102 int ret; 1103 1104 tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 1105 if (!tbl_path) 1106 return -ENOMEM; 1107 tbl_path->free_node = &mesh_path_node_free; 1108 tbl_path->copy_node = &mesh_path_node_copy; 1109 tbl_path->mean_chain_len = MEAN_CHAIN_LEN; 1110 tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); 1111 if (!tbl_path->known_gates) { 1112 ret = -ENOMEM; 1113 goto free_path; 1114 } 1115 INIT_HLIST_HEAD(tbl_path->known_gates); 1116 1117 1118 tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 1119 if (!tbl_mpp) { 1120 ret = -ENOMEM; 1121 goto free_path; 1122 } 1123 tbl_mpp->free_node = &mesh_path_node_free; 1124 tbl_mpp->copy_node = &mesh_path_node_copy; 1125 tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN; 1126 tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); 1127 if (!tbl_mpp->known_gates) { 1128 ret = -ENOMEM; 1129 goto free_mpp; 1130 } 1131 INIT_HLIST_HEAD(tbl_mpp->known_gates); 1132 1133 /* Need no locking since this is during init */ 1134 RCU_INIT_POINTER(mesh_paths, tbl_path); 1135 RCU_INIT_POINTER(mpp_paths, tbl_mpp); 1136 1137 return 0; 1138 1139 free_mpp: 1140 mesh_table_free(tbl_mpp, true); 1141 free_path: 1142 mesh_table_free(tbl_path, true); 1143 return ret; 1144 } 1145 1146 void mesh_path_expire(struct ieee80211_sub_if_data *sdata) 1147 { 1148 struct mesh_table *tbl; 1149 struct mesh_path *mpath; 1150 struct mpath_node *node; 1151 struct hlist_node *p; 1152 int i; 1153 1154 rcu_read_lock(); 1155 tbl = rcu_dereference(mesh_paths); 1156 for_each_mesh_entry(tbl, p, node, i) { 1157 if (node->mpath->sdata != sdata) 1158 continue; 1159 mpath = node->mpath; 1160 if ((!(mpath->flags & MESH_PATH_RESOLVING)) && 1161 (!(mpath->flags & MESH_PATH_FIXED)) && 1162 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) 1163 mesh_path_del(mpath->dst, mpath->sdata); 1164 } 1165 rcu_read_unlock(); 1166 } 1167 1168 void mesh_pathtbl_unregister(void) 1169 { 1170 /* no need for locking during exit path */ 1171 mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true); 1172 mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true); 1173 } 1174