// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);

static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
	/* Use last four bytes of hw addr as hash index */
	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}

static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	struct mesh_path *mpath = ptr;
	struct mesh_table *tbl = tblptr;

	mesh_path_free_rcu(tbl, mpath);
}

static struct mesh_table *mesh_table_alloc(void)
{
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	INIT_HLIST_HEAD(&newtbl->known_gates);
	INIT_HLIST_HEAD(&newtbl->walk_head);
	atomic_set(&newtbl->entries, 0);
	spin_lock_init(&newtbl->gates_lock);
	spin_lock_init(&newtbl->walk_lock);

	return newtbl;
}

static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
	kfree(tbl);
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
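/*
 * Usage sketch (illustrative, not a caller in this file; assumes an mpath
 * and sta already in scope): per the locking note above, a caller updates
 * the next hop with mpath->state_lock held:
 *
 *	spin_lock_bh(&mpath->state_lock);
 *	mesh_path_assign_nexthop(mpath, sta);
 *	spin_unlock_bh(&mpath->state_lock);
 */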
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);

	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}
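/*
 * Usage sketch (illustrative; assumes a caller with sdata and dst in
 * scope): the lookup functions below return an RCU-protected pointer, so a
 * caller may only dereference it inside an RCU read-side critical section:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		sta = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */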
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
}

static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0;
	struct mesh_path *mpath;

	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (i++ == idx)
			break;
	}

	if (!mpath)
		return NULL;

	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @sdata: local subif
 * @idx: index
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @sdata: local subif
 * @idx: index
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
}

/**
 * mesh_path_add_gate - add the given mpath to our list of known mesh gates
 * @mpath: gate path to add to the table
 *
 * Returns: 0 on success, or -EEXIST if @mpath was already marked as a gate
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
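/*
 * Usage sketch (illustrative): -EEXIST only means the gate had already been
 * recorded, so a caller may treat it as success:
 *
 *	err = mesh_path_add_gate(mpath);
 *	if (err && err != -EEXIST)
 *		mpath_dbg(sdata, "failed to record gate: %d\n", err);
 */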
/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 *
 * Returns: the number of known gates
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	timer_setup(&new_mpath->timer, mesh_path_timer, 0);

	return new_mpath;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @sdata: local subif
 * @dst: destination address of the path (ETH_ALEN length)
 *
 * Returns: the new (or already existing) mesh path on success, or an
 * ERR_PTR() on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return ERR_PTR(-ENOMEM);

	tbl = sdata->u.mesh.mesh_paths;
	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
						  &new_mpath->rhash,
						  mesh_rht_params);
	if (!mpath)
		hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (mpath) {
		/* an entry for dst already existed, or the insert failed */
		kfree(new_mpath);

		if (IS_ERR(mpath))
			return mpath;

		new_mpath = mpath;
	}

	sdata->u.mesh.mesh_paths_generation++;
	return new_mpath;
}

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct mesh_table *tbl;
	struct mesh_path *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return -ENOMEM;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	tbl = sdata->u.mesh.mpp_paths;

	spin_lock_bh(&tbl->walk_lock);
	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
					    &new_mpath->rhash,
					    mesh_rht_params);
	if (!ret)
		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (ret)
		kfree(new_mpath);

	sdata->u.mesh.mpp_paths_generation++;
	return ret;
}
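/*
 * Usage sketch (illustrative): mesh_path_add() reports failure through
 * ERR_PTR() rather than NULL, so a caller checks the result with IS_ERR():
 *
 *	mpath = mesh_path_add(sdata, dst);
 *	if (IS_ERR(mpath))
 *		return PTR_ERR(mpath);
 */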
/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_free_rcu(struct mesh_table *tbl,
			       struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
	mesh_gate_del(tbl, mpath);
	spin_unlock_bh(&mpath->state_lock);
	del_timer_sync(&mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
	mesh_path_flush_pending(mpath);
	kfree_rcu(mpath, rcu);
}

static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	hlist_del_rcu(&mpath->walk_list);
	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
	mesh_path_free_rcu(tbl, mpath);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta)
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (ether_addr_equal(mpath->mpp, proxy))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}
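/*
 * The three flush helpers above share one deletion pattern: take
 * tbl->walk_lock and use the _safe list iterator so that __mesh_path_del()
 * can unlink the entry being visited (sketch, with <match> standing in for
 * the respective filter):
 *
 *	spin_lock_bh(&tbl->walk_lock);
 *	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 *		if (<match>)
 *			__mesh_path_del(tbl, mpath);
 *	}
 *	spin_unlock_bh(&tbl->walk_lock);
 */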
/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * @sdata: interface data to match
 *
 * This function deletes both mesh paths as well as mesh portal paths.
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	table_flush_by_iface(sdata->u.mesh.mesh_paths);
	table_flush_by_iface(sdata->u.mesh.mpp_paths);
}

/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
static int table_path_del(struct mesh_table *tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_path *mpath;

	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
	if (!mpath) {
		spin_unlock_bh(&tbl->walk_lock);
		return -ENXIO;
	}

	__mesh_path_del(tbl, mpath);
	spin_unlock_bh(&tbl->walk_lock);
	return 0;
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
	sdata->u.mesh.mesh_paths_generation++;
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}
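/*
 * Usage sketch (illustrative, not a caller in this file): a caller that
 * gives up on resolving a path might divert the queued frames to the known
 * gates via mesh_path_send_to_gates() below, dropping them only when no
 * active gate accepted them:
 *
 *	if (mesh_path_send_to_gates(mpath))
 *		mesh_path_flush_pending(mpath);
 */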
/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames are
 * copied from each gate to the next.  After frames are copied, the mpath
 * queues are emptied onto the transmission queue.
 *
 * Returns: 0 on success, or -EHOSTUNREACH if no active gate was found
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mesh_path *gate;
	bool copy = false;

	tbl = sdata->u.mesh.mesh_paths;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
			mesh_path_move_to_queue(gate, from_mpath, copy);
			from_mpath = gate;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->dst, gate->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
		mesh_path_tx_pending(gate);
	}
	rcu_read_unlock();

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @sdata: network subif the frame was to be sent through
 * @skb: frame to discard
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must NOT be
 * held when calling.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
	/* init it at a low value - 0 start is tricky */
	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
	mesh_path_tx_pending(mpath);
}

int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc();
	if (!tbl_path)
		return -ENOMEM;

	tbl_mpp = mesh_table_alloc();
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}

	rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
	rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);

	sdata->u.mesh.mesh_paths = tbl_path;
	sdata->u.mesh.mpp_paths = tbl_mpp;

	return 0;

free_path:
	/* the rhashtable in tbl_path has not been initialized yet, so a
	 * plain kfree() is sufficient (and mesh_table_free() would touch
	 * uninitialized memory)
	 */
	kfree(tbl_path);
	return ret;
}

static
void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
			  struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
}

void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_free(sdata->u.mesh.mesh_paths);
	mesh_table_free(sdata->u.mesh.mpp_paths);
}
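/*
 * Lifecycle sketch (illustrative; the actual call sites live in the mesh
 * interface code, not in this file): both tables are allocated at interface
 * setup, aged periodically, and freed at teardown:
 *
 *	ret = mesh_pathtbl_init(sdata);		- allocates both tables
 *	...
 *	mesh_path_expire(sdata);		- periodic housekeeping
 *	...
 *	mesh_pathtbl_unregister(sdata);		- frees both tables
 */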