// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
#include <linux/rhashtable.h>

static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);

static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
	/* Use last four bytes of hw addr as hash index */
	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}

static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};

static const struct rhashtable_params fast_tx_rht_params = {
	.nelem_hint = 10,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct ieee80211_mesh_fast_tx, addr_key),
	.head_offset = offsetof(struct ieee80211_mesh_fast_tx, rhash),
	.hashfn = mesh_table_hash,
};

static void __mesh_fast_tx_entry_free(void *ptr, void *tblptr)
{
	struct ieee80211_mesh_fast_tx *entry = ptr;

	kfree_rcu(entry, fast_tx.rcu_head);
}

static void mesh_fast_tx_deinit(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	rhashtable_free_and_destroy(&cache->rht,
				    __mesh_fast_tx_entry_free, NULL);
}

static void mesh_fast_tx_init(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	rhashtable_init(&cache->rht, &fast_tx_rht_params);
	INIT_HLIST_HEAD(&cache->walk_head);
	spin_lock_init(&cache->walk_lock);
}

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	struct mesh_path *mpath = ptr;
	struct mesh_table *tbl = tblptr;

	mesh_path_free_rcu(tbl, mpath);
}

static void mesh_table_init(struct mesh_table *tbl)
{
	INIT_HLIST_HEAD(&tbl->known_gates);
	INIT_HLIST_HEAD(&tbl->walk_head);
	atomic_set(&tbl->entries, 0);
	spin_lock_init(&tbl->gates_lock);
	spin_lock_init(&tbl->walk_lock);

	/* rhashtable_init() may fail only in case of wrong
	 * mesh_rht_params
	 */
	WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
}

static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
}
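
/*
 * A minimal sketch (illustrative only, not a caller that exists in this
 * file) of how mesh_rht_params drives a lookup: rhashtable reads the
 * ETH_ALEN key from mesh_path.dst at @key_offset and hashes it with
 * mesh_table_hash() above. Callers must be in an RCU read-side section:
 *
 *	struct mesh_path *mpath;
 *
 *	rcu_read_lock();
 *	mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);
 *	if (mpath)
 *		... use mpath; do not hold it past rcu_read_unlock() ...
 *	rcu_read_unlock();
 */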

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
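
/*
 * Address layout sketch for prepare_for_gate() above (illustrative,
 * following the IEEE 802.11 mesh data frame format): for a frame that
 * had no Address Extension, the old mesh DA/SA are preserved as the
 * extended addresses and the 802.11 addresses are retargeted at the
 * gate:
 *
 *	before:	A1 = old next hop	A3 = mesh DA	no eaddr
 *	after:	A1 = gate next hop	A3 = gate addr	eaddr1/2 = old A3/A4
 *
 * A2 is always rewritten to our own interface address.
 */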

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);

	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read-side section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
}

static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0;
	struct mesh_path *mpath;

	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (i++ == idx)
			break;
	}

	if (!mpath)
		return NULL;

	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @sdata: local subif
 * @idx: index
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @sdata: local subif
 * @idx: index
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
}
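
/*
 * A minimal sketch (hypothetical caller, in the style of the cfg80211
 * mpath dump) of iterating paths by index under RCU. Note that each
 * call rescans walk_head from the start, so a full walk is O(n^2):
 *
 *	struct mesh_path *mpath;
 *	int idx = 0;
 *
 *	rcu_read_lock();
 *	while ((mpath = mesh_path_lookup_by_idx(sdata, idx++)))
 *		pr_debug("path to %pM (flags 0x%x)\n",
 *			 mpath->dst, mpath->flags);
 *	rcu_read_unlock();
 */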

/**
 * mesh_path_add_gate - record the given mpath as a path to a mesh gate
 * @mpath: gate path to add to the table
 *
 * Returns: 0 on success, or -EEXIST if @mpath was already marked as a gate
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = &mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}
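
/*
 * Sketch of the expected caller pattern (hypothetical here; in mac80211
 * the HWMP code records gates when it processes a gate announcement),
 * showing why -EEXIST need not be treated as a hard error:
 *
 *	err = mesh_path_add_gate(mpath);
 *	if (err && err != -EEXIST)
 *		mpath_dbg(sdata, "could not record gate %pM\n", mpath->dst);
 */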

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	timer_setup(&new_mpath->timer, mesh_path_timer, 0);

	return new_mpath;
}

static void mesh_fast_tx_entry_free(struct mesh_tx_cache *cache,
				    struct ieee80211_mesh_fast_tx *entry)
{
	hlist_del_rcu(&entry->walk_list);
	rhashtable_remove_fast(&cache->rht, &entry->rhash, fast_tx_rht_params);
	kfree_rcu(entry, fast_tx.rcu_head);
}

struct ieee80211_mesh_fast_tx *
mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct ieee80211_mesh_fast_tx *entry;
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
	if (!entry)
		return NULL;

	if (!(entry->mpath->flags & MESH_PATH_ACTIVE) ||
	    mpath_expired(entry->mpath)) {
		spin_lock_bh(&cache->walk_lock);
		entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
		if (entry)
			mesh_fast_tx_entry_free(cache, entry);
		spin_unlock_bh(&cache->walk_lock);
		return NULL;
	}

	mesh_path_refresh(sdata, entry->mpath, NULL);
	if (entry->mppath)
		entry->mppath->exp_time = jiffies;
	entry->timestamp = jiffies;

	return entry;
}
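
/*
 * A condensed sketch (illustrative; the actual consumer is the mesh
 * fast-xmit path in tx.c) of how a transmitter is expected to use the
 * cache, under RCU since entries are freed via kfree_rcu():
 *
 *	struct ieee80211_mesh_fast_tx *entry;
 *
 *	rcu_read_lock();
 *	entry = mesh_fast_tx_get(sdata, da);
 *	if (entry)
 *		... build the 802.11 + mesh headers from entry ...
 *	else
 *		... fall back to the slow path ...
 *	rcu_read_unlock();
 */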

void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
			struct sk_buff *skb, struct mesh_path *mpath)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_mesh_fast_tx *entry, *prev;
	struct ieee80211_mesh_fast_tx build = {};
	struct ieee80211s_hdr *meshhdr;
	struct mesh_tx_cache *cache;
	struct ieee80211_key *key;
	struct mesh_path *mppath;
	struct sta_info *sta;
	u8 *qc;

	if (sdata->noack_map ||
	    !ieee80211_is_data_qos(hdr->frame_control))
		return;

	build.fast_tx.hdr_len = ieee80211_hdrlen(hdr->frame_control);
	meshhdr = (struct ieee80211s_hdr *)(skb->data + build.fast_tx.hdr_len);
	build.hdrlen = ieee80211_get_mesh_hdrlen(meshhdr);

	cache = &sdata->u.mesh.tx_cache;
	if (atomic_read(&cache->rht.nelems) >= MESH_FAST_TX_CACHE_MAX_SIZE)
		return;

	sta = rcu_dereference(mpath->next_hop);
	if (!sta)
		return;

	if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) {
		/* This is required to keep the mppath alive */
		mppath = mpp_path_lookup(sdata, meshhdr->eaddr1);
		if (!mppath)
			return;
		build.mppath = mppath;
	} else if (ieee80211_has_a4(hdr->frame_control)) {
		mppath = mpath;
	} else {
		return;
	}

	/* rate limit, in case fast xmit can't be enabled */
	if (mppath->fast_tx_check == jiffies)
		return;

	mppath->fast_tx_check = jiffies;

	/*
	 * Same use of the sta lock as in ieee80211_check_fast_xmit, in order
	 * to protect against concurrent sta key updates.
	 */
	spin_lock_bh(&sta->lock);
	key = rcu_access_pointer(sta->ptk[sta->ptk_idx]);
	if (!key)
		key = rcu_access_pointer(sdata->default_unicast_key);
	build.fast_tx.key = key;

	if (key) {
		bool gen_iv, iv_spc;

		gen_iv = key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
		iv_spc = key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;

		if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
		    (key->flags & KEY_FLAG_TAINTED))
			goto unlock_sta;

		switch (key->conf.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_CCMP_256:
			if (gen_iv)
				build.fast_tx.pn_offs = build.fast_tx.hdr_len;
			if (gen_iv || iv_spc)
				build.fast_tx.hdr_len += IEEE80211_CCMP_HDR_LEN;
			break;
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			if (gen_iv)
				build.fast_tx.pn_offs = build.fast_tx.hdr_len;
			if (gen_iv || iv_spc)
				build.fast_tx.hdr_len += IEEE80211_GCMP_HDR_LEN;
			break;
		default:
			goto unlock_sta;
		}
	}

	memcpy(build.addr_key, mppath->dst, ETH_ALEN);
	build.timestamp = jiffies;
	build.fast_tx.band = info->band;
	build.fast_tx.da_offs = offsetof(struct ieee80211_hdr, addr3);
	build.fast_tx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
	build.mpath = mpath;
	memcpy(build.hdr, meshhdr, build.hdrlen);
	memcpy(build.hdr + build.hdrlen, rfc1042_header, sizeof(rfc1042_header));
	build.hdrlen += sizeof(rfc1042_header);
	memcpy(build.fast_tx.hdr, hdr, build.fast_tx.hdr_len);

	hdr = (struct ieee80211_hdr *)build.fast_tx.hdr;
	if (build.fast_tx.key)
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);

	qc = ieee80211_get_qos_ctl(hdr);
	qc[1] |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8;

	entry = kmemdup(&build, sizeof(build), GFP_ATOMIC);
	if (!entry)
		goto unlock_sta;

	spin_lock(&cache->walk_lock);
	prev = rhashtable_lookup_get_insert_fast(&cache->rht,
						 &entry->rhash,
						 fast_tx_rht_params);
	if (unlikely(IS_ERR(prev))) {
		kfree(entry);
		goto unlock_cache;
	}

	/*
	 * replace any previous entry in the hash table, in case we're
	 * replacing it with a different type (e.g. mpath -> mpp)
	 */
	if (unlikely(prev)) {
		rhashtable_replace_fast(&cache->rht, &prev->rhash,
					&entry->rhash, fast_tx_rht_params);
		hlist_del_rcu(&prev->walk_list);
		kfree_rcu(prev, fast_tx.rcu_head);
	}

	hlist_add_head(&entry->walk_list, &cache->walk_head);

unlock_cache:
	spin_unlock(&cache->walk_lock);
unlock_sta:
	spin_unlock_bh(&sta->lock);
}
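
/*
 * Rough layout of the template cached above (a sketch; the field names
 * are the ones used in this file):
 *
 *	fast_tx.hdr:	[ 802.11 QoS data header ][ IV space, if any ]
 *	hdr:		[ mesh header ][ RFC 1042 LLC/SNAP header ]
 *
 * On fast xmit the two parts are prepended to the payload;
 * fast_tx.pn_offs, when nonzero, marks where a fresh PN/IV must be
 * stamped for CCMP/GCMP.
 */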

void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata)
{
	unsigned long timeout = msecs_to_jiffies(MESH_FAST_TX_CACHE_TIMEOUT);
	struct mesh_tx_cache *cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	cache = &sdata->u.mesh.tx_cache;
	if (atomic_read(&cache->rht.nelems) < MESH_FAST_TX_CACHE_THRESHOLD_SIZE)
		return;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (!time_is_after_jiffies(entry->timestamp + timeout))
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

void mesh_fast_tx_flush_mpath(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (entry->mpath == mpath)
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata,
			    struct sta_info *sta)
{
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (rcu_access_pointer(entry->mpath->next_hop) == sta)
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
			     const u8 *addr)
{
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;

	spin_lock_bh(&cache->walk_lock);
	/* use the _fast variant: no RCU read-side section is held here */
	entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params);
	if (entry)
		mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @sdata: local subif
 * @dst: destination address of the path (ETH_ALEN length)
 *
 * Returns: the new or already existing mesh path on success, or an
 * ERR_PTR() on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return ERR_PTR(-ENOMEM);

	tbl = &sdata->u.mesh.mesh_paths;
	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
						  &new_mpath->rhash,
						  mesh_rht_params);
	if (!mpath)
		hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (mpath) {
		kfree(new_mpath);

		if (IS_ERR(mpath))
			return mpath;

		new_mpath = mpath;
	}

	sdata->u.mesh.mesh_paths_generation++;
	return new_mpath;
}
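
/*
 * Sketch of the expected caller pattern (hypothetical; the HWMP next-hop
 * resolution code does the equivalent), since the return value can be a
 * new path, an existing path, or an error pointer:
 *
 *	mpath = mesh_path_add(sdata, dst);
 *	if (IS_ERR(mpath)) {
 *		mesh_path_discard_frame(sdata, skb);
 *		return PTR_ERR(mpath);
 *	}
 *	... queue the frame and start path discovery ...
 */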

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct mesh_table *tbl;
	struct mesh_path *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);

	if (!new_mpath)
		return -ENOMEM;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	tbl = &sdata->u.mesh.mpp_paths;

	spin_lock_bh(&tbl->walk_lock);
	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
					    &new_mpath->rhash,
					    mesh_rht_params);
	if (!ret)
		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (ret)
		kfree(new_mpath);
	else
		mesh_fast_tx_flush_addr(sdata, dst);

	sdata->u.mesh.mpp_paths_generation++;
	return ret;
}

/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
					   sdata->u.mesh.mshcfg.element_ttl,
					   mpath->dst, mpath->sn,
					   WLAN_REASON_MESH_PATH_DEST_UNREACHABLE,
					   bcast);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_free_rcu(struct mesh_table *tbl,
			       struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
	mesh_gate_del(tbl, mpath);
	spin_unlock_bh(&mpath->state_lock);
	timer_shutdown_sync(&mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
	mesh_path_flush_pending(mpath);
	kfree_rcu(mpath, rcu);
}

static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	hlist_del_rcu(&mpath->walk_list);
	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
	if (tbl == &mpath->sdata->u.mesh.mpp_paths)
		mesh_fast_tx_flush_addr(mpath->sdata, mpath->dst);
	else
		mesh_fast_tx_flush_mpath(mpath);
	mesh_path_free_rcu(tbl, mpath);
}
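
/*
 * Condensed sketch of the deletion idiom used by the flush helpers below
 * (illustrative only): hold tbl->walk_lock, walk walk_head with the
 * _safe iterator, and let __mesh_path_del() unlink the entry from both
 * the list and the rhashtable before freeing it via RCU:
 *
 *	spin_lock_bh(&tbl->walk_lock);
 *	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list)
 *		if (... entry matches ...)
 *			__mesh_path_del(tbl, mpath);
 *	spin_unlock_bh(&tbl->walk_lock);
 */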

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta)
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (ether_addr_equal(mpath->mpp, proxy))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal (proxy) paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	table_flush_by_iface(&sdata->u.mesh.mesh_paths);
	table_flush_by_iface(&sdata->u.mesh.mpp_paths);
}

/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
static int table_path_del(struct mesh_table *tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_path *mpath;

	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
	if (!mpath) {
		spin_unlock_bh(&tbl->walk_lock);
		return -ENXIO;
	}

	__mesh_path_del(tbl, mpath);
	spin_unlock_bh(&tbl->walk_lock);
	return 0;
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
	sdata->u.mesh.mesh_paths_generation++;
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}
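
/*
 * Typical activation sequence (a sketch; the HWMP route-reply handling
 * does the equivalent): assign the next hop and mark the path active
 * under state_lock, then release the queued frames:
 *
 *	spin_lock_bh(&mpath->state_lock);
 *	mesh_path_assign_nexthop(mpath, sta);
 *	mpath->flags |= MESH_PATH_ACTIVE;
 *	spin_unlock_bh(&mpath->state_lock);
 *	mesh_path_tx_pending(mpath);
 */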

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 *
 * Returns: 0 on success, or -EHOSTUNREACH if no active gate was found
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mesh_path *gate;
	bool copy = false;

	tbl = &sdata->u.mesh.mesh_paths;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
			mesh_path_move_to_queue(gate, from_mpath, copy);
			from_mpath = gate;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->dst, gate->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
		mesh_path_tx_pending(gate);
	}
	rcu_read_unlock();

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @sdata: network subif the frame was to be sent through
 * @skb: frame to discard
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	ieee80211_free_txskb(&sdata->local->hw, skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock internally; the caller
 * must not hold it.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
	mesh_path_activate(mpath);
	mesh_fast_tx_flush_mpath(mpath);
	spin_unlock_bh(&mpath->state_lock);
	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
	/* init it at a low value - 0 start is tricky */
	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
	mesh_path_tx_pending(mpath);
}

void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_init(&sdata->u.mesh.mesh_paths);
	mesh_table_init(&sdata->u.mesh.mpp_paths);
	mesh_fast_tx_init(sdata);
}
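
/*
 * Sketch of a fixed-route setup (hypothetical caller; in mac80211 the
 * cfg80211 mpath ops do the equivalent when userspace pins a next hop):
 *
 *	rcu_read_lock();
 *	sta = sta_info_get(sdata, next_hop_addr);
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (sta && mpath)
 *		mesh_path_fix_nexthop(mpath, sta);
 *	rcu_read_unlock();
 */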

static
void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
			  struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
}

void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
	mesh_fast_tx_deinit(sdata);
	mesh_table_free(&sdata->u.mesh.mesh_paths);
	mesh_table_free(&sdata->u.mesh.mpp_paths);
}
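
/*
 * Lifecycle sketch (illustrative ordering, mirroring how the mesh code
 * uses these hooks): mesh_pathtbl_init() at interface bring-up,
 * mesh_path_expire() periodically from the mesh housekeeping work, and
 * mesh_pathtbl_unregister() at teardown, after the paths are flushed:
 *
 *	mesh_pathtbl_init(sdata);
 *	...
 *	mesh_path_expire(sdata);	periodic housekeeping
 *	...
 *	mesh_path_flush_by_iface(sdata);
 *	mesh_pathtbl_unregister(sdata);
 */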