// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Copyright (C) 2019, 2021-2023, 2025-2026 Intel Corporation
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 */

#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/unaligned.h>
#include "wme.h"
#include "mesh.h"

/* Reference frame length (bits-ish fixed-point input) for airtime metric */
#define TEST_FRAME_LEN	8192
#define MAX_METRIC	0xffffffff
#define ARITH_SHIFT	8
/* EWMA failure average (scaled to 100) above which a plink is torn down */
#define LINK_FAIL_THRESH 95

#define MAX_PREQ_QUEUE_LEN	64

static void mesh_queue_preq(struct mesh_path *, u8);

/* Read an unaligned LE32 field out of an HWMP IE. When the Address
 * Extension (AE) flag is set, later fields are shifted by ETH_ALEN (6)
 * bytes to make room for the external address.
 */
static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return get_unaligned_le32(preq_elem + offset);
}

/* Same as u32_field_get() but for an unaligned LE16 field. */
static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return get_unaligned_le16(preq_elem + offset);
}

/* HWMP IE processing macros */
#define AE_F			(1<<6)
#define AE_F_SET(x)		(*x & AE_F)
#define PREQ_IE_FLAGS(x)	(*(x))
#define PREQ_IE_HOPCOUNT(x)	(*(x + 1))
#define PREQ_IE_TTL(x)		(*(x + 2))
#define PREQ_IE_PREQ_ID(x)	u32_field_get(x, 3, 0)
#define PREQ_IE_ORIG_ADDR(x)	(x + 7)
#define PREQ_IE_ORIG_SN(x)	u32_field_get(x, 13, 0)
#define PREQ_IE_LIFETIME(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREQ_IE_METRIC(x)	u32_field_get(x, 21, AE_F_SET(x))
#define PREQ_IE_TARGET_F(x)	(*(AE_F_SET(x) ? x + 32 : x + 26))
#define PREQ_IE_TARGET_ADDR(x)	(AE_F_SET(x) ? x + 33 : x + 27)
#define PREQ_IE_TARGET_SN(x)	u32_field_get(x, 33, AE_F_SET(x))


#define PREP_IE_FLAGS(x)	PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x)	PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x)		PREQ_IE_TTL(x)
#define PREP_IE_ORIG_ADDR(x)	(AE_F_SET(x) ? \
	x + 27 : x + 21)
#define PREP_IE_ORIG_SN(x)	u32_field_get(x, 27, AE_F_SET(x))
#define PREP_IE_LIFETIME(x)	u32_field_get(x, 13, AE_F_SET(x))
#define PREP_IE_METRIC(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREP_IE_TARGET_ADDR(x)	(x + 3)
#define PREP_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)

#define PERR_IE_TTL(x)		(*(x))
#define PERR_IE_TARGET_FLAGS(x)	(*(x + 2))
#define PERR_IE_TARGET_ADDR(x)	(x + 3)
#define PERR_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)
#define PERR_IE_TARGET_RCODE(x)	u16_field_get(x, 13, 0)

#define MSEC_TO_TU(x) (x*1000/1024)
/* Serial-number comparison, robust against u32 wraparound */
#define SN_GT(x, y) ((s32)(y - x) < 0)
#define SN_LT(x, y) ((s32)(x - y) < 0)
#define MAX_SANE_SN_DELTA 32

/* Absolute difference of two sequence numbers, used to detect a peer
 * that rebooted and restarted its SN space.
 */
static inline u32 SN_DELTA(u32 x, u32 y)
{
	return x >= y ? x - y : y - x;
}

#define net_traversal_jiffies(s) \
	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
#define default_lifetime(s) \
	MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
#define min_preq_int_jiff(s) \
	(msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
#define disc_timeout_jiff(s) \
	msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)
#define root_path_confirmation_jiffies(s) \
	msecs_to_jiffies(sdata->u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval)

enum mpath_frame_type {
	MPATH_PREQ = 0,
	MPATH_PREP,
	MPATH_PERR,
	MPATH_RANN
};

static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/* Build and immediately transmit one HWMP action frame (PREQ, PREP or
 * RANN; PERR goes through mesh_path_error_tx() instead).  The parameter
 * set is the union of all three IE layouts; fields not used by a given
 * frame type are ignored.
 *
 * Returns: 0 on success, -1 on allocation failure, -EOPNOTSUPP for an
 * unsupported frame type.
 */
static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
				  const u8 *orig_addr, u32 orig_sn,
				  u8 target_flags, const u8 *target,
				  u32 target_sn, const u8 *da,
				  u8 hop_count, u8 ttl,
				  u32 lifetime, u32 metric, u32 preq_id,
				  struct ieee80211_sub_if_data *sdata)
{
	int hdr_len = IEEE80211_MIN_ACTION_SIZE(mesh_action);
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;

	skb = dev_alloc_skb(local->tx_headroom +
			    hdr_len +
			    2 + 37); /* max HWMP IE */
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom);
	mgmt = skb_put_zero(skb, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.action_code = WLAN_MESH_ACTION_HWMP_PATH_SELECTION;

	switch (action) {
	case MPATH_PREQ:
		mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
		ie_len = 37;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREQ;
		break;
	case MPATH_PREP:
		mhwmp_dbg(sdata, "sending PREP to %pM\n", orig_addr);
		ie_len = 31;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREP;
		break;
	case MPATH_RANN:
		mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
		ie_len = sizeof(struct ieee80211_rann_ie);
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_RANN;
		break;
	default:
		kfree_skb(skb);
		return -EOPNOTSUPP;
	}
	*pos++ = ie_len;
	*pos++ = flags;
	*pos++ = hop_count;
	*pos++ = ttl;
	/* PREP carries target addr/SN first; PREQ/RANN carry the PREQ ID
	 * (PREQ only) followed by originator addr/SN.
	 */
	if (action == MPATH_PREP) {
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(target_sn, pos);
		pos += 4;
	} else {
		if (action == MPATH_PREQ) {
			put_unaligned_le32(preq_id, pos);
			pos += 4;
		}
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(orig_sn, pos);
		pos += 4;
	}
	put_unaligned_le32(lifetime, pos); /* interval for RANN */
	pos += 4;
	put_unaligned_le32(metric, pos);
	pos += 4;
	if (action == MPATH_PREQ) {
		*pos++ = 1; /* destination count */
		*pos++ = target_flags;
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(target_sn, pos);
		pos += 4;
	} else if (action == MPATH_PREP) {
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(orig_sn, pos);
		pos += 4;
	}

	ieee80211_tx_skb(sdata, skb);
	return 0;
}


/* Headroom is not adjusted. Caller should ensure that skb has sufficient
 * headroom in case the frame is encrypted. */
static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
					  struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
	skb->priority = 7;

	info->control.vif = &sdata->vif;
	info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
	ieee80211_set_qos_hdr(sdata, skb);
	ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
}

/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
 *
 * @sdata: local mesh subif
 * @ttl: allowed remaining hops
 * @target: broken destination
 * @target_sn: SN of the broken destination
 * @target_rcode: reason code for this PERR
 * @ra: node this frame is addressed to
 *
 * Note: This function may be called with driver locks taken that the driver
 * also acquires in the TX path.  To avoid a deadlock we don't transmit the
 * frame directly but add it to the pending queue instead.
 *
 * Returns: 0 on success
 */
int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
		       u8 ttl, const u8 *target, u32 target_sn,
		       u16 target_rcode, const u8 *ra)
{
	int hdr_len = IEEE80211_MIN_ACTION_SIZE(mesh_action);
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;

	/* rate-limit PERRs to dot11MeshHWMPperrMinInterval */
	if (time_before(jiffies, ifmsh->next_perr))
		return -EAGAIN;

	skb = dev_alloc_skb(local->tx_headroom +
			    IEEE80211_ENCRYPT_HEADROOM +
			    IEEE80211_ENCRYPT_TAILROOM +
			    hdr_len +
			    2 + 15 /* PERR IE */);
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);
	mgmt = skb_put_zero(skb, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, ra, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.action_code = WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
	ie_len = 15;
	pos = skb_put(skb, 2 + ie_len);
	*pos++ = WLAN_EID_PERR;
	*pos++ = ie_len;
	/* ttl */
	*pos++ = ttl;
	/* number of destinations */
	*pos++ = 1;
	/* Flags field has AE bit only as defined in
	 * sec 8.4.2.117 IEEE802.11-2012
	 */
	*pos = 0;
	pos++;
	memcpy(pos, target, ETH_ALEN);
	pos += ETH_ALEN;
	put_unaligned_le32(target_sn, pos);
	pos += 4;
	put_unaligned_le16(target_rcode, pos);

	/* see note in function header */
	prepare_frame_for_deferred_tx(sdata, skb);
	ifmsh->next_perr = TU_TO_EXP_TIME(
				   ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
	ieee80211_add_pending_skb(local, skb);
	return 0;
}

/* Feed one TX status report into the per-STA link metric averages and
 * tear the peer link down when the failure average crosses
 * LINK_FAIL_THRESH.
 */
void ieee80211s_update_metric(struct ieee80211_local *local,
			      struct sta_info *sta,
			      struct ieee80211_tx_status
			      *st)
{
	struct ieee80211_tx_info *txinfo = st->info;
	int failed;
	struct rate_info rinfo;

	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);

	/* moving average, scaled to 100.
	 * feed failure as 100 and success as 0
	 */
	ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, failed * 100);
	if (ewma_mesh_fail_avg_read(&sta->mesh->fail_avg) >
			LINK_FAIL_THRESH)
		mesh_plink_broken(sta);

	/* use rate info set by the driver directly if present */
	if (st->n_rates)
		rinfo = sta->deflink.tx_stats.last_rate_info;
	else
		sta_set_rate_info_tx(sta, &sta->deflink.tx_stats.last_rate, &rinfo);

	ewma_mesh_tx_rate_avg_add(&sta->mesh->tx_rate_avg,
				  cfg80211_calculate_bitrate(&rinfo));
}

/* Compute the 802.11s airtime link metric towards @sta using fixed-point
 * arithmetic (ARITH_SHIFT fractional bits).  Returns MAX_METRIC for
 * non-established or hopeless links.
 */
u32 airtime_link_metric_get(struct ieee80211_local *local,
			    struct sta_info *sta)
{
	/* This should be adjusted for each device */
	int device_constant = 1 << ARITH_SHIFT;
	int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
	int s_unit = 1 << ARITH_SHIFT;
	int rate, err;
	u32 tx_time, estimated_retx;
	u64 result;
	unsigned long fail_avg =
		ewma_mesh_fail_avg_read(&sta->mesh->fail_avg);

	if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
		return MAX_METRIC;

	/* Try to get rate based on HW/SW RC algorithm.
	 * Rate is returned in units of Kbps, correct this
	 * to comply with airtime calculation units
	 * Round up in case we get rate < 100Kbps
	 */
	rate = DIV_ROUND_UP(sta_get_expected_throughput(sta), 100);

	if (rate) {
		err = 0;
	} else {
		if (fail_avg > LINK_FAIL_THRESH)
			return MAX_METRIC;

		rate = ewma_mesh_tx_rate_avg_read(&sta->mesh->tx_rate_avg);
		if (WARN_ON(!rate))
			return MAX_METRIC;

		err = (fail_avg << ARITH_SHIFT) / 100;
	}

	/* bitrate is in units of 100 Kbps, while we need rate in units of
	 * 1Mbps. This will be corrected on tx_time computation.
	 */
	tx_time = (device_constant + 10 * test_frame_len / rate);
	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
	result = ((u64)tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
	return (u32)result;
}

/* Check that the first metric is at least 10% better than the second one */
static bool is_metric_better(u32 x, u32 y)
{
	return (x < y) && (x < (y - x / 10));
}

/**
 * hwmp_route_info_get - Update routing info to originator and transmitter
 *
 * @sdata: local mesh subif
 * @mgmt: mesh management frame
 * @hwmp_ie: hwmp information element (PREP or PREQ)
 * @action: type of hwmp ie
 *
 * This function updates the path routing information to the originator and the
 * transmitter of a HWMP PREQ or PREP frame.
 *
 * Returns: metric to frame originator or 0 if the frame should not be further
 * processed
 *
 * Notes: this function is the only place (besides user-provided info) where
 * path routing information is updated.
 */
static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
			       struct ieee80211_mgmt *mgmt,
			       const u8 *hwmp_ie, enum mpath_frame_type action)
{
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath;
	struct sta_info *sta;
	bool fresh_info;
	const u8 *orig_addr, *ta;
	u32 orig_sn, orig_metric;
	unsigned long orig_lifetime, exp_time;
	u32 last_hop_metric, new_metric;
	bool flush_mpath = false;
	bool process = true;
	u8 hopcount;

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return 0;
	}

	last_hop_metric = airtime_link_metric_get(local, sta);
	/* Update and check originator routing info */
	fresh_info = true;

	switch (action) {
	case MPATH_PREQ:
		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
		orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
		orig_metric = PREQ_IE_METRIC(hwmp_ie);
		hopcount = PREQ_IE_HOPCOUNT(hwmp_ie) + 1;
		break;
	case MPATH_PREP:
		/* Originator here refers to the MP that was the target in the
		 * Path Request. We divert from the nomenclature in the draft
		 * so that we can easily use a single function to gather path
		 * information from both PREQ and PREP frames.
		 */
		orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
		orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
		orig_metric = PREP_IE_METRIC(hwmp_ie);
		hopcount = PREP_IE_HOPCOUNT(hwmp_ie) + 1;
		break;
	default:
		rcu_read_unlock();
		return 0;
	}
	/* saturate rather than wrap the accumulated metric */
	new_metric = orig_metric + last_hop_metric;
	if (new_metric < orig_metric)
		new_metric = MAX_METRIC;
	exp_time = TU_TO_EXP_TIME(orig_lifetime);

	if (ether_addr_equal(orig_addr, sdata->vif.addr)) {
		/* This MP is the originator, we are not interested in this
		 * frame, except for updating transmitter's path info.
		 */
		process = false;
		fresh_info = false;
	} else {
		mpath = mesh_path_lookup(sdata, orig_addr);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if (mpath->flags & MESH_PATH_FIXED)
				fresh_info = false;
			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
			    (mpath->flags & MESH_PATH_SN_VALID)) {
				/* keep existing info unless the frame has a
				 * newer SN, or (same SN) a clearly better
				 * metric / same next hop with metric not
				 * worse
				 */
				if (SN_GT(mpath->sn, orig_sn) ||
				    (mpath->sn == orig_sn &&
				     (rcu_access_pointer(mpath->next_hop) !=
						      sta ?
					      !is_metric_better(new_metric, mpath->metric) :
					      new_metric >= mpath->metric))) {
					process = false;
					fresh_info = false;
				}
			} else if (!(mpath->flags & MESH_PATH_ACTIVE)) {
				bool have_sn, newer_sn, bounced;

				have_sn = mpath->flags & MESH_PATH_SN_VALID;
				newer_sn = have_sn && SN_GT(orig_sn, mpath->sn);
				bounced = have_sn &&
					  (SN_DELTA(orig_sn, mpath->sn) >
							MAX_SANE_SN_DELTA);

				if (!have_sn || newer_sn) {
					/* if SN is newer than what we had
					 * then we can take it */;
				} else if (bounced) {
					/* if SN is way different than what
					 * we had then assume the other side
					 * rebooted or restarted */;
				} else {
					process = false;
					fresh_info = false;
				}
			}
		} else {
			mpath = mesh_path_add(sdata, orig_addr);
			if (IS_ERR(mpath)) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			if (rcu_access_pointer(mpath->next_hop) != sta) {
				mpath->path_change_count++;
				flush_mpath = true;
			}
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags |= MESH_PATH_SN_VALID;
			mpath->metric = new_metric;
			mpath->sn = orig_sn;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ?  mpath->exp_time : exp_time;
			mpath->hop_count = hopcount;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			/* fast-xmit cache flush must happen after the
			 * state_lock is dropped
			 */
			if (flush_mpath)
				mesh_fast_tx_flush_mpath(mpath);
			ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
			/* init it at a low value - 0 start is tricky */
			ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
			mesh_path_tx_pending(mpath);
			/* draft says preq_id should be saved to, but there does
			 * not seem to be any use for it, skipping by now
			 */
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	/* Update and check transmitter routing info */
	ta = mgmt->sa;
	if (ether_addr_equal(orig_addr, ta))
		fresh_info = false;
	else {
		fresh_info = true;

		mpath = mesh_path_lookup(sdata, ta);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if ((mpath->flags & MESH_PATH_FIXED) ||
			    ((mpath->flags & MESH_PATH_ACTIVE) &&
			     ((rcu_access_pointer(mpath->next_hop) != sta ?
				       !is_metric_better(last_hop_metric, mpath->metric) :
				       last_hop_metric > mpath->metric))))
				fresh_info = false;
		} else {
			mpath = mesh_path_add(sdata, ta);
			if (IS_ERR(mpath)) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			if (rcu_access_pointer(mpath->next_hop) != sta) {
				mpath->path_change_count++;
				flush_mpath = true;
			}
			mesh_path_assign_nexthop(mpath, sta);
			mpath->metric = last_hop_metric;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ?  mpath->exp_time : exp_time;
			mpath->hop_count = 1;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			if (flush_mpath)
				mesh_fast_tx_flush_mpath(mpath);
			ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
			/* init it at a low value - 0 start is tricky */
			ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
			mesh_path_tx_pending(mpath);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	rcu_read_unlock();

	return process ? new_metric : 0;
}

/* Handle a received PREQ element: reply with a PREP when we are (or know)
 * the target, and/or forward the PREQ when forwarding is enabled.
 */
static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    const u8 *preq_elem, u32 orig_metric)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath = NULL;
	const u8 *target_addr, *orig_addr;
	const u8 *da;
	u8 target_flags, ttl, flags;
	u32 orig_sn, target_sn, lifetime, target_metric = 0;
	bool reply = false;
	bool forward = true;
	bool root_is_gate;

	/* Update target SN, if present */
	target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
	orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
	target_sn = PREQ_IE_TARGET_SN(preq_elem);
	orig_sn = PREQ_IE_ORIG_SN(preq_elem);
	target_flags = PREQ_IE_TARGET_F(preq_elem);
	/* Proactive PREQ gate announcements */
	flags = PREQ_IE_FLAGS(preq_elem);
	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);

	mhwmp_dbg(sdata, "received PREQ from %pM\n", orig_addr);

	if (ether_addr_equal(target_addr, sdata->vif.addr)) {
		mhwmp_dbg(sdata, "PREQ is for us\n");
		forward = false;
		reply = true;
		target_metric = 0;

		if (SN_GT(target_sn, ifmsh->sn))
			ifmsh->sn = target_sn;

		if (time_after(jiffies, ifmsh->last_sn_update +
					net_traversal_jiffies(sdata)) ||
		    time_before(jiffies, ifmsh->last_sn_update)) {
			++ifmsh->sn;
			ifmsh->last_sn_update = jiffies;
		}
		target_sn = ifmsh->sn;
	} else if (is_broadcast_ether_addr(target_addr) &&
		   (target_flags & IEEE80211_PREQ_TO_FLAG)) {
		/* proactive PREQ from a root STA */
		rcu_read_lock();
		mpath = mesh_path_lookup(sdata, orig_addr);
		if (mpath) {
			if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
				reply = true;
				target_addr = sdata->vif.addr;
				target_sn = ++ifmsh->sn;
				target_metric = 0;
				ifmsh->last_sn_update = jiffies;
			}
			if (root_is_gate)
				mesh_path_add_gate(mpath);
		}
		rcu_read_unlock();
	} else if (ifmsh->mshcfg.dot11MeshForwarding) {
		rcu_read_lock();
		mpath = mesh_path_lookup(sdata, target_addr);
		if (mpath) {
			if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
					SN_LT(mpath->sn, target_sn)) {
				mpath->sn = target_sn;
				mpath->flags |= MESH_PATH_SN_VALID;
			} else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) &&
					(mpath->flags & MESH_PATH_ACTIVE)) {
				reply = true;
				target_metric = mpath->metric;
				target_sn = mpath->sn;
				/* Case E2 of sec 13.10.9.3 IEEE 802.11-2012*/
				target_flags |= IEEE80211_PREQ_TO_FLAG;
			}
		}
		rcu_read_unlock();
	} else {
		forward = false;
	}

	if (reply) {
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		ttl = ifmsh->mshcfg.element_ttl;
		if (ttl != 0) {
			mhwmp_dbg(sdata, "replying to the PREQ\n");
			mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
					       orig_sn, 0, target_addr,
					       target_sn, mgmt->sa, 0, ttl,
					       lifetime, target_metric, 0,
					       sdata);
		} else {
			ifmsh->mshstats.dropped_frames_ttl++;
		}
	}

	if (forward) {
		u32 preq_id;
		u8 hopcount;

		ttl = PREQ_IE_TTL(preq_elem);
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		if (ttl <= 1) {
			ifmsh->mshstats.dropped_frames_ttl++;
			return;
		}
		mhwmp_dbg(sdata, "forwarding the PREQ from %pM\n", orig_addr);
		--ttl;
		preq_id = PREQ_IE_PREQ_ID(preq_elem);
		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
		/* unicast towards the root's announced sender if known */
		da = (mpath && mpath->is_root) ?
			mpath->rann_snd_addr : broadcast_addr;

		if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
			target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
			target_sn = PREQ_IE_TARGET_SN(preq_elem);
		}

		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
				       orig_sn, target_flags, target_addr,
				       target_sn, da, hopcount, ttl, lifetime,
				       orig_metric, preq_id, sdata);
		if (!is_multicast_ether_addr(da))
			ifmsh->mshstats.fwded_unicast++;
		else
			ifmsh->mshstats.fwded_mcast++;
		ifmsh->mshstats.fwded_frames++;
	}
}


/* Dereference mpath->next_hop; caller must hold mpath->state_lock. */
static inline struct sta_info *
next_hop_deref_protected(struct mesh_path *mpath)
{
	return rcu_dereference_protected(mpath->next_hop,
					 lockdep_is_held(&mpath->state_lock));
}


/* Handle a received PREP element: if we are not the PREP's destination,
 * forward it one hop towards the path originator (when forwarding is on).
 */
static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    const u8 *prep_elem, u32 metric)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	const u8 *target_addr, *orig_addr;
	u8 ttl, hopcount, flags;
	u8 next_hop[ETH_ALEN];
	u32 target_sn, orig_sn, lifetime;

	mhwmp_dbg(sdata, "received PREP from %pM\n",
		  PREP_IE_TARGET_ADDR(prep_elem));

	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
	if (ether_addr_equal(orig_addr, sdata->vif.addr))
		/* destination, no forwarding required */
		return;

	if (!ifmsh->mshcfg.dot11MeshForwarding)
		return;

	ttl = PREP_IE_TTL(prep_elem);
	if (ttl <= 1) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		return;
	}

	rcu_read_lock();
	mpath = mesh_path_lookup(sdata, orig_addr);
	if (mpath)
		spin_lock_bh(&mpath->state_lock);
	else
		goto fail;
	if (!(mpath->flags & MESH_PATH_ACTIVE)) {
		spin_unlock_bh(&mpath->state_lock);
		goto fail;
	}
	/* copy out the next hop address while holding state_lock */
	memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
	spin_unlock_bh(&mpath->state_lock);
	--ttl;
	flags = PREP_IE_FLAGS(prep_elem);
	lifetime = PREP_IE_LIFETIME(prep_elem);
	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
	target_addr = PREP_IE_TARGET_ADDR(prep_elem);
	target_sn = PREP_IE_TARGET_SN(prep_elem);
	orig_sn = PREP_IE_ORIG_SN(prep_elem);

	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, orig_sn, 0,
			       target_addr, target_sn, next_hop, hopcount,
			       ttl, lifetime, metric, 0, sdata);
	rcu_read_unlock();

	sdata->u.mesh.mshstats.fwded_unicast++;
	sdata->u.mesh.mshstats.fwded_frames++;
	return;

fail:
	rcu_read_unlock();
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/* Handle a received PERR element: deactivate the affected path when the
 * report comes from our current next hop, and propagate the PERR.
 */
static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    const u8 *perr_elem)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	u8 ttl;
	const u8 *ta, *target_addr;
	u32 target_sn;
	u16 target_rcode;

	ta = mgmt->sa;
	ttl = PERR_IE_TTL(perr_elem);
	if (ttl <= 1) {
		ifmsh->mshstats.dropped_frames_ttl++;
		return;
	}
	ttl--;
	target_addr = PERR_IE_TARGET_ADDR(perr_elem);
	target_sn = PERR_IE_TARGET_SN(perr_elem);
	target_rcode = PERR_IE_TARGET_RCODE(perr_elem);

	rcu_read_lock();
	mpath = mesh_path_lookup(sdata, target_addr);
	if (mpath) {
		struct sta_info *sta;

		spin_lock_bh(&mpath->state_lock);
		sta = next_hop_deref_protected(mpath);
		/* only accept the PERR if it comes from our next hop and
		 * carries a newer (or unknown, 0) SN
		 */
		if (mpath->flags & MESH_PATH_ACTIVE &&
		    ether_addr_equal(ta, sta->sta.addr) &&
		    !(mpath->flags & MESH_PATH_FIXED) &&
		    (!(mpath->flags & MESH_PATH_SN_VALID) ||
		    SN_GT(target_sn, mpath->sn) || target_sn == 0)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			if (target_sn != 0)
				mpath->sn = target_sn;
			else
				mpath->sn += 1;
			spin_unlock_bh(&mpath->state_lock);
			if (!ifmsh->mshcfg.dot11MeshForwarding)
				goto endperr;
			mesh_path_error_tx(sdata, ttl, target_addr,
					   target_sn, target_rcode,
					   broadcast_addr);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
endperr:
	rcu_read_unlock();
}

/* Handle a received RANN element: (re)learn the path to the announcing
 * root STA, trigger a refresh PREQ towards it when due, and re-broadcast
 * the RANN when forwarding is enabled.
 */
static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    const struct ieee80211_rann_ie *rann)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct mesh_path *mpath;
	u8 ttl, flags, hopcount;
	const u8 *orig_addr;
	u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
	bool root_is_gate;

	ttl = rann->rann_ttl;
	flags = rann->rann_flags;
	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
	orig_addr = rann->rann_addr;
	orig_sn = le32_to_cpu(rann->rann_seq);
	interval = le32_to_cpu(rann->rann_interval);
	hopcount = rann->rann_hopcount;
	hopcount++;
	orig_metric = le32_to_cpu(rann->rann_metric);

	/* Ignore our own RANNs */
	if (ether_addr_equal(orig_addr, sdata->vif.addr))
		return;

	mhwmp_dbg(sdata,
		  "received RANN from %pM via neighbour %pM (is_gate=%d)\n",
		  orig_addr, mgmt->sa, root_is_gate);

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return;
	}

	last_hop_metric = airtime_link_metric_get(local, sta);
	/* saturate rather than wrap the accumulated metric */
	new_metric = orig_metric + last_hop_metric;
	if (new_metric < orig_metric)
		new_metric = MAX_METRIC;

	mpath = mesh_path_lookup(sdata, orig_addr);
	if (!mpath) {
		mpath = mesh_path_add(sdata, orig_addr);
		if (IS_ERR(mpath)) {
			rcu_read_unlock();
			sdata->u.mesh.mshstats.dropped_frames_no_route++;
			return;
		}
	}

	/* drop RANNs that are neither newer nor better than what we have */
	if (!(SN_LT(mpath->sn, orig_sn)) &&
	    !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
		rcu_read_unlock();
		return;
	}

	if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
	     (time_after(jiffies, mpath->last_preq_to_root +
				  root_path_confirmation_jiffies(sdata)) ||
	     time_before(jiffies, mpath->last_preq_to_root))) &&
	     !(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) {
		mhwmp_dbg(sdata,
			  "time to refresh root mpath %pM\n",
			  orig_addr);
		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
		mpath->last_preq_to_root = jiffies;
	}

	mpath->sn = orig_sn;
	mpath->rann_metric = new_metric;
	mpath->is_root = true;
	/* Recording RANNs sender address to send individually
	 * addressed PREQs destined for root mesh STA */
	memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);

	if (root_is_gate)
		mesh_path_add_gate(mpath);

	if (ttl <= 1) {
		ifmsh->mshstats.dropped_frames_ttl++;
		rcu_read_unlock();
		return;
	}
	ttl--;

	if (ifmsh->mshcfg.dot11MeshForwarding) {
		mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
				       orig_sn, 0, NULL, 0, broadcast_addr,
				       hopcount, ttl, interval,
				       new_metric, 0, sdata);
	}

	rcu_read_unlock();
}


/* Entry point for received HWMP action frames: validate, parse the
 * elements and dispatch to the per-element processors above.
 */
void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
			    struct ieee80211_mgmt *mgmt, size_t len)
{
	struct ieee802_11_elems *elems;
	size_t baselen;
	u32 path_metric;
	struct sta_info *sta;

	/* need action_code */
	if (len < IEEE80211_MIN_ACTION_SIZE(mesh_action))
		return;

	/* only accept HWMP frames from established peers */
	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	baselen = mgmt->u.action.mesh_action.variable - (u8 *)mgmt;
	elems = ieee802_11_parse_elems(mgmt->u.action.mesh_action.variable,
				       len - baselen,
				       IEEE80211_FTYPE_MGMT |
				       IEEE80211_STYPE_ACTION,
				       NULL);
	if (!elems)
		return;

	if (elems->preq) {
		if (elems->preq_len != 37)
			/* Right now we support just 1 destination and no AE */
			goto free;
		path_metric = hwmp_route_info_get(sdata, mgmt, elems->preq,
						  MPATH_PREQ);
		if (path_metric)
			hwmp_preq_frame_process(sdata, mgmt, elems->preq,
						path_metric);
	}
	if (elems->prep) {
		if (elems->prep_len != 31)
			/* Right now we support no AE */
			goto free;
		path_metric = hwmp_route_info_get(sdata, mgmt, elems->prep,
						  MPATH_PREP);
		if (path_metric)
			hwmp_prep_frame_process(sdata, mgmt, elems->prep,
						path_metric);
	}
	if (elems->perr) {
		if (elems->perr_len != 15)
			/* Right now we support only one destination per PERR */
			goto free;
		hwmp_perr_frame_process(sdata, mgmt, elems->perr);
	}
	if (elems->rann)
		hwmp_rann_frame_process(sdata, mgmt, elems->rann);
free:
	kfree(elems);
}

/**
 * mesh_queue_preq - queue a PREQ to a given destination
 *
 * @mpath: mesh path to discover
 * @flags: special attributes of the PREQ to be sent
 *
 * Locking: the function must be called from within a rcu read lock block.
 *
 */
static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;

	preq_node = kmalloc_obj(struct mesh_preq_queue, GFP_ATOMIC);
	if (!preq_node) {
		mhwmp_dbg(sdata, "could not allocate PREQ node\n");
		return;
	}

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		kfree(preq_node);
		if (printk_ratelimit())
			mhwmp_dbg(sdata, "PREQ node queue full\n");
		return;
	}

	/* state_lock nests inside mesh_preq_queue_lock */
	spin_lock(&mpath->state_lock);
	if (mpath->flags & MESH_PATH_REQ_QUEUED) {
		/* a PREQ for this destination is already queued */
		spin_unlock(&mpath->state_lock);
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		kfree(preq_node);
		return;
	}

	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
	preq_node->flags = flags;

	mpath->flags |= MESH_PATH_REQ_QUEUED;
	spin_unlock(&mpath->state_lock);

	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
	++ifmsh->preq_queue_len;
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);

	if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
		wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);

	else if (time_before(jiffies, ifmsh->last_preq)) {
		/* avoid long wait if did not send preqs for a long time
		 * and jiffies wrapped around
		 */
		ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
		wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
	} else
		mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
						min_preq_int_jiff(sdata));
}

/**
 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 *
 * @sdata: local mesh subif
 */
void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;
	struct mesh_path *mpath;
	u8 ttl, target_flags = 0;
	const u8 *da;
	u32 lifetime;

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	if (!ifmsh->preq_queue_len ||
		time_before(jiffies, ifmsh->last_preq +
				min_preq_int_jiff(sdata))) {
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		return;
	}

	preq_node = list_first_entry(&ifmsh->preq_queue.list,
			struct mesh_preq_queue, list);
	list_del(&preq_node->list);
	--ifmsh->preq_queue_len;
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);

	rcu_read_lock();
	mpath = mesh_path_lookup(sdata, preq_node->dst);
	if (!mpath)
		goto enddiscovery;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) {
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}
	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
	if (preq_node->flags & PREQ_Q_F_START) {
		if (mpath->flags & MESH_PATH_RESOLVING) {
			/* discovery already in progress */
			spin_unlock_bh(&mpath->state_lock);
			goto enddiscovery;
		} else {
			mpath->flags &= ~MESH_PATH_RESOLVED;
			mpath->flags |= MESH_PATH_RESOLVING;
			mpath->discovery_retries = 0;
			mpath->discovery_timeout = disc_timeout_jiff(sdata);
		}
	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
			mpath->flags & MESH_PATH_RESOLVED) {
		mpath->flags &= ~MESH_PATH_RESOLVING;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	ifmsh->last_preq = jiffies;

	if (time_after(jiffies, ifmsh->last_sn_update +
				net_traversal_jiffies(sdata)) ||
	    time_before(jiffies, ifmsh->last_sn_update)) {
		++ifmsh->sn;
		sdata->u.mesh.last_sn_update = jiffies;
	}
	lifetime = default_lifetime(sdata);
	ttl = sdata->u.mesh.mshcfg.element_ttl;
	if (ttl == 0) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	if (preq_node->flags & PREQ_Q_F_REFRESH)
		target_flags |= IEEE80211_PREQ_TO_FLAG;
	else
		target_flags &= ~IEEE80211_PREQ_TO_FLAG;

	spin_unlock_bh(&mpath->state_lock);
	da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
	mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
			       target_flags, mpath->dst, mpath->sn, da, 0,
			       ttl, lifetime, 0, ifmsh->preq_id++, sdata);

	spin_lock_bh(&mpath->state_lock);
	if (!(mpath->flags & MESH_PATH_DELETED))
		mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
	spin_unlock_bh(&mpath->state_lock);

enddiscovery:
	rcu_read_unlock();
	kfree(preq_node);
}

/**
 * mesh_nexthop_resolve - lookup next hop; conditionally start path discovery
 *
 * @sdata: network subif the frame will be sent through
 * @skb: 802.11 frame to be sent
 *
 * Lookup next hop for given skb and start path discovery if no
 * forwarding information is found.
 *
 * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
 * skb is freed here if no mpath could be allocated.
 */
int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
			 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mesh_path *mpath;
	struct sk_buff *skb_to_free = NULL;
	u8 *target_addr = hdr->addr3;

	/* Nulls are only sent to peers for PS and should be pre-addressed */
	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
		return 0;

	/* Allow injected packets to bypass mesh routing */
	if (info->control.flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP)
		return 0;

	if (!mesh_nexthop_lookup(sdata, skb))
		return 0;

	/* no nexthop found, start resolving */
	mpath = mesh_path_lookup(sdata, target_addr);
	if (!mpath) {
		mpath = mesh_path_add(sdata, target_addr);
		if (IS_ERR(mpath)) {
			mesh_path_discard_frame(sdata, skb);
			return PTR_ERR(mpath);
		}
	}

	if (!(mpath->flags & MESH_PATH_RESOLVING) &&
	    mesh_path_sel_is_hwmp(sdata))
		mesh_queue_preq(mpath, PREQ_Q_F_START);

	/* bound the per-path frame queue; drop the oldest frame */
	if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
		skb_to_free = skb_dequeue(&mpath->frame_queue);

	info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
	ieee80211_set_qos_hdr(sdata, skb);
	skb_queue_tail(&mpath->frame_queue, skb);
	if (skb_to_free)
		mesh_path_discard_frame(sdata, skb_to_free);

	return -ENOENT;
}

/**
 * mesh_nexthop_lookup_nolearn - try to set next hop without path discovery
 * @skb: 802.11 frame to be sent
 * @sdata: network subif the frame will be sent through
 *
 * Check if the meshDA (addr3) of a unicast frame is a direct neighbor.
1208 * And if so, set the RA (addr1) to it to transmit to this node directly, 1209 * avoiding PREQ/PREP path discovery. 1210 * 1211 * Returns: 0 if the next hop was found and -ENOENT otherwise. 1212 */ 1213 static int mesh_nexthop_lookup_nolearn(struct ieee80211_sub_if_data *sdata, 1214 struct sk_buff *skb) 1215 { 1216 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1217 struct sta_info *sta; 1218 1219 if (is_multicast_ether_addr(hdr->addr1)) 1220 return -ENOENT; 1221 1222 rcu_read_lock(); 1223 sta = sta_info_get(sdata, hdr->addr3); 1224 1225 if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) { 1226 rcu_read_unlock(); 1227 return -ENOENT; 1228 } 1229 rcu_read_unlock(); 1230 1231 memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); 1232 memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); 1233 return 0; 1234 } 1235 1236 void mesh_path_refresh(struct ieee80211_sub_if_data *sdata, 1237 struct mesh_path *mpath, const u8 *addr) 1238 { 1239 if (mpath->flags & (MESH_PATH_REQ_QUEUED | MESH_PATH_FIXED | 1240 MESH_PATH_RESOLVING)) 1241 return; 1242 1243 if (time_after(jiffies, 1244 mpath->exp_time - 1245 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && 1246 (!addr || ether_addr_equal(sdata->vif.addr, addr))) 1247 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); 1248 } 1249 1250 /** 1251 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling 1252 * this function is considered "using" the associated mpath, so preempt a path 1253 * refresh if this mpath expires soon. 1254 * 1255 * @sdata: network subif the frame will be sent through 1256 * @skb: 802.11 frame to be sent 1257 * 1258 * Returns: 0 if the next hop was found. Nonzero otherwise. 
1259 */ 1260 int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata, 1261 struct sk_buff *skb) 1262 { 1263 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 1264 struct mesh_path *mpath; 1265 struct sta_info *next_hop; 1266 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1267 u8 *target_addr = hdr->addr3; 1268 1269 if (ifmsh->mshcfg.dot11MeshNolearn && 1270 !mesh_nexthop_lookup_nolearn(sdata, skb)) 1271 return 0; 1272 1273 mpath = mesh_path_lookup(sdata, target_addr); 1274 if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE)) 1275 return -ENOENT; 1276 1277 mesh_path_refresh(sdata, mpath, hdr->addr4); 1278 1279 next_hop = rcu_dereference(mpath->next_hop); 1280 if (next_hop) { 1281 memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN); 1282 memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); 1283 ieee80211_mps_set_frame_flags(sdata, next_hop, hdr); 1284 if (ieee80211_hw_check(&sdata->local->hw, SUPPORT_FAST_XMIT)) 1285 mesh_fast_tx_cache(sdata, skb, mpath); 1286 return 0; 1287 } 1288 1289 return -ENOENT; 1290 } 1291 1292 void mesh_path_timer(struct timer_list *t) 1293 { 1294 struct mesh_path *mpath = timer_container_of(mpath, t, timer); 1295 struct ieee80211_sub_if_data *sdata = mpath->sdata; 1296 int ret; 1297 1298 if (sdata->local->quiescing) 1299 return; 1300 1301 spin_lock_bh(&mpath->state_lock); 1302 if (mpath->flags & MESH_PATH_RESOLVED || 1303 (!(mpath->flags & MESH_PATH_RESOLVING))) { 1304 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); 1305 spin_unlock_bh(&mpath->state_lock); 1306 } else if (mpath->discovery_retries < max_preq_retries(sdata)) { 1307 ++mpath->discovery_retries; 1308 mpath->discovery_timeout *= 2; 1309 mpath->flags &= ~MESH_PATH_REQ_QUEUED; 1310 spin_unlock_bh(&mpath->state_lock); 1311 mesh_queue_preq(mpath, 0); 1312 } else { 1313 mpath->flags &= ~(MESH_PATH_RESOLVING | 1314 MESH_PATH_RESOLVED | 1315 MESH_PATH_REQ_QUEUED); 1316 mpath->exp_time = jiffies; 1317 spin_unlock_bh(&mpath->state_lock); 1318 if (!mpath->is_gate && 
mesh_gate_num(sdata) > 0) { 1319 ret = mesh_path_send_to_gates(mpath); 1320 if (ret) 1321 mhwmp_dbg(sdata, "no gate was reachable\n"); 1322 } else 1323 mesh_path_flush_pending(mpath); 1324 } 1325 } 1326 1327 void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata) 1328 { 1329 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 1330 u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval; 1331 u8 flags, target_flags = 0; 1332 1333 flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol) 1334 ? RANN_FLAG_IS_GATE : 0; 1335 1336 switch (ifmsh->mshcfg.dot11MeshHWMPRootMode) { 1337 case IEEE80211_PROACTIVE_RANN: 1338 mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr, 1339 ++ifmsh->sn, 0, NULL, 0, broadcast_addr, 1340 0, ifmsh->mshcfg.element_ttl, 1341 interval, 0, 0, sdata); 1342 break; 1343 case IEEE80211_PROACTIVE_PREQ_WITH_PREP: 1344 flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG; 1345 fallthrough; 1346 case IEEE80211_PROACTIVE_PREQ_NO_PREP: 1347 interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout; 1348 target_flags |= IEEE80211_PREQ_TO_FLAG | 1349 IEEE80211_PREQ_USN_FLAG; 1350 mesh_path_sel_frame_tx(MPATH_PREQ, flags, sdata->vif.addr, 1351 ++ifmsh->sn, target_flags, 1352 (u8 *) broadcast_addr, 0, broadcast_addr, 1353 0, ifmsh->mshcfg.element_ttl, interval, 1354 0, ifmsh->preq_id++, sdata); 1355 break; 1356 default: 1357 mhwmp_dbg(sdata, "Proactive mechanism not supported\n"); 1358 return; 1359 } 1360 } 1361