// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2020 Intel Corporation
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <linux/rtnetlink.h>

#include <net/codel.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "sta_info.h"
#include "debugfs_sta.h"
#include "mesh.h"
#include "wme.h"

/**
 * DOC: STA information lifetime rules
 *
 * STA info structures (&struct sta_info) are managed in a hash table
 * for faster lookup and a list for iteration. They are managed using
 * RCU, i.e. access to the list and hash table is protected by RCU.
 *
 * Upon allocating a STA info structure with sta_info_alloc(), the caller
 * owns that structure. It must then insert it into the hash table using
 * either sta_info_insert() or sta_info_insert_rcu(); only in the latter
 * case (which acquires an rcu read section but must not be called from
 * within one) will the pointer still be valid after the call. Note that
 * the caller may not do much with the STA info before inserting it, in
 * particular, it may not start any mesh peer link management or add
 * encryption keys.
 *
 * When the insertion fails (sta_info_insert() returns non-zero), the
 * structure will have been freed by sta_info_insert()!
 *
 * Station entries are added by mac80211 when you establish a link with a
 * peer. This means different things for the different types of interfaces
 * we support. For a regular station this means we add the AP sta when we
 * receive an association response from the AP. For IBSS this occurs when
 * we get to know about a peer on the same IBSS. For WDS we add the sta for
 * the peer immediately upon device open. When using AP mode we add stations
 * for each respective station upon request from userspace through nl80211.
 *
 * In order to remove a STA info structure, various sta_info_destroy_*()
 * calls are available.
 *
 * There is no concept of ownership on a STA entry, each structure is
 * owned by the global hash table/list until it is removed. All users of
 * the structure need to be RCU protected so that the structure won't be
 * freed before they are done using it.
 */
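
/*
 * Editorial example (illustrative sketch only, not part of the original
 * file): a caller following the lifetime rules above would typically do
 *
 *	sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
 *	if (!sta)
 *		return -ENOMEM;
 *	... set up rates/capabilities on sta ...
 *	err = sta_info_insert(sta);
 *	if (err)
 *		return err;	(sta has already been freed on failure)
 *
 * From here on the entry may only be used under RCU protection or while
 * holding the STA mutex, as described above.
 */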

static const struct rhashtable_params sta_rht_params = {
	.nelem_hint = 3, /* start small */
	.automatic_shrinking = true,
	.head_offset = offsetof(struct sta_info, hash_node),
	.key_offset = offsetof(struct sta_info, addr),
	.key_len = ETH_ALEN,
	.max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
};

/* Caller must hold local->sta_mtx */
static int sta_info_hash_del(struct ieee80211_local *local,
			     struct sta_info *sta)
{
	return rhltable_remove(&local->sta_hash, &sta->hash_node,
			       sta_rht_params);
}

static void __cleanup_single_sta(struct sta_info *sta)
{
	int ac, i;
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;

	if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
	    test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
	    test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
		if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
		    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
			ps = &sdata->bss->ps;
		else if (ieee80211_vif_is_mesh(&sdata->vif))
			ps = &sdata->u.mesh.ps;
		else
			return;

		clear_sta_flag(sta, WLAN_STA_PS_STA);
		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
		clear_sta_flag(sta, WLAN_STA_PS_DELIVER);

		atomic_dec(&ps->num_sta_ps);
	}

	if (sta->sta.txq[0]) {
		for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
			struct txq_info *txqi;

			if (!sta->sta.txq[i])
				continue;

			txqi = to_txq_info(sta->sta.txq[i]);

			ieee80211_txq_purge(local, txqi);
		}
	}

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
		ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]);
		ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]);
	}

	if (ieee80211_vif_is_mesh(&sdata->vif))
		mesh_sta_cleanup(sta);

	cancel_work_sync(&sta->drv_deliver_wk);

	/*
	 * Destroy aggregation state here. It would be nice to wait for the
	 * driver to finish aggregation stop and then clean up, but for now
	 * drivers have to handle aggregation stop being requested, followed
	 * directly by station destruction.
	 */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		kfree(sta->ampdu_mlme.tid_start_tx[i]);
		tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
		if (!tid_tx)
			continue;
		ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending);
		kfree(tid_tx);
	}
}

static void cleanup_single_sta(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;

	__cleanup_single_sta(sta);
	sta_info_free(local, sta);
}

struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local,
					 const u8 *addr)
{
	return rhltable_lookup(&local->sta_hash, addr, sta_rht_params);
}

/* protected by RCU */
struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
			      const u8 *addr)
{
	struct ieee80211_local *local = sdata->local;
	struct rhlist_head *tmp;
	struct sta_info *sta;

	rcu_read_lock();
	for_each_sta_info(local, addr, sta, tmp) {
		if (sta->sdata == sdata) {
			rcu_read_unlock();
			/* this is safe as the caller must already hold
			 * another rcu read section or the mutex
			 */
			return sta;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * Get sta info either from the specified interface
 * or from one of its vlans
 */
struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
				  const u8 *addr)
{
	struct ieee80211_local *local = sdata->local;
	struct rhlist_head *tmp;
	struct sta_info *sta;

	rcu_read_lock();
	for_each_sta_info(local, addr, sta, tmp) {
		if (sta->sdata == sdata ||
		    (sta->sdata->bss && sta->sdata->bss == sdata->bss)) {
			rcu_read_unlock();
			/* this is safe as the caller must already hold
			 * another rcu read section or the mutex
			 */
			return sta;
		}
	}
	rcu_read_unlock();
	return NULL;
}

struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local,
				       const u8 *sta_addr, const u8 *vif_addr)
{
	struct rhlist_head *tmp;
	struct sta_info *sta;

	for_each_sta_info(local, sta_addr, sta, tmp) {
		if (ether_addr_equal(vif_addr, sta->sdata->vif.addr))
			return sta;
	}

	return NULL;
}

struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
				     int idx)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	int i = 0;

	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sdata != sta->sdata)
			continue;
		if (i < idx) {
			++i;
			continue;
		}
		return sta;
	}

	return NULL;
}

/**
 * sta_info_free - free STA
 *
 * @local: pointer to the global information
 * @sta: STA info to free
 *
 * This function must undo everything done by sta_info_alloc()
 * that may happen before sta_info_insert(). It may only be
 * called when sta_info_insert() has not been attempted (and
 * if that fails, the station is freed anyway.)
 */
void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
{
	if (sta->rate_ctrl)
		rate_control_free_sta(sta);

	sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);

	if (sta->sta.txq[0])
		kfree(to_txq_info(sta->sta.txq[0]));
	kfree(rcu_dereference_raw(sta->sta.rates));
#ifdef CONFIG_MAC80211_MESH
	kfree(sta->mesh);
#endif
	free_percpu(sta->pcpu_rx_stats);
	kfree(sta);
}

/* Caller must hold local->sta_mtx */
static int sta_info_hash_add(struct ieee80211_local *local,
			     struct sta_info *sta)
{
	return rhltable_insert(&local->sta_hash, &sta->hash_node,
			       sta_rht_params);
}

static void sta_deliver_ps_frames(struct work_struct *wk)
{
	struct sta_info *sta;

	sta = container_of(wk, struct sta_info, drv_deliver_wk);

	if (sta->dead)
		return;

	local_bh_disable();
	if (!test_sta_flag(sta, WLAN_STA_PS_STA))
		ieee80211_sta_ps_deliver_wakeup(sta);
	else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL))
		ieee80211_sta_ps_deliver_poll_response(sta);
	else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD))
		ieee80211_sta_ps_deliver_uapsd(sta);
	local_bh_enable();
}

static int sta_prepare_rate_control(struct ieee80211_local *local,
				    struct sta_info *sta, gfp_t gfp)
{
	if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
		return 0;

	sta->rate_ctrl = local->rate_ctrl;
	sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
						     sta, gfp);
	if (!sta->rate_ctrl_priv)
		return -ENOMEM;

	return 0;
}

struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
				const u8 *addr, gfp_t gfp)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_hw *hw = &local->hw;
	struct sta_info *sta;
	int i;

	sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
	if (!sta)
		return NULL;

	if (ieee80211_hw_check(hw, USES_RSS)) {
		sta->pcpu_rx_stats =
			alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
		if (!sta->pcpu_rx_stats)
			goto free;
	}

	spin_lock_init(&sta->lock);
	spin_lock_init(&sta->ps_lock);
	INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames);
	INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
	mutex_init(&sta->ampdu_mlme.mtx);
#ifdef CONFIG_MAC80211_MESH
	if (ieee80211_vif_is_mesh(&sdata->vif)) {
		sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);
		if (!sta->mesh)
			goto free;
		sta->mesh->plink_sta = sta;
		spin_lock_init(&sta->mesh->plink_lock);
		if (ieee80211_vif_is_mesh(&sdata->vif) &&
		    !sdata->u.mesh.user_mpm)
			timer_setup(&sta->mesh->plink_timer, mesh_plink_timer,
				    0);
		sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
	}
#endif

	memcpy(sta->addr, addr, ETH_ALEN);
	memcpy(sta->sta.addr, addr, ETH_ALEN);
	sta->sta.max_rx_aggregation_subframes =
		local->hw.max_rx_aggregation_subframes;

	/* Extended Key ID needs to install keys for keyid 0 and 1 Rx-only.
	 * The Tx path starts to use a key as soon as the key slot ptk_idx
	 * references to is not NULL. To not use the initial Rx-only key
	 * prematurely for Tx initialize ptk_idx to an impossible PTK keyid
	 * which always will refer to a NULL key.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(sta->ptk) <= INVALID_PTK_KEYIDX);
	sta->ptk_idx = INVALID_PTK_KEYIDX;

	sta->local = local;
	sta->sdata = sdata;
	sta->rx_stats.last_rx = jiffies;

	u64_stats_init(&sta->rx_stats.syncp);

	sta->sta_state = IEEE80211_STA_NONE;

	/* Mark TID as unreserved */
	sta->reserved_tid = IEEE80211_TID_UNRESERVED;

	sta->last_connected = ktime_get_seconds();
	ewma_signal_init(&sta->rx_stats_avg.signal);
	ewma_avg_signal_init(&sta->status_stats.avg_ack_signal);
	for (i = 0; i < ARRAY_SIZE(sta->rx_stats_avg.chain_signal); i++)
		ewma_signal_init(&sta->rx_stats_avg.chain_signal[i]);

	if (local->ops->wake_tx_queue) {
		void *txq_data;
		int size = sizeof(struct txq_info) +
			   ALIGN(hw->txq_data_size, sizeof(void *));

		txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp);
		if (!txq_data)
			goto free;

		for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
			struct txq_info *txq = txq_data + i * size;

			/* might not do anything for the bufferable MMPDU TXQ */
			ieee80211_txq_init(sdata, sta, txq, i);
		}
	}

	if (sta_prepare_rate_control(local, sta, gfp))
		goto free_txq;

	sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		skb_queue_head_init(&sta->ps_tx_buf[i]);
		skb_queue_head_init(&sta->tx_filtered[i]);
		sta->airtime[i].deficit = sta->airtime_weight;
		atomic_set(&sta->airtime[i].aql_tx_pending, 0);
		sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i];
		sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i];
	}

	for (i = 0; i < IEEE80211_NUM_TIDS; i++)
		sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);

	for (i = 0; i < NUM_NL80211_BANDS; i++) {
		u32 mandatory = 0;
		int r;

		if (!hw->wiphy->bands[i])
			continue;

		switch (i) {
		case NL80211_BAND_2GHZ:
			/*
			 * We use both here, even if we cannot really know for
			 * sure the station will support both, but the only use
			 * for this is when we don't know anything yet and send
			 * management frames, and then we'll pick the lowest
			 * possible rate anyway.
			 * If we don't include _G here, we cannot find a rate
			 * in P2P, and thus trigger the WARN_ONCE() in rate.c
			 */
			mandatory = IEEE80211_RATE_MANDATORY_B |
				    IEEE80211_RATE_MANDATORY_G;
			break;
		case NL80211_BAND_5GHZ:
			mandatory = IEEE80211_RATE_MANDATORY_A;
			break;
		case NL80211_BAND_60GHZ:
			WARN_ON(1);
			mandatory = 0;
			break;
		}

		for (r = 0; r < hw->wiphy->bands[i]->n_bitrates; r++) {
			struct ieee80211_rate *rate;

			rate = &hw->wiphy->bands[i]->bitrates[r];

			if (!(rate->flags & mandatory))
				continue;
			sta->sta.supp_rates[i] |= BIT(r);
		}
	}

	sta->sta.smps_mode = IEEE80211_SMPS_OFF;
	if (sdata->vif.type == NL80211_IFTYPE_AP ||
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
		struct ieee80211_supported_band *sband;
		u8 smps;

		sband = ieee80211_get_sband(sdata);
		if (!sband)
			goto free_txq;

		smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >>
			IEEE80211_HT_CAP_SM_PS_SHIFT;
		/*
		 * Assume that hostapd advertises our caps in the beacon and
		 * this is the known_smps_mode for a station that just associated
		 */
		switch (smps) {
		case WLAN_HT_SMPS_CONTROL_DISABLED:
			sta->known_smps_mode = IEEE80211_SMPS_OFF;
			break;
		case WLAN_HT_SMPS_CONTROL_STATIC:
			sta->known_smps_mode = IEEE80211_SMPS_STATIC;
			break;
		case WLAN_HT_SMPS_CONTROL_DYNAMIC:
			sta->known_smps_mode = IEEE80211_SMPS_DYNAMIC;
			break;
		default:
			WARN_ON(1);
		}
	}

	sta->sta.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;

	sta->cparams.ce_threshold = CODEL_DISABLED_THRESHOLD;
	sta->cparams.target = MS2TIME(20);
	sta->cparams.interval = MS2TIME(100);
	sta->cparams.ecn = true;

	sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);

	return sta;

free_txq:
	if (sta->sta.txq[0])
		kfree(to_txq_info(sta->sta.txq[0]));
free:
	free_percpu(sta->pcpu_rx_stats);
#ifdef CONFIG_MAC80211_MESH
	kfree(sta->mesh);
#endif
	kfree(sta);
	return NULL;
}

static int sta_info_insert_check(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;

	/*
	 * Can't be a WARN_ON because it can be triggered through a race:
	 * something inserts a STA (on one CPU) without holding the RTNL
	 * and another CPU turns off the net device.
	 */
	if (unlikely(!ieee80211_sdata_running(sdata)))
		return -ENETDOWN;

	if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) ||
		    is_multicast_ether_addr(sta->sta.addr)))
		return -EINVAL;

	/* The RCU read lock is required by rhashtable due to
	 * asynchronous resize/rehash. We also require the mutex
	 * for correctness.
	 */
	rcu_read_lock();
	lockdep_assert_held(&sdata->local->sta_mtx);
	if (ieee80211_hw_check(&sdata->local->hw, NEEDS_UNIQUE_STA_ADDR) &&
	    ieee80211_find_sta_by_ifaddr(&sdata->local->hw, sta->addr, NULL)) {
		rcu_read_unlock();
		return -ENOTUNIQ;
	}
	rcu_read_unlock();

	return 0;
}

static int sta_info_insert_drv_state(struct ieee80211_local *local,
				     struct ieee80211_sub_if_data *sdata,
				     struct sta_info *sta)
{
	enum ieee80211_sta_state state;
	int err = 0;

	for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) {
		err = drv_sta_state(local, sdata, sta, state, state + 1);
		if (err)
			break;
	}

	if (!err) {
		/*
		 * Drivers using legacy sta_add/sta_remove callbacks only
		 * get uploaded set to true after sta_add is called.
		 */
		if (!local->ops->sta_add)
			sta->uploaded = true;
		return 0;
	}

	if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		sdata_info(sdata,
			   "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n",
			   sta->sta.addr, state + 1, err);
		err = 0;
	}

	/* unwind on error */
	for (; state > IEEE80211_STA_NOTEXIST; state--)
		WARN_ON(drv_sta_state(local, sdata, sta, state, state - 1));

	return err;
}

static void
ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	bool allow_p2p_go_ps = sdata->vif.p2p;
	struct sta_info *sta;

	rcu_read_lock();
	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sdata != sta->sdata ||
		    !test_sta_flag(sta, WLAN_STA_ASSOC))
			continue;
		if (!sta->sta.support_p2p_ps) {
			allow_p2p_go_ps = false;
			break;
		}
	}
	rcu_read_unlock();

	if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) {
		sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps;
		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_P2P_PS);
	}
}

/*
 * should be called with sta_mtx locked
 * this function replaces the mutex lock
 * with an RCU lock
 */
static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
{
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct station_info *sinfo = NULL;
	int err = 0;

	lockdep_assert_held(&local->sta_mtx);

	/* check if STA exists already */
	if (sta_info_get_bss(sdata, sta->sta.addr)) {
		err = -EEXIST;
		goto out_err;
	}

	sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
	if (!sinfo) {
		err = -ENOMEM;
		goto out_err;
	}

	local->num_sta++;
	local->sta_generation++;
	smp_mb();

	/* simplify things and don't accept BA sessions yet */
	set_sta_flag(sta, WLAN_STA_BLOCK_BA);

	/* make the station visible */
	err = sta_info_hash_add(local, sta);
	if (err)
		goto out_drop_sta;

	list_add_tail_rcu(&sta->list, &local->sta_list);

	/* notify driver */
	err = sta_info_insert_drv_state(local, sdata, sta);
	if (err)
		goto out_remove;

	set_sta_flag(sta, WLAN_STA_INSERTED);

	if (sta->sta_state >= IEEE80211_STA_ASSOC) {
		ieee80211_recalc_min_chandef(sta->sdata);
		if (!sta->sta.support_p2p_ps)
			ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
	}

	/* accept BA sessions now */
	clear_sta_flag(sta, WLAN_STA_BLOCK_BA);

	ieee80211_sta_debugfs_add(sta);
	rate_control_add_sta_debugfs(sta);

	sinfo->generation = local->sta_generation;
	cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
	kfree(sinfo);

	sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);

	/* move reference to rcu-protected */
	rcu_read_lock();
	mutex_unlock(&local->sta_mtx);

	if (ieee80211_vif_is_mesh(&sdata->vif))
		mesh_accept_plinks_update(sdata);

	return 0;
 out_remove:
	sta_info_hash_del(local, sta);
	list_del_rcu(&sta->list);
 out_drop_sta:
	local->num_sta--;
	synchronize_net();
	__cleanup_single_sta(sta);
 out_err:
	mutex_unlock(&local->sta_mtx);
	kfree(sinfo);
	rcu_read_lock();
	return err;
}

int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
{
	struct ieee80211_local *local = sta->local;
	int err;

	might_sleep();

	mutex_lock(&local->sta_mtx);

	err = sta_info_insert_check(sta);
	if (err) {
		mutex_unlock(&local->sta_mtx);
		rcu_read_lock();
		goto out_free;
	}

	err = sta_info_insert_finish(sta);
	if (err)
		goto out_free;

	return 0;
 out_free:
	sta_info_free(local, sta);
	return err;
}

int sta_info_insert(struct sta_info *sta)
{
	int err = sta_info_insert_rcu(sta);

	rcu_read_unlock();

	return err;
}

static inline void __bss_tim_set(u8 *tim, u16 id)
{
	/*
	 * This format has been mandated by the IEEE specifications,
	 * so this line may not be changed to use the __set_bit() format.
	 */
	tim[id / 8] |= (1 << (id % 8));
}

static inline void __bss_tim_clear(u8 *tim, u16 id)
{
	/*
	 * This format has been mandated by the IEEE specifications,
	 * so this line may not be changed to use the __clear_bit() format.
	 */
	tim[id / 8] &= ~(1 << (id % 8));
}

static inline bool __bss_tim_get(u8 *tim, u16 id)
{
	/*
	 * This format has been mandated by the IEEE specifications,
	 * so this line may not be changed to use the test_bit() format.
	 */
	return tim[id / 8] & (1 << (id % 8));
}
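
/*
 * Editorial worked example (not part of the original file): with the
 * byte/bit layout used above, a station with AID 13 maps to
 * tim[13 / 8] == tim[1] and bit (13 % 8) == 5, so __bss_tim_set(tim, 13)
 * does tim[1] |= 0x20, which is exactly the octet/bit position the
 * 802.11 TIM partial virtual bitmap expects.
 */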

static unsigned long ieee80211_tids_for_ac(int ac)
{
	/* If we ever support TIDs > 7, this obviously needs to be adjusted */
	switch (ac) {
	case IEEE80211_AC_VO:
		return BIT(6) | BIT(7);
	case IEEE80211_AC_VI:
		return BIT(4) | BIT(5);
	case IEEE80211_AC_BE:
		return BIT(0) | BIT(3);
	case IEEE80211_AC_BK:
		return BIT(1) | BIT(2);
	default:
		WARN_ON(1);
		return 0;
	}
}

static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
{
	struct ieee80211_local *local = sta->local;
	struct ps_data *ps;
	bool indicate_tim = false;
	u8 ignore_for_tim = sta->sta.uapsd_queues;
	int ac;
	u16 id = sta->sta.aid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
		if (WARN_ON_ONCE(!sta->sdata->bss))
			return;

		ps = &sta->sdata->bss->ps;
#ifdef CONFIG_MAC80211_MESH
	} else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
		ps = &sta->sdata->u.mesh.ps;
#endif
	} else {
		return;
	}

	/* No need to do anything if the driver does all */
	if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim)
		return;

	if (sta->dead)
		goto done;

	/*
	 * If all ACs are delivery-enabled then we should build
	 * the TIM bit for all ACs anyway; if only some are then
	 * we ignore those and build the TIM bit using only the
	 * non-enabled ones.
	 */
	if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1)
		ignore_for_tim = 0;

	if (ignore_pending)
		ignore_for_tim = BIT(IEEE80211_NUM_ACS) - 1;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		unsigned long tids;

		if (ignore_for_tim & ieee80211_ac_to_qos_mask[ac])
			continue;

		indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) ||
				!skb_queue_empty(&sta->ps_tx_buf[ac]);
		if (indicate_tim)
			break;

		tids = ieee80211_tids_for_ac(ac);

		indicate_tim |=
			sta->driver_buffered_tids & tids;
		indicate_tim |=
			sta->txq_buffered_tids & tids;
	}

 done:
	spin_lock_bh(&local->tim_lock);

	if (indicate_tim == __bss_tim_get(ps->tim, id))
		goto out_unlock;

	if (indicate_tim)
		__bss_tim_set(ps->tim, id);
	else
		__bss_tim_clear(ps->tim, id);

	if (local->ops->set_tim && !WARN_ON(sta->dead)) {
		local->tim_in_locked_section = true;
		drv_set_tim(local, &sta->sta, indicate_tim);
		local->tim_in_locked_section = false;
	}

 out_unlock:
	spin_unlock_bh(&local->tim_lock);
}

void sta_info_recalc_tim(struct sta_info *sta)
{
	__sta_info_recalc_tim(sta, false);
}

static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info;
	int timeout;

	if (!skb)
		return false;

	info = IEEE80211_SKB_CB(skb);

	/* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */
	timeout = (sta->listen_interval *
		   sta->sdata->vif.bss_conf.beacon_int *
		   32 / 15625) * HZ;
	if (timeout < STA_TX_BUFFER_EXPIRE)
		timeout = STA_TX_BUFFER_EXPIRE;
	return time_after(jiffies, info->control.jiffies + timeout);
}
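
/*
 * Editorial worked example (not part of the original file): the factor
 * 32 / 15625 above is simply 2 * 1024 / 1000000 reduced, i.e. two full
 * listen intervals (listen_interval beacon intervals, 1 TU = 1024 us)
 * converted to seconds.  For listen_interval = 5 and beacon_int = 100 TU
 * this gives 5 * 100 * 32 / 15625 = 1 second worth of jiffies, which is
 * then raised to STA_TX_BUFFER_EXPIRE if it came out smaller.
 */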

static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
						struct sta_info *sta, int ac)
{
	unsigned long flags;
	struct sk_buff *skb;

	/*
	 * First check for frames that should expire on the filtered
	 * queue. Frames here were rejected by the driver and are on
	 * a separate queue to avoid reordering with normal PS-buffered
	 * frames. They also aren't accounted for right now in the
	 * total_ps_buffered counter.
	 */
	for (;;) {
		spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
		skb = skb_peek(&sta->tx_filtered[ac]);
		if (sta_info_buffer_expired(sta, skb))
			skb = __skb_dequeue(&sta->tx_filtered[ac]);
		else
			skb = NULL;
		spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);

		/*
		 * Frames are queued in order, so if this one
		 * hasn't expired yet we can stop testing. If
		 * we actually reached the end of the queue we
		 * also need to stop, of course.
		 */
		if (!skb)
			break;
		ieee80211_free_txskb(&local->hw, skb);
	}

	/*
	 * Now also check the normal PS-buffered queue, this will
	 * only find something if the filtered queue was emptied
	 * since the filtered frames are all before the normal PS
	 * buffered frames.
	 */
	for (;;) {
		spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
		skb = skb_peek(&sta->ps_tx_buf[ac]);
		if (sta_info_buffer_expired(sta, skb))
			skb = __skb_dequeue(&sta->ps_tx_buf[ac]);
		else
			skb = NULL;
		spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);

		/*
		 * frames are queued in order, so if this one
		 * hasn't expired yet (or we reached the end of
		 * the queue) we can stop testing
		 */
		if (!skb)
			break;

		local->total_ps_buffered--;
		ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n",
		       sta->sta.addr);
		ieee80211_free_txskb(&local->hw, skb);
	}

	/*
	 * Finally, recalculate the TIM bit for this station -- it might
	 * now be clear because the station was too slow to retrieve its
	 * frames.
	 */
	sta_info_recalc_tim(sta);

	/*
	 * Return whether there are any frames still buffered, this is
	 * used to check whether the cleanup timer still needs to run,
	 * if there are no frames we don't need to rearm the timer.
	 */
	return !(skb_queue_empty(&sta->ps_tx_buf[ac]) &&
		 skb_queue_empty(&sta->tx_filtered[ac]));
}

static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
					     struct sta_info *sta)
{
	bool have_buffered = false;
	int ac;

	/* This is only necessary for stations on BSS/MBSS interfaces */
	if (!sta->sdata->bss &&
	    !ieee80211_vif_is_mesh(&sta->sdata->vif))
		return false;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		have_buffered |=
			sta_info_cleanup_expire_buffered_ac(local, sta, ac);

	return have_buffered;
}

static int __must_check __sta_info_destroy_part1(struct sta_info *sta)
{
	struct ieee80211_local *local;
	struct ieee80211_sub_if_data *sdata;
	int ret;

	might_sleep();

	if (!sta)
		return -ENOENT;

	local = sta->local;
	sdata = sta->sdata;

	lockdep_assert_held(&local->sta_mtx);

	/*
	 * Before removing the station from the driver and
	 * rate control, it might still start new aggregation
	 * sessions -- block that to make sure the tear-down
	 * will be sufficient.
	 */
	set_sta_flag(sta, WLAN_STA_BLOCK_BA);
	ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA);

	/*
	 * Before removing the station from the driver there might be pending
	 * rx frames on RSS queues sent prior to the disassociation - wait for
	 * all such frames to be processed.
	 */
	drv_sync_rx_queues(local, sta);

	ret = sta_info_hash_del(local, sta);
	if (WARN_ON(ret))
		return ret;

	/*
	 * for TDLS peers, make sure to return to the base channel before
	 * removal.
	 */
	if (test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) {
		drv_tdls_cancel_channel_switch(local, sdata, &sta->sta);
		clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL);
	}

	list_del_rcu(&sta->list);
	sta->removed = true;

	drv_sta_pre_rcu_remove(local, sta->sdata, sta);

	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
	    rcu_access_pointer(sdata->u.vlan.sta) == sta)
		RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);

	return 0;
}

static void __sta_info_destroy_part2(struct sta_info *sta)
{
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct station_info *sinfo;
	int ret;

	/*
	 * NOTE: This assumes at least synchronize_net() was done
	 *	 after _part1 and before _part2!
	 */

	might_sleep();
	lockdep_assert_held(&local->sta_mtx);

	while (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
		ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
		WARN_ON_ONCE(ret);
	}

	/* now keys can no longer be reached */
	ieee80211_free_sta_keys(local, sta);

	/* disable TIM bit - last chance to tell driver */
	__sta_info_recalc_tim(sta, true);

	sta->dead = true;

	local->num_sta--;
	local->sta_generation++;

	while (sta->sta_state > IEEE80211_STA_NONE) {
		ret = sta_info_move_state(sta, sta->sta_state - 1);
		if (ret) {
			WARN_ON_ONCE(1);
			break;
		}
	}

	if (sta->uploaded) {
		ret = drv_sta_state(local, sdata, sta, IEEE80211_STA_NONE,
				    IEEE80211_STA_NOTEXIST);
		WARN_ON_ONCE(ret != 0);
	}

	sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);

	sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
	if (sinfo)
		sta_set_sinfo(sta, sinfo, true);
	cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
	kfree(sinfo);

	ieee80211_sta_debugfs_remove(sta);

	cleanup_single_sta(sta);
}

int __must_check __sta_info_destroy(struct sta_info *sta)
{
	int err = __sta_info_destroy_part1(sta);

	if (err)
		return err;

	synchronize_net();

	__sta_info_destroy_part2(sta);

	return 0;
}

int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct sta_info *sta;
	int ret;

	mutex_lock(&sdata->local->sta_mtx);
	sta = sta_info_get(sdata, addr);
	ret = __sta_info_destroy(sta);
	mutex_unlock(&sdata->local->sta_mtx);

	return ret;
}

int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
			      const u8 *addr)
{
	struct sta_info *sta;
	int ret;

	mutex_lock(&sdata->local->sta_mtx);
	sta = sta_info_get_bss(sdata, addr);
	ret = __sta_info_destroy(sta);
	mutex_unlock(&sdata->local->sta_mtx);

	return ret;
}

static void sta_info_cleanup(struct timer_list *t)
{
	struct ieee80211_local *local = from_timer(local, t, sta_cleanup);
	struct sta_info *sta;
	bool timer_needed = false;

	rcu_read_lock();
	list_for_each_entry_rcu(sta, &local->sta_list, list)
		if (sta_info_cleanup_expire_buffered(local, sta))
			timer_needed = true;
	rcu_read_unlock();

	if (local->quiescing)
		return;

	if (!timer_needed)
		return;

	mod_timer(&local->sta_cleanup,
		  round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL));
}

int sta_info_init(struct ieee80211_local *local)
{
	int err;

	err = rhltable_init(&local->sta_hash, &sta_rht_params);
	if (err)
		return err;

	spin_lock_init(&local->tim_lock);
	mutex_init(&local->sta_mtx);
	INIT_LIST_HEAD(&local->sta_list);

	timer_setup(&local->sta_cleanup, sta_info_cleanup, 0);
	return 0;
}

void sta_info_stop(struct ieee80211_local *local)
{
	del_timer_sync(&local->sta_cleanup);
	rhltable_destroy(&local->sta_hash);
}


int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta, *tmp;
	LIST_HEAD(free_list);
	int ret = 0;

	might_sleep();

	WARN_ON(vlans && sdata->vif.type != NL80211_IFTYPE_AP);
	WARN_ON(vlans && !sdata->bss);

	mutex_lock(&local->sta_mtx);
	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
		if (sdata == sta->sdata ||
		    (vlans && sdata->bss == sta->sdata->bss)) {
			if (!WARN_ON(__sta_info_destroy_part1(sta)))
				list_add(&sta->free_list, &free_list);
			ret++;
		}
	}

	if (!list_empty(&free_list)) {
		synchronize_net();
		list_for_each_entry_safe(sta, tmp, &free_list, free_list)
			__sta_info_destroy_part2(sta);
	}
	mutex_unlock(&local->sta_mtx);

	return ret;
}

void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
			  unsigned long exp_time)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta, *tmp;

	mutex_lock(&local->sta_mtx);

	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
		unsigned long last_active = ieee80211_sta_last_active(sta);

		if (sdata != sta->sdata)
			continue;

		if (time_is_before_jiffies(last_active + exp_time)) {
			sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
				sta->sta.addr);

			if (ieee80211_vif_is_mesh(&sdata->vif) &&
			    test_sta_flag(sta, WLAN_STA_PS_STA))
				atomic_dec(&sdata->u.mesh.ps.num_sta_ps);

			WARN_ON(__sta_info_destroy(sta));
		}
	}

	mutex_unlock(&local->sta_mtx);
}

struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
						   const u8 *addr,
						   const u8 *localaddr)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct rhlist_head *tmp;
	struct sta_info *sta;

	/*
	 * Just return a random station if localaddr is NULL
	 * ... first in list.
	 */
	for_each_sta_info(local, addr, sta, tmp) {
		if (localaddr &&
		    !ether_addr_equal(sta->sdata->vif.addr, localaddr))
			continue;
		if (!sta->uploaded)
			return NULL;
		return &sta->sta;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_ifaddr);
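
/*
 * Editorial example (illustrative sketch only, not part of the original
 * file): both lookup helpers return pointers that are only guaranteed to
 * stay valid inside an RCU read-side section, e.g. in a driver:
 *
 *	rcu_read_lock();
 *	pubsta = ieee80211_find_sta(vif, addr);
 *	if (pubsta)
 *		... use pubsta here, do not keep the pointer around ...
 *	rcu_read_unlock();
 */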

struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
					 const u8 *addr)
{
	struct sta_info *sta;

	if (!vif)
		return NULL;

	sta = sta_info_get_bss(vif_to_sdata(vif), addr);
	if (!sta)
		return NULL;

	if (!sta->uploaded)
		return NULL;

	return &sta->sta;
}
EXPORT_SYMBOL(ieee80211_find_sta);

/* powersave support code */
void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct sk_buff_head pending;
	int filtered = 0, buffered = 0, ac, i;
	unsigned long flags;
	struct ps_data *ps;

	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
				     u.ap);

	if (sdata->vif.type == NL80211_IFTYPE_AP)
		ps = &sdata->bss->ps;
	else if (ieee80211_vif_is_mesh(&sdata->vif))
		ps = &sdata->u.mesh.ps;
	else
		return;

	clear_sta_flag(sta, WLAN_STA_SP);

	BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1);
	sta->driver_buffered_tids = 0;
	sta->txq_buffered_tids = 0;

	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);

	for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
		if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i]))
			continue;

		schedule_and_wake_txq(local, to_txq_info(sta->sta.txq[i]));
	}

	skb_queue_head_init(&pending);

	/* sync with ieee80211_tx_h_unicast_ps_buf */
	spin_lock(&sta->ps_lock);
	/* Send all buffered frames to the station */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		int count = skb_queue_len(&pending), tmp;

		spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
		skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
		spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
		tmp = skb_queue_len(&pending);
		filtered += tmp - count;
		count = tmp;

		spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
		skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
		spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
		tmp = skb_queue_len(&pending);
		buffered += tmp - count;
	}

	ieee80211_add_pending_skbs(local, &pending);

	/* now we're no longer in the deliver code */
	clear_sta_flag(sta, WLAN_STA_PS_DELIVER);

	/* The station might have polled and then woken up before we responded,
	 * so clear these flags now to avoid them sticking around.
	 */
	clear_sta_flag(sta, WLAN_STA_PSPOLL);
	clear_sta_flag(sta, WLAN_STA_UAPSD);
	spin_unlock(&sta->ps_lock);

	atomic_dec(&ps->num_sta_ps);

	local->total_ps_buffered -= buffered;

	sta_info_recalc_tim(sta);

	ps_dbg(sdata,
	       "STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n",
	       sta->sta.addr, sta->sta.aid, filtered, buffered);

	ieee80211_check_fast_xmit(sta);
}

static void ieee80211_send_null_response(struct sta_info *sta, int tid,
					 enum ieee80211_frame_release_type reason,
					 bool call_driver, bool more_data)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_qos_hdr *nullfunc;
	struct sk_buff *skb;
	int size = sizeof(*nullfunc);
	__le16 fc;
	bool qos = sta->sta.wme;
	struct ieee80211_tx_info *info;
	struct ieee80211_chanctx_conf *chanctx_conf;

	/* Don't send NDPs when STA is connected HE */
	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
	    !(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
		return;

	if (qos) {
		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
				 IEEE80211_STYPE_QOS_NULLFUNC |
				 IEEE80211_FCTL_FROMDS);
	} else {
		size -= 2;
		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
				 IEEE80211_STYPE_NULLFUNC |
				 IEEE80211_FCTL_FROMDS);
	}

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);

	nullfunc = skb_put(skb, size);
	nullfunc->frame_control = fc;
	nullfunc->duration_id = 0;
	memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
	memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
	memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
	nullfunc->seq_ctrl = 0;

	skb->priority = tid;
	skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
	if (qos) {
		nullfunc->qos_ctrl = cpu_to_le16(tid);

		if (reason == IEEE80211_FRAME_RELEASE_UAPSD) {
			nullfunc->qos_ctrl |=
				cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
			if (more_data)
				nullfunc->frame_control |=
					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
		}
	}

	info = IEEE80211_SKB_CB(skb);

	/*
	 * Tell TX path to send this frame even though the
	 * STA may still remain in PS mode after this frame
	 * exchange. Also set EOSP to indicate this packet
	 * ends the poll/service period.
	 */
	info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
		       IEEE80211_TX_STATUS_EOSP |
		       IEEE80211_TX_CTL_REQ_TX_STATUS;

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;

	if (call_driver)
		drv_allow_buffered_frames(local, sta, BIT(tid), 1,
					  reason, false);

	skb->dev = sdata->dev;

	rcu_read_lock();
	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
	if (WARN_ON(!chanctx_conf)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return;
	}

	info->band = chanctx_conf->def.chan->band;
	ieee80211_xmit(sdata, sta, skb, 0);
	rcu_read_unlock();
}

static int find_highest_prio_tid(unsigned long tids)
{
	/* lower 3 TIDs aren't ordered perfectly */
	if (tids & 0xF8)
		return fls(tids) - 1;
	/* TID 0 is BE just like TID 3 */
	if (tids & BIT(0))
		return 0;
	return fls(tids) - 1;
}

/* Indicates if the MORE_DATA bit should be set in the last
 * frame obtained by ieee80211_sta_ps_get_frames.
 * Note that driver_release_tids is relevant only if
 * reason = IEEE80211_FRAME_RELEASE_PSPOLL
 */
static bool
ieee80211_sta_ps_more_data(struct sta_info *sta, u8 ignored_acs,
			   enum ieee80211_frame_release_type reason,
			   unsigned long driver_release_tids)
{
	int ac;

	/* If the driver has data on more than one TID then
	 * certainly there's more data if we release just a
	 * single frame now (from a single TID). This will
	 * only happen for PS-Poll.
	 */
	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL &&
	    hweight16(driver_release_tids) > 1)
		return true;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		if (ignored_acs & ieee80211_ac_to_qos_mask[ac])
			continue;

		if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
		    !skb_queue_empty(&sta->ps_tx_buf[ac]))
			return true;
	}

	return false;
}

static void
ieee80211_sta_ps_get_frames(struct sta_info *sta, int n_frames, u8 ignored_acs,
			    enum ieee80211_frame_release_type reason,
			    struct sk_buff_head *frames,
			    unsigned long *driver_release_tids)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	int ac;

	/* Get response frame(s) and more data bit for the last one. */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		unsigned long tids;

		if (ignored_acs & ieee80211_ac_to_qos_mask[ac])
			continue;

		tids = ieee80211_tids_for_ac(ac);

		/* if we already have frames from software, then we can't also
		 * release from hardware queues
		 */
		if (skb_queue_empty(frames)) {
			*driver_release_tids |=
				sta->driver_buffered_tids & tids;
			*driver_release_tids |= sta->txq_buffered_tids & tids;
		}

		if (!*driver_release_tids) {
			struct sk_buff *skb;

			while (n_frames > 0) {
				skb = skb_dequeue(&sta->tx_filtered[ac]);
				if (!skb) {
					skb = skb_dequeue(
						&sta->ps_tx_buf[ac]);
					if (skb)
						local->total_ps_buffered--;
				}
				if (!skb)
					break;
				n_frames--;
				__skb_queue_tail(frames, skb);
			}
		}

		/* If we have more frames buffered on this AC, then abort the
		 * loop since we can't send more data from other ACs before
		 * the buffered frames from this.
		 */
		if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
		    !skb_queue_empty(&sta->ps_tx_buf[ac]))
			break;
	}
}
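
/*
 * Editorial note (worked example, not part of the original file): when
 * ieee80211_sta_ps_deliver_response() below finds nothing buffered at
 * all, it answers with a QoS-null frame on tid = 7 - 2 * ac for the
 * first AC that is not ignored, i.e. VO(0) -> TID 7, VI(1) -> TID 5,
 * BE(2) -> TID 3, BK(3) -> TID 1; each result is a TID that belongs to
 * the corresponding AC.
 */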

static void
ieee80211_sta_ps_deliver_response(struct sta_info *sta,
				  int n_frames, u8 ignored_acs,
				  enum ieee80211_frame_release_type reason)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	unsigned long driver_release_tids = 0;
	struct sk_buff_head frames;
	bool more_data;

	/* Service or PS-Poll period starts */
	set_sta_flag(sta, WLAN_STA_SP);

	__skb_queue_head_init(&frames);

	ieee80211_sta_ps_get_frames(sta, n_frames, ignored_acs, reason,
				    &frames, &driver_release_tids);

	more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason, driver_release_tids);

	if (driver_release_tids && reason == IEEE80211_FRAME_RELEASE_PSPOLL)
		driver_release_tids =
			BIT(find_highest_prio_tid(driver_release_tids));

	if (skb_queue_empty(&frames) && !driver_release_tids) {
		int tid, ac;

		/*
		 * For PS-Poll, this can only happen due to a race condition
		 * when we set the TIM bit and the station notices it, but
		 * before it can poll for the frame we expire it.
		 *
		 * For uAPSD, this is said in the standard (11.2.1.5 h):
		 *	At each unscheduled SP for a non-AP STA, the AP shall
		 *	attempt to transmit at least one MSDU or MMPDU, but no
		 *	more than the value specified in the Max SP Length field
		 *	in the QoS Capability element from delivery-enabled ACs,
		 *	that are destined for the non-AP STA.
		 *
		 * Since we have no other MSDU/MMPDU, transmit a QoS null frame.
		 */

		/* This will evaluate to 1, 3, 5 or 7. */
		for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++)
			if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac]))
				break;
		tid = 7 - 2 * ac;

		ieee80211_send_null_response(sta, tid, reason, true, false);
	} else if (!driver_release_tids) {
		struct sk_buff_head pending;
		struct sk_buff *skb;
		int num = 0;
		u16 tids = 0;
		bool need_null = false;

		skb_queue_head_init(&pending);

		while ((skb = __skb_dequeue(&frames))) {
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
			struct ieee80211_hdr *hdr = (void *) skb->data;
			u8 *qoshdr = NULL;

			num++;

			/*
			 * Tell TX path to send this frame even though the
			 * STA may still remain in PS mode after this frame
			 * exchange.
			 */
			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
			info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;

			/*
			 * Use MoreData flag to indicate whether there are
			 * more buffered frames for this STA
			 */
			if (more_data || !skb_queue_empty(&frames))
				hdr->frame_control |=
					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
			else
				hdr->frame_control &=
					cpu_to_le16(~IEEE80211_FCTL_MOREDATA);

			if (ieee80211_is_data_qos(hdr->frame_control) ||
			    ieee80211_is_qos_nullfunc(hdr->frame_control))
				qoshdr = ieee80211_get_qos_ctl(hdr);

			tids |= BIT(skb->priority);

			__skb_queue_tail(&pending, skb);

			/* end service period after last frame or add one */
			if (!skb_queue_empty(&frames))
				continue;

			if (reason != IEEE80211_FRAME_RELEASE_UAPSD) {
				/* for PS-Poll, there's only one frame */
				info->flags |= IEEE80211_TX_STATUS_EOSP |
					       IEEE80211_TX_CTL_REQ_TX_STATUS;
				break;
			}

			/* For uAPSD, things are a bit more complicated. If the
			 * last frame has a QoS header (i.e. is a QoS-data or
			 * QoS-nulldata frame) then just set the EOSP bit there
			 * and be done.
			 * If the frame doesn't have a QoS header (which means
			 * it should be a bufferable MMPDU) then we can't set
			 * the EOSP bit in the QoS header; add a QoS-nulldata
			 * frame to the list to send it after the MMPDU.
			 *
			 * Note that this code is only in the mac80211-release
			 * code path, we assume that the driver will not buffer
			 * anything but QoS-data frames, or if it does, will
			 * create the QoS-nulldata frame by itself if needed.
			 *
			 * Cf. 802.11-2012 10.2.1.10 (c).
			 */
			if (qoshdr) {
				*qoshdr |= IEEE80211_QOS_CTL_EOSP;

				info->flags |= IEEE80211_TX_STATUS_EOSP |
					       IEEE80211_TX_CTL_REQ_TX_STATUS;
			} else {
				/* The standard isn't completely clear on this
				 * as it says the more-data bit should be set
				 * if there are more BUs. The QoS-Null frame
				 * we're about to send isn't buffered yet, we
				 * only create it below, but let's pretend it
				 * was buffered just in case some clients only
				 * expect more-data=0 when eosp=1.
				 */
				hdr->frame_control |=
					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
				need_null = true;
				num++;
			}
			break;
		}

		drv_allow_buffered_frames(local, sta, tids, num,
					  reason, more_data);

		ieee80211_add_pending_skbs(local, &pending);

		if (need_null)
			ieee80211_send_null_response(
				sta, find_highest_prio_tid(tids),
				reason, false, false);

		sta_info_recalc_tim(sta);
	} else {
		int tid;

		/*
		 * We need to release a frame that is buffered somewhere in the
		 * driver ... it'll have to handle that.
		 * Note that the driver also has to check the number of frames
		 * on the TIDs we're releasing from - if there are more than
		 * n_frames it has to set the more-data bit (if we didn't ask
		 * it to set it anyway due to other buffered frames); if there
		 * are fewer than n_frames it has to make sure to adjust that
		 * to allow the service period to end properly.
		 */
		drv_release_buffered_frames(local, sta, driver_release_tids,
					    n_frames, reason, more_data);

		/*
		 * Note that we don't recalculate the TIM bit here as it would
		 * most likely have no effect at all unless the driver told us
		 * that the TID(s) became empty before returning here from the
		 * release function.
		 * Either way, however, when the driver tells us that the TID(s)
		 * became empty or we find that a txq became empty, we'll do the
		 * TIM recalculation.
		 */

		if (!sta->sta.txq[0])
			return;

		for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
			if (!sta->sta.txq[tid] ||
			    !(driver_release_tids & BIT(tid)) ||
			    txq_has_queue(sta->sta.txq[tid]))
				continue;

			sta_info_recalc_tim(sta);
			break;
		}
	}
}

void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
{
	u8 ignore_for_response = sta->sta.uapsd_queues;

	/*
	 * If all ACs are delivery-enabled then we should reply
	 * from any of them, if only some are enabled we reply
	 * only from the non-enabled ones.
	 */
	if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1)
		ignore_for_response = 0;

	ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response,
					  IEEE80211_FRAME_RELEASE_PSPOLL);
}

void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta)
{
	int n_frames = sta->sta.max_sp;
	u8 delivery_enabled = sta->sta.uapsd_queues;

	/*
	 * If we ever grow support for TSPEC this might happen if
	 * the TSPEC update from hostapd comes in between a trigger
	 * frame setting WLAN_STA_UAPSD in the RX path and this
	 * actually getting called.
	 */
	if (!delivery_enabled)
		return;

	switch (sta->sta.max_sp) {
	case 1:
		n_frames = 2;
		break;
	case 2:
		n_frames = 4;
		break;
	case 3:
		n_frames = 6;
		break;
	case 0:
		/* XXX: what is a good value? */
		n_frames = 128;
		break;
	}

	ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled,
					  IEEE80211_FRAME_RELEASE_UAPSD);
}

void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
			       struct ieee80211_sta *pubsta, bool block)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	trace_api_sta_block_awake(sta->local, pubsta, block);

	if (block) {
		set_sta_flag(sta, WLAN_STA_PS_DRIVER);
		ieee80211_clear_fast_xmit(sta);
		return;
	}

	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
		return;

	if (!test_sta_flag(sta, WLAN_STA_PS_STA)) {
		set_sta_flag(sta, WLAN_STA_PS_DELIVER);
		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
		ieee80211_queue_work(hw, &sta->drv_deliver_wk);
	} else if (test_sta_flag(sta, WLAN_STA_PSPOLL) ||
		   test_sta_flag(sta, WLAN_STA_UAPSD)) {
		/* must be asleep in this case */
		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
		ieee80211_queue_work(hw, &sta->drv_deliver_wk);
	} else {
		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
		ieee80211_check_fast_xmit(sta);
	}
}
EXPORT_SYMBOL(ieee80211_sta_block_awake);

void ieee80211_sta_eosp(struct ieee80211_sta *pubsta)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_local *local = sta->local;

	trace_api_eosp(local, pubsta);

	clear_sta_flag(sta, WLAN_STA_SP);
}
EXPORT_SYMBOL(ieee80211_sta_eosp);

void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	enum ieee80211_frame_release_type reason;
	bool more_data;

	trace_api_send_eosp_nullfunc(sta->local, pubsta, tid);

	reason = IEEE80211_FRAME_RELEASE_UAPSD;
	more_data = ieee80211_sta_ps_more_data(sta, ~sta->sta.uapsd_queues,
					       reason, 0);

	ieee80211_send_null_response(sta, tid, reason, false, more_data);
}
EXPORT_SYMBOL(ieee80211_send_eosp_nullfunc);
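
/*
 * Editorial example (illustrative sketch only, not part of the original
 * file): a driver that buffers frames for sleeping stations itself will
 * typically combine the exported helpers above and below roughly like
 *
 *	ieee80211_sta_set_buffered(pubsta, tid, true);   when it queues a
 *		frame for a sleeping station internally
 *	ieee80211_sta_set_buffered(pubsta, tid, false);  once that TID drains
 *	ieee80211_sta_block_awake(hw, pubsta, true/false); while it still has
 *		frames on hardware queues for a station entering powersave
 *	ieee80211_sta_eosp(pubsta);  when it ends a service period itself
 */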

void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
				u8 tid, bool buffered)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	if (WARN_ON(tid >= IEEE80211_NUM_TIDS))
		return;

	trace_api_sta_set_buffered(sta->local, pubsta, tid, buffered);

	if (buffered)
		set_bit(tid, &sta->driver_buffered_tids);
	else
		clear_bit(tid, &sta->driver_buffered_tids);

	sta_info_recalc_tim(sta);
}
EXPORT_SYMBOL(ieee80211_sta_set_buffered);

void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
				    u32 tx_airtime, u32 rx_airtime)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_local *local = sta->sdata->local;
	u8 ac = ieee80211_ac_from_tid(tid);
	u32 airtime = 0;

	if (sta->local->airtime_flags & AIRTIME_USE_TX)
		airtime += tx_airtime;
	if (sta->local->airtime_flags & AIRTIME_USE_RX)
		airtime += rx_airtime;

	spin_lock_bh(&local->active_txq_lock[ac]);
	sta->airtime[ac].tx_airtime += tx_airtime;
	sta->airtime[ac].rx_airtime += rx_airtime;
	sta->airtime[ac].deficit -= airtime;
	spin_unlock_bh(&local->active_txq_lock[ac]);
}
EXPORT_SYMBOL(ieee80211_sta_register_airtime);

void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
					  struct sta_info *sta, u8 ac,
					  u16 tx_airtime, bool tx_completed)
{
	int tx_pending;

	if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
		return;

	if (!tx_completed) {
		if (sta)
			atomic_add(tx_airtime,
				   &sta->airtime[ac].aql_tx_pending);

		atomic_add(tx_airtime, &local->aql_total_pending_airtime);
		return;
	}

	if (sta) {
		tx_pending = atomic_sub_return(tx_airtime,
					       &sta->airtime[ac].aql_tx_pending);
		if (WARN_ONCE(tx_pending < 0,
			      "STA %pM AC %d txq pending airtime underflow: %u, %u",
			      sta->addr, ac, tx_pending, tx_airtime))
			atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending,
				       tx_pending, 0);
	}

	tx_pending = atomic_sub_return(tx_airtime,
				       &local->aql_total_pending_airtime);
	if (WARN_ONCE(tx_pending < 0,
		      "Device %s AC %d pending airtime underflow: %u, %u",
		      wiphy_name(local->hw.wiphy), ac, tx_pending,
		      tx_airtime))
		atomic_cmpxchg(&local->aql_total_pending_airtime,
			       tx_pending, 0);
}

int sta_info_move_state(struct sta_info *sta,
			enum ieee80211_sta_state new_state)
{
	might_sleep();

	if (sta->sta_state == new_state)
		return 0;

	/* check allowed transitions first */

	switch (new_state) {
	case IEEE80211_STA_NONE:
		if (sta->sta_state != IEEE80211_STA_AUTH)
			return -EINVAL;
		break;
	case IEEE80211_STA_AUTH:
		if (sta->sta_state != IEEE80211_STA_NONE &&
		    sta->sta_state != IEEE80211_STA_ASSOC)
			return -EINVAL;
		break;
	case IEEE80211_STA_ASSOC:
		if (sta->sta_state != IEEE80211_STA_AUTH &&
		    sta->sta_state != IEEE80211_STA_AUTHORIZED)
			return -EINVAL;
		break;
	case IEEE80211_STA_AUTHORIZED:
		if (sta->sta_state != IEEE80211_STA_ASSOC)
			return -EINVAL;
		break;
	default:
		WARN(1, "invalid state %d", new_state);
int sta_info_move_state(struct sta_info *sta,
			enum ieee80211_sta_state new_state)
{
	might_sleep();

	if (sta->sta_state == new_state)
		return 0;

	/* check allowed transitions first */

	switch (new_state) {
	case IEEE80211_STA_NONE:
		if (sta->sta_state != IEEE80211_STA_AUTH)
			return -EINVAL;
		break;
	case IEEE80211_STA_AUTH:
		if (sta->sta_state != IEEE80211_STA_NONE &&
		    sta->sta_state != IEEE80211_STA_ASSOC)
			return -EINVAL;
		break;
	case IEEE80211_STA_ASSOC:
		if (sta->sta_state != IEEE80211_STA_AUTH &&
		    sta->sta_state != IEEE80211_STA_AUTHORIZED)
			return -EINVAL;
		break;
	case IEEE80211_STA_AUTHORIZED:
		if (sta->sta_state != IEEE80211_STA_ASSOC)
			return -EINVAL;
		break;
	default:
		WARN(1, "invalid state %d", new_state);
		return -EINVAL;
	}

	sta_dbg(sta->sdata, "moving STA %pM to state %d\n",
		sta->sta.addr, new_state);

	/*
	 * notify the driver before the actual changes so it can
	 * fail the transition
	 */
	if (test_sta_flag(sta, WLAN_STA_INSERTED)) {
		int err = drv_sta_state(sta->local, sta->sdata, sta,
					sta->sta_state, new_state);
		if (err)
			return err;
	}

	/* reflect the change in all state variables */

	switch (new_state) {
	case IEEE80211_STA_NONE:
		if (sta->sta_state == IEEE80211_STA_AUTH)
			clear_bit(WLAN_STA_AUTH, &sta->_flags);
		break;
	case IEEE80211_STA_AUTH:
		if (sta->sta_state == IEEE80211_STA_NONE) {
			set_bit(WLAN_STA_AUTH, &sta->_flags);
		} else if (sta->sta_state == IEEE80211_STA_ASSOC) {
			clear_bit(WLAN_STA_ASSOC, &sta->_flags);
			ieee80211_recalc_min_chandef(sta->sdata);
			if (!sta->sta.support_p2p_ps)
				ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
		}
		break;
	case IEEE80211_STA_ASSOC:
		if (sta->sta_state == IEEE80211_STA_AUTH) {
			set_bit(WLAN_STA_ASSOC, &sta->_flags);
			sta->assoc_at = ktime_get_boottime_ns();
			ieee80211_recalc_min_chandef(sta->sdata);
			if (!sta->sta.support_p2p_ps)
				ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
		} else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
			ieee80211_vif_dec_num_mcast(sta->sdata);
			clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
			ieee80211_clear_fast_xmit(sta);
			ieee80211_clear_fast_rx(sta);
		}
		break;
	case IEEE80211_STA_AUTHORIZED:
		if (sta->sta_state == IEEE80211_STA_ASSOC) {
			ieee80211_vif_inc_num_mcast(sta->sdata);
			set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
			ieee80211_check_fast_xmit(sta);
			ieee80211_check_fast_rx(sta);
		}
		if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
		    sta->sdata->vif.type == NL80211_IFTYPE_AP)
			cfg80211_send_layer2_update(sta->sdata->dev,
						    sta->sta.addr);
		break;
	default:
		break;
	}

	sta->sta_state = new_state;

	return 0;
}
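/*
 * For reference, the transitions accepted by sta_info_move_state() form
 * a simple chain, and each call may only move one step along it:
 *
 *	NONE <-> AUTH <-> ASSOC <-> AUTHORIZED
 *
 * so e.g. tearing down an authorized station means moving it to ASSOC,
 * then AUTH, then NONE, one call at a time.
 */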
u8 sta_info_tx_streams(struct sta_info *sta)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sta->sta.ht_cap;
	u8 rx_streams;

	if (!sta->sta.ht_cap.ht_supported)
		return 1;

	if (sta->sta.vht_cap.vht_supported) {
		int i;
		u16 tx_mcs_map =
			le16_to_cpu(sta->sta.vht_cap.vht_mcs.tx_mcs_map);

		for (i = 7; i >= 0; i--)
			if ((tx_mcs_map & (0x3 << (i * 2))) !=
			    IEEE80211_VHT_MCS_NOT_SUPPORTED)
				return i + 1;
	}

	if (ht_cap->mcs.rx_mask[3])
		rx_streams = 4;
	else if (ht_cap->mcs.rx_mask[2])
		rx_streams = 3;
	else if (ht_cap->mcs.rx_mask[1])
		rx_streams = 2;
	else
		rx_streams = 1;

	if (!(ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_RX_DIFF))
		return rx_streams;

	return ((ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
			>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
}

static struct ieee80211_sta_rx_stats *
sta_get_last_rx_stats(struct sta_info *sta)
{
	struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
	struct ieee80211_local *local = sta->local;
	int cpu;

	if (!ieee80211_hw_check(&local->hw, USES_RSS))
		return stats;

	for_each_possible_cpu(cpu) {
		struct ieee80211_sta_rx_stats *cpustats;

		cpustats = per_cpu_ptr(sta->pcpu_rx_stats, cpu);

		if (time_after(cpustats->last_rx, stats->last_rx))
			stats = cpustats;
	}

	return stats;
}

static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
				  struct rate_info *rinfo)
{
	rinfo->bw = STA_STATS_GET(BW, rate);

	switch (STA_STATS_GET(TYPE, rate)) {
	case STA_STATS_RATE_TYPE_VHT:
		rinfo->flags = RATE_INFO_FLAGS_VHT_MCS;
		rinfo->mcs = STA_STATS_GET(VHT_MCS, rate);
		rinfo->nss = STA_STATS_GET(VHT_NSS, rate);
		if (STA_STATS_GET(SGI, rate))
			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case STA_STATS_RATE_TYPE_HT:
		rinfo->flags = RATE_INFO_FLAGS_MCS;
		rinfo->mcs = STA_STATS_GET(HT_MCS, rate);
		if (STA_STATS_GET(SGI, rate))
			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case STA_STATS_RATE_TYPE_LEGACY: {
		struct ieee80211_supported_band *sband;
		u16 brate;
		unsigned int shift;
		int band = STA_STATS_GET(LEGACY_BAND, rate);
		int rate_idx = STA_STATS_GET(LEGACY_IDX, rate);

		sband = local->hw.wiphy->bands[band];
		brate = sband->bitrates[rate_idx].bitrate;
		if (rinfo->bw == RATE_INFO_BW_5)
			shift = 2;
		else if (rinfo->bw == RATE_INFO_BW_10)
			shift = 1;
		else
			shift = 0;
		rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
		break;
	}
	case STA_STATS_RATE_TYPE_HE:
		rinfo->flags = RATE_INFO_FLAGS_HE_MCS;
		rinfo->mcs = STA_STATS_GET(HE_MCS, rate);
		rinfo->nss = STA_STATS_GET(HE_NSS, rate);
		rinfo->he_gi = STA_STATS_GET(HE_GI, rate);
		rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate);
		rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate);
		break;
	}
}

static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
{
	u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);

	if (rate == STA_STATS_RATE_INVALID)
		return -EINVAL;

	sta_stats_decode_rate(sta->local, rate, rinfo);
	return 0;
}

static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
					int tid)
{
	unsigned int start;
	u64 value;

	do {
		start = u64_stats_fetch_begin(&rxstats->syncp);
		value = rxstats->msdu[tid];
	} while (u64_stats_fetch_retry(&rxstats->syncp, start));

	return value;
}
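/*
 * sta_get_tidstats_msdu() above and sta_get_stats_bytes() below use the
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() pair so that the
 * 64-bit counters are read consistently even on 32-bit systems, where
 * such loads are not atomic; if the writer updated the counter while it
 * was being read, the read is simply retried.
 */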
static void sta_set_tidstats(struct sta_info *sta,
			     struct cfg80211_tid_stats *tidstats,
			     int tid)
{
	struct ieee80211_local *local = sta->local;
	int cpu;

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
		if (!ieee80211_hw_check(&local->hw, USES_RSS))
			tidstats->rx_msdu +=
				sta_get_tidstats_msdu(&sta->rx_stats, tid);

		if (sta->pcpu_rx_stats) {
			for_each_possible_cpu(cpu) {
				struct ieee80211_sta_rx_stats *cpurxs;

				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
				tidstats->rx_msdu +=
					sta_get_tidstats_msdu(cpurxs, tid);
			}
		}

		tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU);
	}

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
		tidstats->tx_msdu = sta->tx_stats.msdu[tid];
	}

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) &&
	    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
		tidstats->tx_msdu_retries = sta->status_stats.msdu_retries[tid];
	}

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) &&
	    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
		tidstats->tx_msdu_failed = sta->status_stats.msdu_failed[tid];
	}

	if (local->ops->wake_tx_queue && tid < IEEE80211_NUM_TIDS) {
		spin_lock_bh(&local->fq.lock);
		rcu_read_lock();

		tidstats->filled |= BIT(NL80211_TID_STATS_TXQ_STATS);
		ieee80211_fill_txq_stats(&tidstats->txq_stats,
					 to_txq_info(sta->sta.txq[tid]));

		rcu_read_unlock();
		spin_unlock_bh(&local->fq.lock);
	}
}

static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
{
	unsigned int start;
	u64 value;

	do {
		start = u64_stats_fetch_begin(&rxstats->syncp);
		value = rxstats->bytes;
	} while (u64_stats_fetch_retry(&rxstats->syncp, start));

	return value;
}
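/*
 * With the USES_RSS hardware flag the RX statistics are kept per CPU,
 * so sta_set_tidstats() above and sta_set_sinfo() below have to add up
 * the per-CPU copies; without RSS the single sta->rx_stats instance is
 * used instead (see also sta_get_last_rx_stats()).
 */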
void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
		   bool tidstats)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	u32 thr = 0;
	int i, ac, cpu;
	struct ieee80211_sta_rx_stats *last_rxstats;

	last_rxstats = sta_get_last_rx_stats(sta);

	sinfo->generation = sdata->local->sta_generation;

	/* do before driver, so beacon filtering drivers have a
	 * chance to e.g. just add the number of filtered beacons
	 * (or just modify the value entirely, of course)
	 */
	if (sdata->vif.type == NL80211_IFTYPE_STATION)
		sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal;

	drv_sta_statistics(local, sdata, &sta->sta, sinfo);

	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
			 BIT_ULL(NL80211_STA_INFO_STA_FLAGS) |
			 BIT_ULL(NL80211_STA_INFO_BSS_PARAM) |
			 BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) |
			 BIT_ULL(NL80211_STA_INFO_ASSOC_AT_BOOTTIME) |
			 BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);

	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
		sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS);
	}

	sinfo->connected_time = ktime_get_seconds() - sta->last_connected;
	sinfo->assoc_at = sta->assoc_at;
	sinfo->inactive_time =
		jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta));

	if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) |
			       BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) {
		sinfo->tx_bytes = 0;
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->tx_bytes += sta->tx_stats.bytes[ac];
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) {
		sinfo->tx_packets = 0;
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->tx_packets += sta->tx_stats.packets[ac];
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
	}

	if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) |
			       BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) {
		if (!ieee80211_hw_check(&local->hw, USES_RSS))
			sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);

		if (sta->pcpu_rx_stats) {
			for_each_possible_cpu(cpu) {
				struct ieee80211_sta_rx_stats *cpurxs;

				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
				sinfo->rx_bytes += sta_get_stats_bytes(cpurxs);
			}
		}

		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) {
		sinfo->rx_packets = sta->rx_stats.packets;
		if (sta->pcpu_rx_stats) {
			for_each_possible_cpu(cpu) {
				struct ieee80211_sta_rx_stats *cpurxs;

				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
				sinfo->rx_packets += cpurxs->packets;
			}
		}
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) {
		sinfo->tx_retries = sta->status_stats.retry_count;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) {
		sinfo->tx_failed = sta->status_stats.retry_failed;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION))) {
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->rx_duration += sta->airtime[ac].rx_airtime;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_DURATION))) {
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->tx_duration += sta->airtime[ac].tx_airtime;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) {
		sinfo->airtime_weight = sta->airtime_weight;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
	}

	sinfo->rx_dropped_misc = sta->rx_stats.dropped;
	if (sta->pcpu_rx_stats) {
		for_each_possible_cpu(cpu) {
			struct ieee80211_sta_rx_stats *cpurxs;

			cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
			sinfo->rx_dropped_misc += cpurxs->dropped;
		}
	}

	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
	    !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) |
				 BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
		sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif);
	}

	if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
	    ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
		if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) {
			sinfo->signal = (s8)last_rxstats->last_signal;
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
		}

		if (!sta->pcpu_rx_stats &&
		    !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) {
			sinfo->signal_avg =
				-ewma_signal_read(&sta->rx_stats_avg.signal);
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
		}
	}

	/* for the average - if pcpu_rx_stats isn't set - rxstats must point to
	 * the sta->rx_stats struct, so the check here is fine with and without
	 * pcpu statistics
	 */
	if (last_rxstats->chains &&
	    !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) |
			       BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
		if (!sta->pcpu_rx_stats)
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);

		sinfo->chains = last_rxstats->chains;

		for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
			sinfo->chain_signal[i] =
				last_rxstats->chain_signal_last[i];
			sinfo->chain_signal_avg[i] =
				-ewma_signal_read(&sta->rx_stats_avg.chain_signal[i]);
		}
	}
	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) {
		sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate,
				     &sinfo->txrate);
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))) {
		if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0)
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
	}

	if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) {
		for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
			sta_set_tidstats(sta, &sinfo->pertid[i], i);
	}

	if (ieee80211_vif_is_mesh(&sdata->vif)) {
#ifdef CONFIG_MAC80211_MESH
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) |
				 BIT_ULL(NL80211_STA_INFO_PLID) |
				 BIT_ULL(NL80211_STA_INFO_PLINK_STATE) |
				 BIT_ULL(NL80211_STA_INFO_LOCAL_PM) |
				 BIT_ULL(NL80211_STA_INFO_PEER_PM) |
				 BIT_ULL(NL80211_STA_INFO_NONPEER_PM) |
				 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE);

		sinfo->llid = sta->mesh->llid;
		sinfo->plid = sta->mesh->plid;
		sinfo->plink_state = sta->mesh->plink_state;
		if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET);
			sinfo->t_offset = sta->mesh->t_offset;
		}
		sinfo->local_pm = sta->mesh->local_pm;
		sinfo->peer_pm = sta->mesh->peer_pm;
		sinfo->nonpeer_pm = sta->mesh->nonpeer_pm;
		sinfo->connected_to_gate = sta->mesh->connected_to_gate;
#endif
	}

	sinfo->bss_param.flags = 0;
	if (sdata->vif.bss_conf.use_cts_prot)
		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
	if (sdata->vif.bss_conf.use_short_preamble)
		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
	if (sdata->vif.bss_conf.use_short_slot)
		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
	sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period;
	sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;

	sinfo->sta_flags.set = 0;
	sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
				BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
				BIT(NL80211_STA_FLAG_WME) |
				BIT(NL80211_STA_FLAG_MFP) |
				BIT(NL80211_STA_FLAG_AUTHENTICATED) |
				BIT(NL80211_STA_FLAG_ASSOCIATED) |
				BIT(NL80211_STA_FLAG_TDLS_PEER);
	if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
	if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE);
	if (sta->sta.wme)
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME);
	if (test_sta_flag(sta, WLAN_STA_MFP))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
	if (test_sta_flag(sta, WLAN_STA_AUTH))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
	if (test_sta_flag(sta, WLAN_STA_ASSOC))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);

	thr = sta_get_expected_throughput(sta);

	if (thr != 0) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT);
		sinfo->expected_throughput = thr;
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) &&
	    sta->status_stats.ack_signal_filled) {
		sinfo->ack_signal = sta->status_stats.last_ack_signal;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) &&
	    sta->status_stats.ack_signal_filled) {
		sinfo->avg_ack_signal =
			-(s8)ewma_avg_signal_read(
				&sta->status_stats.avg_ack_signal);
		sinfo->filled |=
			BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
	}

	if (ieee80211_vif_is_mesh(&sdata->vif)) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_LINK_METRIC);
		sinfo->airtime_link_metric =
			airtime_link_metric_get(local, sta);
	}
}

u32 sta_get_expected_throughput(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct rate_control_ref *ref = NULL;
	u32 thr = 0;

	if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
		ref = local->rate_ctrl;

	/* check if the driver has a SW RC implementation */
	if (ref && ref->ops->get_expected_throughput)
		thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv);
	else
		thr = drv_get_expected_throughput(local, sta);

	return thr;
}

unsigned long ieee80211_sta_last_active(struct sta_info *sta)
{
	struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);

	if (!sta->status_stats.last_ack ||
	    time_after(stats->last_rx, sta->status_stats.last_ack))
		return stats->last_rx;
	return sta->status_stats.last_ack;
}
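/*
 * sta_update_codel_params() below picks the CoDel parameters per station:
 * when the expected throughput is lower than STA_SLOW_THRESHOLD scaled by
 * the number of stations, the target/interval are relaxed to 50/300 ms and
 * ECN marking is disabled; otherwise the tighter 20/100 ms defaults with
 * ECN are used.  The intent, presumably, is to avoid over-dropping on slow
 * links while keeping latency low on fast ones.  This only matters for
 * drivers using the wake_tx_queue path.
 */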
static void sta_update_codel_params(struct sta_info *sta, u32 thr)
{
	if (!sta->sdata->local->ops->wake_tx_queue)
		return;

	if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) {
		sta->cparams.target = MS2TIME(50);
		sta->cparams.interval = MS2TIME(300);
		sta->cparams.ecn = false;
	} else {
		sta->cparams.target = MS2TIME(20);
		sta->cparams.interval = MS2TIME(100);
		sta->cparams.ecn = true;
	}
}

void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
					   u32 thr)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	sta_update_codel_params(sta, thr);
}