// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2022 Intel Corporation
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <linux/rtnetlink.h>

#include <net/codel.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "sta_info.h"
#include "debugfs_sta.h"
#include "mesh.h"
#include "wme.h"

/**
 * DOC: STA information lifetime rules
 *
 * STA info structures (&struct sta_info) are managed in a hash table
 * for faster lookup and a list for iteration. They are managed using
 * RCU, i.e. access to the list and hash table is protected by RCU.
 *
 * Upon allocating a STA info structure with sta_info_alloc(), the caller
 * owns that structure. It must then insert it into the hash table using
 * either sta_info_insert() or sta_info_insert_rcu(); only in the latter
 * case (which acquires an rcu read section but must not be called from
 * within one) will the pointer still be valid after the call. Note that
 * the caller may not do much with the STA info before inserting it, in
 * particular, it may not start any mesh peer link management or add
 * encryption keys.
 *
 * When the insertion fails (sta_info_insert() returns non-zero), the
 * structure will have been freed by sta_info_insert()!
 *
 * Station entries are added by mac80211 when you establish a link with a
 * peer. This means different things for the different types of interfaces
 * we support. For a regular station this means we add the AP sta when we
 * receive an association response from the AP. For IBSS this occurs when
 * we get to know about a peer on the same IBSS. For WDS we add the sta for
 * the peer immediately upon device open. When using AP mode we add stations
 * for each respective station upon request from userspace through nl80211.
 *
 * In order to remove a STA info structure, various sta_info_destroy_*()
 * calls are available.
 *
 * There is no concept of ownership on a STA entry, each structure is
 * owned by the global hash table/list until it is removed. All users of
 * the structure need to be RCU protected so that the structure won't be
 * freed before they are done using it.
 */
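
/*
 * Illustrative sketch (not part of mac80211 itself): the typical caller
 * flow under the rules above.  "sdata" and "peer_addr" stand in for the
 * caller's own context and are assumptions of this example; note that on
 * insertion failure the entry has already been freed and must not be
 * touched again.
 *
 *	struct sta_info *sta;
 *	int err;
 *
 *	sta = sta_info_alloc(sdata, peer_addr, GFP_KERNEL);
 *	if (!sta)
 *		return -ENOMEM;
 *
 *	err = sta_info_insert(sta);
 *	if (err)
 *		return err;
 */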

struct sta_link_alloc {
	struct link_sta_info info;
	struct ieee80211_link_sta sta;
	struct rcu_head rcu_head;
};

static const struct rhashtable_params sta_rht_params = {
	.nelem_hint = 3, /* start small */
	.automatic_shrinking = true,
	.head_offset = offsetof(struct sta_info, hash_node),
	.key_offset = offsetof(struct sta_info, addr),
	.key_len = ETH_ALEN,
	.max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
};

static const struct rhashtable_params link_sta_rht_params = {
	.nelem_hint = 3, /* start small */
	.automatic_shrinking = true,
	.head_offset = offsetof(struct link_sta_info, link_hash_node),
	.key_offset = offsetof(struct link_sta_info, addr),
	.key_len = ETH_ALEN,
	.max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
};

/* Caller must hold local->sta_mtx */
static int sta_info_hash_del(struct ieee80211_local *local,
			     struct sta_info *sta)
{
	return rhltable_remove(&local->sta_hash, &sta->hash_node,
			       sta_rht_params);
}

static int link_sta_info_hash_add(struct ieee80211_local *local,
				  struct link_sta_info *link_sta)
{
	lockdep_assert_held(&local->sta_mtx);
	return rhltable_insert(&local->link_sta_hash,
			       &link_sta->link_hash_node,
			       link_sta_rht_params);
}

static int link_sta_info_hash_del(struct ieee80211_local *local,
				  struct link_sta_info *link_sta)
{
	lockdep_assert_held(&local->sta_mtx);
	return rhltable_remove(&local->link_sta_hash,
			       &link_sta->link_hash_node,
			       link_sta_rht_params);
}

static void __cleanup_single_sta(struct sta_info *sta)
{
	int ac, i;
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;

	if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
	    test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
	    test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
		if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
		    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
			ps = &sdata->bss->ps;
		else if (ieee80211_vif_is_mesh(&sdata->vif))
			ps = &sdata->u.mesh.ps;
		else
			return;

		clear_sta_flag(sta, WLAN_STA_PS_STA);
		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
		clear_sta_flag(sta, WLAN_STA_PS_DELIVER);

		atomic_dec(&ps->num_sta_ps);
	}

	for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
		struct txq_info *txqi;

		if (!sta->sta.txq[i])
			continue;

		txqi = to_txq_info(sta->sta.txq[i]);

		ieee80211_txq_purge(local, txqi);
	}

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
		ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]);
		ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]);
	}

	if (ieee80211_vif_is_mesh(&sdata->vif))
		mesh_sta_cleanup(sta);

	cancel_work_sync(&sta->drv_deliver_wk);

	/*
	 * Destroy aggregation state here. It would be nice to wait for the
	 * driver to finish aggregation stop and then clean up, but for now
	 * drivers have to handle aggregation stop being requested, followed
	 * directly by station destruction.
	 */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		kfree(sta->ampdu_mlme.tid_start_tx[i]);
		tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
		if (!tid_tx)
			continue;
		ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending);
		kfree(tid_tx);
	}
}

static void cleanup_single_sta(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;

	__cleanup_single_sta(sta);
	sta_info_free(local, sta);
}

struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local,
					 const u8 *addr)
{
	return rhltable_lookup(&local->sta_hash, addr, sta_rht_params);
}

/* protected by RCU */
struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
			      const u8 *addr)
{
	struct ieee80211_local *local = sdata->local;
	struct rhlist_head *tmp;
	struct sta_info *sta;

	rcu_read_lock();
	for_each_sta_info(local, addr, sta, tmp) {
		if (sta->sdata == sdata) {
			rcu_read_unlock();
			/* this is safe as the caller must already hold
			 * another rcu read section or the mutex
			 */
			return sta;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * Get sta info either from the specified interface
 * or from one of its vlans
 */
struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
				  const u8 *addr)
{
	struct ieee80211_local *local = sdata->local;
	struct rhlist_head *tmp;
	struct sta_info *sta;

	rcu_read_lock();
	for_each_sta_info(local, addr, sta, tmp) {
		if (sta->sdata == sdata ||
		    (sta->sdata->bss && sta->sdata->bss == sdata->bss)) {
			rcu_read_unlock();
			/* this is safe as the caller must already hold
			 * another rcu read section or the mutex
			 */
			return sta;
		}
	}
	rcu_read_unlock();
	return NULL;
}

struct rhlist_head *link_sta_info_hash_lookup(struct ieee80211_local *local,
					      const u8 *addr)
{
	return rhltable_lookup(&local->link_sta_hash, addr,
			       link_sta_rht_params);
}

struct link_sta_info *
link_sta_info_get_bss(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct ieee80211_local *local = sdata->local;
	struct rhlist_head *tmp;
	struct link_sta_info *link_sta;

	rcu_read_lock();
	for_each_link_sta_info(local, addr, link_sta, tmp) {
		struct sta_info *sta = link_sta->sta;

		if (sta->sdata == sdata ||
		    (sta->sdata->bss && sta->sdata->bss == sdata->bss)) {
			rcu_read_unlock();
			/* this is safe as the caller must already hold
			 * another rcu read section or the mutex
			 */
			return link_sta;
		}
	}
	rcu_read_unlock();
	return NULL;
}

struct ieee80211_sta *
ieee80211_find_sta_by_link_addrs(struct ieee80211_hw *hw,
				 const u8 *addr,
				 const u8 *localaddr,
				 unsigned int *link_id)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct link_sta_info *link_sta;
	struct rhlist_head *tmp;

	for_each_link_sta_info(local, addr, link_sta, tmp) {
		struct sta_info *sta = link_sta->sta;
		struct ieee80211_link_data *link;
		u8 _link_id = link_sta->link_id;

		if (!localaddr) {
			if (link_id)
				*link_id = _link_id;
			return &sta->sta;
		}

		link = rcu_dereference(sta->sdata->link[_link_id]);
		if (!link)
			continue;

		if (memcmp(link->conf->addr, localaddr, ETH_ALEN))
			continue;

		if (link_id)
			*link_id = _link_id;
		return &sta->sta;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_link_addrs);

struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local,
				       const u8 *sta_addr, const u8 *vif_addr)
{
	struct rhlist_head *tmp;
	struct sta_info *sta;

	for_each_sta_info(local, sta_addr, sta, tmp) {
		if (ether_addr_equal(vif_addr, sta->sdata->vif.addr))
			return sta;
	}

	return NULL;
}

struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
				     int idx)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	int i = 0;

	list_for_each_entry_rcu(sta, &local->sta_list, list,
				lockdep_is_held(&local->sta_mtx)) {
		if (sdata != sta->sdata)
			continue;
		if (i < idx) {
			++i;
			continue;
		}
		return sta;
	}

	return NULL;
}

static void sta_info_free_link(struct link_sta_info *link_sta)
{
	free_percpu(link_sta->pcpu_rx_stats);
}

static void sta_remove_link(struct sta_info *sta, unsigned int link_id,
			    bool unhash)
{
	struct sta_link_alloc *alloc = NULL;
	struct link_sta_info *link_sta;

	link_sta = rcu_dereference_protected(sta->link[link_id],
					     lockdep_is_held(&sta->local->sta_mtx));

	if (WARN_ON(!link_sta))
		return;

	if (unhash)
		link_sta_info_hash_del(sta->local, link_sta);

	if (test_sta_flag(sta, WLAN_STA_INSERTED))
		ieee80211_link_sta_debugfs_remove(link_sta);

	if (link_sta != &sta->deflink)
		alloc = container_of(link_sta, typeof(*alloc), info);

	sta->sta.valid_links &= ~BIT(link_id);
	RCU_INIT_POINTER(sta->link[link_id], NULL);
	RCU_INIT_POINTER(sta->sta.link[link_id], NULL);
	if (alloc) {
		sta_info_free_link(&alloc->info);
		kfree_rcu(alloc, rcu_head);
	}

	ieee80211_sta_recalc_aggregates(&sta->sta);
}

/**
 * sta_info_free - free STA
 *
 * @local: pointer to the global information
 * @sta: STA info to free
 *
 * This function must undo everything done by sta_info_alloc()
 * that may happen before sta_info_insert(). It may only be
 * called when sta_info_insert() has not been attempted (and
 * if that fails, the station is freed anyway.)
 */
void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
		if (!(sta->sta.valid_links & BIT(i)))
			continue;

		sta_remove_link(sta, i, false);
	}

	/*
	 * If we had used sta_info_pre_move_state() then we might not
	 * have gone through the state transitions down again, so do
	 * it here now (and warn if it's inserted).
	 *
	 * This will clear state such as fast TX/RX that may have been
	 * allocated during state transitions.
	 */
	while (sta->sta_state > IEEE80211_STA_NONE) {
		int ret;

		WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED));

		ret = sta_info_move_state(sta, sta->sta_state - 1);
		if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret))
			break;
	}

	if (sta->rate_ctrl)
		rate_control_free_sta(sta);

	sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);

	kfree(to_txq_info(sta->sta.txq[0]));
	kfree(rcu_dereference_raw(sta->sta.rates));
#ifdef CONFIG_MAC80211_MESH
	kfree(sta->mesh);
#endif

	sta_info_free_link(&sta->deflink);
	kfree(sta);
}

/* Caller must hold local->sta_mtx */
static int sta_info_hash_add(struct ieee80211_local *local,
			     struct sta_info *sta)
{
	return rhltable_insert(&local->sta_hash, &sta->hash_node,
			       sta_rht_params);
}

static void sta_deliver_ps_frames(struct work_struct *wk)
{
	struct sta_info *sta;

	sta = container_of(wk, struct sta_info, drv_deliver_wk);

	if (sta->dead)
		return;

	local_bh_disable();
	if (!test_sta_flag(sta, WLAN_STA_PS_STA))
		ieee80211_sta_ps_deliver_wakeup(sta);
	else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL))
		ieee80211_sta_ps_deliver_poll_response(sta);
	else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD))
		ieee80211_sta_ps_deliver_uapsd(sta);
	local_bh_enable();
}

static int sta_prepare_rate_control(struct ieee80211_local *local,
				    struct sta_info *sta, gfp_t gfp)
{
	if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
		return 0;

	sta->rate_ctrl = local->rate_ctrl;
	sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
						     sta, gfp);
	if (!sta->rate_ctrl_priv)
		return -ENOMEM;

	return 0;
}

static int sta_info_alloc_link(struct ieee80211_local *local,
			       struct link_sta_info *link_info,
			       gfp_t gfp)
{
	struct ieee80211_hw *hw = &local->hw;
	int i;

	if (ieee80211_hw_check(hw, USES_RSS)) {
		link_info->pcpu_rx_stats =
			alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
		if (!link_info->pcpu_rx_stats)
			return -ENOMEM;
	}

	link_info->rx_stats.last_rx = jiffies;
	u64_stats_init(&link_info->rx_stats.syncp);

	ewma_signal_init(&link_info->rx_stats_avg.signal);
	ewma_avg_signal_init(&link_info->status_stats.avg_ack_signal);
	for (i = 0; i < ARRAY_SIZE(link_info->rx_stats_avg.chain_signal); i++)
		ewma_signal_init(&link_info->rx_stats_avg.chain_signal[i]);

	return 0;
}

static void sta_info_add_link(struct sta_info *sta,
			      unsigned int link_id,
			      struct link_sta_info *link_info,
			      struct ieee80211_link_sta *link_sta)
{
	link_info->sta = sta;
	link_info->link_id = link_id;
	link_info->pub = link_sta;
	link_info->pub->sta = &sta->sta;
	link_sta->link_id = link_id;
	rcu_assign_pointer(sta->link[link_id], link_info);
	rcu_assign_pointer(sta->sta.link[link_id], link_sta);

	link_sta->smps_mode = IEEE80211_SMPS_OFF;
	link_sta->agg.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;
}

static struct sta_info *
__sta_info_alloc(struct ieee80211_sub_if_data *sdata,
		 const u8 *addr, int link_id, const u8 *link_addr,
		 gfp_t gfp)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_hw *hw = &local->hw;
	struct sta_info *sta;
	void *txq_data;
	int size;
	int i;

	sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
	if (!sta)
		return NULL;

	sta->local = local;
	sta->sdata = sdata;

	if (sta_info_alloc_link(local, &sta->deflink, gfp))
		goto free;

	if (link_id >= 0) {
		sta_info_add_link(sta, link_id, &sta->deflink,
				  &sta->sta.deflink);
		sta->sta.valid_links = BIT(link_id);
	} else {
		sta_info_add_link(sta, 0, &sta->deflink, &sta->sta.deflink);
	}

	sta->sta.cur = &sta->sta.deflink.agg;

	spin_lock_init(&sta->lock);
	spin_lock_init(&sta->ps_lock);
	INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames);
	INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
	mutex_init(&sta->ampdu_mlme.mtx);
#ifdef CONFIG_MAC80211_MESH
	if (ieee80211_vif_is_mesh(&sdata->vif)) {
		sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);
		if (!sta->mesh)
			goto free;
		sta->mesh->plink_sta = sta;
		spin_lock_init(&sta->mesh->plink_lock);
		if (!sdata->u.mesh.user_mpm)
			timer_setup(&sta->mesh->plink_timer, mesh_plink_timer,
				    0);
		sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
	}
#endif

	memcpy(sta->addr, addr, ETH_ALEN);
	memcpy(sta->sta.addr, addr, ETH_ALEN);
	memcpy(sta->deflink.addr, link_addr, ETH_ALEN);
	memcpy(sta->sta.deflink.addr, link_addr, ETH_ALEN);
	sta->sta.max_rx_aggregation_subframes =
		local->hw.max_rx_aggregation_subframes;

	/* TODO link specific alloc and assignments for MLO Link STA */

	/* Extended Key ID needs to install keys for keyid 0 and 1 Rx-only.
	 * The Tx path starts to use a key as soon as the key slot that
	 * ptk_idx references is not NULL. To not use the initial Rx-only key
	 * prematurely for Tx, initialize ptk_idx to an impossible PTK keyid
	 * which always will refer to a NULL key.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(sta->ptk) <= INVALID_PTK_KEYIDX);
	sta->ptk_idx = INVALID_PTK_KEYIDX;

	ieee80211_init_frag_cache(&sta->frags);

	sta->sta_state = IEEE80211_STA_NONE;

	if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
		sta->amsdu_mesh_control = -1;

	/* Mark TID as unreserved */
	sta->reserved_tid = IEEE80211_TID_UNRESERVED;

	sta->last_connected = ktime_get_seconds();

	size = sizeof(struct txq_info) +
	       ALIGN(hw->txq_data_size, sizeof(void *));

	txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp);
	if (!txq_data)
		goto free;

	for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
		struct txq_info *txq = txq_data + i * size;

		/* might not do anything for the (bufferable) MMPDU TXQ */
		ieee80211_txq_init(sdata, sta, txq, i);
	}

	if (sta_prepare_rate_control(local, sta, gfp))
		goto free_txq;

	sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		skb_queue_head_init(&sta->ps_tx_buf[i]);
		skb_queue_head_init(&sta->tx_filtered[i]);
		sta->airtime[i].deficit = sta->airtime_weight;
		atomic_set(&sta->airtime[i].aql_tx_pending, 0);
		sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i];
		sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i];
	}

	for (i = 0; i < IEEE80211_NUM_TIDS; i++)
		sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);

	for (i = 0; i < NUM_NL80211_BANDS; i++) {
		u32 mandatory = 0;
		int r;

		if (!hw->wiphy->bands[i])
			continue;

		switch (i) {
		case NL80211_BAND_2GHZ:
		case NL80211_BAND_LC:
			/*
			 * We use both here, even if we cannot really know for
			 * sure the station will support both, but the only use
			 * for this is when we don't know anything yet and send
			 * management frames, and then we'll pick the lowest
			 * possible rate anyway.
			 * If we don't include _G here, we cannot find a rate
			 * in P2P, and thus trigger the WARN_ONCE() in rate.c
			 */
			mandatory = IEEE80211_RATE_MANDATORY_B |
				    IEEE80211_RATE_MANDATORY_G;
			break;
		case NL80211_BAND_5GHZ:
			mandatory = IEEE80211_RATE_MANDATORY_A;
			break;
		case NL80211_BAND_60GHZ:
			WARN_ON(1);
			mandatory = 0;
			break;
		}

		for (r = 0; r < hw->wiphy->bands[i]->n_bitrates; r++) {
			struct ieee80211_rate *rate;

			rate = &hw->wiphy->bands[i]->bitrates[r];

			if (!(rate->flags & mandatory))
				continue;
			sta->sta.deflink.supp_rates[i] |= BIT(r);
		}
	}

	sta->cparams.ce_threshold = CODEL_DISABLED_THRESHOLD;
	sta->cparams.target = MS2TIME(20);
	sta->cparams.interval = MS2TIME(100);
	sta->cparams.ecn = true;
	sta->cparams.ce_threshold_selector = 0;
	sta->cparams.ce_threshold_mask = 0;

	sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);

	return sta;

free_txq:
	kfree(to_txq_info(sta->sta.txq[0]));
free:
	sta_info_free_link(&sta->deflink);
#ifdef CONFIG_MAC80211_MESH
	kfree(sta->mesh);
#endif
	kfree(sta);
	return NULL;
}

struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
				const u8 *addr, gfp_t gfp)
{
	return __sta_info_alloc(sdata, addr, -1, addr, gfp);
}

struct sta_info *sta_info_alloc_with_link(struct ieee80211_sub_if_data *sdata,
					  const u8 *mld_addr,
					  unsigned int link_id,
					  const u8 *link_addr,
					  gfp_t gfp)
{
	return __sta_info_alloc(sdata, mld_addr, link_id, link_addr, gfp);
}

static int sta_info_insert_check(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;

	/*
	 * Can't be a WARN_ON because it can be triggered through a race:
	 * something inserts a STA (on one CPU) without holding the RTNL
	 * and another CPU turns off the net device.
	 */
	if (unlikely(!ieee80211_sdata_running(sdata)))
		return -ENETDOWN;

	if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) ||
		    !is_valid_ether_addr(sta->sta.addr)))
		return -EINVAL;

	/* The RCU read lock is required by rhashtable due to
	 * asynchronous resize/rehash. We also require the mutex
	 * for correctness.
	 */
	rcu_read_lock();
	lockdep_assert_held(&sdata->local->sta_mtx);
	if (ieee80211_hw_check(&sdata->local->hw, NEEDS_UNIQUE_STA_ADDR) &&
	    ieee80211_find_sta_by_ifaddr(&sdata->local->hw, sta->addr, NULL)) {
		rcu_read_unlock();
		return -ENOTUNIQ;
	}
	rcu_read_unlock();

	return 0;
}

static int sta_info_insert_drv_state(struct ieee80211_local *local,
				     struct ieee80211_sub_if_data *sdata,
				     struct sta_info *sta)
{
	enum ieee80211_sta_state state;
	int err = 0;

	for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) {
		err = drv_sta_state(local, sdata, sta, state, state + 1);
		if (err)
			break;
	}

	if (!err) {
		/*
		 * Drivers using legacy sta_add/sta_remove callbacks only
		 * get uploaded set to true after sta_add is called.
		 */
		if (!local->ops->sta_add)
			sta->uploaded = true;
		return 0;
	}

	if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		sdata_info(sdata,
			   "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n",
			   sta->sta.addr, state + 1, err);
		err = 0;
	}

	/* unwind on error */
	for (; state > IEEE80211_STA_NOTEXIST; state--)
		WARN_ON(drv_sta_state(local, sdata, sta, state, state - 1));

	return err;
}

static void
ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	bool allow_p2p_go_ps = sdata->vif.p2p;
	struct sta_info *sta;

	rcu_read_lock();
	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sdata != sta->sdata ||
		    !test_sta_flag(sta, WLAN_STA_ASSOC))
			continue;
		if (!sta->sta.support_p2p_ps) {
			allow_p2p_go_ps = false;
			break;
		}
	}
	rcu_read_unlock();

	if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) {
		sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps;
		ieee80211_link_info_change_notify(sdata, &sdata->deflink,
						  BSS_CHANGED_P2P_PS);
	}
}

/*
 * should be called with sta_mtx locked
 * this function replaces the mutex lock
 * with an RCU lock
 */
static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
{
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct station_info *sinfo = NULL;
	int err = 0;

	lockdep_assert_held(&local->sta_mtx);

	/* check if STA exists already */
	if (sta_info_get_bss(sdata, sta->sta.addr)) {
		err = -EEXIST;
		goto out_cleanup;
	}

	sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
	if (!sinfo) {
		err = -ENOMEM;
		goto out_cleanup;
	}

	local->num_sta++;
	local->sta_generation++;
	smp_mb();

	/* simplify things and don't accept BA sessions yet */
	set_sta_flag(sta, WLAN_STA_BLOCK_BA);

	/* make the station visible */
	err = sta_info_hash_add(local, sta);
	if (err)
		goto out_drop_sta;

	if (sta->sta.valid_links) {
		err = link_sta_info_hash_add(local, &sta->deflink);
		if (err) {
			sta_info_hash_del(local, sta);
			goto out_drop_sta;
		}
	}

	list_add_tail_rcu(&sta->list, &local->sta_list);

	/* update channel context before notifying the driver about state
	 * change; this enables the driver to use the updated channel context
	 * right away.
	 */
	if (sta->sta_state >= IEEE80211_STA_ASSOC) {
		ieee80211_recalc_min_chandef(sta->sdata, -1);
		if (!sta->sta.support_p2p_ps)
			ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
	}

	/* notify driver */
	err = sta_info_insert_drv_state(local, sdata, sta);
	if (err)
		goto out_remove;

	set_sta_flag(sta, WLAN_STA_INSERTED);

	/* accept BA sessions now */
	clear_sta_flag(sta, WLAN_STA_BLOCK_BA);

	ieee80211_sta_debugfs_add(sta);
	rate_control_add_sta_debugfs(sta);
	if (sta->sta.valid_links) {
		int i;

		for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
			struct link_sta_info *link_sta;

			link_sta = rcu_dereference_protected(sta->link[i],
							     lockdep_is_held(&local->sta_mtx));

			if (!link_sta)
				continue;

			ieee80211_link_sta_debugfs_add(link_sta);
			if (sdata->vif.active_links & BIT(i))
				ieee80211_link_sta_debugfs_drv_add(link_sta);
		}
	} else {
		ieee80211_link_sta_debugfs_add(&sta->deflink);
		ieee80211_link_sta_debugfs_drv_add(&sta->deflink);
	}

	sinfo->generation = local->sta_generation;
	cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
	kfree(sinfo);

	sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);

	/* move reference to rcu-protected */
	rcu_read_lock();
	mutex_unlock(&local->sta_mtx);

	if (ieee80211_vif_is_mesh(&sdata->vif))
		mesh_accept_plinks_update(sdata);

	return 0;
 out_remove:
	if (sta->sta.valid_links)
		link_sta_info_hash_del(local, &sta->deflink);
	sta_info_hash_del(local, sta);
	list_del_rcu(&sta->list);
 out_drop_sta:
	local->num_sta--;
	synchronize_net();
 out_cleanup:
	cleanup_single_sta(sta);
	mutex_unlock(&local->sta_mtx);
	kfree(sinfo);
	rcu_read_lock();
	return err;
}

int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
{
	struct ieee80211_local *local = sta->local;
	int err;

	might_sleep();

	mutex_lock(&local->sta_mtx);

	err = sta_info_insert_check(sta);
	if (err) {
		sta_info_free(local, sta);
		mutex_unlock(&local->sta_mtx);
		rcu_read_lock();
		return err;
	}

	return sta_info_insert_finish(sta);
}

int sta_info_insert(struct sta_info *sta)
{
	int err = sta_info_insert_rcu(sta);

	rcu_read_unlock();

	return err;
}

static inline void __bss_tim_set(u8 *tim, u16 id)
{
	/*
	 * This format has been mandated by the IEEE specifications,
	 * so this line may not be changed to use the __set_bit() format.
	 */
	tim[id / 8] |= (1 << (id % 8));
}

static inline void __bss_tim_clear(u8 *tim, u16 id)
{
	/*
	 * This format has been mandated by the IEEE specifications,
	 * so this line may not be changed to use the __clear_bit() format.
	 */
	tim[id / 8] &= ~(1 << (id % 8));
}

static inline bool __bss_tim_get(u8 *tim, u16 id)
{
	/*
	 * This format has been mandated by the IEEE specifications,
	 * so this line may not be changed to use the test_bit() format.
	 */
	return tim[id / 8] & (1 << (id % 8));
}
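
/*
 * Worked example of the mapping used by the helpers above (illustrative
 * only): AID 13 lands in octet 13 / 8 == 1 of the TIM bitmap, at bit
 * 13 % 8 == 5, so __bss_tim_set(tim, 13) is equivalent to tim[1] |= 0x20.
 */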

static unsigned long ieee80211_tids_for_ac(int ac)
{
	/* If we ever support TIDs > 7, this obviously needs to be adjusted */
	switch (ac) {
	case IEEE80211_AC_VO:
		return BIT(6) | BIT(7);
	case IEEE80211_AC_VI:
		return BIT(4) | BIT(5);
	case IEEE80211_AC_BE:
		return BIT(0) | BIT(3);
	case IEEE80211_AC_BK:
		return BIT(1) | BIT(2);
	default:
		WARN_ON(1);
		return 0;
	}
}

static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
{
	struct ieee80211_local *local = sta->local;
	struct ps_data *ps;
	bool indicate_tim = false;
	u8 ignore_for_tim = sta->sta.uapsd_queues;
	int ac;
	u16 id = sta->sta.aid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
		if (WARN_ON_ONCE(!sta->sdata->bss))
			return;

		ps = &sta->sdata->bss->ps;
#ifdef CONFIG_MAC80211_MESH
	} else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
		ps = &sta->sdata->u.mesh.ps;
#endif
	} else {
		return;
	}

	/* No need to do anything if the driver does all */
	if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim)
		return;

	if (sta->dead)
		goto done;

	/*
	 * If all ACs are delivery-enabled then we should build
	 * the TIM bit for all ACs anyway; if only some are then
	 * we ignore those and build the TIM bit using only the
	 * non-enabled ones.
	 */
	if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1)
		ignore_for_tim = 0;

	if (ignore_pending)
		ignore_for_tim = BIT(IEEE80211_NUM_ACS) - 1;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		unsigned long tids;

		if (ignore_for_tim & ieee80211_ac_to_qos_mask[ac])
			continue;

		indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) ||
				!skb_queue_empty(&sta->ps_tx_buf[ac]);
		if (indicate_tim)
			break;

		tids = ieee80211_tids_for_ac(ac);

		indicate_tim |=
			sta->driver_buffered_tids & tids;
		indicate_tim |=
			sta->txq_buffered_tids & tids;
	}

 done:
	spin_lock_bh(&local->tim_lock);

	if (indicate_tim == __bss_tim_get(ps->tim, id))
		goto out_unlock;

	if (indicate_tim)
		__bss_tim_set(ps->tim, id);
	else
		__bss_tim_clear(ps->tim, id);

	if (local->ops->set_tim && !WARN_ON(sta->dead)) {
		local->tim_in_locked_section = true;
		drv_set_tim(local, &sta->sta, indicate_tim);
		local->tim_in_locked_section = false;
	}

 out_unlock:
	spin_unlock_bh(&local->tim_lock);
}

void sta_info_recalc_tim(struct sta_info *sta)
{
	__sta_info_recalc_tim(sta, false);
}

static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info;
	int timeout;

	if (!skb)
		return false;

	info = IEEE80211_SKB_CB(skb);

	/* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec,
	 * with 2 * 1024 / 10^6 folded into the 32 / 15625 fraction below
	 */
	timeout = (sta->listen_interval *
		   sta->sdata->vif.bss_conf.beacon_int *
		   32 / 15625) * HZ;
	if (timeout < STA_TX_BUFFER_EXPIRE)
		timeout = STA_TX_BUFFER_EXPIRE;
	return time_after(jiffies, info->control.jiffies + timeout);
}


static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
						struct sta_info *sta, int ac)
{
	unsigned long flags;
	struct sk_buff *skb;

	/*
	 * First check for frames that should expire on the filtered
	 * queue. Frames here were rejected by the driver and are on
	 * a separate queue to avoid reordering with normal PS-buffered
	 * frames. They also aren't accounted for right now in the
	 * total_ps_buffered counter.
	 */
	for (;;) {
		spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
		skb = skb_peek(&sta->tx_filtered[ac]);
		if (sta_info_buffer_expired(sta, skb))
			skb = __skb_dequeue(&sta->tx_filtered[ac]);
		else
			skb = NULL;
		spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);

		/*
		 * Frames are queued in order, so if this one
		 * hasn't expired yet we can stop testing. If
		 * we actually reached the end of the queue we
		 * also need to stop, of course.
		 */
		if (!skb)
			break;
		ieee80211_free_txskb(&local->hw, skb);
	}

	/*
	 * Now also check the normal PS-buffered queue, this will
	 * only find something if the filtered queue was emptied
	 * since the filtered frames are all before the normal PS
	 * buffered frames.
	 */
	for (;;) {
		spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
		skb = skb_peek(&sta->ps_tx_buf[ac]);
		if (sta_info_buffer_expired(sta, skb))
			skb = __skb_dequeue(&sta->ps_tx_buf[ac]);
		else
			skb = NULL;
		spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);

		/*
		 * frames are queued in order, so if this one
		 * hasn't expired yet (or we reached the end of
		 * the queue) we can stop testing
		 */
		if (!skb)
			break;

		local->total_ps_buffered--;
		ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n",
		       sta->sta.addr);
		ieee80211_free_txskb(&local->hw, skb);
	}

	/*
	 * Finally, recalculate the TIM bit for this station -- it might
	 * now be clear because the station was too slow to retrieve its
	 * frames.
	 */
	sta_info_recalc_tim(sta);

	/*
	 * Return whether there are any frames still buffered, this is
	 * used to check whether the cleanup timer still needs to run,
	 * if there are no frames we don't need to rearm the timer.
	 */
	return !(skb_queue_empty(&sta->ps_tx_buf[ac]) &&
		 skb_queue_empty(&sta->tx_filtered[ac]));
}

static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
					     struct sta_info *sta)
{
	bool have_buffered = false;
	int ac;

	/* This is only necessary for stations on BSS/MBSS interfaces */
	if (!sta->sdata->bss &&
	    !ieee80211_vif_is_mesh(&sta->sdata->vif))
		return false;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		have_buffered |=
			sta_info_cleanup_expire_buffered_ac(local, sta, ac);

	return have_buffered;
}

static int __must_check __sta_info_destroy_part1(struct sta_info *sta)
{
	struct ieee80211_local *local;
	struct ieee80211_sub_if_data *sdata;
	int ret, i;

	might_sleep();

	if (!sta)
		return -ENOENT;

	local = sta->local;
	sdata = sta->sdata;

	lockdep_assert_held(&local->sta_mtx);

	/*
	 * Before removing the station from the driver and
	 * rate control, it might still start new aggregation
	 * sessions -- block that to make sure the tear-down
	 * will be sufficient.
	 */
	set_sta_flag(sta, WLAN_STA_BLOCK_BA);
	ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA);

	/*
	 * Before removing the station from the driver there might be pending
	 * rx frames on RSS queues sent prior to the disassociation - wait for
	 * all such frames to be processed.
	 */
	drv_sync_rx_queues(local, sta);

	for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
		struct link_sta_info *link_sta;

		if (!(sta->sta.valid_links & BIT(i)))
			continue;

		link_sta = rcu_dereference_protected(sta->link[i],
						     lockdep_is_held(&local->sta_mtx));

		link_sta_info_hash_del(local, link_sta);
	}

	ret = sta_info_hash_del(local, sta);
	if (WARN_ON(ret))
		return ret;

	/*
	 * for TDLS peers, make sure to return to the base channel before
	 * removal.
	 */
	if (test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) {
		drv_tdls_cancel_channel_switch(local, sdata, &sta->sta);
		clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL);
	}

	list_del_rcu(&sta->list);
	sta->removed = true;

	drv_sta_pre_rcu_remove(local, sta->sdata, sta);

	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
	    rcu_access_pointer(sdata->u.vlan.sta) == sta)
		RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);

	return 0;
}

static void __sta_info_destroy_part2(struct sta_info *sta)
{
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct station_info *sinfo;
	int ret;

	/*
	 * NOTE: This assumes at least synchronize_net() was done
	 *	 after _part1 and before _part2!
	 */

	might_sleep();
	lockdep_assert_held(&local->sta_mtx);

	if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
		ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
		WARN_ON_ONCE(ret);
	}

	/* now keys can no longer be reached */
	ieee80211_free_sta_keys(local, sta);

	/* disable TIM bit - last chance to tell driver */
	__sta_info_recalc_tim(sta, true);

	sta->dead = true;

	local->num_sta--;
	local->sta_generation++;

	while (sta->sta_state > IEEE80211_STA_NONE) {
		ret = sta_info_move_state(sta, sta->sta_state - 1);
		if (ret) {
			WARN_ON_ONCE(1);
			break;
		}
	}

	if (sta->uploaded) {
		ret = drv_sta_state(local, sdata, sta, IEEE80211_STA_NONE,
				    IEEE80211_STA_NOTEXIST);
		WARN_ON_ONCE(ret != 0);
	}

	sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);

	sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
	if (sinfo)
		sta_set_sinfo(sta, sinfo, true);
	cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
	kfree(sinfo);

	ieee80211_sta_debugfs_remove(sta);

	ieee80211_destroy_frag_cache(&sta->frags);

	cleanup_single_sta(sta);
}

int __must_check __sta_info_destroy(struct sta_info *sta)
{
	int err = __sta_info_destroy_part1(sta);

	if (err)
		return err;

	synchronize_net();

	__sta_info_destroy_part2(sta);

	return 0;
}

int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct sta_info *sta;
	int ret;

	mutex_lock(&sdata->local->sta_mtx);
	sta = sta_info_get(sdata, addr);
	ret = __sta_info_destroy(sta);
	mutex_unlock(&sdata->local->sta_mtx);

	return ret;
}

int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
			      const u8 *addr)
{
	struct sta_info *sta;
	int ret;

	mutex_lock(&sdata->local->sta_mtx);
	sta = sta_info_get_bss(sdata, addr);
	ret = __sta_info_destroy(sta);
	mutex_unlock(&sdata->local->sta_mtx);

	return ret;
}

static void sta_info_cleanup(struct timer_list *t)
{
	struct ieee80211_local *local = from_timer(local, t, sta_cleanup);
	struct sta_info *sta;
	bool timer_needed = false;

	rcu_read_lock();
	list_for_each_entry_rcu(sta, &local->sta_list, list)
		if (sta_info_cleanup_expire_buffered(local, sta))
			timer_needed = true;
	rcu_read_unlock();

	if (local->quiescing)
		return;

	if (!timer_needed)
		return;

	mod_timer(&local->sta_cleanup,
		  round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL));
}

int sta_info_init(struct ieee80211_local *local)
{
	int err;

	err = rhltable_init(&local->sta_hash, &sta_rht_params);
	if (err)
		return err;

	err = rhltable_init(&local->link_sta_hash, &link_sta_rht_params);
	if (err) {
		rhltable_destroy(&local->sta_hash);
		return err;
	}

	spin_lock_init(&local->tim_lock);
	mutex_init(&local->sta_mtx);
	INIT_LIST_HEAD(&local->sta_list);

	timer_setup(&local->sta_cleanup, sta_info_cleanup, 0);
	return 0;
}

void sta_info_stop(struct ieee80211_local *local)
{
	del_timer_sync(&local->sta_cleanup);
	rhltable_destroy(&local->sta_hash);
	rhltable_destroy(&local->link_sta_hash);
}

int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta, *tmp;
	LIST_HEAD(free_list);
	int ret = 0;

	might_sleep();

	WARN_ON(vlans && sdata->vif.type != NL80211_IFTYPE_AP);
	WARN_ON(vlans && !sdata->bss);

	mutex_lock(&local->sta_mtx);
	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
		if (sdata == sta->sdata ||
		    (vlans && sdata->bss == sta->sdata->bss)) {
			if (!WARN_ON(__sta_info_destroy_part1(sta)))
				list_add(&sta->free_list, &free_list);
			ret++;
		}
	}

	if (!list_empty(&free_list)) {
		synchronize_net();
		list_for_each_entry_safe(sta, tmp, &free_list, free_list)
			__sta_info_destroy_part2(sta);
	}
	mutex_unlock(&local->sta_mtx);

	return ret;
}

void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
			  unsigned long exp_time)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta, *tmp;

	mutex_lock(&local->sta_mtx);

	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
		unsigned long last_active = ieee80211_sta_last_active(sta);

		if (sdata != sta->sdata)
			continue;

		if (time_is_before_jiffies(last_active + exp_time)) {
			sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
				sta->sta.addr);

			if (ieee80211_vif_is_mesh(&sdata->vif) &&
			    test_sta_flag(sta, WLAN_STA_PS_STA))
				atomic_dec(&sdata->u.mesh.ps.num_sta_ps);

			WARN_ON(__sta_info_destroy(sta));
		}
	}

	mutex_unlock(&local->sta_mtx);
}

struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
						   const u8 *addr,
						   const u8 *localaddr)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct rhlist_head *tmp;
	struct sta_info *sta;

	/*
	 * Just return a random station if localaddr is NULL
	 * ... first in list.
	 */
	for_each_sta_info(local, addr, sta, tmp) {
		if (localaddr &&
		    !ether_addr_equal(sta->sdata->vif.addr, localaddr))
			continue;
		if (!sta->uploaded)
			return NULL;
		return &sta->sta;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_ifaddr);

struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
					 const u8 *addr)
{
	struct sta_info *sta;

	if (!vif)
		return NULL;

	sta = sta_info_get_bss(vif_to_sdata(vif), addr);
	if (!sta)
		return NULL;

	if (!sta->uploaded)
		return NULL;

	return &sta->sta;
}
EXPORT_SYMBOL(ieee80211_find_sta);
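
/*
 * Illustrative sketch (not part of mac80211 itself): a driver-side lookup
 * using the exported helper above.  Per the lifetime rules at the top of
 * this file, the returned pointer is only safe to use inside an RCU
 * read-side section; "vif" and "ra" are assumptions standing in for the
 * caller's context.
 *
 *	struct ieee80211_sta *pubsta;
 *
 *	rcu_read_lock();
 *	pubsta = ieee80211_find_sta(vif, ra);
 *	if (pubsta)
 *		... use pubsta ...
 *	rcu_read_unlock();
 */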

/* powersave support code */
void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct sk_buff_head pending;
	int filtered = 0, buffered = 0, ac, i;
	unsigned long flags;
	struct ps_data *ps;

	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
				     u.ap);

	if (sdata->vif.type == NL80211_IFTYPE_AP)
		ps = &sdata->bss->ps;
	else if (ieee80211_vif_is_mesh(&sdata->vif))
		ps = &sdata->u.mesh.ps;
	else
		return;

	clear_sta_flag(sta, WLAN_STA_SP);

	BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1);
	sta->driver_buffered_tids = 0;
	sta->txq_buffered_tids = 0;

	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);

	for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
		if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i]))
			continue;

		schedule_and_wake_txq(local, to_txq_info(sta->sta.txq[i]));
	}

	skb_queue_head_init(&pending);

	/* sync with ieee80211_tx_h_unicast_ps_buf */
	spin_lock(&sta->ps_lock);
	/* Send all buffered frames to the station */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		int count = skb_queue_len(&pending), tmp;

		spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
		skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
		spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
		tmp = skb_queue_len(&pending);
		filtered += tmp - count;
		count = tmp;

		spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
		skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
		spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
		tmp = skb_queue_len(&pending);
		buffered += tmp - count;
	}

	ieee80211_add_pending_skbs(local, &pending);

	/* now we're no longer in the deliver code */
	clear_sta_flag(sta, WLAN_STA_PS_DELIVER);

	/* The station might have polled and then woken up before we responded,
	 * so clear these flags now to avoid them sticking around.
	 */
	clear_sta_flag(sta, WLAN_STA_PSPOLL);
	clear_sta_flag(sta, WLAN_STA_UAPSD);
	spin_unlock(&sta->ps_lock);

	atomic_dec(&ps->num_sta_ps);

	local->total_ps_buffered -= buffered;

	sta_info_recalc_tim(sta);

	ps_dbg(sdata,
	       "STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n",
	       sta->sta.addr, sta->sta.aid, filtered, buffered);

	ieee80211_check_fast_xmit(sta);
}

static void ieee80211_send_null_response(struct sta_info *sta, int tid,
					 enum ieee80211_frame_release_type reason,
					 bool call_driver, bool more_data)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_qos_hdr *nullfunc;
	struct sk_buff *skb;
	int size = sizeof(*nullfunc);
	__le16 fc;
	bool qos = sta->sta.wme;
	struct ieee80211_tx_info *info;
	struct ieee80211_chanctx_conf *chanctx_conf;

	if (qos) {
		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
				 IEEE80211_STYPE_QOS_NULLFUNC |
				 IEEE80211_FCTL_FROMDS);
	} else {
		size -= 2;
		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
				 IEEE80211_STYPE_NULLFUNC |
				 IEEE80211_FCTL_FROMDS);
	}

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);

	nullfunc = skb_put(skb, size);
	nullfunc->frame_control = fc;
	nullfunc->duration_id = 0;
	memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
	memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
	memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
	nullfunc->seq_ctrl = 0;

	skb->priority = tid;
	skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
	if (qos) {
		nullfunc->qos_ctrl = cpu_to_le16(tid);

		if (reason == IEEE80211_FRAME_RELEASE_UAPSD) {
			nullfunc->qos_ctrl |=
				cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
			if (more_data)
				nullfunc->frame_control |=
					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
		}
	}

	info = IEEE80211_SKB_CB(skb);

	/*
	 * Tell TX path to send this frame even though the
	 * STA may still remain in PS mode after this frame
	 * exchange. Also set EOSP to indicate this packet
	 * ends the poll/service period.
	 */
	info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
		       IEEE80211_TX_STATUS_EOSP |
		       IEEE80211_TX_CTL_REQ_TX_STATUS;

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;

	if (call_driver)
		drv_allow_buffered_frames(local, sta, BIT(tid), 1,
					  reason, false);

	skb->dev = sdata->dev;

	rcu_read_lock();
	chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
	if (WARN_ON(!chanctx_conf)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return;
	}

	info->band = chanctx_conf->def.chan->band;
	ieee80211_xmit(sdata, sta, skb);
	rcu_read_unlock();
}

static int find_highest_prio_tid(unsigned long tids)
{
	/* lower 3 TIDs aren't ordered perfectly */
	if (tids & 0xF8)
		return fls(tids) - 1;
	/* TID 0 is BE just like TID 3 */
	if (tids & BIT(0))
		return 0;
	return fls(tids) - 1;
}

/* Indicates if the MORE_DATA bit should be set in the last
 * frame obtained by ieee80211_sta_ps_get_frames.
 * Note that driver_release_tids is relevant only if
 * reason = IEEE80211_FRAME_RELEASE_PSPOLL
 */
static bool
ieee80211_sta_ps_more_data(struct sta_info *sta, u8 ignored_acs,
			   enum ieee80211_frame_release_type reason,
			   unsigned long driver_release_tids)
{
	int ac;

	/* If the driver has data on more than one TID then
	 * certainly there's more data if we release just a
	 * single frame now (from a single TID). This will
	 * only happen for PS-Poll.
	 */
	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL &&
	    hweight16(driver_release_tids) > 1)
		return true;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		if (ignored_acs & ieee80211_ac_to_qos_mask[ac])
			continue;

		if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
		    !skb_queue_empty(&sta->ps_tx_buf[ac]))
			return true;
	}

	return false;
}

static void
ieee80211_sta_ps_get_frames(struct sta_info *sta, int n_frames, u8 ignored_acs,
			    enum ieee80211_frame_release_type reason,
			    struct sk_buff_head *frames,
			    unsigned long *driver_release_tids)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	int ac;

	/* Get response frame(s) and more data bit for the last one. */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		unsigned long tids;

		if (ignored_acs & ieee80211_ac_to_qos_mask[ac])
			continue;

		tids = ieee80211_tids_for_ac(ac);

		/* if we already have frames from software, then we can't also
		 * release from hardware queues
		 */
		if (skb_queue_empty(frames)) {
			*driver_release_tids |=
				sta->driver_buffered_tids & tids;
			*driver_release_tids |= sta->txq_buffered_tids & tids;
		}

		if (!*driver_release_tids) {
			struct sk_buff *skb;

			while (n_frames > 0) {
				skb = skb_dequeue(&sta->tx_filtered[ac]);
				if (!skb) {
					skb = skb_dequeue(
						&sta->ps_tx_buf[ac]);
					if (skb)
						local->total_ps_buffered--;
				}
				if (!skb)
					break;
				n_frames--;
				__skb_queue_tail(frames, skb);
			}
		}

		/* If we have more frames buffered on this AC, then abort the
		 * loop since we can't send more data from other ACs before
		 * the buffered frames from this.
		 */
		if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
		    !skb_queue_empty(&sta->ps_tx_buf[ac]))
			break;
	}
}

static void
ieee80211_sta_ps_deliver_response(struct sta_info *sta,
				  int n_frames, u8 ignored_acs,
				  enum ieee80211_frame_release_type reason)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	unsigned long driver_release_tids = 0;
	struct sk_buff_head frames;
	bool more_data;

	/* Service or PS-Poll period starts */
	set_sta_flag(sta, WLAN_STA_SP);

	__skb_queue_head_init(&frames);

	ieee80211_sta_ps_get_frames(sta, n_frames, ignored_acs, reason,
				    &frames, &driver_release_tids);

	more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason,
					       driver_release_tids);

	if (driver_release_tids && reason == IEEE80211_FRAME_RELEASE_PSPOLL)
		driver_release_tids =
			BIT(find_highest_prio_tid(driver_release_tids));

	if (skb_queue_empty(&frames) && !driver_release_tids) {
		int tid, ac;

		/*
		 * For PS-Poll, this can only happen due to a race condition
		 * when we set the TIM bit and the station notices it, but
		 * before it can poll for the frame we expire it.
		 *
		 * For uAPSD, this is said in the standard (11.2.1.5 h):
		 *	At each unscheduled SP for a non-AP STA, the AP shall
		 *	attempt to transmit at least one MSDU or MMPDU, but no
		 *	more than the value specified in the Max SP Length field
		 *	in the QoS Capability element from delivery-enabled ACs,
		 *	that are destined for the non-AP STA.
		 *
		 * Since we have no other MSDU/MMPDU, transmit a QoS null frame.
		 */

		/* This will evaluate to 1, 3, 5 or 7. */
		for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++)
			if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac]))
				break;
		tid = 7 - 2 * ac;

		ieee80211_send_null_response(sta, tid, reason, true, false);
	} else if (!driver_release_tids) {
		struct sk_buff_head pending;
		struct sk_buff *skb;
		int num = 0;
		u16 tids = 0;
		bool need_null = false;

		skb_queue_head_init(&pending);

		while ((skb = __skb_dequeue(&frames))) {
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
			struct ieee80211_hdr *hdr = (void *) skb->data;
			u8 *qoshdr = NULL;

			num++;

			/*
			 * Tell TX path to send this frame even though the
			 * STA may still remain in PS mode after this frame
			 * exchange.
			 */
			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
			info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;

			/*
			 * Use MoreData flag to indicate whether there are
			 * more buffered frames for this STA
			 */
			if (more_data || !skb_queue_empty(&frames))
				hdr->frame_control |=
					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
			else
				hdr->frame_control &=
					cpu_to_le16(~IEEE80211_FCTL_MOREDATA);

			if (ieee80211_is_data_qos(hdr->frame_control) ||
			    ieee80211_is_qos_nullfunc(hdr->frame_control))
				qoshdr = ieee80211_get_qos_ctl(hdr);

			tids |= BIT(skb->priority);

			__skb_queue_tail(&pending, skb);

			/* end service period after last frame or add one */
			if (!skb_queue_empty(&frames))
				continue;

			if (reason != IEEE80211_FRAME_RELEASE_UAPSD) {
				/* for PS-Poll, there's only one frame */
				info->flags |= IEEE80211_TX_STATUS_EOSP |
					       IEEE80211_TX_CTL_REQ_TX_STATUS;
				break;
			}

			/* For uAPSD, things are a bit more complicated. If the
			 * last frame has a QoS header (i.e. is a QoS-data or
			 * QoS-nulldata frame) then just set the EOSP bit there
			 * and be done.
			 * If the frame doesn't have a QoS header (which means
			 * it should be a bufferable MMPDU) then we can't set
			 * the EOSP bit in the QoS header; add a QoS-nulldata
			 * frame to the list to send it after the MMPDU.
			 *
			 * Note that this code is only in the mac80211-release
			 * code path, we assume that the driver will not buffer
			 * anything but QoS-data frames, or if it does, will
			 * create the QoS-nulldata frame by itself if needed.
			 *
			 * Cf. 802.11-2012 10.2.1.10 (c).
			 */
			if (qoshdr) {
				*qoshdr |= IEEE80211_QOS_CTL_EOSP;

				info->flags |= IEEE80211_TX_STATUS_EOSP |
					       IEEE80211_TX_CTL_REQ_TX_STATUS;
			} else {
				/* The standard isn't completely clear on this
				 * as it says the more-data bit should be set
				 * if there are more BUs. The QoS-Null frame
				 * we're about to send isn't buffered yet, we
				 * only create it below, but let's pretend it
				 * was buffered just in case some clients only
				 * expect more-data=0 when eosp=1.
				 */
				hdr->frame_control |=
					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
				need_null = true;
				num++;
			}
			break;
		}

		drv_allow_buffered_frames(local, sta, tids, num,
					  reason, more_data);

		ieee80211_add_pending_skbs(local, &pending);

		if (need_null)
			ieee80211_send_null_response(
				sta, find_highest_prio_tid(tids),
				reason, false, false);

		sta_info_recalc_tim(sta);
	} else {
		int tid;

		/*
		 * We need to release a frame that is buffered somewhere in the
		 * driver ... it'll have to handle that.
		 * Note that the driver also has to check the number of frames
		 * on the TIDs we're releasing from - if there are more than
		 * n_frames it has to set the more-data bit (if we didn't ask
		 * it to set it anyway due to other buffered frames); if there
		 * are fewer than n_frames it has to make sure to adjust that
		 * to allow the service period to end properly.
		 */
		drv_release_buffered_frames(local, sta, driver_release_tids,
					    n_frames, reason, more_data);

		/*
		 * Note that we don't recalculate the TIM bit here as it would
		 * most likely have no effect at all unless the driver told us
		 * that the TID(s) became empty before returning here from the
		 * release function.
1978 * Either way, however, when the driver tells us that the TID(s) 1979 * became empty or we find that a txq became empty, we'll do the 1980 * TIM recalculation. 1981 */ 1982 1983 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { 1984 if (!sta->sta.txq[tid] || 1985 !(driver_release_tids & BIT(tid)) || 1986 txq_has_queue(sta->sta.txq[tid])) 1987 continue; 1988 1989 sta_info_recalc_tim(sta); 1990 break; 1991 } 1992 } 1993 } 1994 1995 void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta) 1996 { 1997 u8 ignore_for_response = sta->sta.uapsd_queues; 1998 1999 /* 2000 * If all ACs are delivery-enabled then we should reply 2001 * from any of them, if only some are enabled we reply 2002 * only from the non-enabled ones. 2003 */ 2004 if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1) 2005 ignore_for_response = 0; 2006 2007 ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response, 2008 IEEE80211_FRAME_RELEASE_PSPOLL); 2009 } 2010 2011 void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta) 2012 { 2013 int n_frames = sta->sta.max_sp; 2014 u8 delivery_enabled = sta->sta.uapsd_queues; 2015 2016 /* 2017 * If we ever grow support for TSPEC this might happen if 2018 * the TSPEC update from hostapd comes in between a trigger 2019 * frame setting WLAN_STA_UAPSD in the RX path and this 2020 * actually getting called. 2021 */ 2022 if (!delivery_enabled) 2023 return; 2024 2025 switch (sta->sta.max_sp) { 2026 case 1: 2027 n_frames = 2; 2028 break; 2029 case 2: 2030 n_frames = 4; 2031 break; 2032 case 3: 2033 n_frames = 6; 2034 break; 2035 case 0: 2036 /* XXX: what is a good value? */ 2037 n_frames = 128; 2038 break; 2039 } 2040 2041 ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled, 2042 IEEE80211_FRAME_RELEASE_UAPSD); 2043 } 2044 2045 void ieee80211_sta_block_awake(struct ieee80211_hw *hw, 2046 struct ieee80211_sta *pubsta, bool block) 2047 { 2048 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2049 2050 trace_api_sta_block_awake(sta->local, pubsta, block); 2051 2052 if (block) { 2053 set_sta_flag(sta, WLAN_STA_PS_DRIVER); 2054 ieee80211_clear_fast_xmit(sta); 2055 return; 2056 } 2057 2058 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) 2059 return; 2060 2061 if (!test_sta_flag(sta, WLAN_STA_PS_STA)) { 2062 set_sta_flag(sta, WLAN_STA_PS_DELIVER); 2063 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 2064 ieee80211_queue_work(hw, &sta->drv_deliver_wk); 2065 } else if (test_sta_flag(sta, WLAN_STA_PSPOLL) || 2066 test_sta_flag(sta, WLAN_STA_UAPSD)) { 2067 /* must be asleep in this case */ 2068 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 2069 ieee80211_queue_work(hw, &sta->drv_deliver_wk); 2070 } else { 2071 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 2072 ieee80211_check_fast_xmit(sta); 2073 } 2074 } 2075 EXPORT_SYMBOL(ieee80211_sta_block_awake); 2076 2077 void ieee80211_sta_eosp(struct ieee80211_sta *pubsta) 2078 { 2079 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2080 struct ieee80211_local *local = sta->local; 2081 2082 trace_api_eosp(local, pubsta); 2083 2084 clear_sta_flag(sta, WLAN_STA_SP); 2085 } 2086 EXPORT_SYMBOL(ieee80211_sta_eosp); 2087 2088 void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid) 2089 { 2090 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2091 enum ieee80211_frame_release_type reason; 2092 bool more_data; 2093 2094 trace_api_send_eosp_nullfunc(sta->local, pubsta, tid); 2095 2096 reason = IEEE80211_FRAME_RELEASE_UAPSD; 2097 more_data = ieee80211_sta_ps_more_data(sta, 
~sta->sta.uapsd_queues, 2098 reason, 0); 2099 2100 ieee80211_send_null_response(sta, tid, reason, false, more_data); 2101 } 2102 EXPORT_SYMBOL(ieee80211_send_eosp_nullfunc); 2103 2104 void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta, 2105 u8 tid, bool buffered) 2106 { 2107 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2108 2109 if (WARN_ON(tid >= IEEE80211_NUM_TIDS)) 2110 return; 2111 2112 trace_api_sta_set_buffered(sta->local, pubsta, tid, buffered); 2113 2114 if (buffered) 2115 set_bit(tid, &sta->driver_buffered_tids); 2116 else 2117 clear_bit(tid, &sta->driver_buffered_tids); 2118 2119 sta_info_recalc_tim(sta); 2120 } 2121 EXPORT_SYMBOL(ieee80211_sta_set_buffered); 2122 2123 void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid, 2124 u32 tx_airtime, u32 rx_airtime) 2125 { 2126 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2127 struct ieee80211_local *local = sta->sdata->local; 2128 u8 ac = ieee80211_ac_from_tid(tid); 2129 u32 airtime = 0; 2130 u32 diff; 2131 2132 if (sta->local->airtime_flags & AIRTIME_USE_TX) 2133 airtime += tx_airtime; 2134 if (sta->local->airtime_flags & AIRTIME_USE_RX) 2135 airtime += rx_airtime; 2136 2137 spin_lock_bh(&local->active_txq_lock[ac]); 2138 sta->airtime[ac].tx_airtime += tx_airtime; 2139 sta->airtime[ac].rx_airtime += rx_airtime; 2140 2141 diff = (u32)jiffies - sta->airtime[ac].last_active; 2142 if (diff <= AIRTIME_ACTIVE_DURATION) 2143 sta->airtime[ac].deficit -= airtime; 2144 2145 spin_unlock_bh(&local->active_txq_lock[ac]); 2146 } 2147 EXPORT_SYMBOL(ieee80211_sta_register_airtime); 2148 2149 void __ieee80211_sta_recalc_aggregates(struct sta_info *sta, u16 active_links) 2150 { 2151 bool first = true; 2152 int link_id; 2153 2154 if (!sta->sta.valid_links || !sta->sta.mlo) { 2155 sta->sta.cur = &sta->sta.deflink.agg; 2156 return; 2157 } 2158 2159 rcu_read_lock(); 2160 for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) { 2161 struct ieee80211_link_sta *link_sta; 2162 int i; 2163 2164 if (!(active_links & BIT(link_id))) 2165 continue; 2166 2167 link_sta = rcu_dereference(sta->sta.link[link_id]); 2168 if (!link_sta) 2169 continue; 2170 2171 if (first) { 2172 sta->cur = sta->sta.deflink.agg; 2173 first = false; 2174 continue; 2175 } 2176 2177 sta->cur.max_amsdu_len = 2178 min(sta->cur.max_amsdu_len, 2179 link_sta->agg.max_amsdu_len); 2180 sta->cur.max_rc_amsdu_len = 2181 min(sta->cur.max_rc_amsdu_len, 2182 link_sta->agg.max_rc_amsdu_len); 2183 2184 for (i = 0; i < ARRAY_SIZE(sta->cur.max_tid_amsdu_len); i++) 2185 sta->cur.max_tid_amsdu_len[i] = 2186 min(sta->cur.max_tid_amsdu_len[i], 2187 link_sta->agg.max_tid_amsdu_len[i]); 2188 } 2189 rcu_read_unlock(); 2190 2191 sta->sta.cur = &sta->cur; 2192 } 2193 2194 void ieee80211_sta_recalc_aggregates(struct ieee80211_sta *pubsta) 2195 { 2196 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2197 2198 __ieee80211_sta_recalc_aggregates(sta, sta->sdata->vif.active_links); 2199 } 2200 EXPORT_SYMBOL(ieee80211_sta_recalc_aggregates); 2201 2202 void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local, 2203 struct sta_info *sta, u8 ac, 2204 u16 tx_airtime, bool tx_completed) 2205 { 2206 int tx_pending; 2207 2208 if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) 2209 return; 2210 2211 if (!tx_completed) { 2212 if (sta) 2213 atomic_add(tx_airtime, 2214 &sta->airtime[ac].aql_tx_pending); 2215 2216 atomic_add(tx_airtime, &local->aql_total_pending_airtime); 2217 atomic_add(tx_airtime, 
&local->aql_ac_pending_airtime[ac]); 2218 return; 2219 } 2220 2221 if (sta) { 2222 tx_pending = atomic_sub_return(tx_airtime, 2223 &sta->airtime[ac].aql_tx_pending); 2224 if (tx_pending < 0) 2225 atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending, 2226 tx_pending, 0); 2227 } 2228 2229 atomic_sub(tx_airtime, &local->aql_total_pending_airtime); 2230 tx_pending = atomic_sub_return(tx_airtime, 2231 &local->aql_ac_pending_airtime[ac]); 2232 if (WARN_ONCE(tx_pending < 0, 2233 "Device %s AC %d pending airtime underflow: %u, %u", 2234 wiphy_name(local->hw.wiphy), ac, tx_pending, 2235 tx_airtime)) { 2236 atomic_cmpxchg(&local->aql_ac_pending_airtime[ac], 2237 tx_pending, 0); 2238 atomic_sub(tx_pending, &local->aql_total_pending_airtime); 2239 } 2240 } 2241 2242 int sta_info_move_state(struct sta_info *sta, 2243 enum ieee80211_sta_state new_state) 2244 { 2245 might_sleep(); 2246 2247 if (sta->sta_state == new_state) 2248 return 0; 2249 2250 /* check allowed transitions first */ 2251 2252 switch (new_state) { 2253 case IEEE80211_STA_NONE: 2254 if (sta->sta_state != IEEE80211_STA_AUTH) 2255 return -EINVAL; 2256 break; 2257 case IEEE80211_STA_AUTH: 2258 if (sta->sta_state != IEEE80211_STA_NONE && 2259 sta->sta_state != IEEE80211_STA_ASSOC) 2260 return -EINVAL; 2261 break; 2262 case IEEE80211_STA_ASSOC: 2263 if (sta->sta_state != IEEE80211_STA_AUTH && 2264 sta->sta_state != IEEE80211_STA_AUTHORIZED) 2265 return -EINVAL; 2266 break; 2267 case IEEE80211_STA_AUTHORIZED: 2268 if (sta->sta_state != IEEE80211_STA_ASSOC) 2269 return -EINVAL; 2270 break; 2271 default: 2272 WARN(1, "invalid state %d", new_state); 2273 return -EINVAL; 2274 } 2275 2276 sta_dbg(sta->sdata, "moving STA %pM to state %d\n", 2277 sta->sta.addr, new_state); 2278 2279 /* 2280 * notify the driver before the actual changes so it can 2281 * fail the transition 2282 */ 2283 if (test_sta_flag(sta, WLAN_STA_INSERTED)) { 2284 int err = drv_sta_state(sta->local, sta->sdata, sta, 2285 sta->sta_state, new_state); 2286 if (err) 2287 return err; 2288 } 2289 2290 /* reflect the change in all state variables */ 2291 2292 switch (new_state) { 2293 case IEEE80211_STA_NONE: 2294 if (sta->sta_state == IEEE80211_STA_AUTH) 2295 clear_bit(WLAN_STA_AUTH, &sta->_flags); 2296 break; 2297 case IEEE80211_STA_AUTH: 2298 if (sta->sta_state == IEEE80211_STA_NONE) { 2299 set_bit(WLAN_STA_AUTH, &sta->_flags); 2300 } else if (sta->sta_state == IEEE80211_STA_ASSOC) { 2301 clear_bit(WLAN_STA_ASSOC, &sta->_flags); 2302 ieee80211_recalc_min_chandef(sta->sdata, -1); 2303 if (!sta->sta.support_p2p_ps) 2304 ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); 2305 } 2306 break; 2307 case IEEE80211_STA_ASSOC: 2308 if (sta->sta_state == IEEE80211_STA_AUTH) { 2309 set_bit(WLAN_STA_ASSOC, &sta->_flags); 2310 sta->assoc_at = ktime_get_boottime_ns(); 2311 ieee80211_recalc_min_chandef(sta->sdata, -1); 2312 if (!sta->sta.support_p2p_ps) 2313 ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); 2314 } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { 2315 ieee80211_vif_dec_num_mcast(sta->sdata); 2316 clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 2317 ieee80211_clear_fast_xmit(sta); 2318 ieee80211_clear_fast_rx(sta); 2319 } 2320 break; 2321 case IEEE80211_STA_AUTHORIZED: 2322 if (sta->sta_state == IEEE80211_STA_ASSOC) { 2323 ieee80211_vif_inc_num_mcast(sta->sdata); 2324 set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 2325 ieee80211_check_fast_xmit(sta); 2326 ieee80211_check_fast_rx(sta); 2327 } 2328 if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 2329 sta->sdata->vif.type == 
NL80211_IFTYPE_AP) 2330 cfg80211_send_layer2_update(sta->sdata->dev, 2331 sta->sta.addr); 2332 break; 2333 default: 2334 break; 2335 } 2336 2337 sta->sta_state = new_state; 2338 2339 return 0; 2340 } 2341 2342 static struct ieee80211_sta_rx_stats * 2343 sta_get_last_rx_stats(struct sta_info *sta) 2344 { 2345 struct ieee80211_sta_rx_stats *stats = &sta->deflink.rx_stats; 2346 int cpu; 2347 2348 if (!sta->deflink.pcpu_rx_stats) 2349 return stats; 2350 2351 for_each_possible_cpu(cpu) { 2352 struct ieee80211_sta_rx_stats *cpustats; 2353 2354 cpustats = per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu); 2355 2356 if (time_after(cpustats->last_rx, stats->last_rx)) 2357 stats = cpustats; 2358 } 2359 2360 return stats; 2361 } 2362 2363 static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate, 2364 struct rate_info *rinfo) 2365 { 2366 rinfo->bw = STA_STATS_GET(BW, rate); 2367 2368 switch (STA_STATS_GET(TYPE, rate)) { 2369 case STA_STATS_RATE_TYPE_VHT: 2370 rinfo->flags = RATE_INFO_FLAGS_VHT_MCS; 2371 rinfo->mcs = STA_STATS_GET(VHT_MCS, rate); 2372 rinfo->nss = STA_STATS_GET(VHT_NSS, rate); 2373 if (STA_STATS_GET(SGI, rate)) 2374 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; 2375 break; 2376 case STA_STATS_RATE_TYPE_HT: 2377 rinfo->flags = RATE_INFO_FLAGS_MCS; 2378 rinfo->mcs = STA_STATS_GET(HT_MCS, rate); 2379 if (STA_STATS_GET(SGI, rate)) 2380 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; 2381 break; 2382 case STA_STATS_RATE_TYPE_LEGACY: { 2383 struct ieee80211_supported_band *sband; 2384 u16 brate; 2385 unsigned int shift; 2386 int band = STA_STATS_GET(LEGACY_BAND, rate); 2387 int rate_idx = STA_STATS_GET(LEGACY_IDX, rate); 2388 2389 sband = local->hw.wiphy->bands[band]; 2390 2391 if (WARN_ON_ONCE(!sband->bitrates)) 2392 break; 2393 2394 brate = sband->bitrates[rate_idx].bitrate; 2395 if (rinfo->bw == RATE_INFO_BW_5) 2396 shift = 2; 2397 else if (rinfo->bw == RATE_INFO_BW_10) 2398 shift = 1; 2399 else 2400 shift = 0; 2401 rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); 2402 break; 2403 } 2404 case STA_STATS_RATE_TYPE_HE: 2405 rinfo->flags = RATE_INFO_FLAGS_HE_MCS; 2406 rinfo->mcs = STA_STATS_GET(HE_MCS, rate); 2407 rinfo->nss = STA_STATS_GET(HE_NSS, rate); 2408 rinfo->he_gi = STA_STATS_GET(HE_GI, rate); 2409 rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate); 2410 rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate); 2411 break; 2412 case STA_STATS_RATE_TYPE_EHT: 2413 rinfo->flags = RATE_INFO_FLAGS_EHT_MCS; 2414 rinfo->mcs = STA_STATS_GET(EHT_MCS, rate); 2415 rinfo->nss = STA_STATS_GET(EHT_NSS, rate); 2416 rinfo->eht_gi = STA_STATS_GET(EHT_GI, rate); 2417 rinfo->eht_ru_alloc = STA_STATS_GET(EHT_RU, rate); 2418 break; 2419 } 2420 } 2421 2422 static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) 2423 { 2424 u32 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate); 2425 2426 if (rate == STA_STATS_RATE_INVALID) 2427 return -EINVAL; 2428 2429 sta_stats_decode_rate(sta->local, rate, rinfo); 2430 return 0; 2431 } 2432 2433 static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats, 2434 int tid) 2435 { 2436 unsigned int start; 2437 u64 value; 2438 2439 do { 2440 start = u64_stats_fetch_begin(&rxstats->syncp); 2441 value = rxstats->msdu[tid]; 2442 } while (u64_stats_fetch_retry(&rxstats->syncp, start)); 2443 2444 return value; 2445 } 2446 2447 static void sta_set_tidstats(struct sta_info *sta, 2448 struct cfg80211_tid_stats *tidstats, 2449 int tid) 2450 { 2451 struct ieee80211_local *local = sta->local; 2452 int cpu; 2453 2454 if (!(tidstats->filled & 
BIT(NL80211_TID_STATS_RX_MSDU))) { 2455 tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->deflink.rx_stats, 2456 tid); 2457 2458 if (sta->deflink.pcpu_rx_stats) { 2459 for_each_possible_cpu(cpu) { 2460 struct ieee80211_sta_rx_stats *cpurxs; 2461 2462 cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, 2463 cpu); 2464 tidstats->rx_msdu += 2465 sta_get_tidstats_msdu(cpurxs, tid); 2466 } 2467 } 2468 2469 tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU); 2470 } 2471 2472 if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) { 2473 tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU); 2474 tidstats->tx_msdu = sta->deflink.tx_stats.msdu[tid]; 2475 } 2476 2477 if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) && 2478 ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { 2479 tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES); 2480 tidstats->tx_msdu_retries = sta->deflink.status_stats.msdu_retries[tid]; 2481 } 2482 2483 if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) && 2484 ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { 2485 tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED); 2486 tidstats->tx_msdu_failed = sta->deflink.status_stats.msdu_failed[tid]; 2487 } 2488 2489 if (tid < IEEE80211_NUM_TIDS) { 2490 spin_lock_bh(&local->fq.lock); 2491 rcu_read_lock(); 2492 2493 tidstats->filled |= BIT(NL80211_TID_STATS_TXQ_STATS); 2494 ieee80211_fill_txq_stats(&tidstats->txq_stats, 2495 to_txq_info(sta->sta.txq[tid])); 2496 2497 rcu_read_unlock(); 2498 spin_unlock_bh(&local->fq.lock); 2499 } 2500 } 2501 2502 static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats) 2503 { 2504 unsigned int start; 2505 u64 value; 2506 2507 do { 2508 start = u64_stats_fetch_begin(&rxstats->syncp); 2509 value = rxstats->bytes; 2510 } while (u64_stats_fetch_retry(&rxstats->syncp, start)); 2511 2512 return value; 2513 } 2514 2515 void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, 2516 bool tidstats) 2517 { 2518 struct ieee80211_sub_if_data *sdata = sta->sdata; 2519 struct ieee80211_local *local = sdata->local; 2520 u32 thr = 0; 2521 int i, ac, cpu; 2522 struct ieee80211_sta_rx_stats *last_rxstats; 2523 2524 last_rxstats = sta_get_last_rx_stats(sta); 2525 2526 sinfo->generation = sdata->local->sta_generation; 2527 2528 /* do before driver, so beacon filtering drivers have a 2529 * chance to e.g. 
just add the number of filtered beacons 2530 * (or just modify the value entirely, of course) 2531 */ 2532 if (sdata->vif.type == NL80211_IFTYPE_STATION) 2533 sinfo->rx_beacon = sdata->deflink.u.mgd.count_beacon_signal; 2534 2535 drv_sta_statistics(local, sdata, &sta->sta, sinfo); 2536 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) | 2537 BIT_ULL(NL80211_STA_INFO_STA_FLAGS) | 2538 BIT_ULL(NL80211_STA_INFO_BSS_PARAM) | 2539 BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) | 2540 BIT_ULL(NL80211_STA_INFO_ASSOC_AT_BOOTTIME) | 2541 BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC); 2542 2543 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 2544 sinfo->beacon_loss_count = 2545 sdata->deflink.u.mgd.beacon_loss_count; 2546 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS); 2547 } 2548 2549 sinfo->connected_time = ktime_get_seconds() - sta->last_connected; 2550 sinfo->assoc_at = sta->assoc_at; 2551 sinfo->inactive_time = 2552 jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta)); 2553 2554 if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) | 2555 BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) { 2556 sinfo->tx_bytes = 0; 2557 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2558 sinfo->tx_bytes += sta->deflink.tx_stats.bytes[ac]; 2559 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); 2560 } 2561 2562 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) { 2563 sinfo->tx_packets = 0; 2564 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2565 sinfo->tx_packets += sta->deflink.tx_stats.packets[ac]; 2566 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); 2567 } 2568 2569 if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) | 2570 BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) { 2571 sinfo->rx_bytes += sta_get_stats_bytes(&sta->deflink.rx_stats); 2572 2573 if (sta->deflink.pcpu_rx_stats) { 2574 for_each_possible_cpu(cpu) { 2575 struct ieee80211_sta_rx_stats *cpurxs; 2576 2577 cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, 2578 cpu); 2579 sinfo->rx_bytes += sta_get_stats_bytes(cpurxs); 2580 } 2581 } 2582 2583 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); 2584 } 2585 2586 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) { 2587 sinfo->rx_packets = sta->deflink.rx_stats.packets; 2588 if (sta->deflink.pcpu_rx_stats) { 2589 for_each_possible_cpu(cpu) { 2590 struct ieee80211_sta_rx_stats *cpurxs; 2591 2592 cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, 2593 cpu); 2594 sinfo->rx_packets += cpurxs->packets; 2595 } 2596 } 2597 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); 2598 } 2599 2600 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) { 2601 sinfo->tx_retries = sta->deflink.status_stats.retry_count; 2602 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); 2603 } 2604 2605 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) { 2606 sinfo->tx_failed = sta->deflink.status_stats.retry_failed; 2607 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); 2608 } 2609 2610 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION))) { 2611 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2612 sinfo->rx_duration += sta->airtime[ac].rx_airtime; 2613 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); 2614 } 2615 2616 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_DURATION))) { 2617 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2618 sinfo->tx_duration += sta->airtime[ac].tx_airtime; 2619 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION); 2620 } 2621 2622 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) { 2623 sinfo->airtime_weight = 
sta->airtime_weight; 2624 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT); 2625 } 2626 2627 sinfo->rx_dropped_misc = sta->deflink.rx_stats.dropped; 2628 if (sta->deflink.pcpu_rx_stats) { 2629 for_each_possible_cpu(cpu) { 2630 struct ieee80211_sta_rx_stats *cpurxs; 2631 2632 cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu); 2633 sinfo->rx_dropped_misc += cpurxs->dropped; 2634 } 2635 } 2636 2637 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2638 !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) { 2639 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) | 2640 BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); 2641 sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif); 2642 } 2643 2644 if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) || 2645 ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) { 2646 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) { 2647 sinfo->signal = (s8)last_rxstats->last_signal; 2648 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); 2649 } 2650 2651 if (!sta->deflink.pcpu_rx_stats && 2652 !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) { 2653 sinfo->signal_avg = 2654 -ewma_signal_read(&sta->deflink.rx_stats_avg.signal); 2655 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); 2656 } 2657 } 2658 2659 /* for the average - if pcpu_rx_stats isn't set - rxstats must point to 2660 * the sta->rx_stats struct, so the check here is fine with and without 2661 * pcpu statistics 2662 */ 2663 if (last_rxstats->chains && 2664 !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) | 2665 BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) { 2666 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); 2667 if (!sta->deflink.pcpu_rx_stats) 2668 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); 2669 2670 sinfo->chains = last_rxstats->chains; 2671 2672 for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { 2673 sinfo->chain_signal[i] = 2674 last_rxstats->chain_signal_last[i]; 2675 sinfo->chain_signal_avg[i] = 2676 -ewma_signal_read(&sta->deflink.rx_stats_avg.chain_signal[i]); 2677 } 2678 } 2679 2680 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) && 2681 !sta->sta.valid_links) { 2682 sta_set_rate_info_tx(sta, &sta->deflink.tx_stats.last_rate, 2683 &sinfo->txrate); 2684 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); 2685 } 2686 2687 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) && 2688 !sta->sta.valid_links) { 2689 if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0) 2690 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); 2691 } 2692 2693 if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) { 2694 for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) 2695 sta_set_tidstats(sta, &sinfo->pertid[i], i); 2696 } 2697 2698 if (ieee80211_vif_is_mesh(&sdata->vif)) { 2699 #ifdef CONFIG_MAC80211_MESH 2700 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) | 2701 BIT_ULL(NL80211_STA_INFO_PLID) | 2702 BIT_ULL(NL80211_STA_INFO_PLINK_STATE) | 2703 BIT_ULL(NL80211_STA_INFO_LOCAL_PM) | 2704 BIT_ULL(NL80211_STA_INFO_PEER_PM) | 2705 BIT_ULL(NL80211_STA_INFO_NONPEER_PM) | 2706 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE) | 2707 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_AS); 2708 2709 sinfo->llid = sta->mesh->llid; 2710 sinfo->plid = sta->mesh->plid; 2711 sinfo->plink_state = sta->mesh->plink_state; 2712 if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { 2713 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET); 2714 sinfo->t_offset = sta->mesh->t_offset; 2715 } 2716 sinfo->local_pm = sta->mesh->local_pm; 2717 
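		/* peer_pm is the power save mode the peer advertised towards
		 * us, nonpeer_pm its mode towards non-peer mesh STAs, both
		 * tracked by the mesh PS code (cf. local_pm just above).
		 */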
sinfo->peer_pm = sta->mesh->peer_pm; 2718 sinfo->nonpeer_pm = sta->mesh->nonpeer_pm; 2719 sinfo->connected_to_gate = sta->mesh->connected_to_gate; 2720 sinfo->connected_to_as = sta->mesh->connected_to_as; 2721 #endif 2722 } 2723 2724 sinfo->bss_param.flags = 0; 2725 if (sdata->vif.bss_conf.use_cts_prot) 2726 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT; 2727 if (sdata->vif.bss_conf.use_short_preamble) 2728 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; 2729 if (sdata->vif.bss_conf.use_short_slot) 2730 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; 2731 sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period; 2732 sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int; 2733 2734 sinfo->sta_flags.set = 0; 2735 sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) | 2736 BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) | 2737 BIT(NL80211_STA_FLAG_WME) | 2738 BIT(NL80211_STA_FLAG_MFP) | 2739 BIT(NL80211_STA_FLAG_AUTHENTICATED) | 2740 BIT(NL80211_STA_FLAG_ASSOCIATED) | 2741 BIT(NL80211_STA_FLAG_TDLS_PEER); 2742 if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 2743 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED); 2744 if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE)) 2745 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE); 2746 if (sta->sta.wme) 2747 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME); 2748 if (test_sta_flag(sta, WLAN_STA_MFP)) 2749 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP); 2750 if (test_sta_flag(sta, WLAN_STA_AUTH)) 2751 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED); 2752 if (test_sta_flag(sta, WLAN_STA_ASSOC)) 2753 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED); 2754 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) 2755 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); 2756 2757 thr = sta_get_expected_throughput(sta); 2758 2759 if (thr != 0) { 2760 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT); 2761 sinfo->expected_throughput = thr; 2762 } 2763 2764 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) && 2765 sta->deflink.status_stats.ack_signal_filled) { 2766 sinfo->ack_signal = sta->deflink.status_stats.last_ack_signal; 2767 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); 2768 } 2769 2770 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) && 2771 sta->deflink.status_stats.ack_signal_filled) { 2772 sinfo->avg_ack_signal = 2773 -(s8)ewma_avg_signal_read( 2774 &sta->deflink.status_stats.avg_ack_signal); 2775 sinfo->filled |= 2776 BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); 2777 } 2778 2779 if (ieee80211_vif_is_mesh(&sdata->vif)) { 2780 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_LINK_METRIC); 2781 sinfo->airtime_link_metric = 2782 airtime_link_metric_get(local, sta); 2783 } 2784 } 2785 2786 u32 sta_get_expected_throughput(struct sta_info *sta) 2787 { 2788 struct ieee80211_sub_if_data *sdata = sta->sdata; 2789 struct ieee80211_local *local = sdata->local; 2790 struct rate_control_ref *ref = NULL; 2791 u32 thr = 0; 2792 2793 if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) 2794 ref = local->rate_ctrl; 2795 2796 /* check if the driver has a SW RC implementation */ 2797 if (ref && ref->ops->get_expected_throughput) 2798 thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv); 2799 else 2800 thr = drv_get_expected_throughput(local, sta); 2801 2802 return thr; 2803 } 2804 2805 unsigned long ieee80211_sta_last_active(struct sta_info *sta) 2806 { 2807 struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta); 2808 2809 if 
(!sta->deflink.status_stats.last_ack || 2810 time_after(stats->last_rx, sta->deflink.status_stats.last_ack)) 2811 return stats->last_rx; 2812 return sta->deflink.status_stats.last_ack; 2813 } 2814 2815 static void sta_update_codel_params(struct sta_info *sta, u32 thr) 2816 { 2817 if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) { 2818 sta->cparams.target = MS2TIME(50); 2819 sta->cparams.interval = MS2TIME(300); 2820 sta->cparams.ecn = false; 2821 } else { 2822 sta->cparams.target = MS2TIME(20); 2823 sta->cparams.interval = MS2TIME(100); 2824 sta->cparams.ecn = true; 2825 } 2826 } 2827 2828 void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta, 2829 u32 thr) 2830 { 2831 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2832 2833 sta_update_codel_params(sta, thr); 2834 } 2835 2836 int ieee80211_sta_allocate_link(struct sta_info *sta, unsigned int link_id) 2837 { 2838 struct ieee80211_sub_if_data *sdata = sta->sdata; 2839 struct sta_link_alloc *alloc; 2840 int ret; 2841 2842 lockdep_assert_held(&sdata->local->sta_mtx); 2843 2844 /* must represent an MLD from the start */ 2845 if (WARN_ON(!sta->sta.valid_links)) 2846 return -EINVAL; 2847 2848 if (WARN_ON(sta->sta.valid_links & BIT(link_id) || 2849 sta->link[link_id])) 2850 return -EBUSY; 2851 2852 alloc = kzalloc(sizeof(*alloc), GFP_KERNEL); 2853 if (!alloc) 2854 return -ENOMEM; 2855 2856 ret = sta_info_alloc_link(sdata->local, &alloc->info, GFP_KERNEL); 2857 if (ret) { 2858 kfree(alloc); 2859 return ret; 2860 } 2861 2862 sta_info_add_link(sta, link_id, &alloc->info, &alloc->sta); 2863 2864 ieee80211_link_sta_debugfs_add(&alloc->info); 2865 2866 return 0; 2867 } 2868 2869 void ieee80211_sta_free_link(struct sta_info *sta, unsigned int link_id) 2870 { 2871 lockdep_assert_held(&sta->sdata->local->sta_mtx); 2872 2873 sta_remove_link(sta, link_id, false); 2874 } 2875 2876 int ieee80211_sta_activate_link(struct sta_info *sta, unsigned int link_id) 2877 { 2878 struct ieee80211_sub_if_data *sdata = sta->sdata; 2879 struct link_sta_info *link_sta; 2880 u16 old_links = sta->sta.valid_links; 2881 u16 new_links = old_links | BIT(link_id); 2882 int ret; 2883 2884 link_sta = rcu_dereference_protected(sta->link[link_id], 2885 lockdep_is_held(&sdata->local->sta_mtx)); 2886 2887 if (WARN_ON(old_links == new_links || !link_sta)) 2888 return -EINVAL; 2889 2890 rcu_read_lock(); 2891 if (link_sta_info_hash_lookup(sdata->local, link_sta->addr)) { 2892 rcu_read_unlock(); 2893 return -EALREADY; 2894 } 2895 /* we only modify under the mutex so this is fine */ 2896 rcu_read_unlock(); 2897 2898 sta->sta.valid_links = new_links; 2899 2900 if (!test_sta_flag(sta, WLAN_STA_INSERTED)) 2901 goto hash; 2902 2903 /* Ensure the values are updated for the driver, 2904 * redone by sta_remove_link on failure. 
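	 * (ieee80211_sta_recalc_aggregates() below folds the new link's
	 * A-MSDU limits into sta->sta.cur before drv_change_sta_links()
	 * is called.)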
2905 */ 2906 ieee80211_sta_recalc_aggregates(&sta->sta); 2907 2908 ret = drv_change_sta_links(sdata->local, sdata, &sta->sta, 2909 old_links, new_links); 2910 if (ret) { 2911 sta->sta.valid_links = old_links; 2912 sta_remove_link(sta, link_id, false); 2913 return ret; 2914 } 2915 2916 hash: 2917 ret = link_sta_info_hash_add(sdata->local, link_sta); 2918 WARN_ON(ret); 2919 return 0; 2920 } 2921 2922 void ieee80211_sta_remove_link(struct sta_info *sta, unsigned int link_id) 2923 { 2924 struct ieee80211_sub_if_data *sdata = sta->sdata; 2925 u16 old_links = sta->sta.valid_links; 2926 2927 lockdep_assert_held(&sdata->local->sta_mtx); 2928 2929 sta->sta.valid_links &= ~BIT(link_id); 2930 2931 if (test_sta_flag(sta, WLAN_STA_INSERTED)) 2932 drv_change_sta_links(sdata->local, sdata, &sta->sta, 2933 old_links, sta->sta.valid_links); 2934 2935 sta_remove_link(sta, link_id, true); 2936 } 2937 2938 void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta, 2939 const u8 *ext_capab, 2940 unsigned int ext_capab_len) 2941 { 2942 u8 val; 2943 2944 sta->sta.max_amsdu_subframes = 0; 2945 2946 if (ext_capab_len < 8) 2947 return; 2948 2949 /* The sender might not have sent the last bit, consider it to be 0 */ 2950 val = u8_get_bits(ext_capab[7], WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB); 2951 2952 /* we did get all the bits, take the MSB as well */ 2953 if (ext_capab_len >= 9) 2954 val |= u8_get_bits(ext_capab[8], 2955 WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB) << 1; 2956 2957 if (val) 2958 sta->sta.max_amsdu_subframes = 4 << val; 2959 } 2960 2961 #ifdef CONFIG_LOCKDEP 2962 bool lockdep_sta_mutex_held(struct ieee80211_sta *pubsta) 2963 { 2964 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2965 2966 return lockdep_is_held(&sta->local->sta_mtx); 2967 } 2968 EXPORT_SYMBOL(lockdep_sta_mutex_held); 2969 #endif 2970
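
/*
 * Illustrative sketch (not part of mac80211): how a driver that buffers
 * frames in firmware might use the PS helpers exported above.  The
 * drv_fw_*() names and the calling context are hypothetical; only the
 * mac80211 calls themselves (ieee80211_sta_set_buffered(),
 * ieee80211_sta_block_awake(), ieee80211_sta_eosp()) are real.
 *
 *	// firmware queued a frame for @tid of a sleeping @pubsta:
 *	// have mac80211 set the TIM bit for that station
 *	static void drv_fw_frame_buffered(struct ieee80211_hw *hw,
 *					  struct ieee80211_sta *pubsta, u8 tid)
 *	{
 *		ieee80211_sta_set_buffered(pubsta, tid, true);
 *	}
 *
 *	// firmware needs mac80211 to stop releasing frames to the STA
 *	// for a while (e.g. during a firmware-internal flush)
 *	static void drv_fw_block_sta(struct ieee80211_hw *hw,
 *				     struct ieee80211_sta *pubsta, bool block)
 *	{
 *		ieee80211_sta_block_awake(hw, pubsta, block);
 *	}
 *
 *	// firmware reports that the uAPSD/PS-Poll service period ended
 *	static void drv_fw_sp_ended(struct ieee80211_sta *pubsta)
 *	{
 *		ieee80211_sta_eosp(pubsta);
 *	}
 */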