/*
 * HT handling
 *
 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007, Jiri Benc <jbenc@suse.cz>
 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
 * Copyright 2007-2010, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "wme.h"

/**
 * DOC: TX A-MPDU aggregation
 *
 * Aggregation on the TX side requires setting the hardware flag
 * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed
 * packets with a flag indicating A-MPDU aggregation. The driver
 * or device is responsible for actually aggregating the frames,
 * as well as deciding how many and which to aggregate.
 *
 * When TX aggregation is started by some subsystem (usually the rate
 * control algorithm would be appropriate) by calling the
 * ieee80211_start_tx_ba_session() function, the driver will be
 * notified via its @ampdu_action function, with the
 * %IEEE80211_AMPDU_TX_START action.
 *
 * In response to that, the driver is later required to call the
 * ieee80211_start_tx_ba_cb_irqsafe() function, which will really
 * start the aggregation session after the peer has also responded.
 * If the peer responds negatively, the session will be stopped
 * again right away. Note that it is possible for the aggregation
 * session to be stopped before the driver has indicated that it
 * is done setting it up, in which case it must not indicate the
 * setup completion.
 *
 * Also note that, since we also need to wait for a response from
 * the peer, the driver is notified of the completion of the
 * handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the
 * @ampdu_action callback.
 *
 * Similarly, when the aggregation session is stopped by the peer
 * or something calling ieee80211_stop_tx_ba_session(), the driver's
 * @ampdu_action function will be called with the action
 * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail,
 * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe().
 * Note that the sta can get destroyed before the BA tear down is
 * complete.
 */
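
/*
 * A minimal sketch (guarded out of the build) of the driver side of the
 * handshake described above, for a hypothetical "foo" driver whose
 * hardware needs no per-session setup. Everything named foo_* is an
 * assumption for illustration only; the mac80211 calls and action
 * values are the real API.
 */
#if 0
static int foo_ampdu_action(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    enum ieee80211_ampdu_mlme_action action,
			    struct ieee80211_sta *sta, u16 tid,
			    u16 *ssn, u8 buf_size)
{
	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		/* no device setup needed; let mac80211 send the ADDBA */
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* handshake complete; may now aggregate up to buf_size */
		return 0;
	case IEEE80211_AMPDU_TX_STOP_CONT:
		/* must not fail; finish the teardown asynchronously */
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		/* station is going away; drop queued frames, no callback */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif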

static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
					 const u8 *da, u16 tid,
					 u8 dialog_token, u16 start_seq_num,
					 u16 agg_size, u16 timeout)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u16 capab;

	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
	memset(mgmt, 0, 24);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	if (sdata->vif.type == NL80211_IFTYPE_AP ||
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
	    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
		memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
		memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);

	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));

	mgmt->u.action.category = WLAN_CATEGORY_BACK;
	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;

	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
	capab = (u16)(1 << 1);		/* bit 1: aggregation policy */
	capab |= (u16)(tid << 2);	/* bits 5:2: TID number */
	capab |= (u16)(agg_size << 6);	/* bits 15:6: max size of aggregation */

	mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);

	mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
	mgmt->u.action.u.addba_req.start_seq_num =
					cpu_to_le16(start_seq_num << 4);

	ieee80211_tx_skb(sdata, skb);
}

void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_bar *bar;
	u16 bar_control = 0;

	skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
	memset(bar, 0, sizeof(*bar));
	bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
					 IEEE80211_STYPE_BACK_REQ);
	memcpy(bar->ra, ra, ETH_ALEN);
	memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
	bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
	bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
	bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT);
	bar->control = cpu_to_le16(bar_control);
	bar->start_seq_num = cpu_to_le16(ssn);

	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
					IEEE80211_TX_CTL_REQ_TX_STATUS;
	ieee80211_tx_skb_tid(sdata, skb, tid);
}
EXPORT_SYMBOL(ieee80211_send_bar);

void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
			     struct tid_ampdu_tx *tid_tx)
{
	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
}

static inline int ieee80211_ac_from_tid(int tid)
{
	return ieee802_1d_to_ac[tid & 7];
}
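
/*
 * For reference, a sketch (guarded out of the build) of the ADDBA
 * "Block Ack Parameter Set" bit layout packed by
 * ieee80211_send_addba_request() above and decoded again in
 * ieee80211_process_addba_resp() below. The example_* helpers are
 * hypothetical; the masks come from linux/ieee80211.h.
 */
#if 0
static u16 example_addba_capab(u16 tid, u16 agg_size)
{
	u16 capab;

	capab = 1 << 1;			/* bit 1: immediate BA policy */
	capab |= tid << 2;		/* bits 5:2: TID */
	capab |= agg_size << 6;		/* bits 15:6: buffer size */

	return capab;
}

static void example_addba_decode(u16 capab, u16 *tid, u16 *buf_size)
{
	*tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
	*buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
}
#endif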

/*
 * When multiple aggregation sessions on multiple stations
 * are being created/destroyed simultaneously, we need to
 * refcount the global queue stop caused by that in order
 * to not get into a situation where one of the aggregation
 * setup or teardown re-enables queues before the other is
 * ready to handle that.
 *
 * These two functions take care of this issue by keeping
 * a global "agg_queue_stop" refcount.
 */
static void __acquires(agg_queue)
ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	/* we do refcounting here, so don't use the queue reason refcounting */

	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
		ieee80211_stop_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
			false);
	__acquire(agg_queue);
}

static void __releases(agg_queue)
ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
		ieee80211_wake_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
			false);
	__release(agg_queue);
}

/*
 * Splice packets from the STA's pending queue to the local pending
 * queue; requires a later call to ieee80211_agg_splice_finish().
 */
static void __acquires(agg_queue)
ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
			     struct tid_ampdu_tx *tid_tx, u16 tid)
{
	struct ieee80211_local *local = sdata->local;
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
	unsigned long flags;

	ieee80211_stop_queue_agg(sdata, tid);

	if (WARN(!tid_tx,
		 "TID %d gone but expected when splicing aggregates from the pending queue\n",
		 tid))
		return;

	if (!skb_queue_empty(&tid_tx->pending)) {
		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		/* copy over remaining packets */
		skb_queue_splice_tail_init(&tid_tx->pending,
					   &local->pending[queue]);
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
	}
}

static void __releases(agg_queue)
ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
{
	ieee80211_wake_queue_agg(sdata, tid);
}

static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * When we get here, the TX path will not be lockless any more wrt.
	 * aggregation, since the OPERATIONAL bit has long been cleared.
	 * Thus it will block on getting the lock, if it occurs. So if we
	 * stop the queue now, we will not get any more packets, and any
	 * that might be being processed will wait for us here, thereby
	 * guaranteeing that no packets go to the tid_tx pending queue any
	 * more.
	 */

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);

	/* future packets must not find the tid_tx struct any more */
	ieee80211_assign_tid_tx(sta, tid, NULL);

	ieee80211_agg_splice_finish(sta->sdata, tid);

	kfree_rcu(tid_tx, rcu_head);
}

int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_agg_stop_reason reason)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx;
	enum ieee80211_ampdu_mlme_action action;
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	switch (reason) {
	case AGG_STOP_DECLINED:
	case AGG_STOP_LOCAL_REQUEST:
	case AGG_STOP_PEER_REQUEST:
		action = IEEE80211_AMPDU_TX_STOP_CONT;
		break;
	case AGG_STOP_DESTROY_STA:
		action = IEEE80211_AMPDU_TX_STOP_FLUSH;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx) {
		spin_unlock_bh(&sta->lock);
		return -ENOENT;
	}

	/*
	 * If we're already stopping, ignore any new requests to stop,
	 * unless we're destroying the session, in which case we still
	 * notify the driver.
	 */
	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		spin_unlock_bh(&sta->lock);
		if (reason != AGG_STOP_DESTROY_STA)
			return -EALREADY;
		ret = drv_ampdu_action(local, sta->sdata,
				       IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
				       &sta->sta, tid, NULL, 0);
		WARN_ON_ONCE(ret);
		return 0;
	}

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		ieee80211_assign_tid_tx(sta, tid, NULL);
		spin_unlock_bh(&sta->lock);
		kfree_rcu(tid_tx, rcu_head);
		return 0;
	}

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	spin_unlock_bh(&sta->lock);

	ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);

	del_timer_sync(&tid_tx->addba_resp_timer);
	del_timer_sync(&tid_tx->session_timer);

	/*
	 * After this, packets are no longer handed right through
	 * to the driver but are put onto tid_tx->pending instead,
	 * with locking to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/*
	 * There might be a few packets being processed right now (on
	 * another CPU) that have already gotten past the aggregation
	 * check when it was still OPERATIONAL and consequently have
	 * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
	 * call into the driver at the same time or even before the
	 * TX path calls into it, which could confuse the driver.
	 *
	 * Wait for all currently running TX paths to finish before
	 * telling the driver. New packets will not go through since
	 * the aggregation session is no longer OPERATIONAL.
	 */
	synchronize_net();

	tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
					WLAN_BACK_RECIPIENT :
					WLAN_BACK_INITIATOR;
	tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;

	ret = drv_ampdu_action(local, sta->sdata, action,
			       &sta->sta, tid, NULL, 0);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	/*
	 * In the case of AGG_STOP_DESTROY_STA, the driver won't
	 * necessarily call ieee80211_stop_tx_ba_cb(), so this may
	 * seem like we can leave the tid_tx data pending forever.
	 * This is true, in a way, but "forever" is only until the
	 * station struct is actually destroyed. In the meantime,
	 * leaving it around ensures that we don't transmit packets
	 * to the driver on this TID which might confuse it.
	 */

	return 0;
}

/*
 * After sending an ADDBA request, we start a timer that runs until
 * the ADDBA response arrives from the recipient. If this timer
 * expires, sta_addba_resp_timer_expired() is executed.
 */
static void sta_addba_resp_timer_expired(unsigned long data)
{
	/*
	 * Not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and both the sta_info and the TID are needed.
	 * The init flow gives the TID as data, while the timer_to_tid
	 * array gives the sta through container_of().
	 */
	u16 tid = *(u8 *)data;
	struct sta_info *sta = container_of((void *)data,
					    struct sta_info, timer_to_tid[tid]);
	struct tid_ampdu_tx *tid_tx;

	/* check if the TID waits for addBA response */
	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx ||
	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
		rcu_read_unlock();
		ht_dbg(sta->sdata,
		       "timer expired on %pM tid %d but we are not (or no longer) expecting addBA response there\n",
		       sta->sta.addr, tid);
		return;
	}

	ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
	       sta->sta.addr, tid);

	ieee80211_stop_tx_ba_session(&sta->sta, tid);
	rcu_read_unlock();
}
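
/*
 * A sketch (guarded out of the build) of the pointer arithmetic the
 * timer callbacks in this file rely on: sta->timer_to_tid[] is
 * initialized so that timer_to_tid[i] == i, and each timer's data
 * points at &sta->timer_to_tid[tid]. The helper name is hypothetical.
 */
#if 0
static struct sta_info *example_sta_from_timer(unsigned long data, u16 *tid)
{
	u8 *ptid = (u8 *)data;
	u8 *timer_to_tid = ptid - *ptid;	/* back to element 0 */

	*tid = *ptid;
	return container_of(timer_to_tid, struct sta_info, timer_to_tid[0]);
}
#endif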

void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	u16 start_seq_num;
	int ret;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * Start queuing up packets for this aggregation session.
	 * We're going to release them once the driver is OK with
	 * that.
	 */
	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	/*
	 * Make sure no packets are being processed. This ensures that
	 * we have a valid starting sequence number and that in-flight
	 * packets have been flushed out and no packets for this TID
	 * will go into the driver during the ampdu_action call.
	 */
	synchronize_net();

	start_seq_num = sta->tid_seq[tid] >> 4;

	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
			       &sta->sta, tid, &start_seq_num, 0);
	if (ret) {
		ht_dbg(sdata,
		       "BA request denied - HW unavailable for %pM tid %d\n",
		       sta->sta.addr, tid);
		spin_lock_bh(&sta->lock);
		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
		ieee80211_assign_tid_tx(sta, tid, NULL);
		ieee80211_agg_splice_finish(sdata, tid);
		spin_unlock_bh(&sta->lock);

		kfree_rcu(tid_tx, rcu_head);
		return;
	}

	/* activate the timer for the recipient's addBA response */
	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
	ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
	       sta->sta.addr, tid);

	spin_lock_bh(&sta->lock);
	sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
	sta->ampdu_mlme.addba_req_num[tid]++;
	spin_unlock_bh(&sta->lock);

	/* send AddBA request */
	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
				     tid_tx->dialog_token, start_seq_num,
				     local->hw.max_tx_aggregation_subframes,
				     tid_tx->timeout);
}

/*
 * After the ADDBA response is accepted, we start a session timer
 * and reset it after each frame that we send.
 */
static void sta_tx_agg_session_timer_expired(unsigned long data)
{
	/*
	 * Not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and various sta_info fields are needed here.
	 * The init flow gives the TID as data, while the timer_to_tid
	 * array gives the sta through container_of().
	 */
	u8 *ptid = (u8 *)data;
	u8 *timer_to_id = ptid - *ptid;
	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
					    timer_to_tid[0]);
	struct tid_ampdu_tx *tid_tx;
	unsigned long timeout;

	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
	if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		rcu_read_unlock();
		return;
	}

	timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&tid_tx->session_timer, timeout);
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
	       sta->sta.addr, (u16)*ptid);

	ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
}

int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
				  u16 timeout)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN_ON_ONCE(!local->ops->ampdu_action))
		return -EINVAL;

	if ((tid >= IEEE80211_NUM_TIDS) ||
	    !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
	    (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
		return -EINVAL;

	ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);

	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
		return -EINVAL;

	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
		ht_dbg(sdata,
		       "BA sessions blocked - Denying BA session request %pM tid %d\n",
		       sta->sta.addr, tid);
		return -EINVAL;
	}

	/*
	 * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
	 * member of an IBSS, and has no other existing Block Ack agreement
	 * with the recipient STA, then the initiating STA shall transmit a
	 * Probe Request frame to the recipient STA and shall not transmit an
	 * ADDBA Request frame unless it receives a Probe Response frame
	 * from the recipient within dot11ADDBAFailureTimeout.
	 *
	 * The probe request mechanism for ADDBA is currently not implemented,
	 * but we only build up Block Ack sessions with HT STAs. This
	 * information is set when we receive BSS info from a probe response
	 * or a beacon.
	 */
	if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
	    !sta->sta.ht_cap.ht_supported) {
		ht_dbg(sdata,
		       "BA request denied - IBSS STA %pM does not advertise HT support\n",
		       pubsta->addr);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, the receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	/*
	 * If we have tried more than HT_AGG_BURST_RETRIES times, we
	 * will spread our requests in time to avoid stalling the
	 * connection for too long.
	 */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
			HT_AGG_RETRIES_PERIOD)) {
		ht_dbg(sdata,
		       "BA request denied - waiting a grace period after %d failed requests on %pM tid %u\n",
		       sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	/* check if the TID is not in an aggregation flow already */
	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
		ht_dbg(sdata,
		       "BA request denied - session is not idle on %pM tid %u\n",
		       sta->sta.addr, tid);
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	tid_tx->timeout = timeout;

	/* response timer */
	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer(&tid_tx->addba_resp_timer);

	/* tx timer */
	tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
	tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer_deferrable(&tid_tx->session_timer);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/*
	 * Finally, assign it to the start array; the work item will
	 * collect it and move it to the normal array.
	 */
	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
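
/*
 * A usage sketch (guarded out of the build) of the exported session
 * API from the initiator's perspective, e.g. from a rate control
 * algorithm. The helper name and the 5000 TU timeout are arbitrary
 * examples, not part of mac80211.
 */
#if 0
static void example_toggle_agg(struct ieee80211_sta *pubsta, u16 tid)
{
	/* kick off the ADDBA handshake; may fail, e.g. -EBUSY/-EAGAIN */
	if (ieee80211_start_tx_ba_session(pubsta, tid, 5000))
		return;

	/* ... and later, tear the session down again */
	ieee80211_stop_tx_ba_session(pubsta, tid);
}
#endif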

static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
					 struct sta_info *sta, u16 tid)
{
	struct tid_ampdu_tx *tid_tx;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
	       sta->sta.addr, tid);

	drv_ampdu_action(local, sta->sdata,
			 IEEE80211_AMPDU_TX_OPERATIONAL,
			 &sta->sta, tid, NULL, tid_tx->buf_size);

	/*
	 * Synchronize with the TX path: while we are splicing, the
	 * TX path must block so it won't put more packets onto pending.
	 */
	spin_lock_bh(&sta->lock);

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
	/*
	 * Now mark as operational. This will be visible
	 * in the TX path, and lets it go lock-free in
	 * the common case.
	 */
	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	ieee80211_agg_splice_finish(sta->sdata, tid);

	spin_unlock_bh(&sta->lock);
}

void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_start_tx_ba_cb(sdata, ra, tid);

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return;
	}

	mutex_lock(&local->sta_mtx);
	sta = sta_info_get_bss(sdata, ra);
	if (!sta) {
		mutex_unlock(&local->sta_mtx);
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		return;
	}

	mutex_lock(&sta->ampdu_mlme.mtx);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (WARN_ON(!tid_tx)) {
		ht_dbg(sdata, "addBA was not requested!\n");
		goto unlock;
	}

	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
		goto unlock;

	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		ieee80211_agg_tx_operational(local, sta, tid);

 unlock:
	mutex_unlock(&sta->ampdu_mlme.mtx);
	mutex_unlock(&local->sta_mtx);
}

void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb))
		return;

	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				   enum ieee80211_agg_stop_reason reason)
{
	int ret;

	mutex_lock(&sta->ampdu_mlme.mtx);

	ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);

	mutex_unlock(&sta->ampdu_mlme.mtx);

	return ret;
}

int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_stop_tx_ba_session(pubsta, tid);

	if (!local->ops->ampdu_action)
		return -EINVAL;

	if (tid >= IEEE80211_NUM_TIDS)
		return -EINVAL;

	spin_lock_bh(&sta->lock);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (!tid_tx) {
		ret = -ENOENT;
		goto unlock;
	}

	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		/* already in the process of stopping it */
		ret = 0;
		goto unlock;
	}

	set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

 unlock:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);

void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_stop_tx_ba_cb(sdata, ra, tid);

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return;
	}

	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);

	mutex_lock(&local->sta_mtx);

	sta = sta_info_get_bss(sdata, ra);
	if (!sta) {
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		goto unlock;
	}

	mutex_lock(&sta->ampdu_mlme.mtx);
	spin_lock_bh(&sta->lock);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sdata,
		       "unexpected callback to A-MPDU stop for %pM tid %d\n",
		       sta->sta.addr, tid);
		goto unlock_sta;
	}

	if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop)
		ieee80211_send_delba(sta->sdata, ra, tid,
				     WLAN_BACK_INITIATOR,
				     WLAN_REASON_QSTA_NOT_USE);

	ieee80211_remove_tid_tx(sta, tid);

 unlock_sta:
	spin_unlock_bh(&sta->lock);
	mutex_unlock(&sta->ampdu_mlme.mtx);
 unlock:
	mutex_unlock(&local->sta_mtx);
}

void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				     const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb))
		return;

	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);

void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct tid_ampdu_tx *tid_tx;
	u16 capab, tid;
	u8 buf_size;

	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;

	mutex_lock(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx)
		goto out;

	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
		ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
		       sta->sta.addr, tid);
		goto out;
	}

	del_timer_sync(&tid_tx->addba_resp_timer);

	ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
	       sta->sta.addr, tid);

	/*
	 * addba_resp_timer may have fired before we got here, and
	 * caused WANT_STOP to be set. If the stop was then already
	 * processed further, STOPPING might be set.
	 */
	if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
	    test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sta->sdata,
		       "got addBA resp for %pM tid %d but we already gave up\n",
		       sta->sta.addr, tid);
		goto out;
	}

	/*
	 * IEEE 802.11-2007 7.3.1.14:
	 * In an ADDBA Response frame, when the Status Code field
	 * is set to 0, the Buffer Size subfield is set to a value
	 * of at least 1.
	 */
	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS && buf_size) {
		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
				     &tid_tx->state)) {
			/* ignore duplicate response */
			goto out;
		}

		tid_tx->buf_size = buf_size;

		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
			ieee80211_agg_tx_operational(local, sta, tid);

		sta->ampdu_mlme.addba_req_num[tid] = 0;

		if (tid_tx->timeout) {
			mod_timer(&tid_tx->session_timer,
				  TU_TO_EXP_TIME(tid_tx->timeout));
			tid_tx->last_tx = jiffies;
		}

	} else {
		___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
	}

 out:
	mutex_unlock(&sta->ampdu_mlme.mtx);
}