/*
 * HT handling
 *
 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007  Jiri Benc <jbenc@suse.cz>
 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
 * Copyright 2007-2010, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "wme.h"

/**
 * DOC: TX A-MPDU aggregation
 *
 * Aggregation on the TX side requires setting the hardware flag
 * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed
 * packets with a flag indicating A-MPDU aggregation. The driver
 * or device is responsible for actually aggregating the frames,
 * as well as deciding how many and which to aggregate.
 *
 * When TX aggregation is started by some subsystem (usually the rate
 * control algorithm would be appropriate) by calling the
 * ieee80211_start_tx_ba_session() function, the driver will be
 * notified via its @ampdu_action function, with the
 * %IEEE80211_AMPDU_TX_START action.
 *
 * In response to that, the driver is later required to call the
 * ieee80211_start_tx_ba_cb_irqsafe() function, which will really
 * start the aggregation session after the peer has also responded.
 * If the peer responds negatively, the session will be stopped
 * again right away. Note that it is possible for the aggregation
 * session to be stopped before the driver has indicated that it
 * is done setting it up, in which case it must not indicate the
 * setup completion.
 *
 * Also note that, since we also need to wait for a response from
 * the peer, the driver is notified of the completion of the
 * handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the
 * @ampdu_action callback.
 *
 * Similarly, when the aggregation session is stopped by the peer
 * or something calling ieee80211_stop_tx_ba_session(), the driver's
 * @ampdu_action function will be called with the action
 * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail,
 * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe().
 * Note that the sta can get destroyed before the BA tear down is
 * complete.
 */
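
/*
 * To make the handshake described above concrete, a minimal driver-side
 * @ampdu_action handler might look roughly like the sketch below. This
 * is an illustrative outline only: the my_hw_*() helpers are
 * hypothetical, RX actions are omitted, and real drivers often defer
 * the callbacks until the hardware has actually been reconfigured.
 *
 *	static int my_ampdu_action(struct ieee80211_hw *hw,
 *				   struct ieee80211_vif *vif,
 *				   enum ieee80211_ampdu_mlme_action action,
 *				   struct ieee80211_sta *sta, u16 tid,
 *				   u16 *ssn, u8 buf_size, bool amsdu)
 *	{
 *		switch (action) {
 *		case IEEE80211_AMPDU_TX_START:
 *			my_hw_prepare_agg(hw, sta, tid, *ssn);
 *			ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 *			return 0;
 *		case IEEE80211_AMPDU_TX_OPERATIONAL:
 *			my_hw_enable_agg(hw, sta, tid, buf_size);
 *			return 0;
 *		case IEEE80211_AMPDU_TX_STOP_CONT:
 *			my_hw_disable_agg(hw, sta, tid);
 *			ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 *			return 0;
 *		case IEEE80211_AMPDU_TX_STOP_FLUSH:
 *		case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
 *			my_hw_flush_agg(hw, sta, tid);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */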

static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
					 const u8 *da, u16 tid,
					 u8 dialog_token, u16 start_seq_num,
					 u16 agg_size, u16 timeout)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u16 capab;

	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
	memset(mgmt, 0, 24);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	if (sdata->vif.type == NL80211_IFTYPE_AP ||
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
	    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
		memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
		memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);

	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));

	mgmt->u.action.category = WLAN_CATEGORY_BACK;
	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;

	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
	capab = (u16)(1 << 0);		/* bit 0 A-MSDU support */
	capab |= (u16)(1 << 1);		/* bit 1 aggregation policy */
	capab |= (u16)(tid << 2);	/* bit 5:2 TID number */
	capab |= (u16)(agg_size << 6);	/* bit 15:6 max size of aggregation */

	mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);

	mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
	mgmt->u.action.u.addba_req.start_seq_num =
		cpu_to_le16(start_seq_num << 4);

	ieee80211_tx_skb(sdata, skb);
}
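
/*
 * Worked example of the capability packing above (illustrative values):
 * for tid = 5 and agg_size = 64, the field becomes
 *
 *	capab = (1 << 0) | (1 << 1) | (5 << 2) | (64 << 6)
 *	      = 0x0001 | 0x0002 | 0x0014 | 0x1000 = 0x1017
 *
 * i.e. A-MSDU supported, immediate Block Ack policy, TID 5, and a
 * buffer size of 64 subframes. ieee80211_process_addba_resp() at the
 * bottom of this file decodes the same layout from the peer's response.
 */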

void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_bar *bar;
	u16 bar_control = 0;

	skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
	memset(bar, 0, sizeof(*bar));
	bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
					 IEEE80211_STYPE_BACK_REQ);
	memcpy(bar->ra, ra, ETH_ALEN);
	memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
	bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
	bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
	bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT);
	bar->control = cpu_to_le16(bar_control);
	bar->start_seq_num = cpu_to_le16(ssn);

	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
					IEEE80211_TX_CTL_REQ_TX_STATUS;
	ieee80211_tx_skb_tid(sdata, skb, tid);
}
EXPORT_SYMBOL(ieee80211_send_bar);

void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
			     struct tid_ampdu_tx *tid_tx)
{
	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
}

/*
 * When multiple aggregation sessions on multiple stations
 * are being created/destroyed simultaneously, we need to
 * refcount the global queue stop caused by that in order
 * to not get into a situation where one session's setup or
 * teardown re-enables the queues before the other is ready
 * to handle that.
 *
 * These two functions take care of this issue by keeping
 * a global "agg_queue_stop" refcount.
 */
static void __acquires(agg_queue)
ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	/* we do refcounting here, so don't use the queue reason refcounting */

	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
		ieee80211_stop_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
			false);
	__acquire(agg_queue);
}

static void __releases(agg_queue)
ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
		ieee80211_wake_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
			false);
	__release(agg_queue);
}
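
/*
 * Hypothetical interleaving that the refcount guards against, with two
 * sessions sharing the same AC queue (illustrative only):
 *
 *	session A: ieee80211_stop_queue_agg()	-> count 1, queue stopped
 *	session B: ieee80211_stop_queue_agg()	-> count 2
 *	session A: ieee80211_wake_queue_agg()	-> count 1, stays stopped
 *	session B: ieee80211_wake_queue_agg()	-> count 0, queue woken
 *
 * Without the refcount, session A's wake would restart the queue while
 * session B is still splicing its pending frames.
 */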

static void
ieee80211_agg_stop_txq(struct sta_info *sta, int tid)
{
	struct ieee80211_txq *txq = sta->sta.txq[tid];
	struct txq_info *txqi;

	if (!txq)
		return;

	txqi = to_txq_info(txq);

	/* Lock here to protect against further seqno updates on dequeue */
	spin_lock_bh(&txqi->queue.lock);
	set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
	spin_unlock_bh(&txqi->queue.lock);
}

static void
ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
{
	struct ieee80211_txq *txq = sta->sta.txq[tid];
	struct txq_info *txqi;

	if (!txq)
		return;

	txqi = to_txq_info(txq);

	if (enable)
		set_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);
	else
		clear_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);

	clear_bit(IEEE80211_TXQ_STOP, &txqi->flags);
	drv_wake_tx_queue(sta->sdata->local, txqi);
}

/*
 * splice packets from the STA's pending to the local pending,
 * requires a call to ieee80211_agg_splice_finish later
 */
static void __acquires(agg_queue)
ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
			     struct tid_ampdu_tx *tid_tx, u16 tid)
{
	struct ieee80211_local *local = sdata->local;
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
	unsigned long flags;

	ieee80211_stop_queue_agg(sdata, tid);

	if (WARN(!tid_tx,
		 "TID %d gone but expected when splicing aggregates from the pending queue\n",
		 tid))
		return;

	if (!skb_queue_empty(&tid_tx->pending)) {
		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		/* copy over remaining packets */
		skb_queue_splice_tail_init(&tid_tx->pending,
					   &local->pending[queue]);
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
	}
}

static void __releases(agg_queue)
ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
{
	ieee80211_wake_queue_agg(sdata, tid);
}

static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * When we get here, the TX path will not be lockless any more wrt.
	 * aggregation, since the OPERATIONAL bit has long been cleared.
	 * Thus it will block on getting the lock, if it occurs. So if we
	 * stop the queue now, we will not get any more packets, and any
	 * that might be being processed will wait for us here, thereby
	 * guaranteeing that no packets go to the tid_tx pending queue any
	 * more.
	 */

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);

	/* future packets must not find the tid_tx struct any more */
	ieee80211_assign_tid_tx(sta, tid, NULL);

	ieee80211_agg_splice_finish(sta->sdata, tid);
	ieee80211_agg_start_txq(sta, tid, false);

	kfree_rcu(tid_tx, rcu_head);
}

int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_agg_stop_reason reason)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx;
	enum ieee80211_ampdu_mlme_action action;
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	switch (reason) {
	case AGG_STOP_DECLINED:
	case AGG_STOP_LOCAL_REQUEST:
	case AGG_STOP_PEER_REQUEST:
		action = IEEE80211_AMPDU_TX_STOP_CONT;
		break;
	case AGG_STOP_DESTROY_STA:
		action = IEEE80211_AMPDU_TX_STOP_FLUSH;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx) {
		spin_unlock_bh(&sta->lock);
		return -ENOENT;
	}

	/*
	 * if we're already stopping ignore any new requests to stop
	 * unless we're destroying it in which case notify the driver
	 */
	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		spin_unlock_bh(&sta->lock);
		if (reason != AGG_STOP_DESTROY_STA)
			return -EALREADY;
		ret = drv_ampdu_action(local, sta->sdata,
				       IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
				       &sta->sta, tid, NULL, 0, false);
		WARN_ON_ONCE(ret);
		return 0;
	}

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		ieee80211_assign_tid_tx(sta, tid, NULL);
		spin_unlock_bh(&sta->lock);
		kfree_rcu(tid_tx, rcu_head);
		return 0;
	}

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	spin_unlock_bh(&sta->lock);

	ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);

	del_timer_sync(&tid_tx->addba_resp_timer);
	del_timer_sync(&tid_tx->session_timer);

	/*
	 * After this packets are no longer handed right through
	 * to the driver but are put onto tid_tx->pending instead,
	 * with locking to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/*
	 * There might be a few packets being processed right now (on
	 * another CPU) that have already gotten past the aggregation
	 * check when it was still OPERATIONAL and consequently have
	 * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
	 * call into the driver at the same time or even before the
	 * TX path calls into it, which could confuse the driver.
	 *
	 * Wait for all currently running TX paths to finish before
	 * telling the driver. New packets will not go through since
	 * the aggregation session is no longer OPERATIONAL.
	 */
	synchronize_net();

	tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
					WLAN_BACK_RECIPIENT :
					WLAN_BACK_INITIATOR;
	tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;

	ret = drv_ampdu_action(local, sta->sdata, action,
			       &sta->sta, tid, NULL, 0, false);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	/*
	 * In the case of AGG_STOP_DESTROY_STA, the driver won't
	 * necessarily call ieee80211_stop_tx_ba_cb(), so this may
	 * seem like we can leave the tid_tx data pending forever.
	 * This is true, in a way, but "forever" is only until the
	 * station struct is actually destroyed. In the meantime,
	 * leaving it around ensures that we don't transmit packets
	 * to the driver on this TID which might confuse it.
	 */

	return 0;
}

/*
 * After sending an ADDBA request, we run a timer until the ADDBA
 * response arrives from the recipient. If this timer expires,
 * sta_addba_resp_timer_expired() is executed.
 */
static void sta_addba_resp_timer_expired(unsigned long data)
{
	/* not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and both sta_info and TID are needed, so init
	 * flow in sta_info_create gives the TID as data, while the
	 * timer_to_tid array gives the sta through container_of */
	u16 tid = *(u8 *)data;
	struct sta_info *sta = container_of((void *)data,
					    struct sta_info, timer_to_tid[tid]);
	struct tid_ampdu_tx *tid_tx;

	/* check if the TID waits for addBA response */
	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx ||
	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
		rcu_read_unlock();
		ht_dbg(sta->sdata,
		       "timer expired on %pM tid %d but we are not (or no longer) expecting addBA response there\n",
		       sta->sta.addr, tid);
		return;
	}

	ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
	       sta->sta.addr, tid);

	ieee80211_stop_tx_ba_session(&sta->sta, tid);
	rcu_read_unlock();
}
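
/*
 * A sketch of why the detour above works (TID value is illustrative):
 * the station init path fills sta->timer_to_tid[] with { 0, 1, 2, ... },
 * and the timer data points at the TID's own slot. So for TID 3:
 *
 *	*(u8 *)data == 3	recovers the TID, and
 *	container_of(data, struct sta_info, timer_to_tid[3])
 *				recovers the sta_info,
 *
 * because the address of element 3 minus that element's offset within
 * the struct is the start of the enclosing sta_info.
 * sta_tx_agg_session_timer_expired() below uses the same trick, stepping
 * back *ptid entries to reach &timer_to_tid[0] first.
 */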

void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	u16 start_seq_num;
	int ret;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * Start queuing up packets for this aggregation session.
	 * We're going to release them once the driver is OK with
	 * that.
	 */
	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	ieee80211_agg_stop_txq(sta, tid);

	/*
	 * Make sure no packets are being processed. This ensures that
	 * we have a valid starting sequence number and that in-flight
	 * packets have been flushed out and no packets for this TID
	 * will go into the driver during the ampdu_action call.
	 */
	synchronize_net();

	start_seq_num = sta->tid_seq[tid] >> 4;

	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
			       &sta->sta, tid, &start_seq_num, 0, false);
	if (ret) {
		ht_dbg(sdata,
		       "BA request denied - HW unavailable for %pM tid %d\n",
		       sta->sta.addr, tid);
		spin_lock_bh(&sta->lock);
		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
		ieee80211_assign_tid_tx(sta, tid, NULL);
		ieee80211_agg_splice_finish(sdata, tid);
		spin_unlock_bh(&sta->lock);

		ieee80211_agg_start_txq(sta, tid, false);

		kfree_rcu(tid_tx, rcu_head);
		return;
	}

	/* activate the timer for the recipient's addBA response */
	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
	ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
	       sta->sta.addr, tid);

	spin_lock_bh(&sta->lock);
	sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
	sta->ampdu_mlme.addba_req_num[tid]++;
	spin_unlock_bh(&sta->lock);

	/* send AddBA request */
	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
				     tid_tx->dialog_token, start_seq_num,
				     IEEE80211_MAX_AMPDU_BUF,
				     tid_tx->timeout);
}
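
/*
 * Illustrative example of the SSN derivation above: sta->tid_seq[] holds
 * the per-TID sequence counter in IEEE80211_SCTL_SEQ format, i.e. already
 * shifted left by 4 past the fragment-number bits. If tid_seq[tid] is
 * 0x0A30, then start_seq_num = 0x0A30 >> 4 = 0xA3, and
 * ieee80211_send_addba_request() shifts it back (start_seq_num << 4) when
 * building the Block Ack Starting Sequence Control field of the frame.
 */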

/*
 * After accepting the AddBA Response we activated a timer,
 * resetting it after each frame that we send.
 */
static void sta_tx_agg_session_timer_expired(unsigned long data)
{
	/* not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and both the sta_info and TID are needed, so
	 * init flow in sta_info_create gives the TID as data, while the
	 * timer_to_tid array gives the sta through container_of */
	u8 *ptid = (u8 *)data;
	u8 *timer_to_id = ptid - *ptid;
	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
					    timer_to_tid[0]);
	struct tid_ampdu_tx *tid_tx;
	unsigned long timeout;

	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
	if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		rcu_read_unlock();
		return;
	}

	timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&tid_tx->session_timer, timeout);
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
	       sta->sta.addr, (u16)*ptid);

	ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
}
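
/*
 * Worked example for the re-arm logic above (numbers are illustrative):
 * the session timeout is negotiated in time units (1 TU = 1024 usec), so
 * for a timeout of 5000 TU, TU_TO_JIFFIES() converts
 * 5000 * 1024 usec = 5.12 s into jiffies. If the last frame was sent
 * 2 s ago, last_tx + 5.12 s is still in the future, so the timer is
 * simply re-armed for that moment instead of tearing the session down.
 */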

int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
				  u16 timeout)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN(sta->reserved_tid == tid,
		 "Requested to start BA session on reserved tid=%d", tid))
		return -EINVAL;

	if (!pubsta->ht_cap.ht_supported)
		return -EINVAL;

	if (WARN_ON_ONCE(!local->ops->ampdu_action))
		return -EINVAL;

	if ((tid >= IEEE80211_NUM_TIDS) ||
	    !ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) ||
	    ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW))
		return -EINVAL;

	ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);

	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
		return -EINVAL;

	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
		ht_dbg(sdata,
		       "BA sessions blocked - Denying BA session request %pM tid %d\n",
		       sta->sta.addr, tid);
		return -EINVAL;
	}

	/*
	 * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
	 * member of an IBSS, and has no other existing Block Ack agreement
	 * with the recipient STA, then the initiating STA shall transmit a
	 * Probe Request frame to the recipient STA and shall not transmit an
	 * ADDBA Request frame unless it receives a Probe Response frame
	 * from the recipient within dot11ADDBAFailureTimeout.
	 *
	 * The probe request mechanism for ADDBA is currently not implemented,
	 * but we only build up Block Ack sessions with HT STAs. This
	 * information is set when we receive a bss info from a probe response
	 * or a beacon.
	 */
	if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
	    !sta->sta.ht_cap.ht_supported) {
		ht_dbg(sdata,
		       "BA request denied - IBSS STA %pM does not advertise HT support\n",
		       pubsta->addr);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	/*
	 * if we have tried more than HT_AGG_BURST_RETRIES times we
	 * will spread our requests in time to avoid stalling connection
	 * for too long
	 */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
			HT_AGG_RETRIES_PERIOD)) {
		ht_dbg(sdata,
		       "BA request denied - waiting a grace period after %d failed requests on %pM tid %u\n",
		       sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	/* check if the TID is not in aggregation flow already */
	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
		ht_dbg(sdata,
		       "BA request denied - session is not idle on %pM tid %u\n",
		       sta->sta.addr, tid);
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	tid_tx->timeout = timeout;

	/* response timer */
	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer(&tid_tx->addba_resp_timer);

	/* tx timer */
	tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
	tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer_deferrable(&tid_tx->session_timer);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/*
	 * Finally, assign it to the start array; the work item will
	 * collect it and move it to the normal array.
	 */
	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
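
/*
 * Minimal caller sketch (hypothetical rate-control hook; the function
 * names, the policy check and the 5000 TU timeout are illustrative only):
 *
 *	static void example_check_agg(struct ieee80211_sta *sta, u8 tid)
 *	{
 *		if (example_enough_traffic(sta, tid) &&
 *		    ieee80211_start_tx_ba_session(sta, tid, 5000))
 *			pr_debug("BA session request failed\n");
 *	}
 *
 * A zero return only means the request was accepted and queued; the
 * ADDBA handshake itself continues from the aggregation work item and
 * the driver's @ampdu_action callback, as described at the top of this
 * file.
 */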

static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
					 struct sta_info *sta, u16 tid)
{
	struct tid_ampdu_tx *tid_tx;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
	       sta->sta.addr, tid);

	drv_ampdu_action(local, sta->sdata,
			 IEEE80211_AMPDU_TX_OPERATIONAL,
			 &sta->sta, tid, NULL, tid_tx->buf_size,
			 tid_tx->amsdu);

	/*
	 * Synchronize with the TX path: while we are splicing, the TX
	 * path must block on the lock so it won't put more packets onto
	 * the pending queue.
	 */
	spin_lock_bh(&sta->lock);

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
	/*
	 * Now mark as operational. This will be visible
	 * in the TX path, and lets it go lock-free in
	 * the common case.
	 */
	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	ieee80211_agg_splice_finish(sta->sdata, tid);

	spin_unlock_bh(&sta->lock);

	ieee80211_agg_start_txq(sta, tid, true);
}

void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_start_tx_ba_cb(sdata, ra, tid);

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return;
	}

	mutex_lock(&local->sta_mtx);
	sta = sta_info_get_bss(sdata, ra);
	if (!sta) {
		mutex_unlock(&local->sta_mtx);
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		return;
	}

	mutex_lock(&sta->ampdu_mlme.mtx);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (WARN_ON(!tid_tx)) {
		ht_dbg(sdata, "addBA was not requested!\n");
		goto unlock;
	}

	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
		goto unlock;

	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		ieee80211_agg_tx_operational(local, sta, tid);

 unlock:
	mutex_unlock(&sta->ampdu_mlme.mtx);
	mutex_unlock(&local->sta_mtx);
}

void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb))
		return;

	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
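
/*
 * Descriptive note on the deferral trick above: the zero-size skb is
 * used only as a work item; its cb[] area carries the RA/TID pair and
 * skb->pkt_type doubles as an internal command code
 * (IEEE80211_SDATA_QUEUE_AGG_START). The actual state change then runs
 * from the sdata work function in process context, which is what makes
 * this entry point (and ieee80211_stop_tx_ba_cb_irqsafe() below) safe
 * to call from the driver's IRQ context.
 */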

int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				   enum ieee80211_agg_stop_reason reason)
{
	int ret;

	mutex_lock(&sta->ampdu_mlme.mtx);

	ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);

	mutex_unlock(&sta->ampdu_mlme.mtx);

	return ret;
}

int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_stop_tx_ba_session(pubsta, tid);

	if (!local->ops->ampdu_action)
		return -EINVAL;

	if (tid >= IEEE80211_NUM_TIDS)
		return -EINVAL;

	spin_lock_bh(&sta->lock);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (!tid_tx) {
		ret = -ENOENT;
		goto unlock;
	}

	WARN(sta->reserved_tid == tid,
	     "Requested to stop BA session on reserved tid=%d", tid);

	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		/* already in progress stopping it */
		ret = 0;
		goto unlock;
	}

	set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

 unlock:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);

void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;
	bool send_delba = false;

	trace_api_stop_tx_ba_cb(sdata, ra, tid);

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return;
	}

	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);

	mutex_lock(&local->sta_mtx);

	sta = sta_info_get_bss(sdata, ra);
	if (!sta) {
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		goto unlock;
	}

	mutex_lock(&sta->ampdu_mlme.mtx);
	spin_lock_bh(&sta->lock);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sdata,
		       "unexpected callback to A-MPDU stop for %pM tid %d\n",
		       sta->sta.addr, tid);
		goto unlock_sta;
	}

	if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop)
		send_delba = true;

	ieee80211_remove_tid_tx(sta, tid);

 unlock_sta:
	spin_unlock_bh(&sta->lock);

	if (send_delba)
		ieee80211_send_delba(sdata, ra, tid,
			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);

	mutex_unlock(&sta->ampdu_mlme.mtx);
 unlock:
	mutex_unlock(&local->sta_mtx);
}

void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				     const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb))
		return;

	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);

void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct tid_ampdu_tx *tid_tx;
	u16 capab, tid;
	u8 buf_size;
	bool amsdu;

	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
	buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);

	mutex_lock(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx)
		goto out;

	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
		ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
		       sta->sta.addr, tid);
		goto out;
	}

	del_timer_sync(&tid_tx->addba_resp_timer);

	ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
	       sta->sta.addr, tid);

	/*
	 * addba_resp_timer may have fired before we got here, and
	 * caused WANT_STOP to be set. If the stop then was already
	 * processed further, STOPPING might be set.
	 */
	if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
	    test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sta->sdata,
		       "got addBA resp for %pM tid %d but we already gave up\n",
		       sta->sta.addr, tid);
		goto out;
	}

	/*
	 * IEEE 802.11-2007 7.3.1.14:
	 * In an ADDBA Response frame, when the Status Code field
	 * is set to 0, the Buffer Size subfield is set to a value
	 * of at least 1.
	 */
	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS && buf_size) {
		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
				     &tid_tx->state)) {
			/* ignore duplicate response */
			goto out;
		}

		tid_tx->buf_size = buf_size;
		tid_tx->amsdu = amsdu;

		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
			ieee80211_agg_tx_operational(local, sta, tid);

		sta->ampdu_mlme.addba_req_num[tid] = 0;

		if (tid_tx->timeout) {
			mod_timer(&tid_tx->session_timer,
				  TU_TO_EXP_TIME(tid_tx->timeout));
			tid_tx->last_tx = jiffies;
		}

	} else {
		___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
	}

 out:
	mutex_unlock(&sta->ampdu_mlme.mtx);
}