// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#include "agg.h"
#include "sta.h"
#include "hcmd.h"
#if defined(__FreeBSD__)
#include <linux/cache.h>
#endif

static void
iwl_mld_reorder_release_frames(struct iwl_mld *mld, struct ieee80211_sta *sta,
			       struct napi_struct *napi,
			       struct iwl_mld_baid_data *baid_data,
			       struct iwl_mld_reorder_buffer *reorder_buf,
			       u16 nssn)
{
	struct iwl_mld_reorder_buf_entry *entries =
		&baid_data->entries[reorder_buf->queue *
				    baid_data->entries_per_queue];
	u16 ssn = reorder_buf->head_sn;

	while (ieee80211_sn_less(ssn, nssn)) {
		int index = ssn % baid_data->buf_size;
		struct sk_buff_head *skb_list = &entries[index].frames;
		struct sk_buff *skb;

		ssn = ieee80211_sn_inc(ssn);

		/* Empty the list. There will be more than one frame for an
		 * A-MSDU. An empty list is valid as well, since nssn
		 * indicates that frames were received.
		 */
		while ((skb = __skb_dequeue(skb_list))) {
			iwl_mld_pass_packet_to_mac80211(mld, napi, skb,
							reorder_buf->queue,
							sta);
			reorder_buf->num_stored--;
		}
	}
	reorder_buf->head_sn = nssn;
}
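/*
 * Illustrative example of the release walk above (assumed numbers, not
 * driver state): with buf_size == 64, head_sn == 4094 and nssn == 2,
 * ieee80211_sn_less() compares modulo the 4096-wide SN space, so the
 * loop visits SNs 4094, 4095, 0 and 1 (slots 62, 63, 0 and 1), flushes
 * any frames stored in those slots, and then sets head_sn = 2.
 */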
static void iwl_mld_release_frames_from_notif(struct iwl_mld *mld,
					      struct napi_struct *napi,
					      u8 baid, u16 nssn, int queue)
{
	struct iwl_mld_reorder_buffer *reorder_buf;
	struct iwl_mld_baid_data *ba_data;
	struct ieee80211_link_sta *link_sta;
	u32 sta_id;

	IWL_DEBUG_HT(mld, "Frame release notification for BAID %u, NSSN %d\n",
		     baid, nssn);

	if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
			 baid >= ARRAY_SIZE(mld->fw_id_to_ba)))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (!ba_data) {
		IWL_DEBUG_HT(mld, "BAID %d not found in map\n", baid);
		goto out_unlock;
	}

	/* pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
		goto out_unlock;

	reorder_buf = &ba_data->reorder_buf[queue];

	iwl_mld_reorder_release_frames(mld, link_sta->sta, napi, ba_data,
				       reorder_buf, nssn);
out_unlock:
	rcu_read_unlock();
}

void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld,
					struct napi_struct *napi,
					struct iwl_rx_packet *pkt, int queue)
{
	struct iwl_frame_release *release = (void *)pkt->data;
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);

	if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
			 "Unexpected frame release notif size %u (expected %zu)\n",
			 pkt_len, sizeof(*release)))
		return;

	iwl_mld_release_frames_from_notif(mld, napi, release->baid,
					  le16_to_cpu(release->nssn),
					  queue);
}

void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
					    struct napi_struct *napi,
					    struct iwl_rx_packet *pkt,
					    int queue)
{
	struct iwl_bar_frame_release *release = (void *)pkt->data;
	struct iwl_mld_baid_data *baid_data;
	unsigned int baid, nssn, sta_id, tid;
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);

	if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
			 "Unexpected frame release notif size %u (expected %zu)\n",
			 pkt_len, sizeof(*release)))
		return;

	baid = le32_get_bits(release->ba_info,
			     IWL_BAR_FRAME_RELEASE_BAID_MASK);
	nssn = le32_get_bits(release->ba_info,
			     IWL_BAR_FRAME_RELEASE_NSSN_MASK);
	sta_id = le32_get_bits(release->sta_tid,
			       IWL_BAR_FRAME_RELEASE_STA_MASK);
	tid = le32_get_bits(release->sta_tid,
			    IWL_BAR_FRAME_RELEASE_TID_MASK);

	if (IWL_FW_CHECK(mld, baid >= ARRAY_SIZE(mld->fw_id_to_ba),
			 "BAR release: invalid BAID (%x)\n", baid))
		return;

	rcu_read_lock();
	baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (!baid_data) {
		IWL_DEBUG_HT(mld,
			     "Got valid BAID %d but not allocated\n",
			     baid);
		goto out_unlock;
	}

	if (IWL_FW_CHECK(mld, tid != baid_data->tid ||
			 sta_id > mld->fw->ucode_capa.num_stations ||
			 !(baid_data->sta_mask & BIT(sta_id)),
			 "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n",
			 baid, baid_data->sta_mask, baid_data->tid, sta_id,
			 tid))
		goto out_unlock;

	IWL_DEBUG_DROP(mld, "Received a BAR, expect packet loss: nssn %d\n",
		       nssn);

	iwl_mld_release_frames_from_notif(mld, napi, baid, nssn, queue);
out_unlock:
	rcu_read_unlock();
}

void iwl_mld_del_ba(struct iwl_mld *mld, int queue,
		    struct iwl_mld_delba_data *data)
{
	struct iwl_mld_baid_data *ba_data;
	struct iwl_mld_reorder_buffer *reorder_buf;
	struct ieee80211_link_sta *link_sta;
	u8 baid = data->baid;
	u32 sta_id;

	if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (WARN_ON_ONCE(!ba_data))
		goto out_unlock;

	/* pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
		goto out_unlock;

	reorder_buf = &ba_data->reorder_buf[queue];

	/* release all frames that are in the reorder buffer to the stack */
	iwl_mld_reorder_release_frames(mld, link_sta->sta, NULL,
				       ba_data, reorder_buf,
				       ieee80211_sn_add(reorder_buf->head_sn,
							ba_data->buf_size));
out_unlock:
	rcu_read_unlock();
}
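/*
 * A minimal caller-side sketch (illustrative only; the real Rx path lives
 * elsewhere in the driver and may differ) of how the three result values
 * of iwl_mld_reorder() below are meant to be consumed. For
 * IWL_MLD_BUFFERED_SKB the skb is owned by the reorder buffer, so the
 * caller must not touch it again:
 *
 *	switch (iwl_mld_reorder(mld, napi, queue, sta, skb, desc)) {
 *	case IWL_MLD_PASS_SKB:
 *		iwl_mld_pass_packet_to_mac80211(mld, napi, skb, queue, sta);
 *		break;
 *	case IWL_MLD_DROP_SKB:
 *		kfree_skb(skb);
 *		break;
 *	case IWL_MLD_BUFFERED_SKB:
 *		break;
 *	}
 */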
/* Returns whether the MPDU was buffered or dropped (in which case the skb
 * is consumed), or whether it should be passed on to the upper layer (see
 * enum iwl_mld_reorder_result).
 */
enum iwl_mld_reorder_result
iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi,
		int queue, struct ieee80211_sta *sta,
		struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc)
{
	struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
	struct iwl_mld_baid_data *baid_data;
	struct iwl_mld_reorder_buffer *buffer;
	struct iwl_mld_reorder_buf_entry *entries;
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_link_sta *mld_link_sta;
	u32 reorder = le32_to_cpu(desc->reorder_data);
	bool amsdu, last_subframe, is_old_sn, is_dup;
#if defined(__linux__)
	u8 tid = ieee80211_get_tid(hdr);
#elif defined(__FreeBSD__)
	u8 tid;
#endif
	u8 baid;
	u16 nssn, sn;
	u32 sta_mask = 0;
	int index;
	u8 link_id;

	baid = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_BAID_MASK);

	/* This also covers the case of receiving a Block Ack Request
	 * outside a BA session; we'll pass it to mac80211 and that
	 * then sends a delBA action frame.
	 * This also covers pure monitor mode, in which case we won't
	 * have any BA sessions.
	 */
	if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
		return IWL_MLD_PASS_SKB;

	/* no sta yet */
	if (WARN_ONCE(!sta,
		      "Got valid BAID without a valid station assigned\n"))
		return IWL_MLD_PASS_SKB;

	/* not a data packet */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return IWL_MLD_PASS_SKB;

	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
		return IWL_MLD_PASS_SKB;

	baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (!baid_data) {
		IWL_DEBUG_HT(mld,
			     "Got valid BAID but no baid allocated, bypass re-ordering (BAID=%d reorder=0x%x)\n",
			     baid, reorder);
		return IWL_MLD_PASS_SKB;
	}

	for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
		sta_mask |= BIT(mld_link_sta->fw_id);

#if defined(__FreeBSD__)
	tid = ieee80211_get_tid(hdr);
#endif

	/* verify the BAID is correctly mapped to the sta and tid */
	if (IWL_FW_CHECK(mld,
			 tid != baid_data->tid ||
			 !(sta_mask & baid_data->sta_mask),
			 "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but was received for sta_mask:0x%x tid:%d\n",
			 baid, baid_data->sta_mask, baid_data->tid,
			 sta_mask, tid))
		return IWL_MLD_PASS_SKB;

	buffer = &baid_data->reorder_buf[queue];
	entries = &baid_data->entries[queue * baid_data->entries_per_queue];

	is_old_sn = !!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN);

	if (!buffer->valid && is_old_sn)
		return IWL_MLD_PASS_SKB;

	buffer->valid = true;

	is_dup = !!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE));

	/* drop any duplicated or outdated packets */
	if (is_dup || is_old_sn)
		return IWL_MLD_DROP_SKB;

	sn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_SN_MASK);
	nssn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_NSSN_MASK);
	amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
	last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;

	/* release immediately if allowed by nssn and no stored frames */
	if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
		if (!amsdu || last_subframe)
			buffer->head_sn = nssn;
		return IWL_MLD_PASS_SKB;
	}

	/* Release immediately if there are no stored frames and the sn is
	 * equal to the head. This can happen due to the reorder timer,
	 * which leaves NSSN behind head_sn: everything was already
	 * released, so when the next in-sequence frame arrives, NSSN alone
	 * would not allow an immediate release, even though there is no
	 * hole and we can move forward.
	 */
	if (!buffer->num_stored && sn == buffer->head_sn) {
		if (!amsdu || last_subframe)
			buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
		return IWL_MLD_PASS_SKB;
	}

	/* put in reorder buffer */
	index = sn % baid_data->buf_size;
	__skb_queue_tail(&entries[index].frames, skb);
	buffer->num_stored++;

	/* We cannot trust NSSN for A-MSDU sub-frames that are not the last.
	 * The reason is that NSSN advances on the first sub-frame, and may
	 * cause the reorder buffer to advance before all the sub-frames
	 * arrive.
	 *
	 * Example: the reorder buffer contains SN 0 & 2, and we receive an
	 * A-MSDU with SN 1. NSSN for the first sub-frame will be 3, with
	 * the result of the driver releasing SN 0, 1, 2. When the next
	 * sub-frame of SN 1 arrives, the reorder buffer is already ahead
	 * of it and it will be dropped.
	 * If the last sub-frame is not on this queue, we will get a frame
	 * release notification with an up-to-date NSSN.
	 * If this is the first frame that is stored in the buffer, the
	 * head_sn may be outdated. Update it based on the last NSSN to
	 * make sure it will be released when the frame release
	 * notification arrives.
	 */
	if (!amsdu || last_subframe)
		iwl_mld_reorder_release_frames(mld, sta, napi, baid_data,
					       buffer, nssn);
	else if (buffer->num_stored == 1)
		buffer->head_sn = nssn;

	return IWL_MLD_BUFFERED_SKB;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_reorder);
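/*
 * Illustrative trace of the decision flow above (assumed starting state:
 * head_sn == 0, an empty buffer, no A-MSDUs):
 *  - SN 0 arrives, NSSN 1: no stored frames and sn < nssn, so it is
 *    passed up immediately and head_sn becomes 1 (IWL_MLD_PASS_SKB).
 *  - SN 2 arrives, NSSN still 1 (SN 1 is missing): neither fast path
 *    applies, so it is queued at slot 2 (IWL_MLD_BUFFERED_SKB).
 *  - SN 1 arrives, NSSN 3: it is queued, then the release walk flushes
 *    SNs 1 and 2 to mac80211 and head_sn becomes 3.
 */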
static void iwl_mld_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mld_baid_data *data =
		timer_container_of(data, t, session_timer);
	struct iwl_mld_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mld_baid_data *ba_data;
	struct ieee80211_link_sta *link_sta;
	struct iwl_mld_sta *mld_sta;
	unsigned long timeout;
	unsigned int sta_id;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);
	if (WARN_ON(!ba_data))
		goto unlock;

	if (WARN_ON(!ba_data->timeout))
		goto unlock;

	timeout = ba_data->last_rx_timestamp +
		  TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* timer expired, pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(ba_data->mld->fw_id_to_link_sta[sta_id]);

	/* sta should be valid unless the following happens:
	 * The firmware asserts, which triggers a reconfig flow, but
	 * the reconfig fails before we set the sta pointer in the
	 * fw_id_to_link_sta table. mac80211 can't stop the A-MPDU
	 * session and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (IS_ERR_OR_NULL(link_sta) || WARN_ON(!link_sta->sta))
		goto unlock;

	mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
	ieee80211_rx_ba_timer_expired(mld_sta->vif, link_sta->sta->addr,
				      ba_data->tid);
unlock:
	rcu_read_unlock();
}
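/*
 * Illustrative timeout arithmetic for the expiry handler above (one TU
 * is 1024 usec): a negotiated BA timeout of 5000 TU is about 5.12 s, so
 * the session timer fires roughly 10.24 s (2 * timeout) after the last
 * received frame, and is simply re-armed if traffic arrived within the
 * window.
 */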
static int
iwl_mld_stop_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta, int tid)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
		.remove.sta_id_mask =
			cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
		.remove.tid = cpu_to_le32(tid),
	};
	int ret;

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(DATA_PATH_GROUP,
					   RX_BAID_ALLOCATION_CONFIG_CMD),
				   &cmd);
	if (ret)
		return ret;

	IWL_DEBUG_HT(mld, "RX BA Session stopped in fw\n");

	return ret;
}

static int
iwl_mld_start_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta,
		       int tid, u16 ssn, u16 buf_size)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_ADD),
		.alloc.sta_id_mask =
			cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
		.alloc.tid = tid,
		.alloc.ssn = cpu_to_le16(ssn),
		.alloc.win_size = cpu_to_le16(buf_size),
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
		.flags = CMD_WANT_SKB,
		.len[0] = sizeof(cmd),
		.data[0] = &cmd,
	};
	struct iwl_rx_baid_cfg_resp *resp;
	struct iwl_rx_packet *pkt;
	u32 resp_len;
	int ret, baid;

	BUILD_BUG_ON(sizeof(*resp) != sizeof(baid));

	ret = iwl_mld_send_cmd(mld, &hcmd);
	if (ret)
		return ret;

	pkt = hcmd.resp_pkt;

	resp_len = iwl_rx_packet_payload_len(pkt);
	if (IWL_FW_CHECK(mld, resp_len != sizeof(*resp),
			 "BAID_ALLOC_CMD: unexpected response length %d\n",
			 resp_len)) {
		ret = -EIO;
		goto out;
	}

	IWL_DEBUG_HT(mld, "RX BA Session started in fw\n");

	resp = (void *)pkt->data;
	baid = le32_to_cpu(resp->baid);

	if (IWL_FW_CHECK(mld, baid < 0 || baid >= ARRAY_SIZE(mld->fw_id_to_ba),
			 "BAID_ALLOC_CMD: invalid BAID response %d\n", baid)) {
		ret = -EINVAL;
		goto out;
	}

	ret = baid;
out:
	iwl_free_resp(&hcmd);
	return ret;
}

static void iwl_mld_init_reorder_buffer(struct iwl_mld *mld,
					struct iwl_mld_baid_data *data,
					u16 ssn)
{
	for (int i = 0; i < mld->trans->info.num_rxqs; i++) {
		struct iwl_mld_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mld_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		reorder_buf->head_sn = ssn;
		reorder_buf->queue = i;

		for (int j = 0; j < data->buf_size; j++)
			__skb_queue_head_init(&entries[j].frames);
	}
}
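/*
 * Memory layout note (follows from the init loop above, not extra driver
 * state): "entries" is one flat allocation treated as a 2-D array of
 * [num_rxqs][entries_per_queue], so queue i's slice starts at
 * entries[i * entries_per_queue], and the frame with sequence number sn
 * lives in slot sn % buf_size within that slice.
 */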
static void iwl_mld_free_reorder_buffer(struct iwl_mld *mld,
					struct iwl_mld_baid_data *data)
{
	struct iwl_mld_delba_data delba_data = {
		.baid = data->baid,
	};

	iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_NOTIF_DEL_BA,
			       &delba_data, sizeof(delba_data));

	for (int i = 0; i < mld->trans->info.num_rxqs; i++) {
		struct iwl_mld_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mld_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		if (likely(!reorder_buf->num_stored))
			continue;

		/* This shouldn't happen in regular DELBA since the RX queues
		 * sync internal DELBA notification should trigger a release
		 * of all frames in the reorder buffer.
		 */
		WARN_ON(1);

		for (int j = 0; j < data->buf_size; j++)
			__skb_queue_purge(&entries[j].frames);
	}
}

int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
			   int tid, u16 ssn, u16 buf_size, u16 timeout)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_baid_data *baid_data = NULL;
	u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
	int ret, baid;
	u32 sta_mask;

	lockdep_assert_wiphy(mld->wiphy);

	if (mld->num_rx_ba_sessions >= IWL_MAX_BAID) {
		IWL_DEBUG_HT(mld,
			     "Max num of RX BA sessions reached; blocking new session\n");
		return -ENOSPC;
	}

	sta_mask = iwl_mld_fw_sta_id_mask(mld, sta);
	if (WARN_ON(!sta_mask))
		return -EINVAL;

	/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
	/* The division below will be OK if either the cache line size
	 * can be divided by the entry size (ALIGN will round up) or if
	 * the entry size can be divided by the cache line size, in which
	 * case the ALIGN() will do nothing.
	 */
	BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
		     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

	/* Upward align the reorder buffer size to fill an entire cache
	 * line for each queue, to avoid sharing cache lines between
	 * different queues.
	 */
	reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
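	/* Worked example of the alignment above (illustrative numbers,
	 * assuming SMP_CACHE_BYTES == 64 and a 16-byte entry): a window of
	 * buf_size == 10 needs 160 bytes per queue, which ALIGN() rounds
	 * up to 192, i.e. 12 entries per queue, so every queue's slice
	 * starts on its own cache line.
	 */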
	/* Allocate here so if allocation fails we can bail out early
	 * before starting the BA session in the firmware
	 */
	baid_data = kzalloc(sizeof(*baid_data) +
			    mld->trans->info.num_rxqs * reorder_buf_size,
			    GFP_KERNEL);
	if (!baid_data)
		return -ENOMEM;

	/* This division is why we need the above BUILD_BUG_ON(),
	 * if that doesn't hold then this will not be right.
	 */
	baid_data->entries_per_queue =
		reorder_buf_size / sizeof(baid_data->entries[0]);

	baid = iwl_mld_start_ba_in_fw(mld, sta, tid, ssn, buf_size);
	if (baid < 0) {
		ret = baid;
		goto out_free;
	}

	mld->num_rx_ba_sessions++;
	mld_sta->tid_to_baid[tid] = baid;

	baid_data->baid = baid;
	baid_data->mld = mld;
	baid_data->tid = tid;
	baid_data->buf_size = buf_size;
	baid_data->sta_mask = sta_mask;
	baid_data->timeout = timeout;
	baid_data->last_rx_timestamp = jiffies;
	baid_data->rcu_ptr = &mld->fw_id_to_ba[baid];

	iwl_mld_init_reorder_buffer(mld, baid_data, ssn);

	timer_setup(&baid_data->session_timer, iwl_mld_rx_agg_session_expired,
		    0);
	if (timeout)
		mod_timer(&baid_data->session_timer,
			  TU_TO_EXP_TIME(timeout * 2));

	IWL_DEBUG_HT(mld, "STA mask=0x%x (tid=%d) is assigned to BAID %d\n",
		     baid_data->sta_mask, tid, baid);

	/* protect the BA data with RCU to cover a case where our
	 * internal RX sync mechanism will timeout (not that it's
	 * supposed to happen) and we will free the session data while
	 * RX is being processed in parallel
	 */
	WARN_ON(rcu_access_pointer(mld->fw_id_to_ba[baid]));
	rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data);

	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
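/*
 * Hedged sketch of how the start/stop entry points here are typically
 * reached (the actual dispatch lives in the driver's mac80211
 * ampdu_action callback and may differ):
 *
 *	case IEEE80211_AMPDU_RX_START:
 *		ret = iwl_mld_ampdu_rx_start(mld, sta, tid, ssn, buf_size,
 *					     timeout);
 *		break;
 *	case IEEE80211_AMPDU_RX_STOP:
 *		ret = iwl_mld_ampdu_rx_stop(mld, sta, tid);
 *		break;
 */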
int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta,
			  int tid)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	int baid = mld_sta->tid_to_baid[tid];
	struct iwl_mld_baid_data *baid_data;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	/* During firmware restart, don't send the command as the firmware
	 * no longer recognizes the session. Instead, only clear the driver
	 * BA session data.
	 */
	if (!mld->fw_status.in_hw_restart) {
		ret = iwl_mld_stop_ba_in_fw(mld, sta, tid);
		if (ret)
			return ret;
	}

	if (!WARN_ON(mld->num_rx_ba_sessions == 0))
		mld->num_rx_ba_sessions--;

	baid_data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
	if (WARN_ON(!baid_data))
		return -EINVAL;

	if (timer_pending(&baid_data->session_timer))
		timer_shutdown_sync(&baid_data->session_timer);

	iwl_mld_free_reorder_buffer(mld, baid_data);

	RCU_INIT_POINTER(mld->fw_id_to_ba[baid], NULL);
	kfree_rcu(baid_data, rcu_head);

	IWL_DEBUG_HT(mld, "BAID %d is free\n", baid);

	return 0;
}

int iwl_mld_update_sta_baids(struct iwl_mld *mld,
			     u32 old_sta_mask,
			     u32 new_sta_mask)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY),
		.modify.old_sta_id_mask = cpu_to_le32(old_sta_mask),
		.modify.new_sta_id_mask = cpu_to_le32(new_sta_mask),
	};
	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
	int baid;

	/* mac80211 will remove sessions later, but we ignore all that */
	if (mld->fw_status.in_hw_restart)
		return 0;

	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));

	for (baid = 0; baid < ARRAY_SIZE(mld->fw_id_to_ba); baid++) {
		struct iwl_mld_baid_data *data;
		int ret;

		data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
		if (!data)
			continue;

		if (!(data->sta_mask & old_sta_mask))
			continue;

		WARN_ONCE(data->sta_mask != old_sta_mask,
			  "BAID data for %d corrupted - expected 0x%x found 0x%x\n",
			  baid, old_sta_mask, data->sta_mask);

		cmd.modify.tid = cpu_to_le32(data->tid);

		ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
		if (ret)
			return ret;
		data->sta_mask = new_sta_mask;
	}

	return 0;
}