// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#include "agg.h"
#include "sta.h"
#include "hcmd.h"

static void
iwl_mld_reorder_release_frames(struct iwl_mld *mld, struct ieee80211_sta *sta,
			       struct napi_struct *napi,
			       struct iwl_mld_baid_data *baid_data,
			       struct iwl_mld_reorder_buffer *reorder_buf,
			       u16 nssn)
{
	struct iwl_mld_reorder_buf_entry *entries =
		&baid_data->entries[reorder_buf->queue *
				    baid_data->entries_per_queue];
	u16 ssn = reorder_buf->head_sn;

	while (ieee80211_sn_less(ssn, nssn)) {
		int index = ssn % baid_data->buf_size;
		struct sk_buff_head *skb_list = &entries[index].frames;
		struct sk_buff *skb;

		ssn = ieee80211_sn_inc(ssn);

		/* Empty the list. Will have more than one frame for A-MSDU.
		 * Empty list is valid as well since nssn indicates frames were
		 * received.
		 */
		while ((skb = __skb_dequeue(skb_list))) {
			iwl_mld_pass_packet_to_mac80211(mld, napi, skb,
							reorder_buf->queue,
							sta);
			reorder_buf->num_stored--;
		}
	}
	reorder_buf->head_sn = nssn;
}

static void iwl_mld_release_frames_from_notif(struct iwl_mld *mld,
					      struct napi_struct *napi,
					      u8 baid, u16 nssn, int queue)
{
	struct iwl_mld_reorder_buffer *reorder_buf;
	struct iwl_mld_baid_data *ba_data;
	struct ieee80211_link_sta *link_sta;
	u32 sta_id;

	IWL_DEBUG_HT(mld, "Frame release notification for BAID %u, NSSN %d\n",
		     baid, nssn);

	if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
			 baid >= ARRAY_SIZE(mld->fw_id_to_ba)))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (!ba_data) {
		IWL_DEBUG_HT(mld, "BAID %d not found in map\n", baid);
		goto out_unlock;
	}

	/* pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
		goto out_unlock;

	reorder_buf = &ba_data->reorder_buf[queue];

	iwl_mld_reorder_release_frames(mld, link_sta->sta, napi, ba_data,
				       reorder_buf, nssn);
out_unlock:
	rcu_read_unlock();
}

void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld,
					struct napi_struct *napi,
					struct iwl_rx_packet *pkt, int queue)
{
	struct iwl_frame_release *release = (void *)pkt->data;
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);

	if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
			 "Unexpected frame release notif size %u (expected %zu)\n",
			 pkt_len, sizeof(*release)))
		return;

	iwl_mld_release_frames_from_notif(mld, napi, release->baid,
					  le16_to_cpu(release->nssn),
					  queue);
}

void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
					    struct napi_struct *napi,
					    struct iwl_rx_packet *pkt,
					    int queue)
{
	struct iwl_bar_frame_release *release = (void *)pkt->data;
	struct iwl_mld_baid_data *baid_data;
	unsigned int baid, nssn, sta_id, tid;
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);

	if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
			 "Unexpected BAR frame release notif size %u (expected %zu)\n",
			 pkt_len, sizeof(*release)))
		return;

	baid = le32_get_bits(release->ba_info,
			     IWL_BAR_FRAME_RELEASE_BAID_MASK);
	nssn = le32_get_bits(release->ba_info,
			     IWL_BAR_FRAME_RELEASE_NSSN_MASK);
	sta_id = le32_get_bits(release->sta_tid,
			       IWL_BAR_FRAME_RELEASE_STA_MASK);
	tid = le32_get_bits(release->sta_tid,
			    IWL_BAR_FRAME_RELEASE_TID_MASK);

	if (IWL_FW_CHECK(mld, baid >= ARRAY_SIZE(mld->fw_id_to_ba),
			 "BAR release: invalid BAID (%x)\n", baid))
		return;

	rcu_read_lock();
	baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (IWL_FW_CHECK(mld, !baid_data,
			 "Got valid BAID %d but not allocated, invalid BAR release!\n",
			 baid))
		goto out_unlock;

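	/* verify the BAID is correctly mapped to the sta and tid */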
	if (IWL_FW_CHECK(mld, tid != baid_data->tid ||
			 sta_id > mld->fw->ucode_capa.num_stations ||
			 !(baid_data->sta_mask & BIT(sta_id)),
			 "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n",
			 baid, baid_data->sta_mask, baid_data->tid, sta_id,
			 tid))
		goto out_unlock;

	IWL_DEBUG_DROP(mld, "Received a BAR, expect packet loss: nssn %d\n",
		       nssn);

	iwl_mld_release_frames_from_notif(mld, napi, baid, nssn, queue);
out_unlock:
	rcu_read_unlock();
}

void iwl_mld_del_ba(struct iwl_mld *mld, int queue,
		    struct iwl_mld_delba_data *data)
{
	struct iwl_mld_baid_data *ba_data;
	struct iwl_mld_reorder_buffer *reorder_buf;
	struct ieee80211_link_sta *link_sta;
	u8 baid = data->baid;
	u32 sta_id;

	if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (WARN_ON_ONCE(!ba_data))
		goto out_unlock;

	/* pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
		goto out_unlock;

	reorder_buf = &ba_data->reorder_buf[queue];

	/* release all frames that are in the reorder buffer to the stack */
	iwl_mld_reorder_release_frames(mld, link_sta->sta, NULL,
				       ba_data, reorder_buf,
				       ieee80211_sn_add(reorder_buf->head_sn,
							ba_data->buf_size));
out_unlock:
	rcu_read_unlock();
}

/* Returns IWL_MLD_BUFFERED_SKB if the MPDU was buffered, IWL_MLD_DROP_SKB if
 * it should be dropped, and IWL_MLD_PASS_SKB if it should be passed to the
 * upper layer.
 */
enum iwl_mld_reorder_result
iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi,
		int queue, struct ieee80211_sta *sta,
		struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc)
{
	struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
	struct iwl_mld_baid_data *baid_data;
	struct iwl_mld_reorder_buffer *buffer;
	struct iwl_mld_reorder_buf_entry *entries;
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_link_sta *mld_link_sta;
	u32 reorder = le32_to_cpu(desc->reorder_data);
	bool amsdu, last_subframe, is_old_sn, is_dup;
	u8 tid = ieee80211_get_tid(hdr);
	u8 baid;
	u16 nssn, sn;
	u32 sta_mask = 0;
	int index;
	u8 link_id;

	baid = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_BAID_MASK);

	/* This also covers the case of receiving a Block Ack Request
	 * outside a BA session; we'll pass it to mac80211 and that
	 * then sends a delBA action frame.
	 * This also covers pure monitor mode, in which case we won't
	 * have any BA sessions.
	 */
	if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
		return IWL_MLD_PASS_SKB;

	/* no sta yet */
	if (WARN_ONCE(!sta,
		      "Got valid BAID without a valid station assigned\n"))
		return IWL_MLD_PASS_SKB;

	/* not a data packet */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return IWL_MLD_PASS_SKB;

	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
		return IWL_MLD_PASS_SKB;

	baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (!baid_data) {
		IWL_DEBUG_HT(mld,
			     "Got valid BAID but no baid allocated, bypass re-ordering (BAID=%d reorder=0x%x)\n",
			     baid, reorder);
		return IWL_MLD_PASS_SKB;
	}

	for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
		sta_mask |= BIT(mld_link_sta->fw_id);

	/* verify the BAID is correctly mapped to the sta and tid */
	if (IWL_FW_CHECK(mld,
			 tid != baid_data->tid ||
			 !(sta_mask & baid_data->sta_mask),
			 "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but was received for sta_mask:0x%x tid:%d\n",
			 baid, baid_data->sta_mask, baid_data->tid,
			 sta_mask, tid))
		return IWL_MLD_PASS_SKB;

	buffer = &baid_data->reorder_buf[queue];
	entries = &baid_data->entries[queue * baid_data->entries_per_queue];

	is_old_sn = !!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN);

	if (!buffer->valid && is_old_sn)
		return IWL_MLD_PASS_SKB;

	buffer->valid = true;

	is_dup = !!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE));

	/* drop any duplicated or outdated packets */
	if (is_dup || is_old_sn)
		return IWL_MLD_DROP_SKB;

	sn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_SN_MASK);
	nssn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_NSSN_MASK);
	amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
	last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;

	/* release immediately if allowed by nssn and no stored frames */
	if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
		if (!amsdu || last_subframe)
			buffer->head_sn = nssn;
		return IWL_MLD_PASS_SKB;
	}

	/* release immediately if there are no stored frames, and the sn is
	 * equal to the head.
	 * This can happen due to reorder timer, where NSSN is behind head_sn.
	 * When we released everything, and we got the next frame in the
	 * sequence, according to the NSSN we can't release immediately,
	 * while technically there is no hole and we can move forward.
	 */
	if (!buffer->num_stored && sn == buffer->head_sn) {
		if (!amsdu || last_subframe)
			buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
		return IWL_MLD_PASS_SKB;
	}

	/* put in reorder buffer */
	index = sn % baid_data->buf_size;
	__skb_queue_tail(&entries[index].frames, skb);
	buffer->num_stored++;

	/* We cannot trust NSSN for AMSDU sub-frames that are not the last. The
	 * reason is that NSSN advances on the first sub-frame, and may cause
	 * the reorder buffer to advance before all the sub-frames arrive.
	 *
	 * Example: reorder buffer contains SN 0 & 2, and we receive an A-MSDU
	 * with SN 1.
	 * The NSSN for the first sub-frame will be 3, so the driver releases
	 * SN 0, 1 and 2. When the next sub-frame of SN 1 arrives, the reorder
	 * buffer is already ahead and it will be dropped.
	 * If the last sub-frame is not on this queue, we will get a frame
	 * release notification with an up-to-date NSSN.
	 */
	if (!amsdu || last_subframe)
		iwl_mld_reorder_release_frames(mld, sta, napi, baid_data,
					       buffer, nssn);

	return IWL_MLD_BUFFERED_SKB;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_reorder);

static void iwl_mld_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mld_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mld_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mld_baid_data *ba_data;
	struct ieee80211_link_sta *link_sta;
	struct iwl_mld_sta *mld_sta;
	unsigned long timeout;
	unsigned int sta_id;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);
	if (WARN_ON(!ba_data))
		goto unlock;

	if (WARN_ON(!ba_data->timeout))
		goto unlock;

	timeout = ba_data->last_rx_timestamp +
		  TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* timer expired, pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(ba_data->mld->fw_id_to_link_sta[sta_id]);

	/* sta should be valid unless the following happens:
	 * The firmware asserts, which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_link_sta pointer table. mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (IS_ERR_OR_NULL(link_sta) || WARN_ON(!link_sta->sta))
		goto unlock;

	mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
	ieee80211_rx_ba_timer_expired(mld_sta->vif, link_sta->sta->addr,
				      ba_data->tid);
unlock:
	rcu_read_unlock();
}

static int
iwl_mld_stop_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta, int tid)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
		.remove.sta_id_mask =
			cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
		.remove.tid = cpu_to_le32(tid),
	};
	int ret;

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(DATA_PATH_GROUP,
					   RX_BAID_ALLOCATION_CONFIG_CMD),
				   &cmd);
	if (ret)
		return ret;

	IWL_DEBUG_HT(mld, "RX BA Session stopped in fw\n");

	return ret;
}

static int
iwl_mld_start_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta,
		       int tid, u16 ssn, u16 buf_size)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_ADD),
		.alloc.sta_id_mask =
			cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
		.alloc.tid = tid,
		.alloc.ssn = cpu_to_le16(ssn),
		.alloc.win_size = cpu_to_le16(buf_size),
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
		.flags = CMD_WANT_SKB,
		.len[0] = sizeof(cmd),
		.data[0] = &cmd,
	};
	struct iwl_rx_baid_cfg_resp *resp;
	struct iwl_rx_packet *pkt;
	u32 resp_len;
	int ret, baid;

	BUILD_BUG_ON(sizeof(*resp) != sizeof(baid));

	ret = iwl_mld_send_cmd(mld, &hcmd);
	if (ret)
		return ret;

	pkt = hcmd.resp_pkt;

	resp_len = iwl_rx_packet_payload_len(pkt);
	if (IWL_FW_CHECK(mld, resp_len != sizeof(*resp),
			 "BAID_ALLOC_CMD: unexpected response length %d\n",
			 resp_len)) {
		ret = -EIO;
		goto out;
	}

	IWL_DEBUG_HT(mld, "RX BA Session started in fw\n");

	resp = (void *)pkt->data;
	baid = le32_to_cpu(resp->baid);

	if (IWL_FW_CHECK(mld, baid < 0 || baid >= ARRAY_SIZE(mld->fw_id_to_ba),
			 "BAID_ALLOC_CMD: invalid BAID response %d\n", baid)) {
		ret = -EINVAL;
		goto out;
	}

	ret = baid;
out:
	iwl_free_resp(&hcmd);
	return ret;
}

static void iwl_mld_init_reorder_buffer(struct iwl_mld *mld,
					struct iwl_mld_baid_data *data,
					u16 ssn)
{
	for (int i = 0; i < mld->trans->num_rx_queues; i++) {
		struct iwl_mld_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mld_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		reorder_buf->head_sn = ssn;
		reorder_buf->queue = i;

		for (int j = 0; j < data->buf_size; j++)
			__skb_queue_head_init(&entries[j].frames);
	}
}

static void iwl_mld_free_reorder_buffer(struct iwl_mld *mld,
					struct iwl_mld_baid_data *data)
{
	struct iwl_mld_delba_data delba_data = {
		.baid = data->baid,
	};

	iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_NOTIF_DEL_BA,
			       &delba_data, sizeof(delba_data));

	for (int i = 0; i < mld->trans->num_rx_queues; i++) {
		struct iwl_mld_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mld_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		if (likely(!reorder_buf->num_stored))
			continue;

		/* This shouldn't happen in regular DELBA since the RX queues
		 * sync internal DELBA notification should trigger a release
		 * of all frames in the reorder buffer.
		 */
		WARN_ON(1);

		for (int j = 0; j < data->buf_size; j++)
			__skb_queue_purge(&entries[j].frames);
	}
}

int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
			   int tid, u16 ssn, u16 buf_size, u16 timeout)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_baid_data *baid_data = NULL;
	u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
	int ret, baid;
	u32 sta_mask;

	lockdep_assert_wiphy(mld->wiphy);

	if (mld->num_rx_ba_sessions >= IWL_MAX_BAID) {
		IWL_DEBUG_HT(mld,
			     "Max num of RX BA sessions reached; blocking new session\n");
		return -ENOSPC;
	}

	sta_mask = iwl_mld_fw_sta_id_mask(mld, sta);
	if (WARN_ON(!sta_mask))
		return -EINVAL;

	/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
	/* The division below will be OK if either the cache line size
	 * can be divided by the entry size (ALIGN will round up) or if
	 * the entry size can be divided by the cache line size, in which
	 * case the ALIGN() will do nothing.
	 */
	BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
		     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

	/* Upward align the reorder buffer size to fill an entire cache
	 * line for each queue, to avoid sharing cache lines between
	 * different queues.
	 */
	reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

	/* Allocate here so if allocation fails we can bail out early
	 * before starting the BA session in the firmware
	 */
	baid_data = kzalloc(sizeof(*baid_data) +
			    mld->trans->num_rx_queues * reorder_buf_size,
			    GFP_KERNEL);
	if (!baid_data)
		return -ENOMEM;

	/* This division is why we need the above BUILD_BUG_ON(),
	 * if that doesn't hold then this will not be right.
	 */
	baid_data->entries_per_queue =
		reorder_buf_size / sizeof(baid_data->entries[0]);

	baid = iwl_mld_start_ba_in_fw(mld, sta, tid, ssn, buf_size);
	if (baid < 0) {
		ret = baid;
		goto out_free;
	}

	mld->num_rx_ba_sessions++;
	mld_sta->tid_to_baid[tid] = baid;

	baid_data->baid = baid;
	baid_data->mld = mld;
	baid_data->tid = tid;
	baid_data->buf_size = buf_size;
	baid_data->sta_mask = sta_mask;
	baid_data->timeout = timeout;
	baid_data->last_rx_timestamp = jiffies;
	baid_data->rcu_ptr = &mld->fw_id_to_ba[baid];

	iwl_mld_init_reorder_buffer(mld, baid_data, ssn);

	timer_setup(&baid_data->session_timer, iwl_mld_rx_agg_session_expired,
		    0);
	if (timeout)
		mod_timer(&baid_data->session_timer,
			  TU_TO_EXP_TIME(timeout * 2));

	IWL_DEBUG_HT(mld, "STA mask=0x%x (tid=%d) is assigned to BAID %d\n",
		     baid_data->sta_mask, tid, baid);

	/* protect the BA data with RCU to cover a case where our
	 * internal RX sync mechanism will timeout (not that it's
	 * supposed to happen) and we will free the session data while
	 * RX is being processed in parallel
	 */
	WARN_ON(rcu_access_pointer(mld->fw_id_to_ba[baid]));
	rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data);

	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta,
			  int tid)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	int baid = mld_sta->tid_to_baid[tid];
	struct iwl_mld_baid_data *baid_data;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	/* During firmware restart, do not send the command as the firmware no
	 * longer recognizes the session. Instead, only clear the driver BA
	 * session data.
	 */
	if (!mld->fw_status.in_hw_restart) {
		ret = iwl_mld_stop_ba_in_fw(mld, sta, tid);
		if (ret)
			return ret;
	}

	if (!WARN_ON(mld->num_rx_ba_sessions == 0))
		mld->num_rx_ba_sessions--;

	baid_data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
	if (WARN_ON(!baid_data))
		return -EINVAL;

	if (timer_pending(&baid_data->session_timer))
		timer_shutdown_sync(&baid_data->session_timer);

	iwl_mld_free_reorder_buffer(mld, baid_data);

	RCU_INIT_POINTER(mld->fw_id_to_ba[baid], NULL);
	kfree_rcu(baid_data, rcu_head);

	IWL_DEBUG_HT(mld, "BAID %d is free\n", baid);

	return 0;
}

int iwl_mld_update_sta_baids(struct iwl_mld *mld,
			     u32 old_sta_mask,
			     u32 new_sta_mask)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY),
		.modify.old_sta_id_mask = cpu_to_le32(old_sta_mask),
		.modify.new_sta_id_mask = cpu_to_le32(new_sta_mask),
	};
	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
	int baid;

	/* mac80211 will remove sessions later, but we ignore all that */
	if (mld->fw_status.in_hw_restart)
		return 0;

	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));

	for (baid = 0; baid < ARRAY_SIZE(mld->fw_id_to_ba); baid++) {
		struct iwl_mld_baid_data *data;
		int ret;

		data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
		if (!data)
			continue;

		if (!(data->sta_mask & old_sta_mask))
			continue;

		WARN_ONCE(data->sta_mask != old_sta_mask,
			  "BAID data for %d corrupted - expected 0x%x found 0x%x\n",
			  baid, old_sta_mask, data->sta_mask);

		cmd.modify.tid = cpu_to_le32(data->tid);

		ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
		if (ret)
			return ret;
		data->sta_mask = new_sta_mask;
	}

	return 0;
}