/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015-2016 Intel Deutschland GmbH
 */
#ifndef __sta_h__
#define __sta_h__

#include <linux/spinlock.h>
#include <net/mac80211.h>
#include <linux/wait.h>

#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */
#include "fw-api.h" /* IWL_MVM_STATION_COUNT_MAX */
#include "rs.h"

struct iwl_mvm;
struct iwl_mvm_vif;

/**
 * DOC: DQA - Dynamic Queue Allocation - introduction
 *
 * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in the iwlwifi
 * driver that allows queues to be allocated dynamically on demand, rather than
 * statically ahead of time. Ideally, we would like to allocate one queue
 * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2
 * even if it also needs to send traffic to a sleeping STA1, without being
 * blocked by the sleeping station.
 *
 * Although the queues in DQA mode are dynamically allocated, there are still
 * some queues that are statically allocated:
 *	TXQ #0 - command queue
 *	TXQ #1 - aux frames
 *	TXQ #2 - P2P device frames
 *	TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames
 *	TXQ #4 - BSS DATA frames queue
 *	TXQ #5-8 - Non-QoS and MGMT frames queue pool
 *	TXQ #9 - P2P GO/SoftAP probe responses
 *	TXQ #10-31 - DATA frames queue pool
 * The queues are dynamically taken from either the MGMT frames queue pool or
 * the DATA frames one. See %iwl_mvm_dqa_txq for more information on each
 * queue.
 *
 * When a frame for a previously unseen RA/TID comes in, it needs to be deferred
 * until a queue is allocated for it, and only then can it be TXed. Therefore,
 * it is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called
 * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames.
 *
 * For convenience, MGMT is considered as if it has TID=8, and goes to the MGMT
 * queues in the pool. If there is no longer a free MGMT queue to allocate, a
 * queue will be allocated from the DATA pool instead. Since QoS NDPs can create
 * a problem for aggregations, they too will use a MGMT queue.
 *
 * When adding a STA, a DATA queue is reserved for it so that it can TX from
 * it. If no free queue is available to reserve, the STA addition will fail.
 *
 * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a
 * new RA/TID comes in for an existing STA, one of the STA's queues will become
 * shared and will serve more than the single TID (but always for the same RA!).
 *
 * When an RA/TID needs to become aggregated, no new queue needs to be
 * allocated; the queue is only marked as aggregated via the ADD_STA command.
 * Note, however, that a shared queue cannot be aggregated; only after the
 * other TIDs become inactive and are removed can the queue be reconfigured
 * and become aggregated.
 *
 * When removing a station, its queues are returned to the pool for reuse. Here
 * we also need to make sure that we are synced with the worker thread that TXes
 * the deferred frames so we don't get into a situation where the queues are
 * removed and then the worker puts deferred frames onto the released queues or
 * tries to allocate new queues for a STA we don't need anymore.
 */

/**
 * DOC: station table - introduction
 *
 * The station table is a list of data structures that represent the stations.
 * In STA/P2P client mode, the driver will hold one station for the AP/GO.
 * In GO/AP mode, the driver will have as many stations as associated clients.
 * All these stations are reflected in the fw's station table. The driver
 * keeps the fw's station table up to date with the ADD_STA command. Stations
 * can be removed by the REMOVE_STA command.
 *
 * All the data related to a station is held in the structure %iwl_mvm_sta
 * which is embedded in mac80211's %ieee80211_sta (in the drv_priv area).
 * This data includes the index of the station in the fw and per-tid
 * information (sequence numbers, Block-ack state machine, etc...). The
 * stations are created and deleted by the %sta_state callback from
 * %ieee80211_ops.
 *
 * The driver holds a map, %fw_id_to_mac_id, that allows fetching the
 * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw
 * station index. That way, the driver is able to get the tid related data in
 * O(1) time in time-sensitive paths (Tx / Tx response / BA notification).
 * These paths are triggered by the fw, and the driver needs to get a pointer
 * to the %ieee80211_sta structure. This map helps to get that pointer quickly.
 */

/**
 * DOC: station table - locking
 *
 * As stated before, the station is created / deleted by mac80211's %sta_state
 * callback from %ieee80211_ops which can sleep. The next paragraph explains
 * the locking of a single station; the ones after it relate to the station
 * table.
 *
 * The station holds the sequence number per tid. So this data needs to be
 * accessed in the Tx path (which is softIRQ). It also holds the Block-Ack
 * information (the state machine and the logic that checks if the queues
 * were drained), so it also needs to be accessible from the Tx response flow.
 * In short, the station needs to be accessed from sleepable context as well
 * as from tasklets, so the station itself needs a spinlock.
 *
 * The writers of the %fw_id_to_mac_id map are serialized by the global mutex
 * of the mvm op_mode. This is possible since %sta_state can sleep.
 * The pointers in this map are RCU protected, hence we won't replace the
 * station while we have Tx / Tx response / BA notification running.
 *
 * If a station is deleted while it still has packets in its A-MPDU queues,
 * then the reclaim flow will notice that there is no station in the map for
 * sta_id and it will drop the responses.
 */

/**
 * DOC: station table - internal stations
 *
 * The FW needs a few internal stations that are not reflected in
 * mac80211, such as the broadcast station in AP / GO mode, or the AUX sta
 * used for scanning and for the P2P device (during the GO negotiation).
 * For these kinds of stations we have the %iwl_mvm_int_sta struct which holds
 * the data relevant for them from both %iwl_mvm_sta and %ieee80211_sta.
 * Usually the data for these stations is static, so no locking is required,
 * and no TID data is needed either.
 * One thing to note is that these stations have an ID in the fw, but not
 * in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id
 * we fill ERR_PTR(-EINVAL) in this mapping, and any code dereferencing
 * pointers from this mapping needs to check that the value is neither an
 * error nor NULL.
 *
 * Currently there is only one auxiliary station for scanning, initialized
 * on init.
 */
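
/*
 * For illustration only (not part of this header's API): a time-sensitive
 * path would typically resolve a fw station index roughly as sketched below,
 * under the RCU read lock and checking for the ERR_PTR()/NULL placeholders
 * described above. Local variable names here are arbitrary.
 *
 *	struct ieee80211_sta *sta;
 *	struct iwl_mvm_sta *mvmsta;
 *
 *	rcu_read_lock();
 *	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 *	if (IS_ERR_OR_NULL(sta)) {
 *		rcu_read_unlock();
 *		return;		// internal station or already removed
 *	}
 *	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 *	// ... use mvmsta->tid_data[tid], etc. ...
 *	rcu_read_unlock();
 */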

/**
 * DOC: station table - AP Station in STA mode
 *
 * %iwl_mvm_vif includes the index of the AP station in the fw's STA table:
 * %ap_sta_id. To get the pointer to the corresponding %ieee80211_sta,
 * %fw_id_to_mac_id can be used. Due to the way the fw works, we must not
 * remove the AP station from the fw before setting the MAC context as
 * unassociated. Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the
 * AP station is removed by mac80211, but the station won't be removed in the
 * fw until the VIF is set as unassociated. Then, %ap_sta_id will be
 * invalidated.
 */

/**
 * DOC: station table - Drain vs. Flush
 *
 * Flush means that all the frames in the SCD queue are dumped regardless of
 * the station to which they were sent. We do that when we disassociate and
 * before we remove the AP's STA. The flush can be done synchronously against
 * the fw.
 * Drain means that the fw will drop all the frames sent to a specific station.
 * This is useful when a client (if we are IBSS / GO or AP) disassociates.
 */

/**
 * DOC: station table - fw restart
 *
 * When the fw asserts, or we have any other issue that requires resetting the
 * driver, we require mac80211 to reconfigure the driver. Since the private
 * data of the stations is embedded in mac80211's %ieee80211_sta, that data
 * will not be zeroed and needs to be reinitialized manually.
 * %IWL_MVM_STATUS_IN_HW_RESTART is set during restart and hints that we must
 * not allocate a new sta_id but reuse the previous one. This means that the
 * stations being re-added after the reset will have the same place in the fw
 * as before the reset. We do need to zero the %fw_id_to_mac_id map, since the
 * stations aren't in the fw any more. Internal stations that are not added by
 * mac80211 will be re-added in the init flow that is called after the
 * restart: mac80211 calls %iwl_mvm_mac_start which calls %iwl_mvm_up.
 */
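
/*
 * Illustrative sketch only: flows that must reuse the previous sta_id
 * typically branch on the restart status bit described above, along the
 * lines of (mvm->status being the op_mode status bitmap):
 *
 *	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
 *		// reuse the sta_id already stored in the driver's private
 *		// station data instead of allocating a new one
 *	}
 */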

/**
 * DOC: AP mode - PS
 *
 * When a station is asleep, the fw will set it as "asleep". All frames on
 * shared queues (i.e. non-aggregation queues) to that station will be dropped
 * by the fw (%TX_STATUS_FAIL_DEST_PS failure code).
 *
 * AMPDUs are in a separate queue that is stopped by the fw. We just need to
 * let mac80211 know when there are frames in these queues so that it can
 * properly handle trigger frames.
 *
 * When a trigger frame is received, mac80211 tells the driver to send frames
 * from the AMPDU queues or sends frames to non-aggregation queues itself,
 * depending on which ACs are delivery-enabled and what TID has frames to
 * transmit. Note that mac80211 has all the knowledge since all the non-agg
 * frames are buffered / filtered, and the driver tells mac80211 about agg
 * frames. The driver needs to tell the fw to let frames out even if the
 * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
 *
 * When we receive a frame from that station with PM bit unset, the driver
 * needs to let the fw know that this station isn't asleep any more. This is
 * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signaling the
 * station's wakeup.
 *
 * For a GO, the Service Period might be cut short due to an absence period
 * of the GO. In this (and all other cases) the firmware notifies us with the
 * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we
 * already sent to the device will be rejected again.
 *
 * See also "AP support for powersaving clients" in mac80211.h.
 */
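
/*
 * Illustrative sketch only: mac80211's release_buffered_frames() op is
 * expected to end up in %iwl_mvm_sta_modify_sleep_tx_count (declared later
 * in this header), roughly as follows. The wrapper name and body here are
 * placeholders; the real callback lives in the .c files and may differ:
 *
 *	static void release_buffered_frames(struct ieee80211_hw *hw,
 *					    struct ieee80211_sta *sta,
 *					    u16 tids, int num_frames,
 *					    enum ieee80211_frame_release_type reason,
 *					    bool more_data)
 *	{
 *		struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 *
 *		// let the fw release num_frames frames for these TIDs even
 *		// though the station is marked asleep
 *		iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
 *						  tids, more_data, true);
 *	}
 */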

/**
 * enum iwl_mvm_agg_state
 *
 * The state machine of the BA agreement establishment / tear down.
 * These states relate to a specific RA / TID.
 *
 * @IWL_AGG_OFF: aggregation is not used
 * @IWL_AGG_QUEUED: aggregation start work has been queued
 * @IWL_AGG_STARTING: aggregation is starting (between start and oper)
 * @IWL_AGG_ON: aggregation session is up
 * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
 *	HW queue to be empty from packets for this RA/TID.
 * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
 *	HW queue to be empty from packets for this RA/TID.
 */
enum iwl_mvm_agg_state {
	IWL_AGG_OFF = 0,
	IWL_AGG_QUEUED,
	IWL_AGG_STARTING,
	IWL_AGG_ON,
	IWL_EMPTYING_HW_QUEUE_ADDBA,
	IWL_EMPTYING_HW_QUEUE_DELBA,
};

/**
 * struct iwl_mvm_tid_data - holds the states for each RA / TID
 * @seq_number: the next WiFi sequence number to use
 * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
 *	This is basically (last acked packet++).
 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
 *	Tx response (TX_CMD) and the block ack notification (COMPRESSED_BA).
 * @lq_color: the color of the LQ command as it appears in tx response.
 * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
 * @state: state of the BA agreement establishment / tear down.
 * @txq_id: Tx queue used by the BA session / DQA
 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
 *	the first packet to be sent in legacy HW queue in Tx AGG stop flow.
 *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
 *	we are ready to finish the Tx AGG stop / start flow.
 * @tx_time: medium time consumed by this A-MPDU
 * @tpt_meas_start: time of the throughput measurement start, reset every HZ
 *	jiffies (i.e. every second)
 * @tx_count_last: number of frames transmitted during the last second
 * @tx_count: counts the number of frames transmitted since the last reset of
 *	tpt_meas_start
 */
struct iwl_mvm_tid_data {
	u16 seq_number;
	u16 next_reclaimed;
	/* The rest is Tx AGG related */
	u32 rate_n_flags;
	u8 lq_color;
	bool amsdu_in_ampdu_allowed;
	enum iwl_mvm_agg_state state;
	u16 txq_id;
	u16 ssn;
	u16 tx_time;
	unsigned long tpt_meas_start;
	u32 tx_count_last;
	u32 tx_count;
};
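
/*
 * Illustrative sketch only: the Tx path consumes @seq_number under the
 * per-station spinlock (see the locking DOC above), roughly as follows.
 * The sequence number is kept in seq_ctrl (IEEE80211_SCTL_SEQ) units, hence
 * the increment by 0x10:
 *
 *	spin_lock(&mvmsta->lock);
 *	seq_number = mvmsta->tid_data[tid].seq_number & IEEE80211_SCTL_SEQ;
 *	hdr->seq_ctrl |= cpu_to_le16(seq_number);
 *	mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
 *	spin_unlock(&mvmsta->lock);
 */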

struct iwl_mvm_key_pn {
	struct rcu_head rcu_head;
	struct {
		u8 pn[IWL_MAX_TID_COUNT][IEEE80211_CCMP_PN_LEN];
	} ____cacheline_aligned_in_smp q[];
};

/**
 * enum iwl_mvm_rxq_notif_type - Internal message identifier
 *
 * @IWL_MVM_RXQ_EMPTY: empty sync notification
 * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
 */
enum iwl_mvm_rxq_notif_type {
	IWL_MVM_RXQ_EMPTY,
	IWL_MVM_RXQ_NOTIF_DEL_BA,
};

/**
 * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
 *	in &iwl_rxq_sync_cmd. Should be DWORD aligned.
 *	FW is agnostic to the payload, so there are no endianness requirements.
 *
 * @type: value from &iwl_mvm_rxq_notif_type
 * @sync: ctrl path is waiting for all notifications to be received
 * @cookie: internal cookie to identify old notifications
 * @data: payload
 */
struct iwl_mvm_internal_rxq_notif {
	u16 type;
	u16 sync;
	u32 cookie;
	u8 data[];
} __packed;

struct iwl_mvm_delba_data {
	u32 baid;
} __packed;

/**
 * struct iwl_mvm_rxq_dup_data - per station per rx queue data
 * @last_seq: last sequence per tid for duplicate packet detection
 * @last_sub_frame: last subframe packet
 */
struct iwl_mvm_rxq_dup_data {
	__le16 last_seq[IWL_MAX_TID_COUNT + 1];
	u8 last_sub_frame[IWL_MAX_TID_COUNT + 1];
} ____cacheline_aligned_in_smp;

/**
 * struct iwl_mvm_link_sta - link specific parameters of a station
 * @rcu_head: used for freeing the data
 * @sta_id: the index of the station in the fw
 * @lq_sta: holds rate scaling data, either for the case when RS is done in
 *	the driver - %rs_drv, or in the FW - %rs_fw.
 * @orig_amsdu_len: used to save the original amsdu_len when it is changed via
 *	debugfs. If it's set to 0, it means that it was not set via debugfs.
 * @avg_energy: energy as reported by FW statistics notification
 */
struct iwl_mvm_link_sta {
	struct rcu_head rcu_head;
	u32 sta_id;
	union {
		struct iwl_lq_sta_rs_fw rs_fw;
		struct iwl_lq_sta rs_drv;
	} lq_sta;

	u16 orig_amsdu_len;

	u8 avg_energy;
};

/* per-link TX/RX MPDU counts, see &struct iwl_mvm_tpt_counter */
struct iwl_mvm_mpdu_counter {
	u32 tx;
	u32 rx;
};

/**
 * struct iwl_mvm_tpt_counter - per-queue MPDU counter
 *
 * @lock: needed to protect the counters when modified from statistics.
 * @per_link: per-link counters.
 * @window_start: timestamp of the counting-window start
 */
struct iwl_mvm_tpt_counter {
	spinlock_t lock;
	struct iwl_mvm_mpdu_counter per_link[IWL_MVM_FW_MAX_LINK_ID];
	unsigned long window_start;
} ____cacheline_aligned_in_smp;
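
/*
 * Illustrative sketch only: a counting helper such as iwl_mvm_count_mpdu()
 * (declared later in this header) is expected to update these counters under
 * the per-queue spinlock roughly as below. "link_id" and "window_len" are
 * placeholders here, not definitions from this header:
 *
 *	struct iwl_mvm_tpt_counter *cnt = &mvm_sta->mpdu_counters[queue];
 *
 *	spin_lock_bh(&cnt->lock);
 *	if (tx)
 *		cnt->per_link[link_id].tx += count;
 *	else
 *		cnt->per_link[link_id].rx += count;
 *	// restart the counting window once it is over
 *	if (time_after(jiffies, cnt->window_start + window_len)) {
 *		memset(cnt->per_link, 0, sizeof(cnt->per_link));
 *		cnt->window_start = jiffies;
 *	}
 *	spin_unlock_bh(&cnt->lock);
 */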

/**
 * struct iwl_mvm_sta - representation of a station in the driver
 * @vif: the interface the station belongs to
 * @tfd_queue_msk: the tfd queues used by the station
 * @mac_id_n_color: the MAC context this station is linked to
 * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
 *	tid.
 * @sta_type: station type
 * @authorized: indicates station is authorized
 * @sta_state: station state according to enum %ieee80211_sta_state
 * @bt_reduced_txpower: is reduced tx power enabled for this station
 * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
 *	we need to signal the EOSP
 * @lock: lock to protect the whole struct. Since %tid_data is accessed from
 *	the Tx path and from the Tx response flow, it needs a spinlock.
 * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
 * @tid_to_baid: a simple map of TID to baid
 * @reserved_queue: the queue reserved for this STA for DQA purposes
 *	Every STA is given one reserved queue to allow it to operate. If no
 *	such queue can be guaranteed, the STA addition will fail.
 * @tx_protection: reference counter for controlling the Tx protection.
 * @tt_tx_protection: is thermal throttling enabling Tx protection?
 * @disable_tx: is tx to this STA disabled?
 * @amsdu_enabled: bitmap of TX AMSDU allowed TIDs.
 *	In case TLC offload is not active it is either 0xFFFF or 0.
 * @max_amsdu_len: max AMSDU length
 * @sleeping: indicates the station is sleeping (when not offloaded to FW)
 * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON)
 * @sleep_tx_count: the number of frames that we told the firmware to let out
 *	even when that station is asleep. This is useful in case the queue
 *	gets empty before all the frames were sent, which can happen when
 *	we are sending frames from an AMPDU queue and there was a hole in
 *	the BA window. To be used for UAPSD only.
 * @ptk_pn: per-queue PTK PN data structures
 * @dup_data: per queue duplicate packet detection data
 * @tx_ant: the index of the antenna to use for data tx to this station. Only
 *	used during connection establishment (e.g. for the 4 way handshake
 *	exchange).
 * @pairwise_cipher: used to feed iwlmei upon authorization
 * @deflink: the default link station, for non-MLO STA, all link specific data
 *	is accessed via deflink (or link[0]). For MLO, it will hold data of the
 *	first added link STA.
 * @link: per link sta entries. For non-MLO only link[0] holds data. For MLO,
 *	link[0] points to deflink and link[link_id] is allocated when new link
 *	sta is added.
 * @mpdu_counters: RX/TX MPDU counters for each queue.
 *
 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
 * in the structure for use by the driver. This structure is placed in that
 * space.
 *
 */
For non-MLO only link[0] holds data. For MLO, 4139af1bba4SBjoern A. Zeeb * link[0] points to deflink and link[link_id] is allocated when new link 4149af1bba4SBjoern A. Zeeb * sta is added. 415*a4128aadSBjoern A. Zeeb * @mpdu_counters: RX/TX MPDUs counters for each queue. 416bfcc09ddSBjoern A. Zeeb * 417bfcc09ddSBjoern A. Zeeb * When mac80211 creates a station it reserves some space (hw->sta_data_size) 418bfcc09ddSBjoern A. Zeeb * in the structure for use by driver. This structure is placed in that 419bfcc09ddSBjoern A. Zeeb * space. 420bfcc09ddSBjoern A. Zeeb * 421bfcc09ddSBjoern A. Zeeb */ 422bfcc09ddSBjoern A. Zeeb struct iwl_mvm_sta { 423bfcc09ddSBjoern A. Zeeb u32 tfd_queue_msk; 424bfcc09ddSBjoern A. Zeeb u32 mac_id_n_color; 425bfcc09ddSBjoern A. Zeeb u16 tid_disable_agg; 4269af1bba4SBjoern A. Zeeb u8 sta_type; 427bfcc09ddSBjoern A. Zeeb enum ieee80211_sta_state sta_state; 428bfcc09ddSBjoern A. Zeeb bool bt_reduced_txpower; 429bfcc09ddSBjoern A. Zeeb bool next_status_eosp; 4309af1bba4SBjoern A. Zeeb bool authorized; 431bfcc09ddSBjoern A. Zeeb spinlock_t lock; 432bfcc09ddSBjoern A. Zeeb struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1]; 433bfcc09ddSBjoern A. Zeeb u8 tid_to_baid[IWL_MAX_TID_COUNT]; 434bfcc09ddSBjoern A. Zeeb struct ieee80211_vif *vif; 435bfcc09ddSBjoern A. Zeeb struct iwl_mvm_key_pn __rcu *ptk_pn[4]; 436bfcc09ddSBjoern A. Zeeb struct iwl_mvm_rxq_dup_data *dup_data; 437bfcc09ddSBjoern A. Zeeb 438bfcc09ddSBjoern A. Zeeb u8 reserved_queue; 439bfcc09ddSBjoern A. Zeeb 440bfcc09ddSBjoern A. Zeeb /* Temporary, until the new TLC will control the Tx protection */ 441bfcc09ddSBjoern A. Zeeb s8 tx_protection; 442bfcc09ddSBjoern A. Zeeb bool tt_tx_protection; 443bfcc09ddSBjoern A. Zeeb 444bfcc09ddSBjoern A. Zeeb bool disable_tx; 445bfcc09ddSBjoern A. Zeeb u16 amsdu_enabled; 446bfcc09ddSBjoern A. Zeeb u16 max_amsdu_len; 447bfcc09ddSBjoern A. Zeeb bool sleeping; 448bfcc09ddSBjoern A. Zeeb u8 agg_tids; 449bfcc09ddSBjoern A. Zeeb u8 sleep_tx_count; 450bfcc09ddSBjoern A. Zeeb u8 tx_ant; 451d9836fb4SBjoern A. Zeeb u32 pairwise_cipher; 4529af1bba4SBjoern A. Zeeb 4539af1bba4SBjoern A. Zeeb struct iwl_mvm_link_sta deflink; 4549af1bba4SBjoern A. Zeeb struct iwl_mvm_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; 455*a4128aadSBjoern A. Zeeb 456*a4128aadSBjoern A. Zeeb struct iwl_mvm_tpt_counter *mpdu_counters; 457bfcc09ddSBjoern A. Zeeb }; 458bfcc09ddSBjoern A. Zeeb 459bfcc09ddSBjoern A. Zeeb u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data); 460bfcc09ddSBjoern A. Zeeb 461bfcc09ddSBjoern A. Zeeb static inline struct iwl_mvm_sta * 462bfcc09ddSBjoern A. Zeeb iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta) 463bfcc09ddSBjoern A. Zeeb { 464bfcc09ddSBjoern A. Zeeb return (void *)sta->drv_priv; 465bfcc09ddSBjoern A. Zeeb } 466bfcc09ddSBjoern A. Zeeb 467bfcc09ddSBjoern A. Zeeb /** 468bfcc09ddSBjoern A. Zeeb * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or 469bfcc09ddSBjoern A. Zeeb * broadcast) 470bfcc09ddSBjoern A. Zeeb * @sta_id: the index of the station in the fw (will be replaced by id_n_color) 471bfcc09ddSBjoern A. Zeeb * @type: station type 472bfcc09ddSBjoern A. Zeeb * @tfd_queue_msk: the tfd queues used by the station 473bfcc09ddSBjoern A. Zeeb */ 474bfcc09ddSBjoern A. Zeeb struct iwl_mvm_int_sta { 475bfcc09ddSBjoern A. Zeeb u32 sta_id; 4769af1bba4SBjoern A. Zeeb u8 type; 477bfcc09ddSBjoern A. Zeeb u32 tfd_queue_msk; 478bfcc09ddSBjoern A. Zeeb }; 479bfcc09ddSBjoern A. Zeeb 480bfcc09ddSBjoern A. 

/**
 * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
 *	broadcast)
 * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
 * @type: station type
 * @tfd_queue_msk: the tfd queues used by the station
 */
struct iwl_mvm_int_sta {
	u32 sta_id;
	u8 type;
	u32 tfd_queue_msk;
};

/**
 * iwl_mvm_sta_send_to_fw - Send the STA info to the FW.
 *
 * @mvm: the iwl_mvm* to use
 * @sta: the STA
 * @update: this is true if the FW is being updated about a STA it already knows
 *	about. Otherwise (if this is a new STA), this should be false.
 * @flags: if update==true, this marks what is being changed via ORs of values
 *	from enum iwl_sta_modify_flag. Otherwise, this is ignored.
 */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags);
int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype);
int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		     struct ieee80211_sta *sta, int sta_id, u8 sta_type);
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta);

static inline int iwl_mvm_update_sta(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif,
				     struct ieee80211_sta *sta)
{
	return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
}

void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
					  struct ieee80211_sta *sta);
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta);
bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		     struct ieee80211_sta *sta,
		     struct ieee80211_link_sta *link_sta, int *ret);
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta);
int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id);
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset);
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf);

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key);

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb);

void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
			bool tx, int queue);

/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu);
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid);
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid);

int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start);

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id);
int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm);

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif);
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     u8 type);
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta);
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm);

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta);
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue);
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain);
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable);
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable);
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable);

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
#if defined(__linux__)
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
			 u8 *key, u32 key_len,
			 struct ieee80211_key_conf *key_conf_out);
#endif
void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   u32 id);
/* Queues */
int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
			    struct ieee80211_sta *sta,
			    u8 sta_id, u8 tid, unsigned int timeout);

/* Sta state */
/**
 * struct iwl_mvm_sta_state_ops - callbacks for the sta_state() ops
 *
 * Since the only difference between the MLD and
 * non-MLD versions of sta_state() is these function calls,
 * each version passes its specific function pointers to
 * %iwl_mvm_mac_sta_state_common().
 *
 * @add_sta: pointer to the function that adds a new sta
 * @update_sta: pointer to the function that updates a sta
 * @rm_sta: pointer to the function that removes a sta
 * @mac_ctxt_changed: pointer to the function that handles a change in mac ctxt
 */
struct iwl_mvm_sta_state_ops {
	int (*add_sta)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
	int (*update_sta)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);
	int (*rm_sta)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		      struct ieee80211_sta *sta);
	int (*mac_ctxt_changed)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				bool force_assoc_off);
};
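
/*
 * Illustrative sketch only: the non-MLD sta_state() mac80211 op is expected
 * to wire its ops roughly as below and then call
 * iwl_mvm_mac_sta_state_common(). The .mac_ctxt_changed entry shown here is
 * a hypothetical wrapper name; the real callback lives in the .c files:
 *
 *	static const struct iwl_mvm_sta_state_ops callbacks = {
 *		.add_sta = iwl_mvm_add_sta,
 *		.update_sta = iwl_mvm_update_sta,
 *		.rm_sta = iwl_mvm_rm_sta,
 *		.mac_ctxt_changed = iwl_mvm_mac_ctxt_changed_wrapper,
 *	};
 *
 *	return iwl_mvm_mac_sta_state_common(hw, vif, sta, old_state,
 *					    new_state, &callbacks);
 */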

int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 enum ieee80211_sta_state old_state,
				 enum ieee80211_sta_state new_state,
				 const struct iwl_mvm_sta_state_ops *callbacks);

/* New MLD STA related APIs */
/* STA */
int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			      struct ieee80211_bss_conf *link_conf);
int iwl_mvm_mld_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_bss_conf *link_conf);
int iwl_mvm_mld_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			      struct ieee80211_bss_conf *link_conf);
int iwl_mvm_mld_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id);
int iwl_mvm_mld_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_bss_conf *link_conf);
int iwl_mvm_mld_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_mld_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_bss_conf *link_conf);
int iwl_mvm_mld_rm_aux_sta(struct iwl_mvm *mvm);
int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta);
int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
			       struct iwl_mvm_sta *mvm_sta,
			       struct iwl_mvm_link_sta *mvm_sta_link,
			       unsigned int link_id,
			       bool is_in_fw);
int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id);
int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 u16 old_links, u16 new_links);
u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   int filter_link_id);
int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm,
				       struct iwl_mvm_int_sta *sta,
				       const u8 *addr, int link_id,
				       u16 *queue, u8 tid,
				       unsigned int *_wdg_timeout);

/* Queues */
void iwl_mvm_mld_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
					   struct iwl_mvm_vif *mvmvif,
					   bool disable);
void iwl_mvm_mld_sta_modify_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_sta *mvm_sta,
				       bool disable);
void iwl_mvm_mld_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
					  struct ieee80211_sta *sta,
					  bool disable);
#endif /* __sta_h__ */