xref: /linux/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h (revision f3956ebb3bf06ab2266ad5ee2214aed46405810c)
1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2 /*
3  * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
4  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5  * Copyright (C) 2016-2017 Intel Deutschland GmbH
6  */
7 #ifndef __IWL_MVM_H__
8 #define __IWL_MVM_H__
9 
10 #include <linux/list.h>
11 #include <linux/spinlock.h>
12 #include <linux/leds.h>
13 #include <linux/in6.h>
14 
15 #ifdef CONFIG_THERMAL
16 #include <linux/thermal.h>
17 #endif
18 
19 #include <linux/ktime.h>
20 
21 #include "iwl-op-mode.h"
22 #include "iwl-trans.h"
23 #include "fw/notif-wait.h"
24 #include "iwl-eeprom-parse.h"
25 #include "fw/file.h"
26 #include "iwl-config.h"
27 #include "sta.h"
28 #include "fw-api.h"
29 #include "constants.h"
30 #include "fw/runtime.h"
31 #include "fw/dbg.h"
32 #include "fw/acpi.h"
33 #include "iwl-nvm-parse.h"
34 
35 #include <linux/average.h>
36 
37 #define IWL_MVM_MAX_ADDRESSES		5
38 /* RSSI offset for WkP */
39 #define IWL_RSSI_OFFSET 50
40 #define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
41 #define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16
42 
43 /* A TimeUnit is 1024 microseconds */
44 #define MSEC_TO_TU(_msec)	((_msec) * 1000 / 1024)
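
/*
 * Illustrative only (not part of the original header): the conversion above
 * rounds down due to integer division, e.g.
 *
 *	MSEC_TO_TU(100)  == 97	(100 * 1000 / 1024)
 *	MSEC_TO_TU(1024) == 1000
 */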
45 
46 /* For GO, this value represents the number of TUs before CSA "beacon
47  * 0" TBTT when the CSA time-event needs to be scheduled to start.  It
48  * must be big enough to ensure that we switch in time.
49  */
50 #define IWL_MVM_CHANNEL_SWITCH_TIME_GO		40
51 
52 /* For client, this value represents the number of TUs before CSA
53  * "beacon 1" TBTT, instead.  This is because we don't know when the
54  * GO/AP will be in the new channel, so we switch early enough.
55  */
56 #define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT	10
57 
58 /*
59  * This value (in TUs) is used to fine tune the CSA NoA end time which should
60  * be just before "beacon 0" TBTT.
61  */
62 #define IWL_MVM_CHANNEL_SWITCH_MARGIN 4
63 
64 /*
65  * Number of beacons to transmit on a new channel until we unblock tx to
66  * the stations, even if we didn't identify them on the new channel
67  */
68 #define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3
69 
70 /* offchannel queue towards mac80211 */
71 #define IWL_MVM_OFFCHANNEL_QUEUE 0
72 
73 extern const struct ieee80211_ops iwl_mvm_hw_ops;
74 
75 /**
76  * struct iwl_mvm_mod_params - module parameters for iwlmvm
77  * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted.
78  *	We will register to mac80211 to have testmode working. The NIC must not
79  *	be brought up after the INIT fw asserted. This is useful to be able to
80  *	use proprietary tools over testmode to debug the INIT fw.
81  * @power_scheme: one of enum iwl_power_scheme
82  */
83 struct iwl_mvm_mod_params {
84 	bool init_dbg;
85 	int power_scheme;
86 };
87 extern struct iwl_mvm_mod_params iwlmvm_mod_params;
88 
89 struct iwl_mvm_phy_ctxt {
90 	u16 id;
91 	u16 color;
92 	u32 ref;
93 
94 	enum nl80211_chan_width width;
95 
96 	/*
97 	 * TODO: This should probably be removed. Currently here only for rate
98 	 * scaling algorithm
99 	 */
100 	struct ieee80211_channel *channel;
101 };
102 
103 struct iwl_mvm_time_event_data {
104 	struct ieee80211_vif *vif;
105 	struct list_head list;
106 	unsigned long end_jiffies;
107 	u32 duration;
108 	bool running;
109 	u32 uid;
110 
111 	/*
112 	 * The 'id' field must only be accessed while mvm->time_event_lock is
113 	 * held, as its value indicates whether the time event is on the time
114 	 * event list (it is not when id == TE_MAX).
115 	 */
116 	u32 id;
117 };
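
/*
 * A minimal illustrative sketch (not from the original header) of the
 * locking rule documented above for the 'id' field:
 *
 *	spin_lock_bh(&mvm->time_event_lock);
 *	if (te_data->id < TE_MAX)
 *		... the time event is on mvm->time_event_list ...
 *	spin_unlock_bh(&mvm->time_event_lock);
 */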
118 
119  /* Power management */
120 
121 /**
122  * enum iwl_power_scheme
123  * @IWL_POWER_LEVEL_CAM - Continuously Active Mode
124  * @IWL_POWER_SCHEME_CAM: Continuously Active Mode
125  * @IWL_POWER_SCHEME_BPS: Balanced Power Save (default)
126  * @IWL_POWER_SCHEME_LP:  Low Power
127 enum iwl_power_scheme {
128 	IWL_POWER_SCHEME_CAM = 1,
129 	IWL_POWER_SCHEME_BPS,
130 	IWL_POWER_SCHEME_LP
131 };
132 
133 #define IWL_CONN_MAX_LISTEN_INTERVAL	10
134 #define IWL_UAPSD_MAX_SP		IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
135 
136 #ifdef CONFIG_IWLWIFI_DEBUGFS
137 enum iwl_dbgfs_pm_mask {
138 	MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
139 	MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1),
140 	MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
141 	MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
142 	MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
143 	MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
144 	MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
145 	MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
146 	MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9),
147 	MVM_DEBUGFS_PM_USE_PS_POLL = BIT(10),
148 };
149 
150 struct iwl_dbgfs_pm {
151 	u16 keep_alive_seconds;
152 	u32 rx_data_timeout;
153 	u32 tx_data_timeout;
154 	bool skip_over_dtim;
155 	u8 skip_dtim_periods;
156 	bool lprx_ena;
157 	u32 lprx_rssi_threshold;
158 	bool snooze_ena;
159 	bool uapsd_misbehaving;
160 	bool use_ps_poll;
161 	int mask;
162 };
163 
164 /* beacon filtering */
165 
166 enum iwl_dbgfs_bf_mask {
167 	MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
168 	MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
169 	MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
170 	MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3),
171 	MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4),
172 	MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5),
173 	MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6),
174 	MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7),
175 	MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8),
176 	MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9),
177 	MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10),
178 };
179 
180 struct iwl_dbgfs_bf {
181 	u32 bf_energy_delta;
182 	u32 bf_roaming_energy_delta;
183 	u32 bf_roaming_state;
184 	u32 bf_temp_threshold;
185 	u32 bf_temp_fast_filter;
186 	u32 bf_temp_slow_filter;
187 	u32 bf_enable_beacon_filter;
188 	u32 bf_debug_flag;
189 	u32 bf_escape_timer;
190 	u32 ba_escape_timer;
191 	u32 ba_enable_beacon_abort;
192 	int mask;
193 };
194 #endif
195 
196 enum iwl_mvm_smps_type_request {
197 	IWL_MVM_SMPS_REQ_BT_COEX,
198 	IWL_MVM_SMPS_REQ_TT,
199 	IWL_MVM_SMPS_REQ_PROT,
200 	IWL_MVM_SMPS_REQ_FW,
201 	NUM_IWL_MVM_SMPS_REQ,
202 };
203 
204 enum iwl_bt_force_ant_mode {
205 	BT_FORCE_ANT_DIS = 0,
206 	BT_FORCE_ANT_AUTO,
207 	BT_FORCE_ANT_BT,
208 	BT_FORCE_ANT_WIFI,
209 
210 	BT_FORCE_ANT_MAX,
211 };
212 
213 /**
214  * enum iwl_mvm_low_latency_force - low latency force mode set by debugfs
215  * @LOW_LATENCY_FORCE_UNSET: unset force mode
216  * @LOW_LATENCY_FORCE_ON: force low latency on
217  * @LOW_LATENCY_FORCE_OFF: force low latency off
218  * @NUM_LOW_LATENCY_FORCE: number of force modes
219  */
220 enum iwl_mvm_low_latency_force {
221 	LOW_LATENCY_FORCE_UNSET,
222 	LOW_LATENCY_FORCE_ON,
223 	LOW_LATENCY_FORCE_OFF,
224 	NUM_LOW_LATENCY_FORCE
225 };
226 
227 /**
228  * enum iwl_mvm_low_latency_cause - low latency set causes
229  * @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected
230  * @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs
231  * @LOW_LATENCY_VCMD: low latency mode set from vendor command
232  * @LOW_LATENCY_VIF_TYPE: low latency mode set because of vif type (ap)
233  * @LOW_LATENCY_DEBUGFS_FORCE_ENABLE: indicates that force mode is enabled;
234  *	the actual set/unset is done with LOW_LATENCY_DEBUGFS_FORCE
235  * @LOW_LATENCY_DEBUGFS_FORCE: low latency force mode from debugfs;
236  *	set this together with the LOW_LATENCY_DEBUGFS_FORCE_ENABLE flag
237  *	in low_latency.
238  */
239 enum iwl_mvm_low_latency_cause {
240 	LOW_LATENCY_TRAFFIC = BIT(0),
241 	LOW_LATENCY_DEBUGFS = BIT(1),
242 	LOW_LATENCY_VCMD = BIT(2),
243 	LOW_LATENCY_VIF_TYPE = BIT(3),
244 	LOW_LATENCY_DEBUGFS_FORCE_ENABLE = BIT(4),
245 	LOW_LATENCY_DEBUGFS_FORCE = BIT(5),
246 };
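
/*
 * Illustrative example (an assumption, not from the original header): per the
 * documentation above, forcing low latency on from debugfs means setting both
 * force bits together in the vif's low_latency field, e.g.
 *
 *	low_latency |= LOW_LATENCY_DEBUGFS_FORCE_ENABLE |
 *		       LOW_LATENCY_DEBUGFS_FORCE;
 */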
247 
248 /**
249  * struct iwl_mvm_vif_bf_data - beacon filtering related data
250  * @bf_enabled: indicates if beacon filtering is enabled
251  * @ba_enabled: indicates if beacon abort is enabled
252  * @ave_beacon_signal: average beacon signal
253  * @last_cqm_event: rssi of the last cqm event
254  * @bt_coex_min_thold: minimum threshold for BT coex
255  * @bt_coex_max_thold: maximum threshold for BT coex
256  * @last_bt_coex_event: rssi of the last BT coex event
257  */
258 struct iwl_mvm_vif_bf_data {
259 	bool bf_enabled;
260 	bool ba_enabled;
261 	int ave_beacon_signal;
262 	int last_cqm_event;
263 	int bt_coex_min_thold;
264 	int bt_coex_max_thold;
265 	int last_bt_coex_event;
266 };
267 
268 /**
269  * struct iwl_probe_resp_data - data for NoA/CSA updates
270  * @rcu_head: used for freeing the data on update
271  * @notif: notification data
272  * @noa_len: length of NoA attribute, calculated from the notification
273  */
274 struct iwl_probe_resp_data {
275 	struct rcu_head rcu_head;
276 	struct iwl_probe_resp_data_notif notif;
277 	int noa_len;
278 };
279 
280 /**
281  * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
282  * @id: between 0 and 3
283  * @color: to solve races upon MAC addition and removal
284  * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
285  * @bssid: BSSID for this (client) interface
286  * @associated: indicates that we're currently associated, used only for
287  *	managing the firmware state in iwl_mvm_bss_info_changed_station()
288  * @ap_assoc_sta_count: count of stations associated to us - valid only
289  *	if VIF type is AP
290  * @uploaded: indicates the MAC context has been added to the device
291  * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
292  *	should get quota etc.
293  * @pm_enabled: indicates if MAC power management is allowed
294  * @monitor_active: indicates that monitor context is configured, and that the
295  *	interface should get quota etc.
296  * @low_latency: bit flags for low latency
297  *	see &enum iwl_mvm_low_latency_cause for causes.
298  * @low_latency_actual: boolean, indicates low latency is set,
299  *	as a result from low_latency bit flags and takes force into account.
300  * @ps_disabled: indicates that this interface requires PS to be disabled
301  * @queue_params: QoS params for this MAC
302  * @bcast_sta: station used for broadcast packets. Used by the following
303  *  vifs: P2P_DEVICE, GO and AP.
304  * @beacon_skb: the skb used to hold the AP/GO beacon template
305  * @smps_requests: the SMPS requests of different parts of the driver,
306  *	combined on update to yield the overall request to mac80211.
307  * @beacon_stats: beacon statistics, containing the # of received beacons,
308  *	# of received beacons accumulated over FW restart, and the current
309  *	average signal of beacons retrieved from the firmware
310  * @csa_failed: CSA failed to schedule time event, report an error later
311  * @features: hw features active for this vif
312  * @probe_resp_data: data from FW notification to store NOA and CSA related
313  *	data to be inserted into probe response.
314  */
315 struct iwl_mvm_vif {
316 	struct iwl_mvm *mvm;
317 	u16 id;
318 	u16 color;
319 	u8 ap_sta_id;
320 
321 	u8 bssid[ETH_ALEN];
322 	bool associated;
323 	u8 ap_assoc_sta_count;
324 
325 	u16 cab_queue;
326 
327 	bool uploaded;
328 	bool ap_ibss_active;
329 	bool pm_enabled;
330 	bool monitor_active;
331 	u8 low_latency: 6;
332 	u8 low_latency_actual: 1;
333 	bool ps_disabled;
334 	struct iwl_mvm_vif_bf_data bf_data;
335 
336 	struct {
337 		u32 num_beacons, accu_num_beacons;
338 		u8 avg_signal;
339 	} beacon_stats;
340 
341 	u32 ap_beacon_time;
342 
343 	enum iwl_tsf_id tsf_id;
344 
345 	/*
346 	 * QoS data from mac80211, need to store this here
347 	 * as mac80211 has a separate callback but we need
348 	 * to have the data for the MAC context
349 	 */
350 	struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
351 	struct iwl_mvm_time_event_data time_event_data;
352 	struct iwl_mvm_time_event_data hs_time_event_data;
353 
354 	struct iwl_mvm_int_sta bcast_sta;
355 	struct iwl_mvm_int_sta mcast_sta;
356 
357 	/*
358 	 * Assigned while mac80211 has the interface in a channel context,
359 	 * or, for P2P Device, while it exists.
360 	 */
361 	struct iwl_mvm_phy_ctxt *phy_ctxt;
362 
363 #ifdef CONFIG_PM
364 	/* WoWLAN GTK rekey data */
365 	struct {
366 		u8 kck[NL80211_KCK_EXT_LEN];
367 		u8 kek[NL80211_KEK_EXT_LEN];
368 		size_t kek_len;
369 		size_t kck_len;
370 		u32 akm;
371 		__le64 replay_ctr;
372 		bool valid;
373 	} rekey_data;
374 
375 	int tx_key_idx;
376 
377 	bool seqno_valid;
378 	u16 seqno;
379 #endif
380 
381 #if IS_ENABLED(CONFIG_IPV6)
382 	/* IPv6 addresses for WoWLAN */
383 	struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
384 	unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)];
385 	int num_target_ipv6_addrs;
386 #endif
387 
388 #ifdef CONFIG_IWLWIFI_DEBUGFS
389 	struct dentry *dbgfs_dir;
390 	struct dentry *dbgfs_slink;
391 	struct iwl_dbgfs_pm dbgfs_pm;
392 	struct iwl_dbgfs_bf dbgfs_bf;
393 	struct iwl_mac_power_cmd mac_pwr_cmd;
394 	int dbgfs_quota_min;
395 #endif
396 
397 	enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
398 
399 	/* FW identified misbehaving AP */
400 	u8 uapsd_misbehaving_bssid[ETH_ALEN];
401 
402 	struct delayed_work uapsd_nonagg_detected_wk;
403 
404 	/* Indicates that CSA countdown may be started */
405 	bool csa_countdown;
406 	bool csa_failed;
407 	u16 csa_target_freq;
408 	u16 csa_count;
409 	u16 csa_misbehave;
410 	struct delayed_work csa_work;
411 
412 	/* Indicates that we are waiting for a beacon on a new channel */
413 	bool csa_bcn_pending;
414 
415 	/* TCP Checksum Offload */
416 	netdev_features_t features;
417 
418 	struct iwl_probe_resp_data __rcu *probe_resp_data;
419 
420 	/* we can only have 2 GTK + 2 IGTK active at a time */
421 	struct ieee80211_key_conf *ap_early_keys[4];
422 
423 	/* 26-tone RU OFDMA transmissions should be blocked */
424 	bool he_ru_2mhz_block;
425 
426 	struct {
427 		struct ieee80211_key_conf __rcu *keys[2];
428 	} bcn_prot;
429 };
430 
431 static inline struct iwl_mvm_vif *
432 iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
433 {
434 	return (void *)vif->drv_priv;
435 }
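
/*
 * Typical (illustrative) usage in a mac80211 callback, assuming 'vif' was
 * passed in by mac80211:
 *
 *	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 */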
436 
437 extern const u8 tid_to_mac80211_ac[];
438 
439 #define IWL_MVM_SCAN_STOPPING_SHIFT	8
440 
441 enum iwl_scan_status {
442 	IWL_MVM_SCAN_REGULAR		= BIT(0),
443 	IWL_MVM_SCAN_SCHED		= BIT(1),
444 	IWL_MVM_SCAN_NETDETECT		= BIT(2),
445 
446 	IWL_MVM_SCAN_STOPPING_REGULAR	= BIT(8),
447 	IWL_MVM_SCAN_STOPPING_SCHED	= BIT(9),
448 	IWL_MVM_SCAN_STOPPING_NETDETECT	= BIT(10),
449 
450 	IWL_MVM_SCAN_REGULAR_MASK	= IWL_MVM_SCAN_REGULAR |
451 					  IWL_MVM_SCAN_STOPPING_REGULAR,
452 	IWL_MVM_SCAN_SCHED_MASK		= IWL_MVM_SCAN_SCHED |
453 					  IWL_MVM_SCAN_STOPPING_SCHED,
454 	IWL_MVM_SCAN_NETDETECT_MASK	= IWL_MVM_SCAN_NETDETECT |
455 					  IWL_MVM_SCAN_STOPPING_NETDETECT,
456 
457 	IWL_MVM_SCAN_STOPPING_MASK	= 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
458 	IWL_MVM_SCAN_MASK		= 0xff,
459 };
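
/*
 * Illustrative only (not part of the original header): the masks above allow
 * checking a scan type regardless of whether it is running or stopping, e.g.
 *
 *	if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
 *		... a regular scan is running or being stopped ...
 */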
460 
461 enum iwl_mvm_scan_type {
462 	IWL_SCAN_TYPE_NOT_SET,
463 	IWL_SCAN_TYPE_UNASSOC,
464 	IWL_SCAN_TYPE_WILD,
465 	IWL_SCAN_TYPE_MILD,
466 	IWL_SCAN_TYPE_FRAGMENTED,
467 	IWL_SCAN_TYPE_FAST_BALANCE,
468 };
469 
470 enum iwl_mvm_sched_scan_pass_all_states {
471 	SCHED_SCAN_PASS_ALL_DISABLED,
472 	SCHED_SCAN_PASS_ALL_ENABLED,
473 	SCHED_SCAN_PASS_ALL_FOUND,
474 };
475 
476 /**
477  * struct iwl_mvm_tt_mgmt - Thermal Throttling Management structure
478  * @ct_kill_exit: worker to exit thermal kill
479  * @dynamic_smps: Is dynamic SMPS due to thermal throttling enabled?
480  * @tx_backoff: The current thermal throttling tx backoff in uSec.
481  * @min_backoff: The minimal tx backoff due to power restrictions
482  * @params: Parameters to configure the thermal throttling algorithm.
483  * @throttle: Is thermal throttling active?
484  */
485 struct iwl_mvm_tt_mgmt {
486 	struct delayed_work ct_kill_exit;
487 	bool dynamic_smps;
488 	u32 tx_backoff;
489 	u32 min_backoff;
490 	struct iwl_tt_params params;
491 	bool throttle;
492 };
493 
494 #ifdef CONFIG_THERMAL
495 /**
496  * struct iwl_mvm_thermal_device - thermal zone related data
497  * @temp_trips: temperature thresholds for reporting
498  * @fw_trips_index: keeps the indexes into the original temp_trips array
499  * @tzone: thermal zone device data
500  */
501 struct iwl_mvm_thermal_device {
502 	s16 temp_trips[IWL_MAX_DTS_TRIPS];
503 	u8 fw_trips_index[IWL_MAX_DTS_TRIPS];
504 	struct thermal_zone_device *tzone;
505 };
506 
507 /*
508  * struct iwl_mvm_cooling_device
509  * @cur_state: current state
510  * @cdev: struct thermal cooling device
511  */
512 struct iwl_mvm_cooling_device {
513 	u32 cur_state;
514 	struct thermal_cooling_device *cdev;
515 };
516 #endif
517 
518 #define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8
519 
520 struct iwl_mvm_frame_stats {
521 	u32 legacy_frames;
522 	u32 ht_frames;
523 	u32 vht_frames;
524 	u32 bw_20_frames;
525 	u32 bw_40_frames;
526 	u32 bw_80_frames;
527 	u32 bw_160_frames;
528 	u32 sgi_frames;
529 	u32 ngi_frames;
530 	u32 siso_frames;
531 	u32 mimo2_frames;
532 	u32 agg_frames;
533 	u32 ampdu_count;
534 	u32 success_frames;
535 	u32 fail_frames;
536 	u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES];
537 	int last_frame_idx;
538 };
539 
540 #define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff
541 #define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100
542 #define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200
543 
544 enum iwl_mvm_tdls_cs_state {
545 	IWL_MVM_TDLS_SW_IDLE = 0,
546 	IWL_MVM_TDLS_SW_REQ_SENT,
547 	IWL_MVM_TDLS_SW_RESP_RCVD,
548 	IWL_MVM_TDLS_SW_REQ_RCVD,
549 	IWL_MVM_TDLS_SW_ACTIVE,
550 };
551 
552 enum iwl_mvm_traffic_load {
553 	IWL_MVM_TRAFFIC_LOW,
554 	IWL_MVM_TRAFFIC_MEDIUM,
555 	IWL_MVM_TRAFFIC_HIGH,
556 };
557 
558 DECLARE_EWMA(rate, 16, 16)
559 
560 struct iwl_mvm_tcm_mac {
561 	struct {
562 		u32 pkts[IEEE80211_NUM_ACS];
563 		u32 airtime;
564 	} tx;
565 	struct {
566 		u32 pkts[IEEE80211_NUM_ACS];
567 		u32 airtime;
568 		u32 last_ampdu_ref;
569 	} rx;
570 	struct {
571 		/* track AP's transfer in client mode */
572 		u64 rx_bytes;
573 		struct ewma_rate rate;
574 		bool detected;
575 	} uapsd_nonagg_detect;
576 	bool opened_rx_ba_sessions;
577 };
578 
579 struct iwl_mvm_tcm {
580 	struct delayed_work work;
581 	spinlock_t lock; /* used when time elapsed */
582 	unsigned long ts; /* timestamp when period ends */
583 	unsigned long ll_ts;
584 	unsigned long uapsd_nonagg_ts;
585 	bool paused;
586 	struct iwl_mvm_tcm_mac data[NUM_MAC_INDEX_DRIVER];
587 	struct {
588 		u32 elapsed; /* milliseconds for this TCM period */
589 		u32 airtime[NUM_MAC_INDEX_DRIVER];
590 		enum iwl_mvm_traffic_load load[NUM_MAC_INDEX_DRIVER];
591 		enum iwl_mvm_traffic_load band_load[NUM_NL80211_BANDS];
592 		enum iwl_mvm_traffic_load global_load;
593 		bool low_latency[NUM_MAC_INDEX_DRIVER];
594 		bool change[NUM_MAC_INDEX_DRIVER];
595 	} result;
596 };
597 
598 /**
599  * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
600  * @head_sn: reorder window head sn
601  * @num_stored: number of mpdus stored in the buffer
602  * @buf_size: the reorder buffer size as set by the last addba request
603  * @queue: queue of this reorder buffer
604  * @last_amsdu: track last A-MSDU SN for duplication detection
605  * @last_sub_index: track A-MSDU sub-frame index for duplication detection
606  * @reorder_timer: timer for frames that are in the reorder buffer. For an
607  *	A-MSDU, it is the time of the last received sub-frame
608  * @removed: prevent timer re-arming
609  * @valid: reordering is valid for this queue
610  * @lock: protect reorder buffer internal state
611  * @mvm: mvm pointer, needed for frame timer context
612  * @consec_oldsn_drops: consecutive drops due to old SN
613  * @consec_oldsn_ampdu_gp2: A-MPDU GP2 timestamp to track
614  *	when to apply old SN consecutive drop workaround
615  * @consec_oldsn_prev_drop: track whether or not an MPDU
616  *	that was single/part of the previous A-MPDU was
617  *	dropped due to old SN
618  */
619 struct iwl_mvm_reorder_buffer {
620 	u16 head_sn;
621 	u16 num_stored;
622 	u16 buf_size;
623 	int queue;
624 	u16 last_amsdu;
625 	u8 last_sub_index;
626 	struct timer_list reorder_timer;
627 	bool removed;
628 	bool valid;
629 	spinlock_t lock;
630 	struct iwl_mvm *mvm;
631 	unsigned int consec_oldsn_drops;
632 	u32 consec_oldsn_ampdu_gp2;
633 	unsigned int consec_oldsn_prev_drop:1;
634 } ____cacheline_aligned_in_smp;
635 
636 /**
637  * struct _iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno
638  * @frames: list of skbs stored
639  * @reorder_time: time the packet was stored in the reorder buffer
640  */
641 struct _iwl_mvm_reorder_buf_entry {
642 	struct sk_buff_head frames;
643 	unsigned long reorder_time;
644 };
645 
646 /* make this indirection to get the aligned thing */
647 struct iwl_mvm_reorder_buf_entry {
648 	struct _iwl_mvm_reorder_buf_entry e;
649 }
650 #ifndef __CHECKER__
651 /* sparse doesn't like this construct: "bad integer constant expression" */
652 __aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry)))
653 #endif
654 ;
655 
656 /**
657  * struct iwl_mvm_baid_data - BA session data
658  * @sta_id: station id
659  * @tid: tid of the session
660  * @baid: baid of the session
661  * @timeout: the timeout set in the addba request
662  * @entries_per_queue: # of buffers per queue, this actually gets
663  *	aligned up to avoid cache line sharing between queues
664  * @last_rx: last rx jiffies, updated only if timeout passed from last update
665  * @session_timer: timer to check if BA session expired, runs at 2 * timeout
666  * @mvm: mvm pointer, needed for timer context
667  * @reorder_buf: reorder buffer, allocated per queue
668  * @reorder_buf_data: data
669  */
670 struct iwl_mvm_baid_data {
671 	struct rcu_head rcu_head;
672 	u8 sta_id;
673 	u8 tid;
674 	u8 baid;
675 	u16 timeout;
676 	u16 entries_per_queue;
677 	unsigned long last_rx;
678 	struct timer_list session_timer;
679 	struct iwl_mvm_baid_data __rcu **rcu_ptr;
680 	struct iwl_mvm *mvm;
681 	struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES];
682 	struct iwl_mvm_reorder_buf_entry entries[];
683 };
684 
685 static inline struct iwl_mvm_baid_data *
686 iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf)
687 {
688 	return (void *)((u8 *)buf -
689 			offsetof(struct iwl_mvm_baid_data, reorder_buf) -
690 			sizeof(*buf) * buf->queue);
691 }
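
/*
 * Illustrative note (not from the original header): the helper above relies
 * on 'buf' being &baid_data->reorder_buf[buf->queue], so the container is
 * recovered by stepping back over the preceding entries and the offset of
 * the reorder_buf member:
 *
 *	baid_data = (void *)buf - buf->queue * sizeof(*buf) -
 *		    offsetof(struct iwl_mvm_baid_data, reorder_buf);
 */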
692 
693 /*
694  * enum iwl_mvm_queue_status - queue status
695  * @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved
696  *	Basically, this means that this queue can be used for any purpose
697  * @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use
698  *	This is the state of a queue that has been dedicated to some RA/TID
699  *	(agg'd or not), but that hasn't yet gone through the actual enablement
700  *	of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet.
701  *	Note that in this state there is no requirement to already know what TID
702  *	should be used with this queue, it is just marked as a queue that will
703  *	be used, and shouldn't be allocated to anyone else.
704  * @IWL_MVM_QUEUE_READY: queue is ready to be used
705  *	This is the state of a queue that has been fully configured (including
706  *	SCD pointers, etc), has a specific RA/TID assigned to it, and can be
707  *	used to send traffic.
708  * @IWL_MVM_QUEUE_SHARED: queue is shared, or in a process of becoming shared
709  *	This is a state in which a single queue serves more than one TID, all of
710  *	which are not aggregated. Note that the queue is only associated to one
711  *	RA.
712  */
713 enum iwl_mvm_queue_status {
714 	IWL_MVM_QUEUE_FREE,
715 	IWL_MVM_QUEUE_RESERVED,
716 	IWL_MVM_QUEUE_READY,
717 	IWL_MVM_QUEUE_SHARED,
718 };
719 
720 #define IWL_MVM_DQA_QUEUE_TIMEOUT	(5 * HZ)
721 #define IWL_MVM_INVALID_QUEUE		0xFFFF
722 
723 #define IWL_MVM_NUM_CIPHERS             10
724 
725 
726 struct iwl_mvm_txq {
727 	struct list_head list;
728 	u16 txq_id;
729 	atomic_t tx_request;
730 	bool stopped;
731 };
732 
733 static inline struct iwl_mvm_txq *
734 iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq)
735 {
736 	return (void *)txq->drv_priv;
737 }
738 
739 static inline struct iwl_mvm_txq *
740 iwl_mvm_txq_from_tid(struct ieee80211_sta *sta, u8 tid)
741 {
742 	if (tid == IWL_MAX_TID_COUNT)
743 		tid = IEEE80211_NUM_TIDS;
744 
745 	return (void *)sta->txq[tid]->drv_priv;
746 }
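
/*
 * Illustrative note (assumption, not from the original header): management
 * frames use the special IWL_MAX_TID_COUNT "TID", which the helper above maps
 * to the extra mac80211 queue at sta->txq[IEEE80211_NUM_TIDS], e.g.
 *
 *	struct iwl_mvm_txq *mvmtxq =
 *		iwl_mvm_txq_from_tid(sta, IWL_MAX_TID_COUNT);
 */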
747 
748 /**
749  * struct iwl_mvm_tvqm_txq_info - maps TVQM hw queue to tid
750  *
751  * @sta_id: sta id
752  * @txq_tid: txq tid
753  */
754 struct iwl_mvm_tvqm_txq_info {
755 	u8 sta_id;
756 	u8 txq_tid;
757 };
758 
759 struct iwl_mvm_dqa_txq_info {
760 	u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
761 	bool reserved; /* Is this the TXQ reserved for a STA */
762 	u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
763 	u8 txq_tid; /* The TID "owner" of this queue */
764 	u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
765 	/* Timestamp for inactivation per TID of this queue */
766 	unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
767 	enum iwl_mvm_queue_status status;
768 };
769 
770 struct iwl_mvm {
771 	/* for logger access */
772 	struct device *dev;
773 
774 	struct iwl_trans *trans;
775 	const struct iwl_fw *fw;
776 	const struct iwl_cfg *cfg;
777 	struct iwl_phy_db *phy_db;
778 	struct ieee80211_hw *hw;
779 
780 	/* for protecting access to iwl_mvm */
781 	struct mutex mutex;
782 	struct list_head async_handlers_list;
783 	spinlock_t async_handlers_lock;
784 	struct work_struct async_handlers_wk;
785 
786 	struct work_struct roc_done_wk;
787 
788 	unsigned long init_status;
789 
790 	unsigned long status;
791 
792 	u32 queue_sync_cookie;
793 	unsigned long queue_sync_state;
794 	/*
795 	 * for beacon filtering -
796 	 * currently only one interface can be supported
797 	 */
798 	struct iwl_mvm_vif *bf_allowed_vif;
799 
800 	bool hw_registered;
801 	bool rfkill_safe_init_done;
802 
803 	u8 cca_40mhz_workaround;
804 
805 	u32 ampdu_ref;
806 	bool ampdu_toggle;
807 
808 	struct iwl_notif_wait_data notif_wait;
809 
810 	union {
811 		struct mvm_statistics_rx_v3 rx_stats_v3;
812 		struct mvm_statistics_rx rx_stats;
813 	};
814 
815 	struct {
816 		u64 rx_time;
817 		u64 tx_time;
818 		u64 on_time_rf;
819 		u64 on_time_scan;
820 	} radio_stats, accu_radio_stats;
821 
822 	struct list_head add_stream_txqs;
823 	union {
824 		struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
825 		struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
826 	};
827 	struct work_struct add_stream_wk; /* To add streams to queues */
828 
829 	const char *nvm_file_name;
830 	struct iwl_nvm_data *nvm_data;
831 	/* NVM sections */
832 	struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
833 
834 	struct iwl_fw_runtime fwrt;
835 
836 	/* EEPROM MAC addresses */
837 	struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
838 
839 	/* data related to data path */
840 	struct iwl_rx_phy_info last_phy_info;
841 	struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT_MAX];
842 	u8 rx_ba_sessions;
843 
844 	/* configured by mac80211 */
845 	u32 rts_threshold;
846 
847 	/* Scan status, cmd (pre-allocated) and auxiliary station */
848 	unsigned int scan_status;
849 	void *scan_cmd;
850 	struct iwl_mcast_filter_cmd *mcast_filter_cmd;
851 	/* For CDB this is the low band scan type; for non-CDB, the only type. */
852 	enum iwl_mvm_scan_type scan_type;
853 	enum iwl_mvm_scan_type hb_scan_type;
854 
855 	enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
856 	struct delayed_work scan_timeout_dwork;
857 
858 	/* max number of simultaneous scans the FW supports */
859 	unsigned int max_scans;
860 
861 	/* UMAC scan tracking */
862 	u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
863 
864 	/* start time of last scan in TSF of the mac that requested the scan */
865 	u64 scan_start;
866 
867 	/* the vif that requested the current scan */
868 	struct iwl_mvm_vif *scan_vif;
869 
870 	/* rx chain antennas set through debugfs for the scan command */
871 	u8 scan_rx_ant;
872 
873 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
874 	/* broadcast filters to configure for each associated station */
875 	const struct iwl_fw_bcast_filter *bcast_filters;
876 #ifdef CONFIG_IWLWIFI_DEBUGFS
877 	struct {
878 		bool override;
879 		struct iwl_bcast_filter_cmd cmd;
880 	} dbgfs_bcast_filtering;
881 #endif
882 #endif
883 
884 	/* Internal station */
885 	struct iwl_mvm_int_sta aux_sta;
886 	struct iwl_mvm_int_sta snif_sta;
887 
888 	bool last_ebs_successful;
889 
890 	u8 scan_last_antenna_idx; /* to toggle TX between antennas */
891 	u8 mgmt_last_antenna_idx;
892 
893 	/* last smart fifo state that was successfully sent to firmware */
894 	enum iwl_sf_state sf_state;
895 
896 	/*
897 	 * Leave this pointer outside the ifdef below so that it can be
898 	 * assigned without ifdef in the source code.
899 	 */
900 	struct dentry *debugfs_dir;
901 #ifdef CONFIG_IWLWIFI_DEBUGFS
902 	u32 dbgfs_sram_offset, dbgfs_sram_len;
903 	u32 dbgfs_prph_reg_addr;
904 	bool disable_power_off;
905 	bool disable_power_off_d3;
906 	bool beacon_inject_active;
907 
908 	bool scan_iter_notif_enabled;
909 
910 	struct debugfs_blob_wrapper nvm_hw_blob;
911 	struct debugfs_blob_wrapper nvm_sw_blob;
912 	struct debugfs_blob_wrapper nvm_calib_blob;
913 	struct debugfs_blob_wrapper nvm_prod_blob;
914 	struct debugfs_blob_wrapper nvm_phy_sku_blob;
915 	struct debugfs_blob_wrapper nvm_reg_blob;
916 
917 	struct iwl_mvm_frame_stats drv_rx_stats;
918 	spinlock_t drv_stats_lock;
919 	u16 dbgfs_rx_phyinfo;
920 #endif
921 
922 	struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
923 
924 	struct list_head time_event_list;
925 	spinlock_t time_event_lock;
926 
927 	/*
928 	 * A bitmap of the key indices currently in use. The firmware
929 	 * can hold at most 16 keys, which this bitmap reflects.
930 	 */
931 	unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
932 	u8 fw_key_deleted[STA_KEY_MAX_NUM];
933 
934 	u8 vif_count;
935 	struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER];
936 
937 	/* -1 for always, 0 for never, >0 for that many times */
938 	s8 fw_restart;
939 	u8 *error_recovery_buf;
940 
941 #ifdef CONFIG_IWLWIFI_LEDS
942 	struct led_classdev led;
943 #endif
944 
945 	struct ieee80211_vif *p2p_device_vif;
946 
947 #ifdef CONFIG_PM
948 	struct wiphy_wowlan_support wowlan;
949 	int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
950 
951 	/* sched scan settings for net detect */
952 	struct ieee80211_scan_ies nd_ies;
953 	struct cfg80211_match_set *nd_match_sets;
954 	int n_nd_match_sets;
955 	struct ieee80211_channel **nd_channels;
956 	int n_nd_channels;
957 	bool net_detect;
958 	u8 offload_tid;
959 #ifdef CONFIG_IWLWIFI_DEBUGFS
960 	bool d3_wake_sysassert;
961 	bool d3_test_active;
962 	u32 d3_test_pme_ptr;
963 	struct ieee80211_vif *keep_vif;
964 	u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */
965 #endif
966 #endif
967 
968 	wait_queue_head_t rx_sync_waitq;
969 
970 	/* BT-Coex */
971 	struct iwl_bt_coex_profile_notif last_bt_notif;
972 	struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
973 
974 	u8 bt_tx_prio;
975 	enum iwl_bt_force_ant_mode bt_force_ant_mode;
976 
977 	/* Aux ROC */
978 	struct list_head aux_roc_te_list;
979 
980 	/* Thermal Throttling and CTkill */
981 	struct iwl_mvm_tt_mgmt thermal_throttle;
982 #ifdef CONFIG_THERMAL
983 	struct iwl_mvm_thermal_device tz_device;
984 	struct iwl_mvm_cooling_device cooling_dev;
985 #endif
986 
987 	s32 temperature;	/* Celsius */
988 	/*
989 	 * Debug option to set the NIC temperature. This option makes the
990 	 * driver think this is the actual NIC temperature, ignoring the
991 	 * real temperature that is received from the fw.
992 	 */
993 	bool temperature_test;  /* Debug test temperature is enabled */
994 
995 	bool fw_static_smps_request;
996 
997 	unsigned long bt_coex_last_tcm_ts;
998 	struct iwl_mvm_tcm tcm;
999 
1000 	u8 uapsd_noagg_bssid_write_idx;
1001 	struct mac_address uapsd_noagg_bssids[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM]
1002 		__aligned(2);
1003 
1004 	struct iwl_time_quota_cmd last_quota_cmd;
1005 
1006 #ifdef CONFIG_NL80211_TESTMODE
1007 	u32 noa_duration;
1008 	struct ieee80211_vif *noa_vif;
1009 #endif
1010 
1011 	/* Tx queues */
1012 	u16 aux_queue;
1013 	u16 snif_queue;
1014 	u16 probe_queue;
1015 	u16 p2p_dev_queue;
1016 
1017 	/* Indicate if device power save is allowed */
1018 	u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */
1019 	/* Indicate if 32Khz external clock is valid */
1020 	u32 ext_clock_valid;
1021 
1022 	struct ieee80211_vif __rcu *csa_vif;
1023 	struct ieee80211_vif __rcu *csa_tx_blocked_vif;
1024 	u8 csa_tx_block_bcn_timeout;
1025 
1026 	/* system time of last beacon (for AP/GO interface) */
1027 	u32 ap_last_beacon_gp2;
1028 
1029 	/* indicates that we transmitted the last beacon */
1030 	bool ibss_manager;
1031 
1032 	bool lar_regdom_set;
1033 	enum iwl_mcc_source mcc_src;
1034 
1035 	/* TDLS channel switch data */
1036 	struct {
1037 		struct delayed_work dwork;
1038 		enum iwl_mvm_tdls_cs_state state;
1039 
1040 		/*
1041 		 * Current cs sta - might be different from periodic cs peer
1042 		 * station. Value is meaningless when the cs-state is idle.
1043 		 */
1044 		u8 cur_sta_id;
1045 
1046 		/* TDLS periodic channel-switch peer */
1047 		struct {
1048 			u8 sta_id;
1049 			u8 op_class;
1050 			bool initiator; /* are we the link initiator */
1051 			struct cfg80211_chan_def chandef;
1052 			struct sk_buff *skb; /* ch sw template */
1053 			u32 ch_sw_tm_ie;
1054 
1055 			/* timestamp of last ch-sw request sent (GP2 time) */
1056 			u32 sent_timestamp;
1057 		} peer;
1058 	} tdls_cs;
1059 
1060 
1061 	u32 ciphers[IWL_MVM_NUM_CIPHERS];
1062 	struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
1063 
1064 	struct cfg80211_ftm_responder_stats ftm_resp_stats;
1065 	struct {
1066 		struct cfg80211_pmsr_request *req;
1067 		struct wireless_dev *req_wdev;
1068 		struct list_head loc_list;
1069 		int responses[IWL_MVM_TOF_MAX_APS];
1070 		struct {
1071 			struct list_head resp;
1072 		} smooth;
1073 		struct list_head pasn_list;
1074 	} ftm_initiator;
1075 
1076 	struct list_head resp_pasn_list;
1077 
1078 	struct {
1079 		u8 d0i3_resp;
1080 		u8 range_resp;
1081 	} cmd_ver;
1082 
1083 	struct ieee80211_vif *nan_vif;
1084 #define IWL_MAX_BAID	32
1085 	struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
1086 
1087 	/*
1088 	 * Drop beacons from other APs in AP mode when there are no connected
1089 	 * clients.
1090 	 */
1091 	bool drop_bcn_ap_mode;
1092 
1093 	struct delayed_work cs_tx_unblock_dwork;
1094 
1095 	/* does a monitor vif exist (only one can exist hence bool) */
1096 	bool monitor_on;
1097 
1098 	/* sniffer data to include in radiotap */
1099 	__le16 cur_aid;
1100 	u8 cur_bssid[ETH_ALEN];
1101 
1102 	unsigned long last_6ghz_passive_scan_jiffies;
1103 	unsigned long last_reset_or_resume_time_jiffies;
1104 };
1105 
1106 /* Extract MVM priv from op_mode and _hw */
1107 #define IWL_OP_MODE_GET_MVM(_iwl_op_mode)		\
1108 	((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific)
1109 
1110 #define IWL_MAC80211_GET_MVM(_hw)			\
1111 	IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
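
/*
 * Illustrative usage (not part of the original header), e.g. inside a
 * mac80211 op callback that receives the hardware pointer:
 *
 *	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 */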
1112 
1113 /**
1114  * enum iwl_mvm_status - MVM status bits
1115  * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted
1116  * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active
1117  * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running
1118  * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested
1119  * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
1120  * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
1121  * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
1122  * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
1123  * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
1124  */
1125 enum iwl_mvm_status {
1126 	IWL_MVM_STATUS_HW_RFKILL,
1127 	IWL_MVM_STATUS_HW_CTKILL,
1128 	IWL_MVM_STATUS_ROC_RUNNING,
1129 	IWL_MVM_STATUS_HW_RESTART_REQUESTED,
1130 	IWL_MVM_STATUS_IN_HW_RESTART,
1131 	IWL_MVM_STATUS_ROC_AUX_RUNNING,
1132 	IWL_MVM_STATUS_FIRMWARE_RUNNING,
1133 	IWL_MVM_STATUS_NEED_FLUSH_P2P,
1134 	IWL_MVM_STATUS_IN_D3,
1135 };
1136 
1137 /* Keep track of completed init configuration */
1138 enum iwl_mvm_init_status {
1139 	IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0),
1140 	IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = BIT(1),
1141 };
1142 
1143 static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
1144 {
1145 	return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) ||
1146 	       test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
1147 }
1148 
1149 static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm)
1150 {
1151 	return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
1152 }
1153 
1154 static inline bool iwl_mvm_firmware_running(struct iwl_mvm *mvm)
1155 {
1156 	return test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
1157 }
1158 
1159 /* Must be called with rcu_read_lock() held; the lock may only be
1160  * released once the returned mvmsta is no longer needed.
1161  */
1162 static inline struct iwl_mvm_sta *
1163 iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id)
1164 {
1165 	struct ieee80211_sta *sta;
1166 
1167 	if (sta_id >= mvm->fw->ucode_capa.num_stations)
1168 		return NULL;
1169 
1170 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1171 
1172 	/* This can happen if the station has been removed right now */
1173 	if (IS_ERR_OR_NULL(sta))
1174 		return NULL;
1175 
1176 	return iwl_mvm_sta_from_mac80211(sta);
1177 }
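
/*
 * A minimal usage sketch (illustrative, not from the original header),
 * following the locking rule documented above:
 *
 *	rcu_read_lock();
 *	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
 *	if (mvmsta)
 *		... use mvmsta ...
 *	rcu_read_unlock();
 */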
1178 
1179 static inline struct iwl_mvm_sta *
1180 iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
1181 {
1182 	struct ieee80211_sta *sta;
1183 
1184 	if (sta_id >= mvm->fw->ucode_capa.num_stations)
1185 		return NULL;
1186 
1187 	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1188 					lockdep_is_held(&mvm->mutex));
1189 
1190 	/* This can happen if the station has been removed right now */
1191 	if (IS_ERR_OR_NULL(sta))
1192 		return NULL;
1193 
1194 	return iwl_mvm_sta_from_mac80211(sta);
1195 }
1196 
1197 static inline struct ieee80211_vif *
1198 iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu)
1199 {
1200 	if (WARN_ON(vif_id >= ARRAY_SIZE(mvm->vif_id_to_mac)))
1201 		return NULL;
1202 
1203 	if (rcu)
1204 		return rcu_dereference(mvm->vif_id_to_mac[vif_id]);
1205 
1206 	return rcu_dereference_protected(mvm->vif_id_to_mac[vif_id],
1207 					 lockdep_is_held(&mvm->mutex));
1208 }
1209 
1210 static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm)
1211 {
1212 	return fw_has_api(&mvm->fw->ucode_capa,
1213 			  IWL_UCODE_TLV_API_ADAPTIVE_DWELL);
1214 }
1215 
1216 static inline bool iwl_mvm_is_adaptive_dwell_v2_supported(struct iwl_mvm *mvm)
1217 {
1218 	return fw_has_api(&mvm->fw->ucode_capa,
1219 			  IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2);
1220 }
1221 
1222 static inline bool iwl_mvm_is_adwell_hb_ap_num_supported(struct iwl_mvm *mvm)
1223 {
1224 	return fw_has_api(&mvm->fw->ucode_capa,
1225 			  IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP);
1226 }
1227 
1228 static inline bool iwl_mvm_is_oce_supported(struct iwl_mvm *mvm)
1229 {
1230 	/* OCE should never be enabled for LMAC scan FWs */
1231 	return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_OCE);
1232 }
1233 
1234 static inline bool iwl_mvm_is_frag_ebs_supported(struct iwl_mvm *mvm)
1235 {
1236 	return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAG_EBS);
1237 }
1238 
1239 static inline bool iwl_mvm_is_short_beacon_notif_supported(struct iwl_mvm *mvm)
1240 {
1241 	return fw_has_api(&mvm->fw->ucode_capa,
1242 			  IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF);
1243 }
1244 
1245 static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue)
1246 {
1247 	return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) &&
1248 	       (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE);
1249 }
1250 
1251 static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue)
1252 {
1253 	return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) &&
1254 	       (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE);
1255 }
1256 
1257 static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
1258 {
1259 	bool nvm_lar = mvm->nvm_data->lar_enabled;
1260 	bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
1261 				   IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
1262 
1263 	/*
1264 	 * Enable LAR only if it is supported by the FW (TLV) &&
1265 	 * enabled in the NVM
1266 	 */
1267 	if (mvm->cfg->nvm_type == IWL_NVM_EXT)
1268 		return nvm_lar && tlv_lar;
1269 	else
1270 		return tlv_lar;
1271 }
1272 
1273 static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
1274 {
1275 	return fw_has_api(&mvm->fw->ucode_capa,
1276 			  IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
1277 	       fw_has_capa(&mvm->fw->ucode_capa,
1278 			   IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC);
1279 }
1280 
1281 static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
1282 {
1283 	return fw_has_capa(&mvm->fw->ucode_capa,
1284 			   IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
1285 		IWL_MVM_BT_COEX_RRC;
1286 }
1287 
1288 static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
1289 {
1290 	return fw_has_capa(&mvm->fw->ucode_capa,
1291 			   IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) &&
1292                !IWL_MVM_HW_CSUM_DISABLE;
1293 }
1294 
1295 static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm)
1296 {
1297 	return fw_has_capa(&mvm->fw->ucode_capa,
1298 			   IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT) &&
1299 		IWL_MVM_BT_COEX_MPLUT;
1300 }
1301 
1302 static inline
1303 bool iwl_mvm_is_p2p_scm_uapsd_supported(struct iwl_mvm *mvm)
1304 {
1305 	return fw_has_capa(&mvm->fw->ucode_capa,
1306 			   IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD) &&
1307 		!(iwlwifi_mod_params.uapsd_disable &
1308 		  IWL_DISABLE_UAPSD_P2P_CLIENT);
1309 }
1310 
1311 static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
1312 {
1313 	return fw_has_capa(&mvm->fw->ucode_capa,
1314 			   IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
1315 }
1316 
1317 static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
1318 {
1319 	/* TODO - replace with TLV once defined */
1320 	return mvm->trans->trans_cfg->use_tfh;
1321 }
1322 
1323 static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm)
1324 {
1325 	/* TODO - better define this */
1326 	return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000;
1327 }
1328 
1329 static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
1330 {
1331 	/*
1332 	 * TODO:
1333 	 * The issue of how to determine CDB APIs and usage is still not fully
1334 	 * defined.
1335 	 * There is a compilation for CDB and non-CDB FW, but there may
1336 	 * be also runtime check.
1337 	 * For now there is a TLV for checking compilation mode, but a
1338 	 * runtime check will also have to be here - once defined.
1339 	 */
1340 	return fw_has_capa(&mvm->fw->ucode_capa,
1341 			   IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
1342 }
1343 
1344 static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm)
1345 {
1346 	/*
1347 	 * TODO: should this be the same as iwl_mvm_is_cdb_supported()?
1348 	 * but then there's a little bit of code in scan that won't make
1349 	 * any sense...
1350 	 */
1351 	return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000;
1352 }
1353 
1354 static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm)
1355 {
1356 	return fw_has_api(&mvm->fw->ucode_capa,
1357 			  IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER);
1358 }
1359 
1360 
1361 static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm)
1362 {
1363 	return fw_has_api(&mvm->fw->ucode_capa,
1364 			  IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG);
1365 }
1366 
1367 static inline bool iwl_mvm_is_band_in_rx_supported(struct iwl_mvm *mvm)
1368 {
1369 	return fw_has_api(&mvm->fw->ucode_capa,
1370 			   IWL_UCODE_TLV_API_BAND_IN_RX_DATA);
1371 }
1372 
1373 static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
1374 {
1375 	return fw_has_api(&mvm->fw->ucode_capa,
1376 			  IWL_UCODE_TLV_API_NEW_RX_STATS);
1377 }
1378 
1379 static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
1380 {
1381 	return fw_has_api(&mvm->fw->ucode_capa,
1382 			  IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY);
1383 }
1384 
1385 static inline bool iwl_mvm_has_tlc_offload(const struct iwl_mvm *mvm)
1386 {
1387 	return fw_has_capa(&mvm->fw->ucode_capa,
1388 			   IWL_UCODE_TLV_CAPA_TLC_OFFLOAD);
1389 }
1390 
1391 static inline struct agg_tx_status *
1392 iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp)
1393 {
1394 	if (iwl_mvm_has_new_tx_api(mvm))
1395 		return &((struct iwl_mvm_tx_resp *)tx_resp)->status;
1396 	else
1397 		return ((struct iwl_mvm_tx_resp_v3 *)tx_resp)->status;
1398 }
1399 
1400 static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
1401 {
1402 	/* these two TLVs are redundant since the FW only takes responsibility
1403 	 * for CT-kill after we have sent at least one temperature thresholds
1404 	 * report command.
1405 	 */
1406 	return fw_has_capa(&mvm->fw->ucode_capa,
1407 			   IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) &&
1408 	       fw_has_capa(&mvm->fw->ucode_capa,
1409 			   IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT);
1410 }
1411 
1412 static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm)
1413 {
1414 	return fw_has_capa(&mvm->fw->ucode_capa,
1415 			   IWL_UCODE_TLV_CAPA_CTDP_SUPPORT);
1416 }
1417 
1418 extern const u8 iwl_mvm_ac_to_tx_fifo[];
1419 extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[];
1420 
1421 static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
1422 					   enum ieee80211_ac_numbers ac)
1423 {
1424 	return iwl_mvm_has_new_tx_api(mvm) ?
1425 		iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac];
1426 }
1427 
1428 struct iwl_rate_info {
1429 	u8 plcp;	/* uCode API:  IWL_RATE_6M_PLCP, etc. */
1430 	u8 plcp_siso;	/* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
1431 	u8 plcp_mimo2;	/* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
1432 	u8 plcp_mimo3;  /* uCode API:  IWL_RATE_MIMO3_6M_PLCP, etc. */
1433 	u8 ieee;	/* MAC header:  IWL_RATE_6M_IEEE, etc. */
1434 };
1435 
1436 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm);
1437 int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
1438 
1439 /******************
1440  * MVM Methods
1441  ******************/
1442 /* uCode */
1443 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm);
1444 
1445 /* Utils */
1446 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
1447 					enum nl80211_band band);
1448 void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
1449 			       enum nl80211_band band,
1450 			       struct ieee80211_tx_rate *r);
1451 u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
1452 u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac);
1453 
1454 static inline void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
1455 {
1456 	iwl_fwrt_dump_error_logs(&mvm->fwrt);
1457 }
1458 
1459 u8 first_antenna(u8 mask);
1460 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
1461 void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2,
1462 			   u64 *boottime, ktime_t *realtime);
1463 u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
1464 
1465 /* Tx / Host Commands */
1466 int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
1467 				  struct iwl_host_cmd *cmd);
1468 int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
1469 				      u32 flags, u16 len, const void *data);
1470 int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
1471 					 struct iwl_host_cmd *cmd,
1472 					 u32 *status);
1473 int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
1474 					     u16 len, const void *data,
1475 					     u32 *status);
1476 int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
1477 		       struct ieee80211_sta *sta);
1478 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
1479 void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
1480 			struct iwl_tx_cmd *tx_cmd,
1481 			struct ieee80211_tx_info *info, u8 sta_id);
1482 void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
1483 			    struct ieee80211_tx_info *info,
1484 			    struct ieee80211_sta *sta, __le16 fc);
1485 void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
1486 unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
1487 				    struct ieee80211_sta *sta,
1488 				    unsigned int tid);
1489 
1490 #ifdef CONFIG_IWLWIFI_DEBUG
1491 const char *iwl_mvm_get_tx_fail_reason(u32 status);
1492 #else
1493 static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
1494 #endif
1495 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk);
1496 int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal);
1497 int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids);
1498 
1499 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
1500 
1501 static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
1502 					   struct iwl_tx_cmd *tx_cmd)
1503 {
1504 	struct ieee80211_key_conf *keyconf = info->control.hw_key;
1505 
1506 	tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
1507 	memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
1508 }
1509 
1510 static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
1511 {
1512 	flush_work(&mvm->async_handlers_wk);
1513 }
1514 
1515 /* Statistics */
1516 void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
1517 				  struct iwl_rx_packet *pkt);
1518 void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
1519 			   struct iwl_rx_cmd_buffer *rxb);
1520 int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
1521 void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
1522 
1523 /* NVM */
1524 int iwl_nvm_init(struct iwl_mvm *mvm);
1525 int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
1526 
1527 static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
1528 {
1529 	return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ?
1530 	       mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant :
1531 	       mvm->fw->valid_tx_ant;
1532 }
1533 
1534 static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
1535 {
1536 	return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ?
1537 	       mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant :
1538 	       mvm->fw->valid_rx_ant;
1539 }
1540 
1541 static inline void iwl_mvm_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant)
1542 {
1543 	*ant = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), *ant);
1544 }
1545 
1546 static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm)
1547 {
1548 	u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN |
1549 			   FW_PHY_CFG_RX_CHAIN);
1550 	u32 valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm);
1551 	u32 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
1552 
1553 	phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS |
1554 		      valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS;
1555 
1556 	return mvm->fw->phy_config & phy_config;
1557 }
1558 
1559 int iwl_mvm_up(struct iwl_mvm *mvm);
1560 int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
1561 
1562 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
1563 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
1564 				    struct iwl_bcast_filter_cmd *cmd);
1565 
1566 /*
1567  * FW notifications / CMD responses handlers
1568  * Convention: iwl_mvm_rx_<NAME OF THE CMD>
1569  */
1570 void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
1571 		   struct napi_struct *napi,
1572 		   struct iwl_rx_cmd_buffer *rxb);
1573 void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1574 void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
1575 			struct iwl_rx_cmd_buffer *rxb);
1576 void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
1577 			struct iwl_rx_cmd_buffer *rxb, int queue);
1578 void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
1579 				struct iwl_rx_cmd_buffer *rxb, int queue);
1580 void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
1581 			      struct iwl_rx_cmd_buffer *rxb, int queue);
1582 void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
1583 				  struct iwl_rx_cmd_buffer *rxb, int queue);
1584 void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
1585 			    struct iwl_rx_cmd_buffer *rxb, int queue);
1586 void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1587 void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
1588 				   struct iwl_rx_cmd_buffer *rxb);
1589 void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags);
1590 void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1591 void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
1592 				   struct iwl_rx_cmd_buffer *rxb);
1593 void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1594 void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
1595 				 struct iwl_rx_cmd_buffer *rxb);
1596 void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
1597 			     struct iwl_rx_cmd_buffer *rxb);
1598 void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
1599 				     struct iwl_rx_cmd_buffer *rxb);
1600 
1601 /* MVM PHY */
1602 int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
1603 			 struct cfg80211_chan_def *chandef,
1604 			 u8 chains_static, u8 chains_dynamic);
1605 int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
1606 			     struct cfg80211_chan_def *chandef,
1607 			     u8 chains_static, u8 chains_dynamic);
1608 void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
1609 			  struct iwl_mvm_phy_ctxt *ctxt);
1610 void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
1611 			    struct iwl_mvm_phy_ctxt *ctxt);
1612 int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm);
1613 u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef);
1614 u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef);
1615 
1616 /* MAC (virtual interface) programming */
1617 int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1618 int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1619 int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1620 			     bool force_assoc_off, const u8 *bssid_override);
1621 int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1622 int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
1623 				    struct ieee80211_vif *vif);
1624 int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
1625 				 struct ieee80211_vif *vif,
1626 				 struct sk_buff *beacon);
1627 int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm,
1628 				     struct sk_buff *beacon,
1629 				     void *data, int len);
1630 u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
1631 				    struct ieee80211_vif *vif);
1632 void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
1633 			      __le32 *tim_index, __le32 *tim_size,
1634 			      u8 *beacon, u32 frame_size);
1635 void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
1636 			     struct iwl_rx_cmd_buffer *rxb);
1637 void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
1638 				     struct iwl_rx_cmd_buffer *rxb);
1639 void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
1640 				    struct iwl_rx_cmd_buffer *rxb);
1641 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
1642 			       struct iwl_rx_cmd_buffer *rxb);
1643 void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1644 void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
1645 				 struct iwl_rx_cmd_buffer *rxb);
1646 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
1647 				    struct ieee80211_vif *vif);
1648 void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
1649 				   struct iwl_rx_cmd_buffer *rxb);
1650 void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
1651 				 struct iwl_rx_cmd_buffer *rxb);
1652 void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
1653 				      struct iwl_rx_cmd_buffer *rxb);
1654 /* Bindings */
1655 int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1656 int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1657 
1658 /* Quota management */
1659 static inline size_t iwl_mvm_quota_cmd_size(struct iwl_mvm *mvm)
1660 {
1661 	return iwl_mvm_has_quota_low_latency(mvm) ?
1662 		sizeof(struct iwl_time_quota_cmd) :
1663 		sizeof(struct iwl_time_quota_cmd_v1);
1664 }
1665 
1666 static inline struct iwl_time_quota_data
1667 *iwl_mvm_quota_cmd_get_quota(struct iwl_mvm *mvm,
1668 			     struct iwl_time_quota_cmd *cmd,
1669 			     int i)
1670 {
1671 	struct iwl_time_quota_data_v1 *quotas;
1672 
1673 	if (iwl_mvm_has_quota_low_latency(mvm))
1674 		return &cmd->quotas[i];
1675 
1676 	quotas = (struct iwl_time_quota_data_v1 *)cmd->quotas;
1677 	return (struct iwl_time_quota_data *)&quotas[i];
1678 }
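
/*
 * A minimal sketch (an assumption, not from the original header) of how the
 * two helpers above fit together when building the quota command;
 * MAX_BINDINGS and TIME_QUOTA_CMD come from the firmware API headers:
 *
 *	struct iwl_time_quota_cmd cmd = {};
 *	struct iwl_time_quota_data *data;
 *	int i;
 *
 *	for (i = 0; i < MAX_BINDINGS; i++) {
 *		data = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
 *		... fill data->id_and_color and data->quota ...
 *	}
 *	iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
 *			     iwl_mvm_quota_cmd_size(mvm), &cmd);
 */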
1679 
1680 int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
1681 			  struct ieee80211_vif *disabled_vif);
1682 
1683 /* Scanning */
1684 int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1685 			   struct cfg80211_scan_request *req,
1686 			   struct ieee80211_scan_ies *ies);
1687 int iwl_mvm_scan_size(struct iwl_mvm *mvm);
1688 int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
1689 int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
1690 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
1691 void iwl_mvm_scan_timeout_wk(struct work_struct *work);
1692 
1693 /* Scheduled scan */
1694 void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
1695 					 struct iwl_rx_cmd_buffer *rxb);
1696 void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
1697 					      struct iwl_rx_cmd_buffer *rxb);
1698 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
1699 			     struct ieee80211_vif *vif,
1700 			     struct cfg80211_sched_scan_request *req,
1701 			     struct ieee80211_scan_ies *ies,
1702 			     int type);
1703 void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
1704 				 struct iwl_rx_cmd_buffer *rxb);
1705 
1706 /* UMAC scan */
1707 int iwl_mvm_config_scan(struct iwl_mvm *mvm);
1708 void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
1709 					 struct iwl_rx_cmd_buffer *rxb);
1710 void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
1711 					      struct iwl_rx_cmd_buffer *rxb);
1712 
1713 /* MVM debugfs */
1714 #ifdef CONFIG_IWLWIFI_DEBUGFS
1715 void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm);
1716 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1717 void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1718 #else
1719 static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
1720 {
1721 }
1722 static inline void
1723 iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1724 {
1725 }
1726 static inline void
1727 iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1728 {
1729 }
1730 #endif /* CONFIG_IWLWIFI_DEBUGFS */
1731 
1732 /* rate scaling */
1733 int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq);
1734 void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
1735 int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
1736 void rs_update_last_rssi(struct iwl_mvm *mvm,
1737 			 struct iwl_mvm_sta *mvmsta,
1738 			 struct ieee80211_rx_status *rx_status);
1739 
1740 /* power management */
1741 int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
1742 int iwl_mvm_power_update_mac(struct iwl_mvm *mvm);
1743 int iwl_mvm_power_update_ps(struct iwl_mvm *mvm);
1744 int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1745 				 char *buf, int bufsz);
1746 
1747 void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1748 void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
1749 					      struct iwl_rx_cmd_buffer *rxb);
1750 
1751 #ifdef CONFIG_IWLWIFI_LEDS
1752 int iwl_mvm_leds_init(struct iwl_mvm *mvm);
1753 void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
1754 void iwl_mvm_leds_sync(struct iwl_mvm *mvm);
1755 #else
1756 static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm)
1757 {
1758 	return 0;
1759 }
1760 static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
1761 {
1762 }
1763 static inline void iwl_mvm_leds_sync(struct iwl_mvm *mvm)
1764 {
1765 }
1766 #endif
1767 
1768 /* D3 (WoWLAN, NetDetect) */
1769 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
1770 int iwl_mvm_resume(struct ieee80211_hw *hw);
1771 void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled);
1772 void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
1773 			    struct ieee80211_vif *vif,
1774 			    struct cfg80211_gtk_rekey_data *data);
1775 void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
1776 			      struct ieee80211_vif *vif,
1777 			      struct inet6_dev *idev);
1778 void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
1779 				     struct ieee80211_vif *vif, int idx);
1780 extern const struct file_operations iwl_dbgfs_d3_test_ops;
1781 #ifdef CONFIG_PM
1782 void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
1783 				 struct ieee80211_vif *vif);
1784 #else
1785 static inline void
1786 iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1787 {
1788 }
1789 #endif
1790 void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
1791 				struct iwl_wowlan_config_cmd *cmd);
1792 int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
1793 			       struct ieee80211_vif *vif,
1794 			       bool disable_offloading,
1795 			       bool offload_ns,
1796 			       u32 cmd_flags);
1797 
1798 /* BT Coex */
1799 int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm);
1800 void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
1801 			      struct iwl_rx_cmd_buffer *rxb);
1802 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1803 			   enum ieee80211_rssi_event_data);
1804 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
1805 u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
1806 				struct ieee80211_sta *sta);
1807 bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
1808 				     struct ieee80211_sta *sta);
1809 bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
1810 bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
1811 bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
1812 				    enum nl80211_band band);
1813 u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants);
1814 u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
1815 			   struct ieee80211_tx_info *info, u8 ac);
1816 
1817 /* beacon filtering */
1818 #ifdef CONFIG_IWLWIFI_DEBUGFS
1819 void
1820 iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
1821 					 struct iwl_beacon_filter_cmd *cmd);
1822 #else
1823 static inline void
1824 iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
1825 					 struct iwl_beacon_filter_cmd *cmd)
1826 {}
1827 #endif
1828 int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
1829 				 struct ieee80211_vif *vif,
1830 				 u32 flags);
1831 int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
1832 				  struct ieee80211_vif *vif,
1833 				  u32 flags);
1834 /* SMPS */
1835 void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1836 				enum iwl_mvm_smps_type_request req_type,
1837 				enum ieee80211_smps_mode smps_request);
1838 bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
1839 				  struct iwl_mvm_phy_ctxt *ctxt);
1840 void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif);
1841 
1842 /* Low latency */
1843 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1844 			      bool low_latency,
1845 			      enum iwl_mvm_low_latency_cause cause);
1846 /* get SystemLowLatencyMode - only needed for beacon threshold? */
1847 bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
1848 bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band);
1849 void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, bool low_latency,
1850 				  u16 mac_id);
1851 
1852 /* get VMACLowLatencyMode */
1853 static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
1854 {
1855 	/*
1856 	 * should this consider associated/active/... state?
1857 	 *
1858 	 * Normally low-latency should only be active on interfaces
1859 	 * that are active, but at least with debugfs it can also be
1860 	 * enabled on interfaces that aren't active. However, when
1861 	 * interfaces aren't active they aren't added to the
1862 	 * binding, so this has no real impact. For now, just return
1863 	 * the current desired low-latency state.
1864 	 */
1865 	return mvmvif->low_latency_actual;
1866 }
1867 
1868 static inline
1869 void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
1870 				 enum iwl_mvm_low_latency_cause cause)
1871 {
1872 	u8 new_state;
1873 
1874 	if (set)
1875 		mvmvif->low_latency |= cause;
1876 	else
1877 		mvmvif->low_latency &= ~cause;
1878 
1879 	/*
1880 	 * If LOW_LATENCY_DEBUGFS_FORCE_ENABLE is set, no changes to the
1881 	 * actual mode are allowed.
1882 	 */
1883 	if (mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE_ENABLE &&
1884 	    cause != LOW_LATENCY_DEBUGFS_FORCE_ENABLE)
1885 		return;
1886 
1887 	if (cause == LOW_LATENCY_DEBUGFS_FORCE_ENABLE && set)
1888 		/*
1889 		 * We are entering the force state
1890 		 */
1891 		new_state = !!(mvmvif->low_latency &
1892 			       LOW_LATENCY_DEBUGFS_FORCE);
1893 	else
1894 		/*
1895 		 * Check if any other cause has set low latency
1896 		 */
1897 		new_state = !!(mvmvif->low_latency &
1898 				  ~(LOW_LATENCY_DEBUGFS_FORCE_ENABLE |
1899 				    LOW_LATENCY_DEBUGFS_FORCE));
1900 
1901 	mvmvif->low_latency_actual = new_state;
1902 }
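
/*
 * Worked example (illustrative only) of the precedence handled by
 * iwl_mvm_vif_set_low_latency() above, assuming LOW_LATENCY_TRAFFIC is
 * one of the regular causes in enum iwl_mvm_low_latency_cause:
 *
 *	iwl_mvm_vif_set_low_latency(mvmvif, true, LOW_LATENCY_TRAFFIC);
 *		-> low_latency_actual = true (a regular cause is set)
 *	iwl_mvm_vif_set_low_latency(mvmvif, true,
 *				    LOW_LATENCY_DEBUGFS_FORCE_ENABLE);
 *		-> low_latency_actual now follows LOW_LATENCY_DEBUGFS_FORCE only
 *	iwl_mvm_vif_set_low_latency(mvmvif, false, LOW_LATENCY_TRAFFIC);
 *		-> ignored; the force-enable bit blocks regular causes
 */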
1903 
1904 /* Return a bitmask with all the hw supported queues, except for the
1905  * command queue, which can't be flushed.
1906  */
1907 static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
1908 {
1909 	return ((BIT(mvm->trans->trans_cfg->base_params->num_of_queues) - 1) &
1910 		~BIT(IWL_MVM_DQA_CMD_QUEUE));
1911 }
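
/*
 * Worked example (illustrative): with num_of_queues == 31 and the command
 * queue on queue 0, the mask above is (BIT(31) - 1) & ~BIT(0) ==
 * 0x7ffffffe, i.e. every HW queue except the command queue.  The numbers
 * are assumptions made only for the sake of the example.
 */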
1912 
1913 void iwl_mvm_stop_device(struct iwl_mvm *mvm);
1914 
1915 /* Re-configure the SCD for a queue that has already been configured */
1916 int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
1917 			 int tid, int frame_limit, u16 ssn);
1918 
1919 /* Thermal management and CT-kill */
1920 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
1921 void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
1922 			struct iwl_rx_cmd_buffer *rxb);
1923 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
1924 void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff);
1925 void iwl_mvm_thermal_exit(struct iwl_mvm *mvm);
1926 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
1927 int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp);
1928 void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1929 void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm);
1930 int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm);
1931 int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget);
1932 
1933 /* Location Aware Regulatory */
1934 struct iwl_mcc_update_resp *
1935 iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
1936 		   enum iwl_mcc_source src_id);
1937 int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
1938 void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
1939 				struct iwl_rx_cmd_buffer *rxb);
1940 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
1941 						  const char *alpha2,
1942 						  enum iwl_mcc_source src_id,
1943 						  bool *changed);
1944 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
1945 							  bool *changed);
1946 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm);
1947 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm);
1948 
1949 /* smart fifo */
1950 int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1951 		      bool added_vif);
1952 
1953 /* FTM responder */
1954 int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1955 void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm,
1956 				   struct ieee80211_vif *vif);
1957 void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm,
1958 				 struct iwl_rx_cmd_buffer *rxb);
1959 int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm,
1960 				     struct ieee80211_vif *vif, u8 *addr);
1961 int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
1962 				      struct ieee80211_vif *vif,
1963 				      u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
1964 				      u8 *hltk, u32 hltk_len);
1965 void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm,
1966 				 struct ieee80211_vif *vif);
1967 
1968 /* FTM initiator */
1969 void iwl_mvm_ftm_restart(struct iwl_mvm *mvm);
1970 void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm,
1971 			    struct iwl_rx_cmd_buffer *rxb);
1972 void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm,
1973 			  struct iwl_rx_cmd_buffer *rxb);
1974 int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1975 		      struct cfg80211_pmsr_request *request);
1976 void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req);
1977 void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm);
1978 void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm);
1979 int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1980 			     u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
1981 			     u8 *hltk, u32 hltk_len);
1982 void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr);
1983 
1984 /* TDLS */
1985 
1986 /*
1987  * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present.
1988  * This TID is marked as used toward the AP and all connected TDLS peers.
1989  */
1990 #define IWL_MVM_TDLS_FW_TID 4
1991 
1992 int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1993 void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm);
1994 void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1995 			       bool sta_added);
1996 void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
1997 					   struct ieee80211_vif *vif);
1998 int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
1999 				struct ieee80211_vif *vif,
2000 				struct ieee80211_sta *sta, u8 oper_class,
2001 				struct cfg80211_chan_def *chandef,
2002 				struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie);
2003 void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
2004 				      struct ieee80211_vif *vif,
2005 				      struct ieee80211_tdls_ch_sw_params *params);
2006 void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
2007 					struct ieee80211_vif *vif,
2008 					struct ieee80211_sta *sta);
2009 void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
2010 void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
2011 
2012 void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
2013 				     enum iwl_mvm_rxq_notif_type type,
2014 				     bool sync,
2015 				     const void *data, u32 size);
2016 void iwl_mvm_reorder_timer_expired(struct timer_list *t);
2017 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
2018 struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid);
2019 bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
2020 
2021 #define MVM_TCM_PERIOD_MSEC 500
2022 #define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
2023 #define MVM_LL_PERIOD (10 * HZ)
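/*
 * For reference: with MVM_TCM_PERIOD_MSEC == 500, MVM_TCM_PERIOD works out
 * to HZ / 2 jiffies (e.g. 125 jiffies when HZ == 250), while MVM_LL_PERIOD
 * is a fixed 10 seconds independent of the TCM period.
 */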
2024 void iwl_mvm_tcm_work(struct work_struct *work);
2025 void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm);
2026 void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel);
2027 void iwl_mvm_resume_tcm(struct iwl_mvm *mvm);
2028 void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
2029 void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
2030 u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed);
2031 
2032 void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
2033 unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
2034 				    struct ieee80211_vif *vif,
2035 				    bool tdls, bool cmd_q);
2036 void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2037 			     const char *errmsg);
2038 void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
2039 					  struct ieee80211_vif *vif,
2040 					  const struct ieee80211_sta *sta,
2041 					  u16 tid);
2042 
2043 int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
2044 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
2045 int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm);
2046 void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm);
2047 #ifdef CONFIG_IWLWIFI_DEBUGFS
2048 void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
2049 			     struct ieee80211_vif *vif,
2050 			     struct ieee80211_sta *sta,
2051 			     struct dentry *dir);
2052 #endif
2053 
2054 int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm,
2055 			    struct iwl_rfi_lut_entry *rfi_table);
2056 struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm);
2057 
2058 static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
2059 {
2060 	switch (band) {
2061 	case NL80211_BAND_2GHZ:
2062 		return PHY_BAND_24;
2063 	case NL80211_BAND_5GHZ:
2064 		return PHY_BAND_5;
2065 	case NL80211_BAND_6GHZ:
2066 		return PHY_BAND_6;
2067 	default:
2068 		WARN_ONCE(1, "Unsupported band (%u)\n", band);
2069 		return PHY_BAND_5;
2070 	}
2071 }
2072 
2073 /* Channel info utils */
2074 static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm)
2075 {
2076 	return fw_has_capa(&mvm->fw->ucode_capa,
2077 			   IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS);
2078 }
2079 
2080 static inline void *iwl_mvm_chan_info_cmd_tail(struct iwl_mvm *mvm,
2081 					       struct iwl_fw_channel_info *ci)
2082 {
2083 	return (u8 *)ci + (iwl_mvm_has_ultra_hb_channel(mvm) ?
2084 			   sizeof(struct iwl_fw_channel_info) :
2085 			   sizeof(struct iwl_fw_channel_info_v1));
2086 }
2087 
2088 static inline size_t iwl_mvm_chan_info_padding(struct iwl_mvm *mvm)
2089 {
2090 	return iwl_mvm_has_ultra_hb_channel(mvm) ? 0 :
2091 		sizeof(struct iwl_fw_channel_info) -
2092 		sizeof(struct iwl_fw_channel_info_v1);
2093 }
2094 
2095 static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm,
2096 					 struct iwl_fw_channel_info *ci,
2097 					 u32 chan, u8 band, u8 width,
2098 					 u8 ctrl_pos)
2099 {
2100 	if (iwl_mvm_has_ultra_hb_channel(mvm)) {
2101 		ci->channel = cpu_to_le32(chan);
2102 		ci->band = band;
2103 		ci->width = width;
2104 		ci->ctrl_pos = ctrl_pos;
2105 	} else {
2106 		struct iwl_fw_channel_info_v1 *ci_v1 =
2107 					(struct iwl_fw_channel_info_v1 *)ci;
2108 
2109 		ci_v1->channel = chan;
2110 		ci_v1->band = band;
2111 		ci_v1->width = width;
2112 		ci_v1->ctrl_pos = ctrl_pos;
2113 	}
2114 }
2115 
2116 static inline void
2117 iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
2118 			      struct iwl_fw_channel_info *ci,
2119 			      struct cfg80211_chan_def *chandef)
2120 {
2121 	enum nl80211_band band = chandef->chan->band;
2122 
2123 	iwl_mvm_set_chan_info(mvm, ci, chandef->chan->hw_value,
2124 			      iwl_mvm_phy_band_from_nl80211(band),
2125 			      iwl_mvm_get_channel_width(chandef),
2126 			      iwl_mvm_get_ctrl_pos(chandef));
2127 }
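
/*
 * Illustrative sketch (not a real driver command): the usual pattern for a
 * host command that embeds a struct iwl_fw_channel_info.  The command is
 * declared with the full (v2) channel info and, when the firmware lacks
 * ultra-high-band support, the final iwl_mvm_chan_info_padding() bytes are
 * simply not sent; any fields placed after the channel info are written
 * through iwl_mvm_chan_info_cmd_tail() rather than at fixed offsets.
 * 'struct example_cmd' and 'chandef' are hypothetical names:
 *
 *	struct example_cmd {
 *		__le32 id_and_color;
 *		struct iwl_fw_channel_info ci;
 *	} cmd = {};
 *	u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
 *
 *	iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef);
 */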
2128 
2129 static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw)
2130 {
2131 	u8 ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
2132 				       SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
2133 				       IWL_FW_CMD_VER_UNKNOWN);
2134 	return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ?
2135 		IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2;
2136 }
2137 
2138 static inline
2139 enum iwl_location_cipher iwl_mvm_cipher_to_location_cipher(u32 cipher)
2140 {
2141 	switch (cipher) {
2142 	case WLAN_CIPHER_SUITE_CCMP:
2143 		return IWL_LOCATION_CIPHER_CCMP_128;
2144 	case WLAN_CIPHER_SUITE_GCMP:
2145 		return IWL_LOCATION_CIPHER_GCMP_128;
2146 	case WLAN_CIPHER_SUITE_GCMP_256:
2147 		return IWL_LOCATION_CIPHER_GCMP_256;
2148 	default:
2149 		return IWL_LOCATION_CIPHER_INVALID;
2150 	}
2151 }
2152 #endif /* __IWL_MVM_H__ */
2153