1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
4 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
5 * Copyright (C) 2015-2017 Intel Deutschland GmbH
6 */
7 #include <net/mac80211.h>
8
9 #include "iwl-debug.h"
10 #include "iwl-io.h"
11 #include "iwl-prph.h"
12 #include "iwl-csr.h"
13 #include "mvm.h"
14 #include "fw/api/rs.h"
15 #include "fw/img.h"
16
17 /*
18 * Will return 0 even if the cmd failed while RFKILL is asserted, unless
19 * CMD_WANT_SKB is set in cmd->flags.
20 */
21 int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
22 {
23 int ret;
24
25 #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
26 if (WARN_ON(mvm->d3_test_active))
27 return -EIO;
28 #endif
29
30 /*
31 * Synchronous commands from this op-mode must hold
32 * the mutex; this ensures we don't try to send two
33 * (or more) synchronous commands at a time.
34 */
35 if (!(cmd->flags & CMD_ASYNC))
36 lockdep_assert_held(&mvm->mutex);
37
38 ret = iwl_trans_send_cmd(mvm->trans, cmd);
39
40 /*
41 * If the caller wants the SKB, then don't hide any problems, the
42 * caller might access the response buffer which will be NULL if
43 * the command failed.
44 */
45 if (cmd->flags & CMD_WANT_SKB)
46 return ret;
47
48 /*
49 * Silently ignore failures if RFKILL is asserted or
50 * we are in the suspend/resume process
51 */
52 if (!ret || ret == -ERFKILL || ret == -EHOSTDOWN)
53 return 0;
54 return ret;
55 }
56
57 int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
58 u32 flags, u16 len, const void *data)
59 {
60 struct iwl_host_cmd cmd = {
61 .id = id,
62 .len = { len, },
63 .data = { data, },
64 .flags = flags,
65 };
66
67 return iwl_mvm_send_cmd(mvm, &cmd);
68 }
69
70 /*
71 * We assume that the caller set the status to the success value
72 */
73 int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
74 u32 *status)
75 {
76 struct iwl_rx_packet *pkt;
77 struct iwl_cmd_response *resp;
78 int ret, resp_len;
79
80 lockdep_assert_held(&mvm->mutex);
81
82 #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
83 if (WARN_ON(mvm->d3_test_active))
84 return -EIO;
85 #endif
86
87 /*
88 * Only synchronous commands can wait for status; we set
89 * CMD_WANT_SKB ourselves, so the caller must not.
90 */
91 if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
92 "cmd flags %x", cmd->flags))
93 return -EINVAL;
94
95 cmd->flags |= CMD_WANT_SKB;
96
97 ret = iwl_trans_send_cmd(mvm->trans, cmd);
98 if (ret == -ERFKILL) {
99 /*
100 * The command failed because of RFKILL; don't update
101 * the status, leave it as success and return 0.
102 */
103 return 0;
104 } else if (ret) {
105 return ret;
106 }
107
108 pkt = cmd->resp_pkt;
109
110 resp_len = iwl_rx_packet_payload_len(pkt);
111 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
112 ret = -EIO;
113 goto out_free_resp;
114 }
115
116 resp = (void *)pkt->data;
117 *status = le32_to_cpu(resp->status);
118 out_free_resp:
119 iwl_free_resp(cmd);
120 return ret;
121 }
122
123 /*
124 * We assume that the caller set the status to the success value
125 */
126 int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
127 const void *data, u32 *status)
128 {
129 struct iwl_host_cmd cmd = {
130 .id = id,
131 .len = { len, },
132 .data = { data, },
133 };
134
135 return iwl_mvm_send_cmd_status(mvm, &cmd, status);
136 }
137
138 int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
139 enum nl80211_band band)
140 {
141 int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
142 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
143 bool is_LB = band == NL80211_BAND_2GHZ;
144
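/*
 * On 2.4 GHz the mac80211 legacy rate table starts with the CCK
 * rates, so OFDM entries are shifted up by IWL_FIRST_OFDM_RATE;
 * on the higher bands the table starts directly with OFDM.
 */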
145 if (format == RATE_MCS_LEGACY_OFDM_MSK)
146 return is_LB ? rate + IWL_FIRST_OFDM_RATE :
147 rate;
148
149 /* CCK is not allowed in HB */
150 return is_LB ? rate : -1;
151 }
152
153 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
154 enum nl80211_band band)
155 {
156 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
157 int idx;
158 int band_offset = 0;
159
160 /* Legacy rate format, search for match in table */
161 if (band != NL80211_BAND_2GHZ)
162 band_offset = IWL_FIRST_OFDM_RATE;
163 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
164 if (iwl_fw_rate_idx_to_plcp(idx) == rate)
165 return idx - band_offset;
166
167 return -1;
168 }
169
170 u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
171 {
172 if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
173 /* In the new rate format, legacy rates are indexed:
174 * 0 - 3 for CCK and 0 - 7 for OFDM.
175 */
176 return (rate_idx >= IWL_FIRST_OFDM_RATE ?
177 rate_idx - IWL_FIRST_OFDM_RATE :
178 rate_idx);
179
180 return iwl_fw_rate_idx_to_plcp(rate_idx);
181 }
182
183 u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
184 {
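/*
 * Indexed by mac80211's enum ieee80211_ac_numbers (VO, VI, BE, BK in
 * that order); the values are the firmware's AC_* identifiers.
 */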
185 static const u8 mac80211_ac_to_ucode_ac[] = {
186 AC_VO,
187 AC_VI,
188 AC_BE,
189 AC_BK
190 };
191
192 return mac80211_ac_to_ucode_ac[ac];
193 }
194
195 void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
196 {
197 struct iwl_rx_packet *pkt = rxb_addr(rxb);
198 struct iwl_error_resp *err_resp = (void *)pkt->data;
199
200 IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
201 le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
202 IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
203 le16_to_cpu(err_resp->bad_cmd_seq_num),
204 le32_to_cpu(err_resp->error_service));
205 IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
206 le64_to_cpu(err_resp->timestamp));
207 }
208
209 /*
210 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
211 * The parameter should also be a combination of ANT_[ABC].
212 */
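/*
 * For example, first_antenna(ANT_B | ANT_C) returns ANT_B, the lowest
 * bit set in the mask.
 */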
213 u8 first_antenna(u8 mask)
214 {
215 BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
216 if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
217 return BIT(0);
218 return BIT(ffs(mask) - 1);
219 }
220
221 #define MAX_ANT_NUM 2
222 /*
223 * Toggles between TX antennas to send the probe request on.
224 * Receives the bitmask of valid TX antennas and the *index* used
225 * for the last TX, and returns the next valid *index* to use.
226 * To set it in the tx_cmd, the caller must use BIT(idx).
227 */
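/*
 * For example, with valid = ANT_A | ANT_B and last_idx = 0 this returns 1,
 * and BIT(1) (i.e. ANT_B) is what goes into the TX command.
 */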
228 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
229 {
230 u8 ind = last_idx;
231 int i;
232
233 for (i = 0; i < MAX_ANT_NUM; i++) {
234 ind = (ind + 1) % MAX_ANT_NUM;
235 if (valid & BIT(ind))
236 return ind;
237 }
238
239 WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
240 return last_idx;
241 }
242
243 /**
244 * iwl_mvm_send_lq_cmd() - Send link quality command
245 * @mvm: Driver data.
246 * @lq: Link quality command to send.
247 *
248 * The link quality command is sent as the last step of station creation.
249 * In the special case in which init is set, we call a callback to
250 * clear the state indicating that station creation is still in
251 * progress.
252 *
253 * Returns: 0 on success, or a negative error code on failure
254 */
255 int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
256 {
257 struct iwl_host_cmd cmd = {
258 .id = LQ_CMD,
259 .len = { sizeof(struct iwl_lq_cmd), },
260 .flags = CMD_ASYNC,
261 .data = { lq, },
262 };
263
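/*
 * LQ_CMD is only used when rate scaling runs in the driver; with
 * TLC offload the firmware handles it, so sending it is a bug.
 */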
264 if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
265 iwl_mvm_has_tlc_offload(mvm)))
266 return -EINVAL;
267
268 return iwl_mvm_send_cmd(mvm, &cmd);
269 }
270
271 /**
272 * iwl_mvm_update_smps - Get a request to change the SMPS mode
273 * @mvm: Driver data.
274 * @vif: Pointer to the ieee80211_vif structure
275 * @req_type: The part of the driver that requested the change.
276 * @smps_request: The request to change the SMPS mode.
277 * @link_id: for MLO link_id, otherwise 0 (deflink)
278 *
279 * Get a request to change the SMPS mode,
280 * and change it according to all other requests in the driver.
281 */
282 void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
283 enum iwl_mvm_smps_type_request req_type,
284 enum ieee80211_smps_mode smps_request,
285 unsigned int link_id)
286 {
287 struct iwl_mvm_vif *mvmvif;
288 enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
289 int i;
290
291 lockdep_assert_held(&mvm->mutex);
292
293 /* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
294 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
295 return;
296
297 if (vif->type != NL80211_IFTYPE_STATION)
298 return;
299
300 /* SMPS is handled by firmware */
301 if (iwl_mvm_has_rlc_offload(mvm))
302 return;
303
304 mvmvif = iwl_mvm_vif_from_mac80211(vif);
305
306 if (WARN_ON_ONCE(!mvmvif->link[link_id]))
307 return;
308
309 mvmvif->link[link_id]->smps_requests[req_type] = smps_request;
310 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
311 if (mvmvif->link[link_id]->smps_requests[i] ==
312 IEEE80211_SMPS_STATIC) {
313 smps_mode = IEEE80211_SMPS_STATIC;
314 break;
315 }
316 if (mvmvif->link[link_id]->smps_requests[i] ==
317 IEEE80211_SMPS_DYNAMIC)
318 smps_mode = IEEE80211_SMPS_DYNAMIC;
319 }
320
321 /* SMPS is disabled in eSR */
322 if (mvmvif->esr_active)
323 smps_mode = IEEE80211_SMPS_OFF;
324
325 ieee80211_request_smps(vif, link_id, smps_mode);
326 }
327
328 void iwl_mvm_update_smps_on_active_links(struct iwl_mvm *mvm,
329 struct ieee80211_vif *vif,
330 enum iwl_mvm_smps_type_request req_type,
331 enum ieee80211_smps_mode smps_request)
332 {
333 struct ieee80211_bss_conf *link_conf;
334 unsigned int link_id;
335
336 rcu_read_lock();
337 for_each_vif_active_link(vif, link_conf, link_id)
338 iwl_mvm_update_smps(mvm, vif, req_type, smps_request,
339 link_id);
340 rcu_read_unlock();
341 }
342
343 static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
344 struct iwl_rx_packet *pkt, void *data)
345 {
346 WARN_ON(pkt->hdr.cmd != STATISTICS_NOTIFICATION);
347
348 return true;
349 }
350
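/* period, in seconds, passed to the FW in config_time_sec below */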
351 #define PERIODIC_STAT_RATE 5
352
353 int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm, bool enable)
354 {
355 u32 flags = enable ? 0 : IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK;
356 u32 type = enable ? (IWL_STATS_NTFY_TYPE_ID_OPER |
357 IWL_STATS_NTFY_TYPE_ID_OPER_PART1) : 0;
358 struct iwl_system_statistics_cmd system_cmd = {
359 .cfg_mask = cpu_to_le32(flags),
360 .config_time_sec = cpu_to_le32(enable ?
361 PERIODIC_STAT_RATE : 0),
362 .type_id_mask = cpu_to_le32(type),
363 };
364
365 return iwl_mvm_send_cmd_pdu(mvm,
366 WIDE_ID(SYSTEM_GROUP,
367 SYSTEM_STATISTICS_CMD),
368 0, sizeof(system_cmd), &system_cmd);
369 }
370
371 static int iwl_mvm_request_system_statistics(struct iwl_mvm *mvm, bool clear,
372 u8 cmd_ver)
373 {
374 struct iwl_system_statistics_cmd system_cmd = {
375 .cfg_mask = clear ?
376 cpu_to_le32(IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK) :
377 cpu_to_le32(IWL_STATS_CFG_FLG_RESET_MSK |
378 IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK),
379 .type_id_mask = cpu_to_le32(IWL_STATS_NTFY_TYPE_ID_OPER |
380 IWL_STATS_NTFY_TYPE_ID_OPER_PART1),
381 };
382 struct iwl_host_cmd cmd = {
383 .id = WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_CMD),
384 .len[0] = sizeof(system_cmd),
385 .data[0] = &system_cmd,
386 };
387 struct iwl_notification_wait stats_wait;
388 static const u16 stats_complete[] = {
389 WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF),
390 };
391 int ret;
392
393 if (cmd_ver != 1) {
394 IWL_FW_CHECK_FAILED(mvm,
395 "Invalid system statistics command version:%d\n",
396 cmd_ver);
397 return -EOPNOTSUPP;
398 }
399
400 iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
401 stats_complete, ARRAY_SIZE(stats_complete),
402 NULL, NULL);
403
404 mvm->statistics_clear = clear;
405 ret = iwl_mvm_send_cmd(mvm, &cmd);
406 if (ret) {
407 iwl_remove_notification(&mvm->notif_wait, &stats_wait);
408 return ret;
409 }
410
411 /* 500ms should be enough for the FW to collect data from all LMACs
412 * and send the OPERATIONAL, PART1 and END statistics notifications
413 * to the host
414 */
415 ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 2);
416 if (ret)
417 return ret;
418
419 if (clear)
420 iwl_mvm_accu_radio_stats(mvm);
421
422 return ret;
423 }
424
425 int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
426 {
427 struct iwl_statistics_cmd scmd = {
428 .flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
429 };
430
431 struct iwl_host_cmd cmd = {
432 .id = STATISTICS_CMD,
433 .len[0] = sizeof(scmd),
434 .data[0] = &scmd,
435 };
436 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
437 WIDE_ID(SYSTEM_GROUP,
438 SYSTEM_STATISTICS_CMD),
439 IWL_FW_CMD_VER_UNKNOWN);
440 int ret;
441
442 /*
443 * Don't request statistics during restart; they won't have any useful
444 * information right after restart, nor is clearing needed
445 */
446 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
447 return 0;
448
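/* FW that advertises SYSTEM_STATISTICS_CMD uses the newer system flow */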
449 if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
450 return iwl_mvm_request_system_statistics(mvm, clear, cmd_ver);
451
452 /* From version 15 of STATISTICS_NOTIFICATION on, the reply to
453 * STATISTICS_CMD is empty and the statistics are delivered in a
454 * separate STATISTICS_NOTIFICATION instead
455 */
456 if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
457 STATISTICS_NOTIFICATION, 0) < 15) {
458 cmd.flags = CMD_WANT_SKB;
459
460 ret = iwl_mvm_send_cmd(mvm, &cmd);
461 if (ret)
462 return ret;
463
464 iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
465 iwl_free_resp(&cmd);
466 } else {
467 struct iwl_notification_wait stats_wait;
468 static const u16 stats_complete[] = {
469 STATISTICS_NOTIFICATION,
470 };
471
472 iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
473 stats_complete, ARRAY_SIZE(stats_complete),
474 iwl_wait_stats_complete, NULL);
475
476 ret = iwl_mvm_send_cmd(mvm, &cmd);
477 if (ret) {
478 iwl_remove_notification(&mvm->notif_wait, &stats_wait);
479 return ret;
480 }
481
482 /* 200ms should be enough for FW to collect data from all
483 * LMACs and send STATISTICS_NOTIFICATION to host
484 */
485 ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 5);
486 if (ret)
487 return ret;
488 }
489
490 if (clear)
491 iwl_mvm_accu_radio_stats(mvm);
492
493 return 0;
494 }
495
496 void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
497 {
498 mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
499 mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
500 mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
501 mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
502 }
503
504 struct iwl_mvm_diversity_iter_data {
505 struct iwl_mvm_phy_ctxt *ctxt;
506 bool result;
507 };
508
509 static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
510 struct ieee80211_vif *vif)
511 {
512 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
513 struct iwl_mvm_diversity_iter_data *data = _data;
514 int i, link_id;
515
516 for_each_mvm_vif_valid_link(mvmvif, link_id) {
517 struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
518
519 if (link_info->phy_ctxt != data->ctxt)
520 continue;
521
522 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
523 if (link_info->smps_requests[i] == IEEE80211_SMPS_STATIC ||
524 link_info->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
525 data->result = false;
526 break;
527 }
528 }
529 }
530 }
531
532 bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
533 struct iwl_mvm_phy_ctxt *ctxt)
534 {
535 struct iwl_mvm_diversity_iter_data data = {
536 .ctxt = ctxt,
537 .result = true,
538 };
539
540 lockdep_assert_held(&mvm->mutex);
541
542 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
543 return false;
544
545 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
546 return false;
547
548 if (mvm->cfg->rx_with_siso_diversity)
549 return false;
550
551 ieee80211_iterate_active_interfaces_atomic(
552 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
553 iwl_mvm_diversity_iter, &data);
554
555 return data.result;
556 }
557
558 void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
559 bool low_latency, u16 mac_id)
560 {
561 struct iwl_mac_low_latency_cmd cmd = {
562 .mac_id = cpu_to_le32(mac_id)
563 };
564
565 if (!fw_has_capa(&mvm->fw->ucode_capa,
566 IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
567 return;
568
569 if (low_latency) {
570 /* currently we don't care about the direction */
571 cmd.low_latency_rx = 1;
572 cmd.low_latency_tx = 1;
573 }
574
575 if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
576 0, sizeof(cmd), &cmd))
577 IWL_ERR(mvm, "Failed to send low latency command\n");
578 }
579
580 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
581 bool low_latency,
582 enum iwl_mvm_low_latency_cause cause)
583 {
584 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
585 int res;
586 bool prev;
587
588 lockdep_assert_held(&mvm->mutex);
589
590 prev = iwl_mvm_vif_low_latency(mvmvif);
591 iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause);
592
593 low_latency = iwl_mvm_vif_low_latency(mvmvif);
594
595 if (low_latency == prev)
596 return 0;
597
598 iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);
599
600 res = iwl_mvm_update_quotas(mvm, false, NULL);
601 if (res)
602 return res;
603
604 iwl_mvm_bt_coex_vif_change(mvm);
605
606 return iwl_mvm_power_update_mac(mvm);
607 }
608
609 struct iwl_mvm_low_latency_iter {
610 bool result;
611 bool result_per_band[NUM_NL80211_BANDS];
612 };
613
614 static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
615 {
616 struct iwl_mvm_low_latency_iter *result = _data;
617 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
618 enum nl80211_band band;
619
620 if (iwl_mvm_vif_low_latency(mvmvif)) {
621 result->result = true;
622
623 if (!mvmvif->deflink.phy_ctxt)
624 return;
625
626 band = mvmvif->deflink.phy_ctxt->channel->band;
627 result->result_per_band[band] = true;
628 }
629 }
630
631 bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
632 {
633 struct iwl_mvm_low_latency_iter data = {};
634
635 ieee80211_iterate_active_interfaces_atomic(
636 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
637 iwl_mvm_ll_iter, &data);
638
639 return data.result;
640 }
641
642 bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
643 {
644 struct iwl_mvm_low_latency_iter data = {};
645
646 ieee80211_iterate_active_interfaces_atomic(
647 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
648 iwl_mvm_ll_iter, &data);
649
650 return data.result_per_band[band];
651 }
652
653 struct iwl_bss_iter_data {
654 struct ieee80211_vif *vif;
655 bool error;
656 };
657
658 static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
659 struct ieee80211_vif *vif)
660 {
661 struct iwl_bss_iter_data *data = _data;
662
663 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
664 return;
665
666 if (data->vif) {
667 data->error = true;
668 return;
669 }
670
671 data->vif = vif;
672 }
673
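/*
 * Returns the single station (BSS) interface if one is active, NULL if
 * there is none, or ERR_PTR(-EINVAL) if more than one is active.
 */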
674 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
675 {
676 struct iwl_bss_iter_data bss_iter_data = {};
677
678 ieee80211_iterate_active_interfaces_atomic(
679 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
680 iwl_mvm_bss_iface_iterator, &bss_iter_data);
681
682 if (bss_iter_data.error) {
683 IWL_ERR(mvm, "More than one managed interface active!\n");
684 return ERR_PTR(-EINVAL);
685 }
686
687 return bss_iter_data.vif;
688 }
689
690 struct iwl_bss_find_iter_data {
691 struct ieee80211_vif *vif;
692 u32 macid;
693 };
694
695 static void iwl_mvm_bss_find_iface_iterator(void *_data, u8 *mac,
696 struct ieee80211_vif *vif)
697 {
698 struct iwl_bss_find_iter_data *data = _data;
699 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
700
701 if (mvmvif->id == data->macid)
702 data->vif = vif;
703 }
704
705 struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid)
706 {
707 struct iwl_bss_find_iter_data data = {
708 .macid = macid,
709 };
710
711 lockdep_assert_held(&mvm->mutex);
712
713 ieee80211_iterate_active_interfaces_atomic(
714 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
715 iwl_mvm_bss_find_iface_iterator, &data);
716
717 return data.vif;
718 }
719
720 struct iwl_sta_iter_data {
721 bool assoc;
722 };
723
724 static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
725 struct ieee80211_vif *vif)
726 {
727 struct iwl_sta_iter_data *data = _data;
728
729 if (vif->type != NL80211_IFTYPE_STATION)
730 return;
731
732 if (vif->cfg.assoc)
733 data->assoc = true;
734 }
735
736 bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
737 {
738 struct iwl_sta_iter_data data = {
739 .assoc = false,
740 };
741
742 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
743 IEEE80211_IFACE_ITER_NORMAL,
744 iwl_mvm_sta_iface_iterator,
745 &data);
746 return data.assoc;
747 }
748
749 unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
750 struct ieee80211_vif *vif)
751 {
752 unsigned int default_timeout =
753 mvm->trans->trans_cfg->base_params->wd_timeout;
754
755 /*
756 * We can't know when the station is asleep or awake, so we
757 * must disable the queue hang detection.
758 */
759 if (fw_has_capa(&mvm->fw->ucode_capa,
760 IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
761 vif->type == NL80211_IFTYPE_AP)
762 return IWL_WATCHDOG_DISABLED;
763 return default_timeout;
764 }
765
766 void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
767 const char *errmsg)
768 {
769 struct iwl_fw_dbg_trigger_tlv *trig;
770 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
771
772 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
773 FW_DBG_TRIGGER_MLME);
774 if (!trig)
775 goto out;
776
777 trig_mlme = (void *)trig->data;
778
779 if (trig_mlme->stop_connection_loss &&
780 --trig_mlme->stop_connection_loss)
781 goto out;
782
783 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);
784
785 out:
786 ieee80211_connection_loss(vif);
787 }
788
789 void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
790 struct ieee80211_vif *vif,
791 const struct ieee80211_sta *sta,
792 u16 tid)
793 {
794 struct iwl_fw_dbg_trigger_tlv *trig;
795 struct iwl_fw_dbg_trigger_ba *ba_trig;
796
797 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
798 FW_DBG_TRIGGER_BA);
799 if (!trig)
800 return;
801
802 ba_trig = (void *)trig->data;
803
804 if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
805 return;
806
807 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
808 "Frame from %pM timed out, tid %d",
809 sta->addr, tid);
810 }
811
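/*
 * airtime is given in usec while elapsed is in msec, hence the extra
 * division by USEC_PER_MSEC to end up with a percentage.
 */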
812 u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
813 {
814 if (!elapsed)
815 return 0;
816
817 return (100 * airtime / elapsed) / USEC_PER_MSEC;
818 }
819
820 static enum iwl_mvm_traffic_load
821 iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
822 {
823 u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);
824
825 if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
826 return IWL_MVM_TRAFFIC_HIGH;
827 if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
828 return IWL_MVM_TRAFFIC_MEDIUM;
829
830 return IWL_MVM_TRAFFIC_LOW;
831 }
832
833 static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
834 {
835 struct iwl_mvm *mvm = _data;
836 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
837 bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;
838
839 if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
840 return;
841
842 low_latency = mvm->tcm.result.low_latency[mvmvif->id];
843
844 if (!mvm->tcm.result.change[mvmvif->id] &&
845 prev == low_latency) {
846 iwl_mvm_update_quotas(mvm, false, NULL);
847 return;
848 }
849
850 if (prev != low_latency) {
851 /* this sends traffic load and updates quota as well */
852 iwl_mvm_update_low_latency(mvm, vif, low_latency,
853 LOW_LATENCY_TRAFFIC);
854 } else {
855 iwl_mvm_update_quotas(mvm, false, NULL);
856 }
857 }
858
859 static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
860 {
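/* scope-based guard: holds mvm->mutex until the function returns */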
861 guard(mvm)(mvm);
862
863 ieee80211_iterate_active_interfaces(
864 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
865 iwl_mvm_tcm_iter, mvm);
866
867 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
868 iwl_mvm_config_scan(mvm);
869 }
870
871 static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
872 {
873 struct iwl_mvm *mvm;
874 struct iwl_mvm_vif *mvmvif;
875 struct ieee80211_vif *vif;
876
877 mvmvif = container_of(wk, struct iwl_mvm_vif,
878 uapsd_nonagg_detected_wk.work);
879 vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
880 mvm = mvmvif->mvm;
881
882 if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
883 return;
884
885 /* remember that this AP is broken */
886 memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
887 vif->bss_conf.bssid, ETH_ALEN);
888 mvm->uapsd_noagg_bssid_write_idx++;
889 if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
890 mvm->uapsd_noagg_bssid_write_idx = 0;
891
892 iwl_mvm_connection_loss(mvm, vif,
893 "AP isn't using AMPDU with uAPSD enabled");
894 }
895
896 static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
897 struct ieee80211_vif *vif)
898 {
899 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
900
901 if (vif->type != NL80211_IFTYPE_STATION)
902 return;
903
904 if (!vif->cfg.assoc)
905 return;
906
907 if (!mvmvif->deflink.queue_params[IEEE80211_AC_VO].uapsd &&
908 !mvmvif->deflink.queue_params[IEEE80211_AC_VI].uapsd &&
909 !mvmvif->deflink.queue_params[IEEE80211_AC_BE].uapsd &&
910 !mvmvif->deflink.queue_params[IEEE80211_AC_BK].uapsd)
911 return;
912
913 if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
914 return;
915
916 mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
917 IWL_INFO(mvm,
918 "detected AP should do aggregation but isn't, likely due to U-APSD\n");
919 schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk,
920 15 * HZ);
921 }
922
923 static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
924 unsigned int elapsed,
925 int mac)
926 {
927 u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
928 u64 tpt;
929 unsigned long rate;
930 struct ieee80211_vif *vif;
931
932 rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);
933
934 if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
935 mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
936 return;
937
938 if (iwl_mvm_has_new_rx_api(mvm)) {
939 tpt = 8 * bytes; /* kbps */
940 do_div(tpt, elapsed);
941 rate *= 1000; /* kbps */
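/*
 * Only complain if throughput reaches at least ~22% of the current
 * RX rate; below that, missing aggregation is not meaningful.
 */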
942 if (tpt < 22 * rate / 100)
943 return;
944 } else {
945 /*
946 * the rate here is actually the threshold, in 100Kbps units,
947 * so do the needed conversion from bytes to 100Kbps:
948 * 100kb = bits / (100 * 1000),
949 * 100kbps = 100kb / (msecs / 1000) ==
950 * (bits / (100 * 1000)) / (msecs / 1000) ==
951 * bits / (100 * msecs)
952 */
953 tpt = (8 * bytes);
954 do_div(tpt, elapsed * 100);
955 if (tpt < rate)
956 return;
957 }
958
959 rcu_read_lock();
960 vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
961 if (vif)
962 iwl_mvm_uapsd_agg_disconnect(mvm, vif);
963 rcu_read_unlock();
964 }
965
966 static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
967 struct ieee80211_vif *vif)
968 {
969 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
970 u32 *band = _data;
971
972 if (!mvmvif->deflink.phy_ctxt)
973 return;
974
975 band[mvmvif->id] = mvmvif->deflink.phy_ctxt->channel->band;
976 }
977
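/*
 * Returns the delay (in jiffies) after which the TCM work should be
 * scheduled again, or 0 if no re-evaluation is needed.
 */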
978 static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
979 unsigned long ts,
980 bool handle_uapsd)
981 {
982 unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
983 unsigned int uapsd_elapsed =
984 jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
985 u32 total_airtime = 0;
986 u32 band_airtime[NUM_NL80211_BANDS] = {0};
987 u32 band[NUM_MAC_INDEX_DRIVER] = {0};
988 int ac, mac, i;
989 bool low_latency = false;
990 enum iwl_mvm_traffic_load load, band_load;
991 bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);
992
993 if (handle_ll)
994 mvm->tcm.ll_ts = ts;
995 if (handle_uapsd)
996 mvm->tcm.uapsd_nonagg_ts = ts;
997
998 mvm->tcm.result.elapsed = elapsed;
999
1000 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1001 IEEE80211_IFACE_ITER_NORMAL,
1002 iwl_mvm_tcm_iterator,
1003 &band);
1004
1005 for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
1006 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
1007 u32 vo_vi_pkts = 0;
1008 u32 airtime = mdata->rx.airtime + mdata->tx.airtime;
1009
1010 total_airtime += airtime;
1011 band_airtime[band[mac]] += airtime;
1012
1013 load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
1014 mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
1015 mvm->tcm.result.load[mac] = load;
1016 mvm->tcm.result.airtime[mac] = airtime;
1017
1018 for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
1019 vo_vi_pkts += mdata->rx.pkts[ac] +
1020 mdata->tx.pkts[ac];
1021
1022 /* enable immediately with enough packets but defer disabling */
1023 if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
1024 mvm->tcm.result.low_latency[mac] = true;
1025 else if (handle_ll)
1026 mvm->tcm.result.low_latency[mac] = false;
1027
1028 if (handle_ll) {
1029 /* clear old data */
1030 memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
1031 memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
1032 }
1033 low_latency |= mvm->tcm.result.low_latency[mac];
1034
1035 if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
1036 iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
1037 mac);
1038 /* clear old data */
1039 if (handle_uapsd)
1040 mdata->uapsd_nonagg_detect.rx_bytes = 0;
1041 memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
1042 memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
1043 }
1044
1045 load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
1046 mvm->tcm.result.global_load = load;
1047
1048 for (i = 0; i < NUM_NL80211_BANDS; i++) {
1049 band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
1050 mvm->tcm.result.band_load[i] = band_load;
1051 }
1052
1053 /*
1054 * If the current load isn't low we need to force re-evaluation
1055 * in the TCM period, so that we can return to low load if there
1056 * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
1057 * triggered by traffic).
1058 */
1059 if (load != IWL_MVM_TRAFFIC_LOW)
1060 return MVM_TCM_PERIOD;
1061 /*
1062 * If low-latency is active we need to force re-evaluation after
1063 * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
1064 * when there's no traffic at all.
1065 */
1066 if (low_latency)
1067 return MVM_LL_PERIOD;
1068 /*
1069 * Otherwise, we don't need to run the work struct because we're
1070 * in the default "idle" state - traffic indication is low (which
1071 * also covers the "no traffic" case) and low-latency is disabled
1072 * so there's no state that may need to be disabled when there's
1073 * no traffic at all.
1074 *
1075 * Note that this has no impact on the regular scheduling of the
1076 * updates triggered by traffic - those happen whenever one of the
1077 * two timeouts expire (if there's traffic at all.)
1078 */
1079 return 0;
1080 }
1081
1082 void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
1083 {
1084 unsigned long ts = jiffies;
1085 bool handle_uapsd =
1086 time_after(ts, mvm->tcm.uapsd_nonagg_ts +
1087 msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));
1088
1089 spin_lock(&mvm->tcm.lock);
1090 if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
1091 spin_unlock(&mvm->tcm.lock);
1092 return;
1093 }
1094 spin_unlock(&mvm->tcm.lock);
1095
1096 if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
1097 guard(mvm)(mvm);
1098 if (iwl_mvm_request_statistics(mvm, true))
1099 handle_uapsd = false;
1100 }
1101
1102 spin_lock(&mvm->tcm.lock);
1103 /* re-check if somebody else won the recheck race */
1104 if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
1105 /* calculate statistics */
1106 unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
1107 handle_uapsd);
1108
1109 /* the memset needs to be visible before the timestamp */
1110 smp_mb();
1111 mvm->tcm.ts = ts;
1112 if (work_delay)
1113 schedule_delayed_work(&mvm->tcm.work, work_delay);
1114 }
1115 spin_unlock(&mvm->tcm.lock);
1116
1117 iwl_mvm_tcm_results(mvm);
1118 }
1119
1120 void iwl_mvm_tcm_work(struct work_struct *work)
1121 {
1122 struct delayed_work *delayed_work = to_delayed_work(work);
1123 struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
1124 tcm.work);
1125
1126 iwl_mvm_recalc_tcm(mvm);
1127 }
1128
1129 void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
1130 {
1131 spin_lock_bh(&mvm->tcm.lock);
1132 mvm->tcm.paused = true;
1133 spin_unlock_bh(&mvm->tcm.lock);
1134 if (with_cancel)
1135 cancel_delayed_work_sync(&mvm->tcm.work);
1136 }
1137
1138 void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
1139 {
1140 int mac;
1141 bool low_latency = false;
1142
1143 spin_lock_bh(&mvm->tcm.lock);
1144 mvm->tcm.ts = jiffies;
1145 mvm->tcm.ll_ts = jiffies;
1146 for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
1147 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
1148
1149 memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
1150 memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
1151 memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
1152 memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
1153
1154 if (mvm->tcm.result.low_latency[mac])
1155 low_latency = true;
1156 }
1157 /* The TCM data needs to be reset before "paused" flag changes */
1158 smp_mb();
1159 mvm->tcm.paused = false;
1160
1161 /*
1162 * if the current load is not low or low latency is active, force
1163 * re-evaluation to cover the case of no traffic.
1164 */
1165 if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW)
1166 schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
1167 else if (low_latency)
1168 schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);
1169
1170 spin_unlock_bh(&mvm->tcm.lock);
1171 }
1172
1173 void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1174 {
1175 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1176
1177 INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
1178 iwl_mvm_tcm_uapsd_nonagg_detected_wk);
1179 }
1180
1181 void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1182 {
1183 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1184
1185 cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
1186 }
1187
1188 u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
1189 {
1190 u32 reg_addr = DEVICE_SYSTEM_TIME_REG;
1191
1192 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
1193 mvm->trans->cfg->gp2_reg_addr)
1194 reg_addr = mvm->trans->cfg->gp2_reg_addr;
1195
1196 return iwl_read_prph(mvm->trans, reg_addr);
1197 }
1198
1199 void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
1200 u32 *gp2, u64 *boottime, ktime_t *realtime)
1201 {
1202 bool ps_disabled;
1203
1204 lockdep_assert_held(&mvm->mutex);
1205
1206 /* Disable power save when reading GP2 */
1207 ps_disabled = mvm->ps_disabled;
1208 if (!ps_disabled) {
1209 mvm->ps_disabled = true;
1210 iwl_mvm_power_update_device(mvm);
1211 }
1212
1213 *gp2 = iwl_mvm_get_systime(mvm);
1214
1215 if (clock_type == CLOCK_BOOTTIME && boottime)
1216 *boottime = ktime_get_boottime_ns();
1217 else if (clock_type == CLOCK_REALTIME && realtime)
1218 *realtime = ktime_get_real();
1219
1220 if (!ps_disabled) {
1221 mvm->ps_disabled = ps_disabled;
1222 iwl_mvm_power_update_device(mvm);
1223 }
1224 }
1225
1226 /* Find if at least two links from different vifs use the same channel.
1227 * FIXME: consider having a refcount array in struct iwl_mvm_vif for
1228 * used phy_ctxt ids.
1229 */
1230 bool iwl_mvm_have_links_same_channel(struct iwl_mvm_vif *vif1,
1231 struct iwl_mvm_vif *vif2)
1232 {
1233 unsigned int i, j;
1234
1235 for_each_mvm_vif_valid_link(vif1, i) {
1236 for_each_mvm_vif_valid_link(vif2, j) {
1237 if (vif1->link[i]->phy_ctxt == vif2->link[j]->phy_ctxt)
1238 return true;
1239 }
1240 }
1241
1242 return false;
1243 }
1244
1245 bool iwl_mvm_vif_is_active(struct iwl_mvm_vif *mvmvif)
1246 {
1247 unsigned int i;
1248
1249 /* FIXME: can it fail when phy_ctxt is assigned? */
1250 for_each_mvm_vif_valid_link(mvmvif, i) {
1251 if (mvmvif->link[i]->phy_ctxt &&
1252 mvmvif->link[i]->phy_ctxt->id < NUM_PHY_CTX)
1253 return true;
1254 }
1255
1256 return false;
1257 }
1258