1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2015-2017 Intel Deutschland GmbH
4 * Copyright (C) 2018-2025 Intel Corporation
5 */
6 #include <linux/etherdevice.h>
7 #include <linux/math64.h>
8 #include <net/cfg80211.h>
9 #include "mvm.h"
10 #include "iwl-io.h"
11 #include "iwl-prph.h"
12 #include "constants.h"
13
14 struct iwl_mvm_loc_entry {
15 struct list_head list;
16 u8 addr[ETH_ALEN];
17 u8 lci_len, civic_len;
18 u8 buf[];
19 };
20
21 struct iwl_mvm_smooth_entry {
22 struct list_head list;
23 u8 addr[ETH_ALEN];
24 s64 rtt_avg;
25 u64 host_time;
26 };
27
28 enum iwl_mvm_pasn_flags {
29 IWL_MVM_PASN_FLAG_HAS_HLTK = BIT(0),
30 };
31
32 struct iwl_mvm_ftm_pasn_entry {
33 struct list_head list;
34 u8 addr[ETH_ALEN];
35 u8 hltk[HLTK_11AZ_LEN];
36 u8 tk[TK_11AZ_LEN];
37 u8 cipher;
38 u8 tx_pn[IEEE80211_CCMP_PN_LEN];
39 u8 rx_pn[IEEE80211_CCMP_PN_LEN];
40 u32 flags;
41 };
42
43 struct iwl_mvm_ftm_iter_data {
44 u8 *cipher;
45 u8 *bssid;
46 u8 *tk;
47 };
48
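/*
 * Forget the in-flight request and free any LCI/civic location entries
 * that were cached for it.
 */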
49 static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
50 {
51 struct iwl_mvm_loc_entry *e, *t;
52
53 mvm->ftm_initiator.req = NULL;
54 mvm->ftm_initiator.req_wdev = NULL;
55 memset(mvm->ftm_initiator.responses, 0,
56 sizeof(mvm->ftm_initiator.responses));
57
58 list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
59 list_del(&e->list);
60 kfree(e);
61 }
62 }
63
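/*
 * Called on firmware restart: report a failure for every peer of the
 * in-flight request, complete the measurement towards cfg80211 and
 * reset the initiator state.
 */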
64 void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
65 {
66 struct cfg80211_pmsr_result result = {
67 .status = NL80211_PMSR_STATUS_FAILURE,
68 .final = 1,
69 .host_time = ktime_get_boottime_ns(),
70 .type = NL80211_PMSR_TYPE_FTM,
71 };
72 int i;
73
74 lockdep_assert_held(&mvm->mutex);
75
76 if (!mvm->ftm_initiator.req)
77 return;
78
79 for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) {
80 memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr,
81 ETH_ALEN);
82 result.ftm.burst_index = mvm->ftm_initiator.responses[i];
83
84 cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
85 mvm->ftm_initiator.req,
86 &result, GFP_KERNEL);
87 }
88
89 cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
90 mvm->ftm_initiator.req, GFP_KERNEL);
91 iwl_mvm_ftm_reset(mvm);
92 }
93
94 void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm)
95 {
96 INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp);
97
98 IWL_DEBUG_INFO(mvm,
99 "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n",
100 IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH,
101 IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA,
102 IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ,
103 IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT,
104 IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT);
105 }
106
107 void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm)
108 {
109 struct iwl_mvm_smooth_entry *se, *st;
110
111 list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp,
112 list) {
113 list_del(&se->list);
114 kfree(se);
115 }
116 }
117
118 static int
119 iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
120 {
121 switch (s) {
122 case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS:
123 return 0;
124 case IWL_TOF_RANGE_REQUEST_STATUS_BUSY:
125 return -EBUSY;
126 default:
127 WARN_ON_ONCE(1);
128 return -EIO;
129 }
130 }
131
132 static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
133 struct iwl_tof_range_req_cmd_v5 *cmd,
134 struct cfg80211_pmsr_request *req)
135 {
136 int i;
137
138 cmd->request_id = req->cookie;
139 cmd->num_of_ap = req->n_peers;
140
141 /* use the maximum for "no timeout" or anything bigger than we can do */
142 if (!req->timeout || req->timeout > 255 * 100)
143 cmd->req_timeout = 255;
144 else
145 cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100);
146
147 /*
148 * We always treat the address as random, since if it were not,
149 * we would have filled in our local address there instead.
150 */
151 cmd->macaddr_random = 1;
152 memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
153 for (i = 0; i < ETH_ALEN; i++)
154 cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
155
156 if (vif->cfg.assoc)
157 memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
158 else
159 eth_broadcast_addr(cmd->range_req_bssid);
160 }
161
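/*
 * Fill the fields shared by the newer range request command versions:
 * randomized MAC template and mask, the request timeout and, when we are
 * associated and TSF reporting was requested, the MAC id whose TSF should
 * be reported (0xff means "don't report").
 */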
162 static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
163 struct ieee80211_vif *vif,
164 #if defined(__linux__)
165 struct iwl_tof_range_req_cmd_v9 *cmd,
166 #elif defined(__FreeBSD__)
167 struct iwl_tof_range_req_cmd_v9 *cmd, /* XXX-BZ Probably better solved by a common struct in fw for top parts of the struct. */
168 #endif
169 struct cfg80211_pmsr_request *req)
170 {
171 int i;
172
173 cmd->initiator_flags =
174 cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
175 IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
176 cmd->request_id = req->cookie;
177 cmd->num_of_ap = req->n_peers;
178
179 /*
180 * Use a large value for "no timeout". Don't use the maximum value
181 * because of fw limitations.
182 */
183 if (req->timeout)
184 cmd->req_timeout_ms = cpu_to_le32(req->timeout);
185 else
186 cmd->req_timeout_ms = cpu_to_le32(0xfffff);
187
188 memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
189 for (i = 0; i < ETH_ALEN; i++)
190 cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
191
192 if (vif->cfg.assoc) {
193 memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
194
195 /* AP's TSF is only relevant if associated */
196 for (i = 0; i < req->n_peers; i++) {
197 if (req->peers[i].report_ap_tsf) {
198 struct iwl_mvm_vif *mvmvif =
199 iwl_mvm_vif_from_mac80211(vif);
200
201 cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
202 return;
203 }
204 }
205 } else {
206 eth_broadcast_addr(cmd->range_req_bssid);
207 }
208
209 /* Don't report AP's TSF */
210 cmd->tsf_mac_id = cpu_to_le32(0xff);
211 }
212
213 static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
214 struct iwl_tof_range_req_cmd_v8 *cmd,
215 struct cfg80211_pmsr_request *req)
216 {
217 iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req);
218 }
219
220 static int
221 iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
222 struct cfg80211_pmsr_request_peer *peer,
223 u8 *channel, u8 *bandwidth,
224 u8 *ctrl_ch_position)
225 {
226 u32 freq = peer->chandef.chan->center_freq;
227
228 *channel = ieee80211_frequency_to_channel(freq);
229
230 switch (peer->chandef.width) {
231 case NL80211_CHAN_WIDTH_20_NOHT:
232 *bandwidth = IWL_TOF_BW_20_LEGACY;
233 break;
234 case NL80211_CHAN_WIDTH_20:
235 *bandwidth = IWL_TOF_BW_20_HT;
236 break;
237 case NL80211_CHAN_WIDTH_40:
238 *bandwidth = IWL_TOF_BW_40;
239 break;
240 case NL80211_CHAN_WIDTH_80:
241 *bandwidth = IWL_TOF_BW_80;
242 break;
243 default:
244 IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
245 peer->chandef.width);
246 return -EINVAL;
247 }
248
249 *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
250 iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
251
252 return 0;
253 }
254
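/*
 * Translate the cfg80211 channel definition into the firmware's channel
 * number, frame format/bandwidth and control channel position. 160 MHz is
 * only accepted from command version 13 on, and non-EDCA (TB/NTB)
 * measurements use the HE preamble.
 */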
255 static int
256 iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
257 struct cfg80211_pmsr_request_peer *peer,
258 u8 *channel, u8 *format_bw,
259 u8 *ctrl_ch_position)
260 {
261 u32 freq = peer->chandef.chan->center_freq;
262 u8 cmd_ver;
263
264 *channel = ieee80211_frequency_to_channel(freq);
265
266 switch (peer->chandef.width) {
267 case NL80211_CHAN_WIDTH_20_NOHT:
268 *format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
269 *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
270 break;
271 case NL80211_CHAN_WIDTH_20:
272 *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
273 *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
274 break;
275 case NL80211_CHAN_WIDTH_40:
276 *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
277 *format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
278 break;
279 case NL80211_CHAN_WIDTH_80:
280 *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
281 *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
282 break;
283 case NL80211_CHAN_WIDTH_160:
284 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
285 WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
286 IWL_FW_CMD_VER_UNKNOWN);
287
288 if (cmd_ver >= 13) {
289 *format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
290 *format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
291 break;
292 }
293 fallthrough;
294 default:
295 IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
296 peer->chandef.width);
297 return -EINVAL;
298 }
299
300 /* non-EDCA based measurements must use the HE preamble */
301 if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
302 *format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;
303
304 *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
305 iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
306
307 return 0;
308 }
309
310 static int
311 iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
312 struct cfg80211_pmsr_request_peer *peer,
313 struct iwl_tof_range_req_ap_entry_v2 *target)
314 {
315 int ret;
316
317 ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
318 &target->bandwidth,
319 &target->ctrl_ch_position);
320 if (ret)
321 return ret;
322
323 memcpy(target->bssid, peer->addr, ETH_ALEN);
324 target->burst_period =
325 cpu_to_le16(peer->ftm.burst_period);
326 target->samples_per_burst = peer->ftm.ftms_per_burst;
327 target->num_of_bursts = peer->ftm.num_bursts_exp;
328 target->measure_type = 0; /* regular two-sided FTM */
329 target->retries_per_sample = peer->ftm.ftmr_retries;
330 target->asap_mode = peer->ftm.asap;
331 target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK;
332
333 if (peer->ftm.request_lci)
334 target->location_req |= IWL_TOF_LOC_LCI;
335 if (peer->ftm.request_civicloc)
336 target->location_req |= IWL_TOF_LOC_CIVIC;
337
338 target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO;
339
340 return 0;
341 }
342
343 #define FTM_SET_FLAG(flag) (*flags |= \
344 cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
345
346 static void
347 iwl_mvm_ftm_set_target_flags(struct iwl_mvm *mvm,
348 struct cfg80211_pmsr_request_peer *peer,
349 __le32 *flags)
350 {
351 *flags = cpu_to_le32(0);
352
353 if (peer->ftm.asap)
354 FTM_SET_FLAG(ASAP);
355
356 if (peer->ftm.request_lci)
357 FTM_SET_FLAG(LCI_REQUEST);
358
359 if (peer->ftm.request_civicloc)
360 FTM_SET_FLAG(CIVIC_REQUEST);
361
362 if (IWL_MVM_FTM_INITIATOR_DYNACK)
363 FTM_SET_FLAG(DYN_ACK);
364
365 if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
366 FTM_SET_FLAG(ALGO_LR);
367 else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
368 FTM_SET_FLAG(ALGO_FFT);
369
370 if (peer->ftm.trigger_based)
371 FTM_SET_FLAG(TB);
372 else if (peer->ftm.non_trigger_based)
373 FTM_SET_FLAG(NON_TB);
374
375 if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
376 peer->ftm.lmr_feedback)
377 FTM_SET_FLAG(LMR_FEEDBACK);
378 }
379
380 static void
381 iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
382 struct cfg80211_pmsr_request_peer *peer,
383 struct iwl_tof_range_req_ap_entry_v6 *target)
384 {
385 memcpy(target->bssid, peer->addr, ETH_ALEN);
386 target->burst_period =
387 cpu_to_le16(peer->ftm.burst_period);
388 target->samples_per_burst = peer->ftm.ftms_per_burst;
389 target->num_of_bursts = peer->ftm.num_bursts_exp;
390 target->ftmr_max_retries = peer->ftm.ftmr_retries;
391 iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
392 }
393
394 static int
395 iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
396 struct cfg80211_pmsr_request_peer *peer,
397 struct iwl_tof_range_req_ap_entry_v3 *target)
398 {
399 int ret;
400
401 ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
402 &target->bandwidth,
403 &target->ctrl_ch_position);
404 if (ret)
405 return ret;
406
407 /*
408 * Versions 3 and 4 share some common fields, so
409 * iwl_mvm_ftm_put_target_common() can be used for version 3 too.
410 */
411 iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
412
413 return 0;
414 }
415
416 static int
417 iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
418 struct cfg80211_pmsr_request_peer *peer,
419 struct iwl_tof_range_req_ap_entry_v4 *target)
420 {
421 int ret;
422
423 ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
424 &target->format_bw,
425 &target->ctrl_ch_position);
426 if (ret)
427 return ret;
428
429 iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
430
431 return 0;
432 }
433
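/*
 * If we are associated to the target BSS, use the AP station id so the
 * firmware can protect the measurement, and request PMF for TB/NTB
 * ranging when the station uses management frame protection.
 */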
434 static int iwl_mvm_ftm_set_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
435 struct cfg80211_pmsr_request_peer *peer,
436 u8 *sta_id, __le32 *flags)
437 {
438 if (vif->cfg.assoc) {
439 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
440 struct ieee80211_sta *sta;
441 struct ieee80211_bss_conf *link_conf;
442 unsigned int link_id;
443
444 rcu_read_lock();
445 for_each_vif_active_link(vif, link_conf, link_id) {
446 if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN))
447 continue;
448
449 *sta_id = mvmvif->link[link_id]->ap_sta_id;
450 sta = rcu_dereference(mvm->fw_id_to_mac_id[*sta_id]);
451 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
452 rcu_read_unlock();
453 return PTR_ERR_OR_ZERO(sta);
454 }
455
456 if (sta->mfp && (peer->ftm.trigger_based ||
457 peer->ftm.non_trigger_based))
458 FTM_SET_FLAG(PMF);
459 break;
460 }
461 rcu_read_unlock();
462
463 #ifdef CONFIG_IWLWIFI_DEBUGFS
464 if (mvmvif->ftm_unprotected) {
465 *sta_id = IWL_INVALID_STA;
466 *flags &= ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF);
467 }
468 #endif
469 } else {
470 *sta_id = IWL_INVALID_STA;
471 }
472
473 return 0;
474 }
475
476 static int
477 iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
478 struct cfg80211_pmsr_request_peer *peer,
479 struct iwl_tof_range_req_ap_entry_v6 *target)
480 {
481 int ret;
482
483 ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
484 &target->format_bw,
485 &target->ctrl_ch_position);
486 if (ret)
487 return ret;
488
489 iwl_mvm_ftm_put_target_common(mvm, peer, target);
490
491 iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
492 &target->initiator_ap_flags);
493
494 /*
495 * TODO: Beacon interval is currently unknown, so use the common value
496 * of 100 TUs.
497 */
498 target->beacon_interval = cpu_to_le16(100);
499 return 0;
500 }
501
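/*
 * Send the range request and convert a non-zero firmware status into an
 * errno (-EBUSY when a request is already running, -EIO otherwise).
 */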
502 static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
503 {
504 u32 status;
505 int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);
506
507 if (!err && status) {
508 IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
509 status);
510 err = iwl_ftm_range_request_status_to_err(status);
511 }
512
513 return err;
514 }
515
516 static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
517 struct cfg80211_pmsr_request *req)
518 {
519 struct iwl_tof_range_req_cmd_v5 cmd_v5;
520 struct iwl_host_cmd hcmd = {
521 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
522 .dataflags[0] = IWL_HCMD_DFL_DUP,
523 .data[0] = &cmd_v5,
524 .len[0] = sizeof(cmd_v5),
525 };
526 u8 i;
527 int err;
528
529 iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
530
531 for (i = 0; i < cmd_v5.num_of_ap; i++) {
532 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
533
534 err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
535 if (err)
536 return err;
537 }
538
539 return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
540 }
541
542 static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
543 struct cfg80211_pmsr_request *req)
544 {
545 struct iwl_tof_range_req_cmd_v7 cmd_v7;
546 struct iwl_host_cmd hcmd = {
547 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
548 .dataflags[0] = IWL_HCMD_DFL_DUP,
549 .data[0] = &cmd_v7,
550 .len[0] = sizeof(cmd_v7),
551 };
552 u8 i;
553 int err;
554
555 /*
556 * Versions 7 and 8 have the same structure except for the responders
557 * list, so iwl_mvm_ftm_cmd_v8() can be used for version 7 too.
558 */
559 iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req);
560
561 for (i = 0; i < cmd_v7.num_of_ap; i++) {
562 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
563
564 err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
565 if (err)
566 return err;
567 }
568
569 return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
570 }
571
572 static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
573 struct cfg80211_pmsr_request *req)
574 {
575 struct iwl_tof_range_req_cmd_v8 cmd;
576 struct iwl_host_cmd hcmd = {
577 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
578 .dataflags[0] = IWL_HCMD_DFL_DUP,
579 .data[0] = &cmd,
580 .len[0] = sizeof(cmd),
581 };
582 u8 i;
583 int err;
584
585 iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req);
586
587 for (i = 0; i < cmd.num_of_ap; i++) {
588 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
589
590 err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
591 if (err)
592 return err;
593 }
594
595 return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
596 }
597
598 static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
599 struct cfg80211_pmsr_request *req)
600 {
601 struct iwl_tof_range_req_cmd_v9 cmd;
602 struct iwl_host_cmd hcmd = {
603 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
604 .dataflags[0] = IWL_HCMD_DFL_DUP,
605 .data[0] = &cmd,
606 .len[0] = sizeof(cmd),
607 };
608 u8 i;
609 int err;
610
611 iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req);
612
613 for (i = 0; i < cmd.num_of_ap; i++) {
614 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
615 struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i];
616
617 err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
618 if (err)
619 return err;
620 }
621
622 return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
623 }
624
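/*
 * Key iterator: for the key installed for the target BSSID, export the TK
 * and map its cipher to the firmware's location cipher enum.
 */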
625 static void iter(struct ieee80211_hw *hw,
626 struct ieee80211_vif *vif,
627 struct ieee80211_sta *sta,
628 struct ieee80211_key_conf *key,
629 void *data)
630 {
631 struct iwl_mvm_ftm_iter_data *target = data;
632
633 if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
634 return;
635
636 WARN_ON(!sta->mfp);
637
638 target->tk = key->key;
639 *target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
640 WARN_ON(*target->cipher == IWL_LOCATION_CIPHER_INVALID);
641 }
642
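/*
 * Secured (PASN) ranging: if the responder is on the PASN list, copy its
 * cipher, HLTK and PN counters into the target entry. For the BSS we are
 * associated with, the TK is taken from the installed pairwise key via the
 * iterator above; otherwise the TK stored with the PASN entry is used.
 */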
643 static void
644 iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
645 u8 *bssid, u8 *cipher, u8 *hltk, u8 *tk,
646 u8 *rx_pn, u8 *tx_pn, __le32 *flags)
647 {
648 struct iwl_mvm_ftm_pasn_entry *entry;
649 #ifdef CONFIG_IWLWIFI_DEBUGFS
650 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
651
652 if (mvmvif->ftm_unprotected)
653 return;
654 #endif
655
656 if (!(le32_to_cpu(*flags) & (IWL_INITIATOR_AP_FLAGS_NON_TB |
657 IWL_INITIATOR_AP_FLAGS_TB)))
658 return;
659
660 lockdep_assert_held(&mvm->mutex);
661
662 list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
663 if (memcmp(entry->addr, bssid, sizeof(entry->addr)))
664 continue;
665
666 *cipher = entry->cipher;
667
668 if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK)
669 memcpy(hltk, entry->hltk, sizeof(entry->hltk));
670 else
671 memset(hltk, 0, sizeof(entry->hltk));
672
673 if (vif->cfg.assoc &&
674 !memcmp(vif->bss_conf.bssid, bssid, ETH_ALEN)) {
675 struct iwl_mvm_ftm_iter_data target;
676
677 target.bssid = bssid;
678 target.cipher = cipher;
679 target.tk = NULL;
680 ieee80211_iter_keys(mvm->hw, vif, iter, &target);
681
682 if (!WARN_ON(!target.tk))
683 memcpy(tk, target.tk, TK_11AZ_LEN);
684 } else {
685 memcpy(tk, entry->tk, sizeof(entry->tk));
686 }
687
688 memcpy(rx_pn, entry->rx_pn, sizeof(entry->rx_pn));
689 memcpy(tx_pn, entry->tx_pn, sizeof(entry->tx_pn));
690
691 FTM_SET_FLAG(SECURED);
692 return;
693 }
694 }
695
696 static int
697 iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
698 struct cfg80211_pmsr_request_peer *peer,
699 struct iwl_tof_range_req_ap_entry_v7 *target)
700 {
701 int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);
702 if (err)
703 return err;
704
705 iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
706 &target->cipher, target->hltk,
707 target->tk, target->rx_pn,
708 target->tx_pn,
709 &target->initiator_ap_flags);
710 return err;
711 }
712
713 static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
714 struct ieee80211_vif *vif,
715 struct cfg80211_pmsr_request *req)
716 {
717 struct iwl_tof_range_req_cmd_v11 cmd;
718 struct iwl_host_cmd hcmd = {
719 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
720 .dataflags[0] = IWL_HCMD_DFL_DUP,
721 .data[0] = &cmd,
722 .len[0] = sizeof(cmd),
723 };
724 u8 i;
725 int err;
726
727 iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
728
729 for (i = 0; i < cmd.num_of_ap; i++) {
730 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
731 struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];
732
733 err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target);
734 if (err)
735 return err;
736 }
737
738 return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
739 }
740
741 static void
742 iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm,
743 struct iwl_tof_range_req_ap_entry_v8 *target)
744 {
745 /* Only 2 STS are supported on Tx */
746 u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
747 IWL_MVM_FTM_I2R_MAX_STS;
748
749 target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
750 (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS);
751 target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
752 (i2r_max_sts << IWL_LOCATION_MAX_STS_POS);
753 target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF;
754 target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF;
755 }
756
757 static int
758 iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
759 struct cfg80211_pmsr_request_peer *peer,
760 struct iwl_tof_range_req_ap_entry_v8 *target)
761 {
762 u32 flags;
763 int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);
764
765 if (ret)
766 return ret;
767
768 iwl_mvm_ftm_set_ndp_params(mvm, target);
769
770 /*
771 * If secure LTF is turned off, replace the flag with PMF only
772 */
773 flags = le32_to_cpu(target->initiator_ap_flags);
774 if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
775 if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
776 flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
777
778 flags |= IWL_INITIATOR_AP_FLAGS_PMF;
779 target->initiator_ap_flags = cpu_to_le32(flags);
780 }
781
782 return 0;
783 }
784
785 static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
786 struct ieee80211_vif *vif,
787 struct cfg80211_pmsr_request *req)
788 {
789 struct iwl_tof_range_req_cmd_v12 cmd;
790 struct iwl_host_cmd hcmd = {
791 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
792 .dataflags[0] = IWL_HCMD_DFL_DUP,
793 .data[0] = &cmd,
794 .len[0] = sizeof(cmd),
795 };
796 u8 i;
797 int err;
798
799 iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
800
801 for (i = 0; i < cmd.num_of_ap; i++) {
802 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
803 struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i];
804
805 err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target);
806 if (err)
807 return err;
808 }
809
810 return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
811 }
812
813 static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
814 struct ieee80211_vif *vif,
815 struct cfg80211_pmsr_request *req)
816 {
817 struct iwl_tof_range_req_cmd_v13 cmd;
818 struct iwl_host_cmd hcmd = {
819 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
820 .dataflags[0] = IWL_HCMD_DFL_DUP,
821 .data[0] = &cmd,
822 .len[0] = sizeof(cmd),
823 };
824 u8 i;
825 int err;
826
827 iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
828
829 for (i = 0; i < cmd.num_of_ap; i++) {
830 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
831 struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i];
832
833 err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target);
834 if (err)
835 return err;
836
837 if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
838 target->bss_color = peer->ftm.bss_color;
839
840 if (peer->ftm.non_trigger_based) {
841 target->min_time_between_msr =
842 cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
843 target->burst_period =
844 cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
845 } else {
846 target->min_time_between_msr = cpu_to_le16(0);
847 }
848
849 target->band =
850 iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
851 }
852
853 return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
854 }
855
856 static int
857 iwl_mvm_ftm_put_target_v10(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
858 struct cfg80211_pmsr_request_peer *peer,
859 struct iwl_tof_range_req_ap_entry *target)
860 {
861 u32 i2r_max_sts, flags;
862 int ret;
863
864 ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
865 &target->format_bw,
866 &target->ctrl_ch_position);
867 if (ret)
868 return ret;
869
870 memcpy(target->bssid, peer->addr, ETH_ALEN);
871 target->burst_period =
872 cpu_to_le16(peer->ftm.burst_period);
873 target->samples_per_burst = peer->ftm.ftms_per_burst;
874 target->num_of_bursts = peer->ftm.num_bursts_exp;
875 iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
876 iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
877 &target->initiator_ap_flags);
878 iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
879 &target->cipher, target->hltk,
880 target->tk, target->rx_pn,
881 target->tx_pn,
882 &target->initiator_ap_flags);
883
884 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
885 IWL_MVM_FTM_I2R_MAX_STS;
886
887 target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
888 (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) |
889 (IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
890 target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
891 (i2r_max_sts << IWL_LOCATION_MAX_STS_POS) |
892 (IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
893
894 if (peer->ftm.non_trigger_based) {
895 target->min_time_between_msr =
896 cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
897 target->burst_period =
898 cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
899 } else {
900 target->min_time_between_msr = cpu_to_le16(0);
901 }
902
903 target->band =
904 iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
905
906 /*
907 * TODO: Beacon interval is currently unknown, so use the common value
908 * of 100 TUs.
909 */
910 target->beacon_interval = cpu_to_le16(100);
911
912 /*
913 * If secure LTF is turned off, replace the flag with PMF only
914 */
915 flags = le32_to_cpu(target->initiator_ap_flags);
916 if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
917 if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
918 flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
919
920 flags |= IWL_INITIATOR_AP_FLAGS_PMF;
921 target->initiator_ap_flags = cpu_to_le32(flags);
922 }
923
924 return 0;
925 }
926
927 static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm,
928 struct ieee80211_vif *vif,
929 struct cfg80211_pmsr_request *req)
930 {
931 struct iwl_tof_range_req_cmd cmd;
932 struct iwl_host_cmd hcmd = {
933 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
934 .dataflags[0] = IWL_HCMD_DFL_DUP,
935 .data[0] = &cmd,
936 .len[0] = sizeof(cmd),
937 };
938 u8 i;
939 int err;
940
941 iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
942
943 for (i = 0; i < cmd.num_of_ap; i++) {
944 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
945 struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i];
946
947 err = iwl_mvm_ftm_put_target_v10(mvm, vif, peer, target);
948 if (err)
949 return err;
950 }
951
952 return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
953 }
954
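/*
 * Entry point for a measurement request from cfg80211: pick the range
 * request command version advertised by the firmware and build and send
 * the matching command. Only one request may be in flight at a time.
 */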
955 int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
956 struct cfg80211_pmsr_request *req)
957 {
958 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
959 IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
960 int err;
961
962 lockdep_assert_held(&mvm->mutex);
963
964 if (mvm->ftm_initiator.req)
965 return -EBUSY;
966
967 if (new_api) {
968 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
969 WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
970 IWL_FW_CMD_VER_UNKNOWN);
971
972 switch (cmd_ver) {
973 case 15:
974 /* Version 15 has the same struct as 14 */
975 case 14:
976 err = iwl_mvm_ftm_start_v14(mvm, vif, req);
977 break;
978 case 13:
979 err = iwl_mvm_ftm_start_v13(mvm, vif, req);
980 break;
981 case 12:
982 err = iwl_mvm_ftm_start_v12(mvm, vif, req);
983 break;
984 case 11:
985 err = iwl_mvm_ftm_start_v11(mvm, vif, req);
986 break;
987 case 9:
988 case 10:
989 err = iwl_mvm_ftm_start_v9(mvm, vif, req);
990 break;
991 case 8:
992 err = iwl_mvm_ftm_start_v8(mvm, vif, req);
993 break;
994 default:
995 err = iwl_mvm_ftm_start_v7(mvm, vif, req);
996 break;
997 }
998 } else {
999 err = iwl_mvm_ftm_start_v5(mvm, vif, req);
1000 }
1001
1002 if (!err) {
1003 mvm->ftm_initiator.req = req;
1004 mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
1005 }
1006
1007 return err;
1008 }
1009
1010 void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
1011 {
1012 struct iwl_tof_range_abort_cmd cmd = {
1013 .request_id = req->cookie,
1014 };
1015
1016 lockdep_assert_held(&mvm->mutex);
1017
1018 if (req != mvm->ftm_initiator.req)
1019 return;
1020
1021 iwl_mvm_ftm_reset(mvm);
1022
1023 if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
1024 0, sizeof(cmd), &cmd))
1025 IWL_ERR(mvm, "failed to abort FTM process\n");
1026 }
1027
1028 static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req,
1029 const u8 *addr)
1030 {
1031 int i;
1032
1033 for (i = 0; i < req->n_peers; i++) {
1034 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
1035
1036 if (ether_addr_equal_unaligned(peer->addr, addr))
1037 return i;
1038 }
1039
1040 return -ENOENT;
1041 }
1042
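/*
 * Convert a firmware GP2 timestamp (device time, in usec) to host boot
 * time in nsec: read a synchronized GP2/boottime pair, compute how long
 * ago the firmware timestamp was taken (handling 32-bit GP2 wrap-around)
 * and subtract that from the current boot time.
 */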
1043 static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
1044 {
1045 u32 gp2_ts = le32_to_cpu(fw_gp2_ts);
1046 u32 curr_gp2, diff;
1047 u64 now_from_boot_ns;
1048
1049 iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
1050 &now_from_boot_ns, NULL);
1051
1052 if (curr_gp2 >= gp2_ts)
1053 diff = curr_gp2 - gp2_ts;
1054 else
1055 diff = curr_gp2 + (U32_MAX - gp2_ts + 1);
1056
1057 return now_from_boot_ns - (u64)diff * 1000;
1058 }
1059
1060 static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm,
1061 struct cfg80211_pmsr_result *res)
1062 {
1063 struct iwl_mvm_loc_entry *entry;
1064
1065 list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) {
1066 if (!ether_addr_equal_unaligned(res->addr, entry->addr))
1067 continue;
1068
1069 if (entry->lci_len) {
1070 res->ftm.lci_len = entry->lci_len;
1071 res->ftm.lci = entry->buf;
1072 }
1073
1074 if (entry->civic_len) {
1075 res->ftm.civicloc_len = entry->civic_len;
1076 res->ftm.civicloc = entry->buf + entry->lci_len;
1077 }
1078
1079 /* we found the entry we needed */
1080 break;
1081 }
1082 }
1083
1084 static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
1085 u8 num_of_aps)
1086 {
1087 lockdep_assert_held(&mvm->mutex);
1088
1089 if (request_id != (u8)mvm->ftm_initiator.req->cookie) {
1090 IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n",
1091 request_id, (u8)mvm->ftm_initiator.req->cookie);
1092 return -EINVAL;
1093 }
1094
1095 if (num_of_aps > mvm->ftm_initiator.req->n_peers) {
1096 IWL_ERR(mvm, "FTM range response invalid\n");
1097 return -EINVAL;
1098 }
1099
1100 return 0;
1101 }
1102
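/*
 * Exponentially smooth the reported RTT per responder:
 *   rtt_avg = (alpha * rtt + (100 - alpha) * prev_avg) / 100
 * The tracked average is always updated, but the value reported to
 * userspace is only replaced when the new sample deviates from the
 * average by more than the undershoot/overshoot thresholds. Entries
 * older than the configured age restart from the new sample.
 */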
1103 static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
1104 struct cfg80211_pmsr_result *res)
1105 {
1106 struct iwl_mvm_smooth_entry *resp = NULL, *iter;
1107 s64 rtt_avg, rtt = res->ftm.rtt_avg;
1108 u32 undershoot, overshoot;
1109 u8 alpha;
1110
1111 if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH)
1112 return;
1113
1114 WARN_ON(rtt < 0);
1115
1116 if (res->status != NL80211_PMSR_STATUS_SUCCESS) {
1117 IWL_DEBUG_INFO(mvm,
1118 ": %pM: ignore failed measurement. Status=%u\n",
1119 res->addr, res->status);
1120 return;
1121 }
1122
1123 list_for_each_entry(iter, &mvm->ftm_initiator.smooth.resp, list) {
1124 if (!memcmp(res->addr, iter->addr, ETH_ALEN)) {
1125 resp = iter;
1126 break;
1127 }
1128 }
1129
1130 if (!resp) {
1131 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
1132 if (!resp)
1133 return;
1134
1135 memcpy(resp->addr, res->addr, ETH_ALEN);
1136 list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp);
1137
1138 resp->rtt_avg = rtt;
1139
1140 IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n",
1141 resp->addr, resp->rtt_avg);
1142 goto update_time;
1143 }
1144
1145 if (res->host_time - resp->host_time >
1146 IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) {
1147 resp->rtt_avg = rtt;
1148
1149 IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n",
1150 resp->addr, resp->rtt_avg);
1151 goto update_time;
1152 }
1153
1154 /* Smooth the results based on the tracked RTT average */
1155 undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT;
1156 overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
1157 alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;
1158
1159 rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100);
1160
1161 IWL_DEBUG_INFO(mvm,
1162 "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
1163 resp->addr, resp->rtt_avg, rtt_avg, rtt);
1164
1165 /*
1166 * update the responder's average RTT results regardless of
1167 * the under/over shoot logic below
1168 */
1169 resp->rtt_avg = rtt_avg;
1170
1171 /* smooth the results */
1172 if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) {
1173 res->ftm.rtt_avg = rtt_avg;
1174
1175 IWL_DEBUG_INFO(mvm,
1176 "undershoot: val=%lld\n",
1177 (rtt_avg - rtt));
1178 } else if (rtt_avg < rtt && (rtt - rtt_avg) >
1179 overshoot) {
1180 res->ftm.rtt_avg = rtt_avg;
1181 IWL_DEBUG_INFO(mvm,
1182 "overshoot: val=%lld\n",
1183 (rtt - rtt_avg));
1184 }
1185
1186 update_time:
1187 resp->host_time = res->host_time;
1188 }
1189
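/*
 * rtt_avg is in picoseconds; light travels roughly 0.03 cm/ps, so the
 * one-way distance in cm is about rtt * 0.015 = rtt * 100 / 6666.
 */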
1190 static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
1191 struct cfg80211_pmsr_result *res)
1192 {
1193 s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
1194
1195 IWL_DEBUG_INFO(mvm, "entry %d\n", index);
1196 IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
1197 IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
1198 IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
1199 IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index);
1200 IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
1201 IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
1202 IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread);
1203 IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
1204 IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
1205 IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
1206 IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg);
1207 }
1208
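/*
 * Store the packet numbers reported by the firmware so the next secured
 * ranging request to this responder continues with fresh PN counters.
 */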
1209 static void
1210 iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
1211 struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap)
1212 {
1213 struct iwl_mvm_ftm_pasn_entry *entry;
1214
1215 lockdep_assert_held(&mvm->mutex);
1216
1217 list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
1218 if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr)))
1219 continue;
1220
1221 memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn));
1222 memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn));
1223 return;
1224 }
1225 }
1226
1227 static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm)
1228 {
1229 if (!fw_has_api(&mvm->fw->ucode_capa,
1230 IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ))
1231 return 5;
1232
1233 /* Starting from version 8, the FW advertises the version */
1234 if (mvm->cmd_ver.range_resp >= 8)
1235 return mvm->cmd_ver.range_resp;
1236 else if (fw_has_api(&mvm->fw->ucode_capa,
1237 IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
1238 return 7;
1239
1240 /* The first version of the new range request API */
1241 return 6;
1242 }
1243
1244 static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
1245 {
1246 switch (ver) {
1247 case 9:
1248 case 8:
1249 return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy);
1250 case 7:
1251 return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7);
1252 case 6:
1253 return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6);
1254 case 5:
1255 return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5);
1256 default:
1257 WARN_ONCE(1, "FTM: unsupported range response version %u", ver);
1258 return false;
1259 }
1260 }
1261
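/*
 * Range response notification: validate the payload size against the
 * notification version, translate each AP entry into a cfg80211 pmsr
 * result (status, RTT, RSSI, cached LCI/civic) and report it; complete
 * the request when the firmware marks the last report of the batch.
 */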
1262 void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1263 {
1264 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1265 unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
1266 struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
1267 struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
1268 struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
1269 struct iwl_tof_range_rsp_ntfy *fw_resp_v8 = (void *)pkt->data;
1270 int i;
1271 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
1272 IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
1273 u8 num_of_aps, last_in_batch;
1274 u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm);
1275
1276 lockdep_assert_held(&mvm->mutex);
1277
1278 if (!mvm->ftm_initiator.req) {
1279 return;
1280 }
1281
1282 if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len)))
1283 return;
1284
1285 if (new_api) {
1286 if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id,
1287 fw_resp_v8->num_of_aps))
1288 return;
1289
1290 num_of_aps = fw_resp_v8->num_of_aps;
1291 last_in_batch = fw_resp_v8->last_report;
1292 } else {
1293 if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
1294 fw_resp_v5->num_of_aps))
1295 return;
1296
1297 num_of_aps = fw_resp_v5->num_of_aps;
1298 last_in_batch = fw_resp_v5->last_in_batch;
1299 }
1300
1301 IWL_DEBUG_INFO(mvm, "Range response received\n");
1302 IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n",
1303 mvm->ftm_initiator.req->cookie, num_of_aps);
1304
1305 for (i = 0; i < num_of_aps && i < IWL_TOF_MAX_APS; i++) {
1306 struct cfg80211_pmsr_result result = {};
1307 struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap;
1308 int peer_idx;
1309
1310 if (new_api) {
1311 if (notif_ver >= 8) {
1312 fw_ap = &fw_resp_v8->ap[i];
1313 iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
1314 } else if (notif_ver == 7) {
1315 fw_ap = (void *)&fw_resp_v7->ap[i];
1316 } else {
1317 fw_ap = (void *)&fw_resp_v6->ap[i];
1318 }
1319
1320 result.final = fw_ap->last_burst;
1321 result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
1322 result.ap_tsf_valid = 1;
1323 } else {
1324 /* the first part is the same for old and new APIs */
1325 fw_ap = (void *)&fw_resp_v5->ap[i];
1326 /*
1327 * FIXME: the firmware needs to report this, we don't
1328 * even know the number of bursts the responder picked
1329 * (if we asked it to)
1330 */
1331 result.final = 0;
1332 }
1333
1334 peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req,
1335 fw_ap->bssid);
1336 if (peer_idx < 0) {
1337 IWL_WARN(mvm,
1338 "Unknown address (%pM, target #%d) in FTM response\n",
1339 fw_ap->bssid, i);
1340 continue;
1341 }
1342
1343 switch (fw_ap->measure_status) {
1344 case IWL_TOF_ENTRY_SUCCESS:
1345 result.status = NL80211_PMSR_STATUS_SUCCESS;
1346 break;
1347 case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
1348 result.status = NL80211_PMSR_STATUS_TIMEOUT;
1349 break;
1350 case IWL_TOF_ENTRY_NO_RESPONSE:
1351 result.status = NL80211_PMSR_STATUS_FAILURE;
1352 result.ftm.failure_reason =
1353 NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
1354 break;
1355 case IWL_TOF_ENTRY_REQUEST_REJECTED:
1356 result.status = NL80211_PMSR_STATUS_FAILURE;
1357 result.ftm.failure_reason =
1358 NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
1359 result.ftm.busy_retry_time = fw_ap->refusal_period;
1360 break;
1361 default:
1362 result.status = NL80211_PMSR_STATUS_FAILURE;
1363 result.ftm.failure_reason =
1364 NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
1365 break;
1366 }
1367 memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
1368 result.host_time = iwl_mvm_ftm_get_host_time(mvm,
1369 fw_ap->timestamp);
1370 result.type = NL80211_PMSR_TYPE_FTM;
1371 result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx];
1372 mvm->ftm_initiator.responses[peer_idx]++;
1373 result.ftm.rssi_avg = fw_ap->rssi;
1374 result.ftm.rssi_avg_valid = 1;
1375 result.ftm.rssi_spread = fw_ap->rssi_spread;
1376 result.ftm.rssi_spread_valid = 1;
1377 result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
1378 result.ftm.rtt_avg_valid = 1;
1379 result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
1380 result.ftm.rtt_variance_valid = 1;
1381 result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
1382 result.ftm.rtt_spread_valid = 1;
1383
1384 iwl_mvm_ftm_get_lci_civic(mvm, &result);
1385
1386 iwl_mvm_ftm_rtt_smoothing(mvm, &result);
1387
1388 cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
1389 mvm->ftm_initiator.req,
1390 &result, GFP_KERNEL);
1391
1392 if (fw_has_api(&mvm->fw->ucode_capa,
1393 IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
1394 IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n",
1395 fw_ap->rttConfidence);
1396
1397 iwl_mvm_debug_range_resp(mvm, i, &result);
1398 }
1399
1400 if (last_in_batch) {
1401 cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
1402 mvm->ftm_initiator.req,
1403 GFP_KERNEL);
1404 iwl_mvm_ftm_reset(mvm);
1405 }
1406 }
1407
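/*
 * Handle an FTM LCI/civic notification: parse the measurement report
 * elements out of the action frame and cache them per BSSID so they can
 * be attached to the range response results.
 */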
1408 void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1409 {
1410 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1411 const struct ieee80211_mgmt *mgmt = (void *)pkt->data;
1412 size_t len = iwl_rx_packet_payload_len(pkt);
1413 struct iwl_mvm_loc_entry *entry;
1414 const u8 *ies, *lci, *civic, *msr_ie;
1415 size_t ies_len, lci_len = 0, civic_len = 0;
1416 size_t baselen = IEEE80211_MIN_ACTION_SIZE +
1417 sizeof(mgmt->u.action.u.ftm);
1418 static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI;
1419 static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC;
1420
1421 if (len <= baselen)
1422 return;
1423
1424 lockdep_assert_held(&mvm->mutex);
1425
1426 ies = mgmt->u.action.u.ftm.variable;
1427 ies_len = len - baselen;
1428
1429 msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
1430 &rprt_type_lci, 1, 4);
1431 if (msr_ie) {
1432 lci = msr_ie + 2;
1433 lci_len = msr_ie[1];
1434 }
1435
1436 msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
1437 &rprt_type_civic, 1, 4);
1438 if (msr_ie) {
1439 civic = msr_ie + 2;
1440 civic_len = msr_ie[1];
1441 }
1442
1443 entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL);
1444 if (!entry)
1445 return;
1446
1447 memcpy(entry->addr, mgmt->bssid, ETH_ALEN);
1448
1449 entry->lci_len = lci_len;
1450 if (lci_len)
1451 memcpy(entry->buf, lci, lci_len);
1452
1453 entry->civic_len = civic_len;
1454 if (civic_len)
1455 memcpy(entry->buf + lci_len, civic, civic_len);
1456
1457 list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list);
1458 }
1459