// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2024 Intel Corporation
 */
#include <linux/etherdevice.h>
#include <linux/math64.h>
#include <net/cfg80211.h>
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "constants.h"

struct iwl_mvm_loc_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	u8 lci_len, civic_len;
	u8 buf[];
};

struct iwl_mvm_smooth_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	s64 rtt_avg;
	u64 host_time;
};

enum iwl_mvm_pasn_flags {
	IWL_MVM_PASN_FLAG_HAS_HLTK = BIT(0),
};

struct iwl_mvm_ftm_pasn_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	u8 hltk[HLTK_11AZ_LEN];
	u8 tk[TK_11AZ_LEN];
	u8 cipher;
	u8 tx_pn[IEEE80211_CCMP_PN_LEN];
	u8 rx_pn[IEEE80211_CCMP_PN_LEN];
	u32 flags;
};

struct iwl_mvm_ftm_iter_data {
	u8 *cipher;
	u8 *bssid;
	u8 *tk;
};

int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
			     u8 *hltk, u32 hltk_len)
{
	struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn),
						      GFP_KERNEL);
	u32 expected_tk_len;

	lockdep_assert_held(&mvm->mutex);

	if (!pasn)
		return -ENOBUFS;

	iwl_mvm_ftm_remove_pasn_sta(mvm, addr);

	pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);

	switch (pasn->cipher) {
	case IWL_LOCATION_CIPHER_CCMP_128:
	case IWL_LOCATION_CIPHER_GCMP_128:
		expected_tk_len = WLAN_KEY_LEN_CCMP;
		break;
	case IWL_LOCATION_CIPHER_GCMP_256:
		expected_tk_len = WLAN_KEY_LEN_GCMP_256;
		break;
	default:
		goto out;
	}

	/*
	 * If associated to this AP and already have security context,
	 * the TK is already configured for this station, so it
	 * shouldn't be set again here.
	 */
	if (vif->cfg.assoc) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
		struct ieee80211_bss_conf *link_conf;
		unsigned int link_id;
		struct ieee80211_sta *sta;
		u8 sta_id;

		rcu_read_lock();
		for_each_vif_active_link(vif, link_conf, link_id) {
			if (memcmp(addr, link_conf->bssid, ETH_ALEN))
				continue;

			sta_id = mvmvif->link[link_id]->ap_sta_id;
			sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
			if (!IS_ERR_OR_NULL(sta) && sta->mfp)
				expected_tk_len = 0;
			break;
		}
		rcu_read_unlock();
	}

	if (tk_len != expected_tk_len ||
	    (hltk_len && hltk_len != sizeof(pasn->hltk))) {
		IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n",
			tk_len, hltk_len);
		goto out;
	}

	if (!expected_tk_len && !hltk_len) {
		IWL_ERR(mvm, "TK and HLTK not set\n");
		goto out;
	}

	memcpy(pasn->addr, addr, sizeof(pasn->addr));

	if (hltk_len) {
		memcpy(pasn->hltk, hltk, sizeof(pasn->hltk));
		pasn->flags |= IWL_MVM_PASN_FLAG_HAS_HLTK;
	}

	if (tk && tk_len)
		memcpy(pasn->tk, tk, sizeof(pasn->tk));

	list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list);
	return 0;
out:
	kfree(pasn);
	return -EINVAL;
}

void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr)
{
	struct iwl_mvm_ftm_pasn_entry *entry, *prev;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list,
				 list) {
		if (memcmp(entry->addr, addr, sizeof(entry->addr)))
			continue;

		list_del(&entry->list);
		kfree(entry);
		return;
	}
}

static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
{
	struct iwl_mvm_loc_entry *e, *t;

	mvm->ftm_initiator.req = NULL;
	mvm->ftm_initiator.req_wdev = NULL;
	memset(mvm->ftm_initiator.responses, 0,
	       sizeof(mvm->ftm_initiator.responses));

	list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
		list_del(&e->list);
		kfree(e);
	}
}

void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
{
	struct cfg80211_pmsr_result result = {
		.status = NL80211_PMSR_STATUS_FAILURE,
		.final = 1,
		.host_time = ktime_get_boottime_ns(),
		.type = NL80211_PMSR_TYPE_FTM,
	};
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (!mvm->ftm_initiator.req)
		return;

	for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) {
		memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr,
		       ETH_ALEN);
		result.ftm.burst_index = mvm->ftm_initiator.responses[i];

		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
				     mvm->ftm_initiator.req,
				     &result, GFP_KERNEL);
	}

	cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
			       mvm->ftm_initiator.req, GFP_KERNEL);
	iwl_mvm_ftm_reset(mvm);
}

void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm)
{
	INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp);

	IWL_DEBUG_INFO(mvm,
		       "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n",
		       IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT);
}

void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm)
{
	struct iwl_mvm_smooth_entry *se, *st;

	list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp,
				 list) {
		list_del(&se->list);
		kfree(se);
	}
}

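/* Map the firmware's range request status code to a standard errno value */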
static int
iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
{
	switch (s) {
	case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS:
		return 0;
	case IWL_TOF_RANGE_REQUEST_STATUS_BUSY:
		return -EBUSY;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct iwl_tof_range_req_cmd_v5 *cmd,
			       struct cfg80211_pmsr_request *req)
{
	int i;

	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/* use maximum for "no timeout" or bigger than what we can do */
	if (!req->timeout || req->timeout > 255 * 100)
		cmd->req_timeout = 255;
	else
		cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100);

	/*
	 * We treat it always as random, since if not we'll
	 * have filled our local address there instead.
	 */
	cmd->macaddr_random = 1;
	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->cfg.assoc)
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
	else
		eth_broadcast_addr(cmd->range_req_bssid);
}

static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
#if defined(__linux__)
				   struct iwl_tof_range_req_cmd_v9 *cmd,
#elif defined(__FreeBSD__)
				   struct iwl_tof_range_req_cmd_v9 *cmd, /* XXX-BZ Probably better solved by a common struct in fw for top parts of the struct. */
#endif
				   struct cfg80211_pmsr_request *req)
{
	int i;

	cmd->initiator_flags =
		cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
			    IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/*
	 * Use a large value for "no timeout". Don't use the maximum value
	 * because of fw limitations.
	 */
	if (req->timeout)
		cmd->req_timeout_ms = cpu_to_le32(req->timeout);
	else
		cmd->req_timeout_ms = cpu_to_le32(0xfffff);

	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->cfg.assoc) {
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);

		/* AP's TSF is only relevant if associated */
		for (i = 0; i < req->n_peers; i++) {
			if (req->peers[i].report_ap_tsf) {
				struct iwl_mvm_vif *mvmvif =
					iwl_mvm_vif_from_mac80211(vif);

				cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
				return;
			}
		}
	} else {
		eth_broadcast_addr(cmd->range_req_bssid);
	}

	/* Don't report AP's TSF */
	cmd->tsf_mac_id = cpu_to_le32(0xff);
}

static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct iwl_tof_range_req_cmd_v8 *cmd,
			       struct cfg80211_pmsr_request *req)
{
	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req);
}

static int
iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      u8 *channel, u8 *bandwidth,
			      u8 *ctrl_ch_position)
{
	u32 freq = peer->chandef.chan->center_freq;

	*channel = ieee80211_frequency_to_channel(freq);

	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		*bandwidth = IWL_TOF_BW_20_LEGACY;
		break;
	case NL80211_CHAN_WIDTH_20:
		*bandwidth = IWL_TOF_BW_20_HT;
		break;
	case NL80211_CHAN_WIDTH_40:
		*bandwidth = IWL_TOF_BW_40;
		break;
	case NL80211_CHAN_WIDTH_80:
		*bandwidth = IWL_TOF_BW_80;
		break;
	default:
		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;

	return 0;
}

static int
iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      u8 *channel, u8 *format_bw,
			      u8 *ctrl_ch_position)
{
	u32 freq = peer->chandef.chan->center_freq;
	u8 cmd_ver;

	*channel = ieee80211_frequency_to_channel(freq);

	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_20:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_40:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		*format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_80:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
		*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_160:
		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
						WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
						IWL_FW_CMD_VER_UNKNOWN);

		if (cmd_ver >= 13) {
			*format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
			*format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
			break;
		}
		fallthrough;
	default:
		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

	/* non EDCA based measurement must use HE preamble */
	if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
		*format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;

	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;

	return 0;
}

static int
iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v2 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
					    &target->bandwidth,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	target->measure_type = 0; /* regular two-sided FTM */
	target->retries_per_sample = peer->ftm.ftmr_retries;
	target->asap_mode = peer->ftm.asap;
	target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK;

	if (peer->ftm.request_lci)
		target->location_req |= IWL_TOF_LOC_LCI;
	if (peer->ftm.request_civicloc)
		target->location_req |= IWL_TOF_LOC_CIVIC;

	target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO;

	return 0;
}

#define FTM_SET_FLAG(flag) (*flags |= \
			    cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))

static void
iwl_mvm_ftm_set_target_flags(struct iwl_mvm *mvm,
			     struct cfg80211_pmsr_request_peer *peer,
			     __le32 *flags)
{
	*flags = cpu_to_le32(0);

	if (peer->ftm.asap)
		FTM_SET_FLAG(ASAP);

	if (peer->ftm.request_lci)
		FTM_SET_FLAG(LCI_REQUEST);

	if (peer->ftm.request_civicloc)
		FTM_SET_FLAG(CIVIC_REQUEST);

	if (IWL_MVM_FTM_INITIATOR_DYNACK)
		FTM_SET_FLAG(DYN_ACK);

	if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
		FTM_SET_FLAG(ALGO_LR);
	else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
		FTM_SET_FLAG(ALGO_FFT);

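	/* 802.11az trigger-based (TB) vs. non-trigger-based (NTB) ranging */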
	if (peer->ftm.trigger_based)
		FTM_SET_FLAG(TB);
	else if (peer->ftm.non_trigger_based)
		FTM_SET_FLAG(NON_TB);

	if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
	    peer->ftm.lmr_feedback)
		FTM_SET_FLAG(LMR_FEEDBACK);
}

static void
iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      struct iwl_tof_range_req_ap_entry_v6 *target)
{
	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	target->ftmr_max_retries = peer->ftm.ftmr_retries;
	iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
}

static int
iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v3 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
					    &target->bandwidth,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	/*
	 * Versions 3 and 4 have some common fields, so
	 * iwl_mvm_ftm_put_target_common() can be used for version 3 too.
	 */
	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);

	return 0;
}

static int
iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v4 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);

	return 0;
}

static int iwl_mvm_ftm_set_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct cfg80211_pmsr_request_peer *peer,
			       u8 *sta_id, __le32 *flags)
{
	if (vif->cfg.assoc) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
		struct ieee80211_sta *sta;
		struct ieee80211_bss_conf *link_conf;
		unsigned int link_id;

		rcu_read_lock();
		for_each_vif_active_link(vif, link_conf, link_id) {
			if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN))
				continue;

			*sta_id = mvmvif->link[link_id]->ap_sta_id;
			sta = rcu_dereference(mvm->fw_id_to_mac_id[*sta_id]);
			if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
				rcu_read_unlock();
				return PTR_ERR_OR_ZERO(sta);
			}

			if (sta->mfp && (peer->ftm.trigger_based ||
					 peer->ftm.non_trigger_based))
				FTM_SET_FLAG(PMF);
			break;
		}
		rcu_read_unlock();

#ifdef CONFIG_IWLWIFI_DEBUGFS
		if (mvmvif->ftm_unprotected) {
			*sta_id = IWL_MVM_INVALID_STA;
			*flags &= ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF);
		}
#endif
	} else {
		*sta_id = IWL_MVM_INVALID_STA;
	}

	return 0;
}

static int
iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		       struct cfg80211_pmsr_request_peer *peer,
		       struct iwl_tof_range_req_ap_entry_v6 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	iwl_mvm_ftm_put_target_common(mvm, peer, target);

	iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
			    &target->initiator_ap_flags);

	/*
	 * TODO: Beacon interval is currently unknown, so use the common value
	 * of 100 TUs.
	 */
	target->beacon_interval = cpu_to_le16(100);
	return 0;
}

static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
{
	u32 status;
	int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);

	if (!err && status) {
		IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
			status);
		err = iwl_ftm_range_request_status_to_err(status);
	}

	return err;
}

static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v5 cmd_v5;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd_v5,
		.len[0] = sizeof(cmd_v5),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);

	for (i = 0; i < cmd_v5.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v7 cmd_v7;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd_v7,
		.len[0] = sizeof(cmd_v7),
	};
	u8 i;
	int err;

	/*
	 * Versions 7 and 8 have the same structure except for the responders
	 * list, so iwl_mvm_ftm_cmd_v8() can be used for version 7 too.
	 */
	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req);

	for (i = 0; i < cmd_v7.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v8 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v9 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static void iter(struct ieee80211_hw *hw,
		 struct ieee80211_vif *vif,
		 struct ieee80211_sta *sta,
		 struct ieee80211_key_conf *key,
		 void *data)
{
	struct iwl_mvm_ftm_iter_data *target = data;

	if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
		return;

	WARN_ON(!sta->mfp);

	target->tk = key->key;
	*target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
	WARN_ON(*target->cipher == IWL_LOCATION_CIPHER_INVALID);
}

static void
iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				u8 *bssid, u8 *cipher, u8 *hltk, u8 *tk,
				u8 *rx_pn, u8 *tx_pn, __le32 *flags)
{
	struct iwl_mvm_ftm_pasn_entry *entry;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (mvmvif->ftm_unprotected)
		return;
#endif

	if (!(le32_to_cpu(*flags) & (IWL_INITIATOR_AP_FLAGS_NON_TB |
				     IWL_INITIATOR_AP_FLAGS_TB)))
		return;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
		if (memcmp(entry->addr, bssid, sizeof(entry->addr)))
			continue;

		*cipher = entry->cipher;

		if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK)
			memcpy(hltk, entry->hltk, sizeof(entry->hltk));
		else
			memset(hltk, 0, sizeof(entry->hltk));

		if (vif->cfg.assoc &&
		    !memcmp(vif->bss_conf.bssid, bssid, ETH_ALEN)) {
			struct iwl_mvm_ftm_iter_data target;

			target.bssid = bssid;
			target.cipher = cipher; /* iter() writes through this pointer */
			ieee80211_iter_keys(mvm->hw, vif, iter, &target);
		} else {
			memcpy(tk, entry->tk, sizeof(entry->tk));
		}

		memcpy(rx_pn, entry->rx_pn, sizeof(entry->rx_pn));
		memcpy(tx_pn, entry->tx_pn, sizeof(entry->tx_pn));

		FTM_SET_FLAG(SECURED);
		return;
	}
}

static int
iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v7 *target)
{
	int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);

	if (err)
		return err;

	iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
					&target->cipher, target->hltk,
					target->tk, target->rx_pn,
					target->tx_pn,
					&target->initiator_ap_flags);
	return err;
}

static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v11 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static void
iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm,
			   struct iwl_tof_range_req_ap_entry_v8 *target)
{
	/* Only 2 STS are supported on Tx */
	u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ?
		1 : IWL_MVM_FTM_I2R_MAX_STS;

	target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
		(IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS);
	target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
		(i2r_max_sts << IWL_LOCATION_MAX_STS_POS);
	target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF;
	target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF;
}

static int
iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v8 *target)
{
	u32 flags;
	int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);

	if (ret)
		return ret;

	iwl_mvm_ftm_set_ndp_params(mvm, target);

	/*
	 * If secure LTF is turned off, replace the flag with PMF only
	 */
	flags = le32_to_cpu(target->initiator_ap_flags);
	if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
		if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
			flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;

		flags |= IWL_INITIATOR_AP_FLAGS_PMF;
		target->initiator_ap_flags = cpu_to_le32(flags);
	}

	return 0;
}

static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v12 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v13 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target);
		if (err)
			return err;

		if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
			target->bss_color = peer->ftm.bss_color;

		if (peer->ftm.non_trigger_based) {
			target->min_time_between_msr =
				cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
			target->burst_period =
				cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
		} else {
			target->min_time_between_msr = cpu_to_le16(0);
		}

		target->band =
			iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int
iwl_mvm_ftm_put_target_v10(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			   struct cfg80211_pmsr_request_peer *peer,
			   struct iwl_tof_range_req_ap_entry_v10 *target)
{
	u32 i2r_max_sts, flags;
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
	iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
			    &target->initiator_ap_flags);
	iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
					&target->cipher, target->hltk,
					target->tk, target->rx_pn,
					target->tx_pn,
					&target->initiator_ap_flags);

	i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
		IWL_MVM_FTM_I2R_MAX_STS;

	target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
		(IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) |
		(IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
	target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
		(i2r_max_sts << IWL_LOCATION_MAX_STS_POS) |
		(IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);

	if (peer->ftm.non_trigger_based) {
		target->min_time_between_msr =
			cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
		target->burst_period =
			cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
	} else {
		target->min_time_between_msr = cpu_to_le16(0);
	}

	target->band =
		iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);

	/*
	 * TODO: Beacon interval is currently unknown, so use the common value
	 * of 100 TUs.
	 */
	target->beacon_interval = cpu_to_le16(100);

	/*
	 * If secure LTF is turned off, replace the flag with PMF only
	 */
	flags = le32_to_cpu(target->initiator_ap_flags);
	if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
		if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
			flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;

		flags |= IWL_INITIATOR_AP_FLAGS_PMF;
		target->initiator_ap_flags = cpu_to_le32(flags);
	}

	return 0;
}

static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v14 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v10 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v10(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		      struct cfg80211_pmsr_request *req)
{
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
	int err;

	lockdep_assert_held(&mvm->mutex);

	if (mvm->ftm_initiator.req)
		return -EBUSY;

	if (new_api) {
		u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
						   WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
						   IWL_FW_CMD_VER_UNKNOWN);

		switch (cmd_ver) {
		case 14:
			err = iwl_mvm_ftm_start_v14(mvm, vif, req);
			break;
		case 13:
			err = iwl_mvm_ftm_start_v13(mvm, vif, req);
			break;
		case 12:
			err = iwl_mvm_ftm_start_v12(mvm, vif, req);
			break;
		case 11:
			err = iwl_mvm_ftm_start_v11(mvm, vif, req);
			break;
		case 9:
		case 10:
			err = iwl_mvm_ftm_start_v9(mvm, vif, req);
			break;
		case 8:
			err = iwl_mvm_ftm_start_v8(mvm, vif, req);
			break;
		default:
			err = iwl_mvm_ftm_start_v7(mvm, vif, req);
			break;
		}
	} else {
		err = iwl_mvm_ftm_start_v5(mvm, vif, req);
	}

	if (!err) {
		mvm->ftm_initiator.req = req;
		mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
	}

	return err;
}

void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_abort_cmd cmd = {
		.request_id = req->cookie,
	};

	lockdep_assert_held(&mvm->mutex);

	if (req != mvm->ftm_initiator.req)
		return;

	iwl_mvm_ftm_reset(mvm);

	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
				 0, sizeof(cmd), &cmd))
		IWL_ERR(mvm, "failed to abort FTM process\n");
}

static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req,
				 const u8 *addr)
{
	int i;

	for (i = 0; i < req->n_peers; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		if (ether_addr_equal_unaligned(peer->addr, addr))
			return i;
	}

	return -ENOENT;
}

static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
{
	u32 gp2_ts = le32_to_cpu(fw_gp2_ts);
	u32 curr_gp2, diff;
	u64 now_from_boot_ns;

	iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
			      &now_from_boot_ns, NULL);

	if (curr_gp2 >= gp2_ts)
		diff = curr_gp2 - gp2_ts;
	else
		diff = curr_gp2 + (U32_MAX - gp2_ts + 1);

	return now_from_boot_ns - (u64)diff * 1000;
}

static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm,
				      struct cfg80211_pmsr_result *res)
{
	struct iwl_mvm_loc_entry *entry;

	list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) {
		if (!ether_addr_equal_unaligned(res->addr, entry->addr))
			continue;

		if (entry->lci_len) {
			res->ftm.lci_len = entry->lci_len;
			res->ftm.lci = entry->buf;
		}

		if (entry->civic_len) {
			res->ftm.civicloc_len = entry->civic_len;
			res->ftm.civicloc = entry->buf + entry->lci_len;
		}

		/* we found the entry we needed */
		break;
	}
}

static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
					u8 num_of_aps)
{
	lockdep_assert_held(&mvm->mutex);

	if (request_id != (u8)mvm->ftm_initiator.req->cookie) {
		IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n",
			request_id, (u8)mvm->ftm_initiator.req->cookie);
		return -EINVAL;
	}

	if (num_of_aps > mvm->ftm_initiator.req->n_peers) {
		IWL_ERR(mvm, "FTM range response invalid\n");
		return -EINVAL;
	}

	return 0;
}

static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
				      struct cfg80211_pmsr_result *res)
{
	struct iwl_mvm_smooth_entry *resp = NULL, *iter;
	s64 rtt_avg, rtt = res->ftm.rtt_avg;
	u32 undershoot, overshoot;
	u8 alpha;

	if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH)
		return;

	WARN_ON(rtt < 0);

	if (res->status != NL80211_PMSR_STATUS_SUCCESS) {
		IWL_DEBUG_INFO(mvm,
			       ": %pM: ignore failed measurement. Status=%u\n",
			       res->addr, res->status);
		return;
	}

	list_for_each_entry(iter, &mvm->ftm_initiator.smooth.resp, list) {
		if (!memcmp(res->addr, iter->addr, ETH_ALEN)) {
			resp = iter;
			break;
		}
	}

	if (!resp) {
		resp = kzalloc(sizeof(*resp), GFP_KERNEL);
		if (!resp)
			return;

		memcpy(resp->addr, res->addr, ETH_ALEN);
		list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp);

		resp->rtt_avg = rtt;

		IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n",
			       resp->addr, resp->rtt_avg);
		goto update_time;
	}

	if (res->host_time - resp->host_time >
	    IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) {
		resp->rtt_avg = rtt;

		IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n",
			       resp->addr, resp->rtt_avg);
		goto update_time;
	}

	/* Smooth the results based on the tracked RTT average */
	undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT;
	overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
	alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;

	rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100);

	IWL_DEBUG_INFO(mvm,
		       "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
		       resp->addr, resp->rtt_avg, rtt_avg, rtt);

	/*
	 * update the responder's average RTT results regardless of
	 * the under/over shoot logic below
	 */
	resp->rtt_avg = rtt_avg;

	/* smooth the results */
	if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) {
		res->ftm.rtt_avg = rtt_avg;

		IWL_DEBUG_INFO(mvm,
			       "undershoot: val=%lld\n",
			       (rtt_avg - rtt));
	} else if (rtt_avg < rtt && (rtt - rtt_avg) >
		   overshoot) {
		res->ftm.rtt_avg = rtt_avg;
		IWL_DEBUG_INFO(mvm,
			       "overshoot: val=%lld\n",
			       (rtt - rtt_avg));
	}

update_time:
	resp->host_time = res->host_time;
}

static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
				     struct cfg80211_pmsr_result *res)
{
	s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);

	IWL_DEBUG_INFO(mvm, "entry %d\n", index);
	IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
	IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
	IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
	IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index);
	IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
	IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
	IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread);
	IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
	IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
	IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
	IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg);
}

static void
iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
			   struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap)
{
	struct iwl_mvm_ftm_pasn_entry *entry;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
		if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr)))
			continue;

		memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn));
		memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn));
		return;
	}
}

static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm)
{
	if (!fw_has_api(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ))
		return 5;

	/* Starting from version 8, the FW advertises the version */
	if (mvm->cmd_ver.range_resp >= 8)
		return mvm->cmd_ver.range_resp;
	else if (fw_has_api(&mvm->fw->ucode_capa,
			    IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
		return 7;

	/* The first version of the new range request API */
	return 6;
}

static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
{
	switch (ver) {
	case 9:
	case 8:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8);
	case 7:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7);
	case 6:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6);
	case 5:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5);
	default:
		WARN_ONCE(1, "FTM: unsupported range response version %u", ver);
		return false;
	}
}

void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data;
	int i;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
	u8 num_of_aps, last_in_batch;
	u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm);

	lockdep_assert_held(&mvm->mutex);

	if (!mvm->ftm_initiator.req)
		return;

	if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len)))
		return;

	if (new_api) {
		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id,
						 fw_resp_v8->num_of_aps))
			return;

		num_of_aps = fw_resp_v8->num_of_aps;
		last_in_batch = fw_resp_v8->last_report;
	} else {
		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
						 fw_resp_v5->num_of_aps))
			return;

		num_of_aps = fw_resp_v5->num_of_aps;
		last_in_batch = fw_resp_v5->last_in_batch;
	}

	IWL_DEBUG_INFO(mvm, "Range response received\n");
	IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n",
		       mvm->ftm_initiator.req->cookie, num_of_aps);

	for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
		struct cfg80211_pmsr_result result = {};
		struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap;
		int peer_idx;

		if (new_api) {
			if (notif_ver >= 8) {
				fw_ap = &fw_resp_v8->ap[i];
				iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
			} else if (notif_ver == 7) {
				fw_ap = (void *)&fw_resp_v7->ap[i];
			} else {
				fw_ap = (void *)&fw_resp_v6->ap[i];
			}

			result.final = fw_ap->last_burst;
			result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
			result.ap_tsf_valid = 1;
		} else {
			/* the first part is the same for old and new APIs */
			fw_ap = (void *)&fw_resp_v5->ap[i];
			/*
			 * FIXME: the firmware needs to report this, we don't
			 * even know the number of bursts the responder picked
			 * (if we asked it to)
			 */
			result.final = 0;
		}

		peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req,
						 fw_ap->bssid);
		if (peer_idx < 0) {
			IWL_WARN(mvm,
				 "Unknown address (%pM, target #%d) in FTM response\n",
				 fw_ap->bssid, i);
			continue;
		}

		switch (fw_ap->measure_status) {
		case IWL_TOF_ENTRY_SUCCESS:
			result.status = NL80211_PMSR_STATUS_SUCCESS;
			break;
		case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
			result.status = NL80211_PMSR_STATUS_TIMEOUT;
			break;
		case IWL_TOF_ENTRY_NO_RESPONSE:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
			break;
		case IWL_TOF_ENTRY_REQUEST_REJECTED:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
			result.ftm.busy_retry_time = fw_ap->refusal_period;
			break;
		default:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
			break;
		}
		memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
		result.host_time = iwl_mvm_ftm_get_host_time(mvm,
							     fw_ap->timestamp);
		result.type = NL80211_PMSR_TYPE_FTM;
		result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx];
		mvm->ftm_initiator.responses[peer_idx]++;
		result.ftm.rssi_avg = fw_ap->rssi;
		result.ftm.rssi_avg_valid = 1;
		result.ftm.rssi_spread = fw_ap->rssi_spread;
		result.ftm.rssi_spread_valid = 1;
		result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
		result.ftm.rtt_avg_valid = 1;
		result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
		result.ftm.rtt_variance_valid = 1;
		result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
		result.ftm.rtt_spread_valid = 1;

		iwl_mvm_ftm_get_lci_civic(mvm, &result);

		iwl_mvm_ftm_rtt_smoothing(mvm, &result);

		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
				     mvm->ftm_initiator.req,
				     &result, GFP_KERNEL);

		if (fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
			IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n",
				       fw_ap->rttConfidence);

		iwl_mvm_debug_range_resp(mvm, i, &result);
	}

	if (last_in_batch) {
		cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
				       mvm->ftm_initiator.req,
				       GFP_KERNEL);
		iwl_mvm_ftm_reset(mvm);
	}
}

void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	const struct ieee80211_mgmt *mgmt = (void *)pkt->data;
	size_t len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mvm_loc_entry *entry;
	const u8 *ies, *lci, *civic, *msr_ie;
	size_t ies_len, lci_len = 0, civic_len = 0;
	size_t baselen = IEEE80211_MIN_ACTION_SIZE +
			 sizeof(mgmt->u.action.u.ftm);
	static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI;
	static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC;

	if (len <= baselen)
		return;

	lockdep_assert_held(&mvm->mutex);

	ies = mgmt->u.action.u.ftm.variable;
	ies_len = len - baselen;

	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
					&rprt_type_lci, 1, 4);
	if (msr_ie) {
		lci = msr_ie + 2;
		lci_len = msr_ie[1];
	}

	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
					&rprt_type_civic, 1, 4);
	if (msr_ie) {
		civic = msr_ie + 2;
		civic_len = msr_ie[1];
	}

	entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL);
	if (!entry)
		return;

	memcpy(entry->addr, mgmt->bssid, ETH_ALEN);

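	/* buf[] holds the LCI report first, followed by the civic report */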
	entry->lci_len = lci_len;
	if (lci_len)
		memcpy(entry->buf, lci, lci_len);

	entry->civic_len = civic_len;
	if (civic_len)
		memcpy(entry->buf + lci_len, civic, civic_len);

	list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list);
}