// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2024 Intel Corporation
 */
#include <linux/etherdevice.h>
#include <linux/math64.h>
#include <net/cfg80211.h>
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "constants.h"

/*
 * LCI/civic location data reported by a responder, stored per BSSID on
 * mvm->ftm_initiator.loc_list.  buf[] holds lci_len bytes of LCI data
 * followed by civic_len bytes of civic location data.
 */
struct iwl_mvm_loc_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	u8 lci_len, civic_len;
	u8 buf[];
};

/*
 * Per-responder RTT smoothing state, stored on
 * mvm->ftm_initiator.smooth.resp.  host_time is the host timestamp of
 * the last measurement that updated rtt_avg.
 */
struct iwl_mvm_smooth_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	s64 rtt_avg;
	u64 host_time;
};

enum iwl_mvm_pasn_flags {
	IWL_MVM_PASN_FLAG_HAS_HLTK = BIT(0),
};

/*
 * Security material for PASN (secured) ranging with a single responder,
 * stored on mvm->ftm_initiator.pasn_list.
 */
struct iwl_mvm_ftm_pasn_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	u8 hltk[HLTK_11AZ_LEN];
	u8 tk[TK_11AZ_LEN];
	u8 cipher;
	u8 tx_pn[IEEE80211_CCMP_PN_LEN];
	u8 rx_pn[IEEE80211_CCMP_PN_LEN];
	u32 flags;
};

/*
 * Add a PASN responder entry used for secured ranging towards @addr.
 * @tk/@tk_len and @hltk/@hltk_len carry the temporal key and HLTK; the
 * key lengths are validated against the cipher before the entry is
 * added to the PASN list.
 * Returns 0 on success, -ENOBUFS on allocation failure, or -EINVAL for
 * an unsupported cipher / invalid key lengths.
 */
int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
			     u8 *hltk, u32 hltk_len)
{
	struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn),
						      GFP_KERNEL);
	u32 expected_tk_len;

	lockdep_assert_held(&mvm->mutex);

	if (!pasn)
		return -ENOBUFS;

	pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);

	switch (pasn->cipher) {
	case IWL_LOCATION_CIPHER_CCMP_128:
	case IWL_LOCATION_CIPHER_GCMP_128:
		expected_tk_len = WLAN_KEY_LEN_CCMP;
		break;
	case IWL_LOCATION_CIPHER_GCMP_256:
		expected_tk_len = WLAN_KEY_LEN_GCMP_256;
		break;
	default:
		goto out;
	}

	/*
	 * If associated to this AP and already have security context,
	 * the TK is already configured for this station, so it
	 * shouldn't be set again here.
	 */
	if (vif->cfg.assoc) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
		struct ieee80211_bss_conf *link_conf;
		unsigned int link_id;
		struct ieee80211_sta *sta;
		u8 sta_id;

		rcu_read_lock();
		for_each_vif_active_link(vif, link_conf, link_id) {
			if (memcmp(addr, link_conf->bssid, ETH_ALEN))
				continue;

			sta_id = mvmvif->link[link_id]->ap_sta_id;
			sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
			/* with MFP the existing key is used: no TK expected */
			if (!IS_ERR_OR_NULL(sta) && sta->mfp)
				expected_tk_len = 0;
			break;
		}
		rcu_read_unlock();
	}

	if (tk_len != expected_tk_len ||
	    (hltk_len && hltk_len != sizeof(pasn->hltk))) {
		IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n",
			tk_len, hltk_len);
		goto out;
	}

	if (!expected_tk_len && !hltk_len) {
		IWL_ERR(mvm, "TK and HLTK not set\n");
		goto out;
	}

	memcpy(pasn->addr, addr, sizeof(pasn->addr));

	if (hltk_len) {
		memcpy(pasn->hltk, hltk, sizeof(pasn->hltk));
		pasn->flags |= IWL_MVM_PASN_FLAG_HAS_HLTK;
	}

	if (tk && tk_len)
		memcpy(pasn->tk, tk, sizeof(pasn->tk));

	list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list);
	return 0;
out:
	kfree(pasn);
	return -EINVAL;
}

/* Remove and free the PASN entry matching @addr, if any */
void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr)
{
	struct iwl_mvm_ftm_pasn_entry *entry, *prev;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list,
				 list) {
		if (memcmp(entry->addr, addr, sizeof(entry->addr)))
			continue;

		list_del(&entry->list);
		kfree(entry);
		return;
	}
}

/* Forget the active request and drop all cached LCI/civic data */
static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
{
	struct iwl_mvm_loc_entry *e, *t;

	mvm->ftm_initiator.req = NULL;
	mvm->ftm_initiator.req_wdev = NULL;
	memset(mvm->ftm_initiator.responses, 0,
	       sizeof(mvm->ftm_initiator.responses));

	list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
		list_del(&e->list);
		kfree(e);
	}
}

/*
 * Called across a firmware restart: report a failure result for every
 * peer of the pending measurement request, complete the request towards
 * cfg80211, and reset the initiator state.
 */
void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
{
	struct cfg80211_pmsr_result result = {
		.status = NL80211_PMSR_STATUS_FAILURE,
		.final = 1,
		.host_time = ktime_get_boottime_ns(),
		.type = NL80211_PMSR_TYPE_FTM,
	};
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (!mvm->ftm_initiator.req)
		return;

	for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) {
		memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr,
		       ETH_ALEN);
		result.ftm.burst_index = mvm->ftm_initiator.responses[i];

		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
				     mvm->ftm_initiator.req,
				     &result, GFP_KERNEL);
	}

	cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
			       mvm->ftm_initiator.req, GFP_KERNEL);
	iwl_mvm_ftm_reset(mvm);
}

/* Initialize RTT smoothing state and log the configured parameters */
void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm)
{
	INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp);

	IWL_DEBUG_INFO(mvm,
		       "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n",
		       IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT);
}

/* Free all RTT smoothing entries */
void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm)
{
	struct iwl_mvm_smooth_entry *se, *st;

	list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp,
				 list) {
		list_del(&se->list);
		kfree(se);
	}
}

/* Map the firmware's range request status to a kernel error code */
static int
iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
{
	switch (s) {
	case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS:
		return 0;
	case IWL_TOF_RANGE_REQUEST_STATUS_BUSY:
		return -EBUSY;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/* Fill a v5 range request command from the cfg80211 request */
static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct iwl_tof_range_req_cmd_v5 *cmd,
			       struct
			       cfg80211_pmsr_request *req)
{
	int i;

	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/* use maximum for "no timeout" or bigger than what we can do */
	if (!req->timeout || req->timeout > 255 * 100)
		cmd->req_timeout = 255;
	else
		cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100);

	/*
	 * We treat it always as random, since if not we'll
	 * have filled our local address there instead.
	 */
	cmd->macaddr_random = 1;
	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->cfg.assoc)
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
	else
		eth_broadcast_addr(cmd->range_req_bssid);
}

/*
 * Fill the range request command fields shared by the v9+ command
 * layouts (also reused for v7/v8 via a cast, see iwl_mvm_ftm_cmd_v8()).
 */
static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   struct iwl_tof_range_req_cmd_v9 *cmd,
				   struct cfg80211_pmsr_request *req)
{
	int i;

	cmd->initiator_flags =
		cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
			    IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/*
	 * Use a large value for "no timeout". Don't use the maximum value
	 * because of fw limitations.
	 */
	if (req->timeout)
		cmd->req_timeout_ms = cpu_to_le32(req->timeout);
	else
		cmd->req_timeout_ms = cpu_to_le32(0xfffff);

	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->cfg.assoc) {
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);

		/* AP's TSF is only relevant if associated */
		for (i = 0; i < req->n_peers; i++) {
			if (req->peers[i].report_ap_tsf) {
				struct iwl_mvm_vif *mvmvif =
					iwl_mvm_vif_from_mac80211(vif);

				cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
				return;
			}
		}
	} else {
		eth_broadcast_addr(cmd->range_req_bssid);
	}

	/* Don't report AP's TSF */
	cmd->tsf_mac_id = cpu_to_le32(0xff);
}

/* v8 command has the same common layout; reuse the common fill */
static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct iwl_tof_range_req_cmd_v8 *cmd,
			       struct cfg80211_pmsr_request *req)
{
	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req);
}

/*
 * Translate the peer's channel definition to the v1 firmware
 * representation (channel number, bandwidth, control channel position).
 * Returns 0 on success or -EINVAL for an unsupported bandwidth.
 */
static int
iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      u8 *channel, u8 *bandwidth,
			      u8 *ctrl_ch_position)
{
	u32 freq = peer->chandef.chan->center_freq;

	*channel = ieee80211_frequency_to_channel(freq);

	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		*bandwidth = IWL_TOF_BW_20_LEGACY;
		break;
	case NL80211_CHAN_WIDTH_20:
		*bandwidth = IWL_TOF_BW_20_HT;
		break;
	case NL80211_CHAN_WIDTH_40:
		*bandwidth = IWL_TOF_BW_40;
		break;
	case NL80211_CHAN_WIDTH_80:
		*bandwidth = IWL_TOF_BW_80;
		break;
	default:
		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;

	return 0;
}

/*
 * Translate the peer's channel definition to the v2 firmware
 * representation (channel number, combined frame format + bandwidth,
 * control channel position).  Returns 0 on success or -EINVAL for an
 * unsupported bandwidth.
 */
static int
iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      u8 *channel, u8 *format_bw,
			      u8 *ctrl_ch_position)
{
	u32 freq = peer->chandef.chan->center_freq;
	u8 cmd_ver;

	*channel = ieee80211_frequency_to_channel(freq);

	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_20:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_40:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		*format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_80:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
		*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_160:
		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
						WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
						IWL_FW_CMD_VER_UNKNOWN);

		/* 160 MHz is only supported from command version 13 */
		if (cmd_ver >= 13) {
			*format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
			*format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
			break;
		}
		fallthrough;
	default:
		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

	/* non EDCA based measurement must use HE preamble */
	if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
		*format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;

	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;

	return 0;
}

/* Fill a v2 target (AP) entry for the v5 range request command */
static int
iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v2 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
					    &target->bandwidth,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	target->measure_type = 0; /* regular two-sided FTM */
	target->retries_per_sample = peer->ftm.ftmr_retries;
	target->asap_mode = peer->ftm.asap;
	target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK;

	if (peer->ftm.request_lci)
		target->location_req |= IWL_TOF_LOC_LCI;
	if (peer->ftm.request_civicloc)
		target->location_req |= IWL_TOF_LOC_CIVIC;

	target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO;

	return 0;
}

/* Set an IWL_INITIATOR_AP_FLAGS_* bit on the local 'target' entry */
#define FTM_PUT_FLAG(flag)	(target->initiator_ap_flags |= \
				 cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))

/* Fill the target (AP) entry fields shared by the v6+ entry layouts */
static void
iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      struct iwl_tof_range_req_ap_entry_v6 *target)
{
	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	target->ftmr_max_retries = peer->ftm.ftmr_retries;
	target->initiator_ap_flags = cpu_to_le32(0);

	if (peer->ftm.asap)
		FTM_PUT_FLAG(ASAP);

	if (peer->ftm.request_lci)
		FTM_PUT_FLAG(LCI_REQUEST);

	if (peer->ftm.request_civicloc)
		FTM_PUT_FLAG(CIVIC_REQUEST);

	if (IWL_MVM_FTM_INITIATOR_DYNACK)
		FTM_PUT_FLAG(DYN_ACK);

	if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
		FTM_PUT_FLAG(ALGO_LR);
	else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
		FTM_PUT_FLAG(ALGO_FFT);

	if (peer->ftm.trigger_based)
		FTM_PUT_FLAG(TB);
	else if (peer->ftm.non_trigger_based)
		FTM_PUT_FLAG(NON_TB);

	if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
	    peer->ftm.lmr_feedback)
		FTM_PUT_FLAG(LMR_FEEDBACK);
}

/* Fill a v3 target (AP) entry: v1 chandef + common fields */
static int
iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v3 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
					    &target->bandwidth,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	/*
	 * Versions 3 and 4 have some common fields, so
	 * iwl_mvm_ftm_put_target_common() can be used for version 7 too.
	 */
	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);

	return 0;
}

/* Fill a v4 target (AP) entry: v2 chandef + common fields */
static int
iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v4 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);

	return 0;
}

/*
 * Fill a v6 target (AP) entry; when associated to the target BSSID this
 * also resolves the AP station ID and sets the PMF flag for TB/non-TB
 * measurements towards an MFP station.
 */
static int
iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		       struct cfg80211_pmsr_request_peer *peer,
		       struct iwl_tof_range_req_ap_entry_v6 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	iwl_mvm_ftm_put_target_common(mvm, peer, target);

	if (vif->cfg.assoc) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
		struct ieee80211_sta *sta;
		struct ieee80211_bss_conf *link_conf;
		unsigned int link_id;

		rcu_read_lock();
		for_each_vif_active_link(vif, link_conf, link_id) {
			if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN))
				continue;

			target->sta_id = mvmvif->link[link_id]->ap_sta_id;
			sta = rcu_dereference(mvm->fw_id_to_mac_id[target->sta_id]);
			if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
				rcu_read_unlock();
				/*
				 * NOTE(review): PTR_ERR_OR_ZERO() yields 0
				 * (success) when sta is NULL rather than an
				 * ERR_PTR — confirm this is intended.
				 */
				return PTR_ERR_OR_ZERO(sta);
			}

			if (sta->mfp && (peer->ftm.trigger_based ||
					 peer->ftm.non_trigger_based))
				FTM_PUT_FLAG(PMF);
			break;
		}
		rcu_read_unlock();

#ifdef CONFIG_IWLWIFI_DEBUGFS
		/* debugfs override: force unprotected ranging */
		if (mvmvif->ftm_unprotected) {
			target->sta_id = IWL_MVM_INVALID_STA;
			target->initiator_ap_flags &=
				~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF);
		}

#endif
	} else {
		target->sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * TODO: Beacon interval is currently unknown, so use the common value
	 * of 100 TUs.
	 */
	target->beacon_interval = cpu_to_le16(100);
	return 0;
}

/* Send the range request and convert the FW status to an errno */
static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
{
	u32 status;
	int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);

	if (!err && status) {
		IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
			status);
		err = iwl_ftm_range_request_status_to_err(status);
	}

	return err;
}

/* Build and send a v5 range request command */
static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v5 cmd_v5;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd_v5,
		.len[0] = sizeof(cmd_v5),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);

	for (i = 0; i < cmd_v5.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

/* Build and send a v7 range request command */
static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v7 cmd_v7;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd_v7,
		.len[0] = sizeof(cmd_v7),
	};
	u8 i;
	int err;

	/*
	 * Versions 7 and 8 have the same structure except from the responders
	 * list, so iwl_mvm_ftm_cmd() can be used for version 7 too.
	 */
	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req);

	for (i = 0; i < cmd_v7.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

/* Build and send a v8 range request command */
static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v8 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

/* Build and send a v9 range request command */
static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v9 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

/*
 * Key iterator callback: copy the TK of the key belonging to the
 * target's BSSID into the target entry (secured ranging with an
 * associated AP).
 */
static void iter(struct ieee80211_hw *hw,
		 struct ieee80211_vif *vif,
		 struct ieee80211_sta *sta,
		 struct ieee80211_key_conf *key,
		 void *data)
{
	struct iwl_tof_range_req_ap_entry_v6 *target = data;

	if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
		return;

	WARN_ON(!sta->mfp);

	if (WARN_ON(key->keylen > sizeof(target->tk)))
		return;

	memcpy(target->tk, key->key, key->keylen);
	target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
	WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID);
}

/*
 * For TB/non-TB targets with a matching PASN entry, configure cipher,
 * HLTK, TK and PNs on the target entry and mark it as secured.
 */
static void
iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct iwl_tof_range_req_ap_entry_v7 *target)
{
	struct iwl_mvm_ftm_pasn_entry *entry;
	u32 flags = le32_to_cpu(target->initiator_ap_flags);
#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (mvmvif->ftm_unprotected)
		return;
#endif

	if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB |
		       IWL_INITIATOR_AP_FLAGS_TB)))
		return;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
		if (memcmp(entry->addr, target->bssid, sizeof(entry->addr)))
			continue;

		target->cipher = entry->cipher;

		if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK)
			memcpy(target->hltk, entry->hltk, sizeof(target->hltk));
		else
			memset(target->hltk, 0, sizeof(target->hltk));

		/*
		 * If associated to this AP, take the TK from the installed
		 * key via the key iterator; otherwise use the TK stored in
		 * the PASN entry.
		 */
		if (vif->cfg.assoc &&
		    !memcmp(vif->bss_conf.bssid, target->bssid,
			    sizeof(target->bssid)))
			ieee80211_iter_keys(mvm->hw, vif, iter, target);
		else
			memcpy(target->tk, entry->tk, sizeof(target->tk));

		memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn));
		memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn));

		target->initiator_ap_flags |=
			cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED);
		return;
	}
}

/* Fill a v7 target entry: v6 fields plus secured-ranging material */
static int
iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v7 *target)
{
	int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);

	if (err)
		return err;

	iwl_mvm_ftm_set_secured_ranging(mvm, vif, target);
	return err;
}

/* Build and send a v11 range request command */
static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v11 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

/* Configure the NDP ranging parameters (LTF/STS/repetition limits) */
static void
iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm,
			   struct iwl_tof_range_req_ap_entry_v8 *target)
{
	/* Only 2 STS are supported on Tx */
	u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ?
		1 : IWL_MVM_FTM_I2R_MAX_STS;

	target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
		(IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS);
	target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
		(i2r_max_sts << IWL_LOCATION_MAX_STS_POS);
	target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF;
	target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF;
}

/* Fill a v8 target entry: v7 fields plus NDP params and secure-LTF fixup */
static int
iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v8 *target)
{
	u32 flags;
	int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);

	if (ret)
		return ret;

	iwl_mvm_ftm_set_ndp_params(mvm, target);

	/*
	 * If secure LTF is turned off, replace the flag with PMF only
	 */
	flags = le32_to_cpu(target->initiator_ap_flags);
	if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
		if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
			flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;

		flags |= IWL_INITIATOR_AP_FLAGS_PMF;
		target->initiator_ap_flags = cpu_to_le32(flags);
	}

	return 0;
}

/* Build and send a v12 range request command */
static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v12 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

/* Build and send a v13 range request command */
static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v13 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target);
		if (err)
			return err;

		if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
			target->bss_color = peer->ftm.bss_color;

		if (peer->ftm.non_trigger_based) {
			target->min_time_between_msr =
				cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
			target->burst_period =
				cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
		} else {
			target->min_time_between_msr = cpu_to_le16(0);
		}

		target->band =
			iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

/*
 * Start an FTM measurement request: pick the range request command
 * version supported by the firmware, build and send it, and remember
 * the request as the active one on success.  Only one request may be
 * active at a time (-EBUSY otherwise).
 */
int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		      struct cfg80211_pmsr_request *req)
{
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
	int err;

	lockdep_assert_held(&mvm->mutex);

	if (mvm->ftm_initiator.req)
		return -EBUSY;

	if (new_api) {
		u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
						   WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
						   IWL_FW_CMD_VER_UNKNOWN);

		switch (cmd_ver) {
		case 13:
			err = iwl_mvm_ftm_start_v13(mvm, vif, req);
			break;
		case 12:
			err = iwl_mvm_ftm_start_v12(mvm, vif, req);
			break;
		case 11:
			err = iwl_mvm_ftm_start_v11(mvm, vif, req);
			break;
		case 9:
		case 10:
			err = iwl_mvm_ftm_start_v9(mvm, vif, req);
			break;
		case 8:
			err = iwl_mvm_ftm_start_v8(mvm, vif, req);
			break;
		default:
			err = iwl_mvm_ftm_start_v7(mvm, vif, req);
			break;
		}
	} else {
		err = iwl_mvm_ftm_start_v5(mvm, vif, req);
	}

	if (!err) {
		mvm->ftm_initiator.req = req;
		mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
	}

	return err;
}

/* Abort @req if it is the active measurement request */
void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_abort_cmd cmd = {
		.request_id = req->cookie,
	};

	lockdep_assert_held(&mvm->mutex);

	if (req != mvm->ftm_initiator.req)
		return;

	iwl_mvm_ftm_reset(mvm);

	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
				 0, sizeof(cmd), &cmd))
		IWL_ERR(mvm, "failed to abort FTM process\n");
}

/* Return the index of @addr in the request's peer list, or -ENOENT */
static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req,
				 const u8 *addr)
{
	int i;

	for (i = 0; i < req->n_peers; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		if (ether_addr_equal_unaligned(peer->addr, addr))
			return i;
	}

	return -ENOENT;
}

/*
 * Convert a firmware GP2 timestamp to host boottime (ns): subtract the
 * elapsed GP2 time (scaled by 1000 to ns) from the current boottime.
 */
static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
{
	u32 gp2_ts = le32_to_cpu(fw_gp2_ts);
	u32 curr_gp2, diff;
	u64 now_from_boot_ns;

	iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
			      &now_from_boot_ns, NULL);

	/* GP2 is a 32-bit counter; handle wraparound */
	if (curr_gp2 >= gp2_ts)
		diff = curr_gp2 - gp2_ts;
	else
		diff = curr_gp2 + (U32_MAX - gp2_ts + 1);

	return now_from_boot_ns - (u64)diff * 1000;
}

/* Attach cached LCI/civic data for the result's address, if present */
static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm,
				      struct cfg80211_pmsr_result *res)
{
	struct iwl_mvm_loc_entry *entry;

	list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) {
		if (!ether_addr_equal_unaligned(res->addr, entry->addr))
			continue;

		if (entry->lci_len) {
			res->ftm.lci_len =
entry->lci_len; 1032 res->ftm.lci = entry->buf; 1033 } 1034 1035 if (entry->civic_len) { 1036 res->ftm.civicloc_len = entry->civic_len; 1037 res->ftm.civicloc = entry->buf + entry->lci_len; 1038 } 1039 1040 /* we found the entry we needed */ 1041 break; 1042 } 1043 } 1044 1045 static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id, 1046 u8 num_of_aps) 1047 { 1048 lockdep_assert_held(&mvm->mutex); 1049 1050 if (request_id != (u8)mvm->ftm_initiator.req->cookie) { 1051 IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n", 1052 request_id, (u8)mvm->ftm_initiator.req->cookie); 1053 return -EINVAL; 1054 } 1055 1056 if (num_of_aps > mvm->ftm_initiator.req->n_peers) { 1057 IWL_ERR(mvm, "FTM range response invalid\n"); 1058 return -EINVAL; 1059 } 1060 1061 return 0; 1062 } 1063 1064 static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm, 1065 struct cfg80211_pmsr_result *res) 1066 { 1067 struct iwl_mvm_smooth_entry *resp = NULL, *iter; 1068 s64 rtt_avg, rtt = res->ftm.rtt_avg; 1069 u32 undershoot, overshoot; 1070 u8 alpha; 1071 1072 if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH) 1073 return; 1074 1075 WARN_ON(rtt < 0); 1076 1077 if (res->status != NL80211_PMSR_STATUS_SUCCESS) { 1078 IWL_DEBUG_INFO(mvm, 1079 ": %pM: ignore failed measurement. 
Status=%u\n", 1080 res->addr, res->status); 1081 return; 1082 } 1083 1084 list_for_each_entry(iter, &mvm->ftm_initiator.smooth.resp, list) { 1085 if (!memcmp(res->addr, iter->addr, ETH_ALEN)) { 1086 resp = iter; 1087 break; 1088 } 1089 } 1090 1091 if (!resp) { 1092 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 1093 if (!resp) 1094 return; 1095 1096 memcpy(resp->addr, res->addr, ETH_ALEN); 1097 list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp); 1098 1099 resp->rtt_avg = rtt; 1100 1101 IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n", 1102 resp->addr, resp->rtt_avg); 1103 goto update_time; 1104 } 1105 1106 if (res->host_time - resp->host_time > 1107 IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) { 1108 resp->rtt_avg = rtt; 1109 1110 IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n", 1111 resp->addr, resp->rtt_avg); 1112 goto update_time; 1113 } 1114 1115 /* Smooth the results based on the tracked RTT average */ 1116 undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT; 1117 overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT; 1118 alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA; 1119 1120 rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100); 1121 1122 IWL_DEBUG_INFO(mvm, 1123 "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n", 1124 resp->addr, resp->rtt_avg, rtt_avg, rtt); 1125 1126 /* 1127 * update the responder's average RTT results regardless of 1128 * the under/over shoot logic below 1129 */ 1130 resp->rtt_avg = rtt_avg; 1131 1132 /* smooth the results */ 1133 if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) { 1134 res->ftm.rtt_avg = rtt_avg; 1135 1136 IWL_DEBUG_INFO(mvm, 1137 "undershoot: val=%lld\n", 1138 (rtt_avg - rtt)); 1139 } else if (rtt_avg < rtt && (rtt - rtt_avg) > 1140 overshoot) { 1141 res->ftm.rtt_avg = rtt_avg; 1142 IWL_DEBUG_INFO(mvm, 1143 "overshoot: val=%lld\n", 1144 (rtt - rtt_avg)); 1145 } 1146 1147 update_time: 1148 resp->host_time = res->host_time; 1149 } 1150 1151 static void iwl_mvm_debug_range_resp(struct iwl_mvm 
*mvm, u8 index, 1152 struct cfg80211_pmsr_result *res) 1153 { 1154 s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666); 1155 1156 IWL_DEBUG_INFO(mvm, "entry %d\n", index); 1157 IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status); 1158 IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr); 1159 IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time); 1160 IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index); 1161 IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes); 1162 IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg); 1163 IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread); 1164 IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg); 1165 IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance); 1166 IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread); 1167 IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg); 1168 } 1169 1170 static void 1171 iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm, 1172 struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap) 1173 { 1174 struct iwl_mvm_ftm_pasn_entry *entry; 1175 1176 lockdep_assert_held(&mvm->mutex); 1177 1178 list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) { 1179 if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr))) 1180 continue; 1181 1182 memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn)); 1183 memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn)); 1184 return; 1185 } 1186 } 1187 1188 static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm) 1189 { 1190 if (!fw_has_api(&mvm->fw->ucode_capa, 1191 IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ)) 1192 return 5; 1193 1194 /* Starting from version 8, the FW advertises the version */ 1195 if (mvm->cmd_ver.range_resp >= 8) 1196 return mvm->cmd_ver.range_resp; 1197 else if (fw_has_api(&mvm->fw->ucode_capa, 1198 IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) 1199 return 7; 1200 1201 /* The first version of the new range request API */ 1202 return 6; 1203 } 1204 1205 static bool 
iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
{
	switch (ver) {
	case 9:
	case 8:
		/* v9 reuses the v8 notification layout */
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8);
	case 7:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7);
	case 6:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6);
	case 5:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5);
	default:
		WARN_ONCE(1, "FTM: unsupported range response version %u", ver);
		return false;
	}
}

/*
 * RX handler for the FTM range response notification.
 * Validates the notification against the detected version, then walks the
 * per-AP measurement entries and reports each one to cfg80211 (the
 * reporting itself happens further down in the per-entry loop).
 * Runs with mvm->mutex held.
 */
void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	/* All views alias the same payload; which is valid depends on version */
	struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data;
	int i;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
	u8 num_of_aps, last_in_batch;
	u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm);

	lockdep_assert_held(&mvm->mutex);

	/* No pending measurement request -- nothing to report against */
	if (!mvm->ftm_initiator.req) {
		return;
	}

	/* Drop malformed (wrong-sized) notifications */
	if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len)))
		return;

	if (new_api) {
		/* v6..v8 header fields are read through the v8 view */
		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id,
						 fw_resp_v8->num_of_aps))
			return;

		num_of_aps = fw_resp_v8->num_of_aps;
		last_in_batch = fw_resp_v8->last_report;
	} else {
		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
						 fw_resp_v5->num_of_aps))
			return;

		num_of_aps = fw_resp_v5->num_of_aps;
		last_in_batch = fw_resp_v5->last_in_batch;
	}

	IWL_DEBUG_INFO(mvm, "Range response received\n");
	IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n",
		       mvm->ftm_initiator.req->cookie, num_of_aps);

	for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
		struct cfg80211_pmsr_result result = {};
		struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap;
		int peer_idx;

		if (new_api) {
			if (notif_ver >= 8) {
				fw_ap = &fw_resp_v8->ap[i];
				/* v8+ carries PNs for secured ranging */
				iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
			} else if (notif_ver == 7) {
				fw_ap = (void *)&fw_resp_v7->ap[i];
			} else {
				fw_ap = (void *)&fw_resp_v6->ap[i];
			}

			result.final = fw_ap->last_burst;
			result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
			result.ap_tsf_valid = 1;
		} else {
			/* the first part is the same for old and new APIs */
			fw_ap = (void *)&fw_resp_v5->ap[i];
			/*
			 * FIXME: the firmware needs to report this, we don't
			 * even know the number of bursts the responder picked
			 * (if we asked it to)
			 */
			result.final = 0;
		}

		/* Entries for peers we never asked about are dropped */
		peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req,
						 fw_ap->bssid);
		if (peer_idx < 0) {
			IWL_WARN(mvm,
				 "Unknown address (%pM, target #%d) in FTM response\n",
				 fw_ap->bssid, i);
			continue;
		}

		/* Translate the FW per-entry status to nl80211 semantics */
		switch (fw_ap->measure_status) {
		case IWL_TOF_ENTRY_SUCCESS:
			result.status = NL80211_PMSR_STATUS_SUCCESS;
			break;
		case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
			result.status = NL80211_PMSR_STATUS_TIMEOUT;
			break;
		case IWL_TOF_ENTRY_NO_RESPONSE:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
			break;
		case IWL_TOF_ENTRY_REQUEST_REJECTED:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
			result.ftm.busy_retry_time = fw_ap->refusal_period;
			break;
		default:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
			break;
		}

memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
		result.host_time = iwl_mvm_ftm_get_host_time(mvm,
							     fw_ap->timestamp);
		result.type = NL80211_PMSR_TYPE_FTM;
		/* Burst index counts how many responses this peer produced */
		result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx];
		mvm->ftm_initiator.responses[peer_idx]++;
		result.ftm.rssi_avg = fw_ap->rssi;
		result.ftm.rssi_avg_valid = 1;
		result.ftm.rssi_spread = fw_ap->rssi_spread;
		result.ftm.rssi_spread_valid = 1;
		/* FW reports RTT as signed 32-bit; sign-extend before storing */
		result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
		result.ftm.rtt_avg_valid = 1;
		result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
		result.ftm.rtt_variance_valid = 1;
		result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
		result.ftm.rtt_spread_valid = 1;

		/* Attach cached LCI/civic location data, if we have any */
		iwl_mvm_ftm_get_lci_civic(mvm, &result);

		/* May replace rtt_avg with the smoothed value */
		iwl_mvm_ftm_rtt_smoothing(mvm, &result);

		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
				     mvm->ftm_initiator.req,
				     &result, GFP_KERNEL);

		if (fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
			IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n",
				       fw_ap->rttConfidence);

		iwl_mvm_debug_range_resp(mvm, i, &result);
	}

	/* Last notification of the batch: complete the request and reset */
	if (last_in_batch) {
		cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
				       mvm->ftm_initiator.req,
				       GFP_KERNEL);
		iwl_mvm_ftm_reset(mvm);
	}
}

/*
 * RX handler for an FTM location (measurement report) action frame.
 * Extracts the LCI and location-civic measurement report IEs, if present,
 * and caches them per-BSSID on ftm_initiator.loc_list so they can be
 * attached to later range results.
 * NOTE(review): entries appear to be freed on the loc_list teardown in
 * iwl_mvm_ftm_reset() -- confirm the list cannot grow unbounded between
 * resets.
 * Runs with mvm->mutex held.
 */
void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	const struct ieee80211_mgmt *mgmt = (void *)pkt->data;
	size_t len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mvm_loc_entry *entry;
	const u8 *ies, *lci, *civic, *msr_ie;
	size_t ies_len, lci_len = 0, civic_len = 0;
	/* Minimum frame: action header plus the fixed FTM action fields */
	size_t baselen = IEEE80211_MIN_ACTION_SIZE +
		sizeof(mgmt->u.action.u.ftm);
	static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI;
	static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC;

	if (len <= baselen)
		return;

	lockdep_assert_held(&mvm->mutex);

	ies = mgmt->u.action.u.ftm.variable;
	ies_len = len - baselen;

	/* Find the measurement report IE whose report type is LCI */
	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
					&rprt_type_lci, 1, 4);
	if (msr_ie) {
		lci = msr_ie + 2;
		lci_len = msr_ie[1];
	}

	/* Likewise for the civic location report */
	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
					&rprt_type_civic, 1, 4);
	if (msr_ie) {
		civic = msr_ie + 2;
		civic_len = msr_ie[1];
	}

	/* entry->buf holds the LCI bytes followed by the civic bytes */
	entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL);
	if (!entry)
		return;

	memcpy(entry->addr, mgmt->bssid, ETH_ALEN);

	entry->lci_len = lci_len;
	if (lci_len)
		memcpy(entry->buf, lci, lci_len);

	entry->civic_len = civic_len;
	if (civic_len)
		memcpy(entry->buf + lci_len, civic, civic_len);

	list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list);
}