// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2024 Intel Corporation
 */
#include <linux/etherdevice.h>
#include <linux/math64.h>
#include <net/cfg80211.h>
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "constants.h"

struct iwl_mvm_loc_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	u8 lci_len, civic_len;
	u8 buf[];
};

struct iwl_mvm_smooth_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	s64 rtt_avg;
	u64 host_time;
};

enum iwl_mvm_pasn_flags {
	IWL_MVM_PASN_FLAG_HAS_HLTK = BIT(0),
};

struct iwl_mvm_ftm_pasn_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	u8 hltk[HLTK_11AZ_LEN];
	u8 tk[TK_11AZ_LEN];
	u8 cipher;
	u8 tx_pn[IEEE80211_CCMP_PN_LEN];
	u8 rx_pn[IEEE80211_CCMP_PN_LEN];
	u32 flags;
};

struct iwl_mvm_ftm_iter_data {
	u8 *cipher;
	u8 *bssid;
	u8 *tk;
};

int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
			     u8 *hltk, u32 hltk_len)
{
	struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn),
						      GFP_KERNEL);
	u32 expected_tk_len;

	lockdep_assert_held(&mvm->mutex);

	if (!pasn)
		return -ENOBUFS;

	iwl_mvm_ftm_remove_pasn_sta(mvm, addr);

	pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);

	switch (pasn->cipher) {
	case IWL_LOCATION_CIPHER_CCMP_128:
	case IWL_LOCATION_CIPHER_GCMP_128:
		expected_tk_len = WLAN_KEY_LEN_CCMP;
		break;
	case IWL_LOCATION_CIPHER_GCMP_256:
		expected_tk_len = WLAN_KEY_LEN_GCMP_256;
		break;
	default:
		goto out;
	}

	/*
	 * If associated to this AP and already have security context,
	 * the TK is already configured for this station, so it
	 * shouldn't be set again here.
	 */
	if (vif->cfg.assoc) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
		struct ieee80211_bss_conf *link_conf;
		unsigned int link_id;
		struct ieee80211_sta *sta;
		u8 sta_id;

		rcu_read_lock();
		for_each_vif_active_link(vif, link_conf, link_id) {
			if (memcmp(addr, link_conf->bssid, ETH_ALEN))
				continue;

			sta_id = mvmvif->link[link_id]->ap_sta_id;
			sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
			if (!IS_ERR_OR_NULL(sta) && sta->mfp)
				expected_tk_len = 0;
			break;
		}
		rcu_read_unlock();
	}

	if (tk_len != expected_tk_len ||
	    (hltk_len && hltk_len != sizeof(pasn->hltk))) {
		IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n",
			tk_len, hltk_len);
		goto out;
	}

	if (!expected_tk_len && !hltk_len) {
		IWL_ERR(mvm, "TK and HLTK not set\n");
		goto out;
	}

	memcpy(pasn->addr, addr, sizeof(pasn->addr));

	if (hltk_len) {
		memcpy(pasn->hltk, hltk, sizeof(pasn->hltk));
		pasn->flags |= IWL_MVM_PASN_FLAG_HAS_HLTK;
	}

	if (tk && tk_len)
		memcpy(pasn->tk, tk, sizeof(pasn->tk));

	list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list);
	return 0;
out:
	kfree(pasn);
	return -EINVAL;
}

void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr)
{
	struct iwl_mvm_ftm_pasn_entry *entry, *prev;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list,
				 list) {
		if (memcmp(entry->addr, addr, sizeof(entry->addr)))
			continue;

		list_del(&entry->list);
		kfree(entry);
		return;
	}
}

static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
{
	struct iwl_mvm_loc_entry *e, *t;

	mvm->ftm_initiator.req = NULL;
	mvm->ftm_initiator.req_wdev = NULL;
	memset(mvm->ftm_initiator.responses, 0,
	       sizeof(mvm->ftm_initiator.responses));

	list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
		list_del(&e->list);
		kfree(e);
	}
}

void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
{
	struct cfg80211_pmsr_result result = {
		.status = NL80211_PMSR_STATUS_FAILURE,
		.final = 1,
		.host_time = ktime_get_boottime_ns(),
		.type = NL80211_PMSR_TYPE_FTM,
	};
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (!mvm->ftm_initiator.req)
		return;

	for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) {
		memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr,
		       ETH_ALEN);
		result.ftm.burst_index = mvm->ftm_initiator.responses[i];

		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
				     mvm->ftm_initiator.req,
				     &result, GFP_KERNEL);
	}

	cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
			       mvm->ftm_initiator.req, GFP_KERNEL);
	iwl_mvm_ftm_reset(mvm);
}

void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm)
{
	INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp);

	IWL_DEBUG_INFO(mvm,
		       "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n",
		       IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT);
}

void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm)
{
	struct iwl_mvm_smooth_entry *se, *st;

	list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp,
				 list) {
		list_del(&se->list);
		kfree(se);
	}
}

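/*
 * Translate the firmware's range request status into a standard errno
 * for the callers of the range request command.
 */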
static int
iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
{
	switch (s) {
	case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS:
		return 0;
	case IWL_TOF_RANGE_REQUEST_STATUS_BUSY:
		return -EBUSY;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct iwl_tof_range_req_cmd_v5 *cmd,
			       struct cfg80211_pmsr_request *req)
{
	int i;

	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/* use maximum for "no timeout" or bigger than what we can do */
	if (!req->timeout || req->timeout > 255 * 100)
		cmd->req_timeout = 255;
	else
		cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100);

	/*
	 * We always treat it as random, since otherwise we would have
	 * filled our local address there instead.
	 */
	cmd->macaddr_random = 1;
	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->cfg.assoc)
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
	else
		eth_broadcast_addr(cmd->range_req_bssid);
}

static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   struct iwl_tof_range_req_cmd_v9 *cmd,
				   struct cfg80211_pmsr_request *req)
{
	int i;

	cmd->initiator_flags =
		cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
			    IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/*
	 * Use a large value for "no timeout". Don't use the maximum value
	 * because of fw limitations.
	 */
	if (req->timeout)
		cmd->req_timeout_ms = cpu_to_le32(req->timeout);
	else
		cmd->req_timeout_ms = cpu_to_le32(0xfffff);

	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->cfg.assoc) {
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);

		/* AP's TSF is only relevant if associated */
		for (i = 0; i < req->n_peers; i++) {
			if (req->peers[i].report_ap_tsf) {
				struct iwl_mvm_vif *mvmvif =
					iwl_mvm_vif_from_mac80211(vif);

				cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
				return;
			}
		}
	} else {
		eth_broadcast_addr(cmd->range_req_bssid);
	}

	/* Don't report AP's TSF */
	cmd->tsf_mac_id = cpu_to_le32(0xff);
}

static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct iwl_tof_range_req_cmd_v8 *cmd,
			       struct cfg80211_pmsr_request *req)
{
	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req);
}

static int
iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      u8 *channel, u8 *bandwidth,
			      u8 *ctrl_ch_position)
{
	u32 freq = peer->chandef.chan->center_freq;

	*channel = ieee80211_frequency_to_channel(freq);

	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		*bandwidth = IWL_TOF_BW_20_LEGACY;
		break;
	case NL80211_CHAN_WIDTH_20:
		*bandwidth = IWL_TOF_BW_20_HT;
		break;
	case NL80211_CHAN_WIDTH_40:
		*bandwidth = IWL_TOF_BW_40;
		break;
	case NL80211_CHAN_WIDTH_80:
		*bandwidth = IWL_TOF_BW_80;
		break;
	default:
		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

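	/* The control channel position is only needed for widths above 20 MHz */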
	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;

	return 0;
}

static int
iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      u8 *channel, u8 *format_bw,
			      u8 *ctrl_ch_position)
{
	u32 freq = peer->chandef.chan->center_freq;
	u8 cmd_ver;

	*channel = ieee80211_frequency_to_channel(freq);

	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_20:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_40:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		*format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_80:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
		*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_160:
		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
						WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
						IWL_FW_CMD_VER_UNKNOWN);

		if (cmd_ver >= 13) {
			*format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
			*format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
			break;
		}
		fallthrough;
	default:
		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

	/* non EDCA based measurement must use HE preamble */
	if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
		*format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;

	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;

	return 0;
}

static int
iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v2 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
					    &target->bandwidth,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	target->measure_type = 0; /* regular two-sided FTM */
	target->retries_per_sample = peer->ftm.ftmr_retries;
	target->asap_mode = peer->ftm.asap;
	target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK;

	if (peer->ftm.request_lci)
		target->location_req |= IWL_TOF_LOC_LCI;
	if (peer->ftm.request_civicloc)
		target->location_req |= IWL_TOF_LOC_CIVIC;

	target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO;

	return 0;
}

#define FTM_SET_FLAG(flag) (*flags |= \
	cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))

static void
iwl_mvm_ftm_set_target_flags(struct iwl_mvm *mvm,
			     struct cfg80211_pmsr_request_peer *peer,
			     __le32 *flags)
{
	*flags = cpu_to_le32(0);

	if (peer->ftm.asap)
		FTM_SET_FLAG(ASAP);

	if (peer->ftm.request_lci)
		FTM_SET_FLAG(LCI_REQUEST);

	if (peer->ftm.request_civicloc)
		FTM_SET_FLAG(CIVIC_REQUEST);

	if (IWL_MVM_FTM_INITIATOR_DYNACK)
		FTM_SET_FLAG(DYN_ACK);

	if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
		FTM_SET_FLAG(ALGO_LR);
	else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
		FTM_SET_FLAG(ALGO_FFT);

	if (peer->ftm.trigger_based)
		FTM_SET_FLAG(TB);
	else if (peer->ftm.non_trigger_based)
		FTM_SET_FLAG(NON_TB);

	if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
	    peer->ftm.lmr_feedback)
		FTM_SET_FLAG(LMR_FEEDBACK);
}

static void
iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      struct iwl_tof_range_req_ap_entry_v6 *target)
{
	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	target->ftmr_max_retries = peer->ftm.ftmr_retries;
	iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
}

static int
iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v3 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
					    &target->bandwidth,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	/*
	 * Versions 3 and 4 have some common fields, so
	 * iwl_mvm_ftm_put_target_common() can be used for version 7 too.
	 */
	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);

	return 0;
}

static int
iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v4 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);

	return 0;
}

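/*
 * For an associated peer, look up the AP station ID so the firmware can
 * use the existing security context; request PMF for trigger-based and
 * non-trigger-based measurements when the AP station uses MFP.
 */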
static int iwl_mvm_ftm_set_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct cfg80211_pmsr_request_peer *peer,
			       u8 *sta_id, __le32 *flags)
{
	if (vif->cfg.assoc) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
		struct ieee80211_sta *sta;
		struct ieee80211_bss_conf *link_conf;
		unsigned int link_id;

		rcu_read_lock();
		for_each_vif_active_link(vif, link_conf, link_id) {
			if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN))
				continue;

			*sta_id = mvmvif->link[link_id]->ap_sta_id;
			sta = rcu_dereference(mvm->fw_id_to_mac_id[*sta_id]);
			if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
				rcu_read_unlock();
				return PTR_ERR_OR_ZERO(sta);
			}

			if (sta->mfp && (peer->ftm.trigger_based ||
					 peer->ftm.non_trigger_based))
				FTM_SET_FLAG(PMF);
			break;
		}
		rcu_read_unlock();

#ifdef CONFIG_IWLWIFI_DEBUGFS
		if (mvmvif->ftm_unprotected) {
			*sta_id = IWL_MVM_INVALID_STA;
			*flags &= ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF);
		}
#endif
	} else {
		*sta_id = IWL_MVM_INVALID_STA;
	}

	return 0;
}

static int
iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		       struct cfg80211_pmsr_request_peer *peer,
		       struct iwl_tof_range_req_ap_entry_v6 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	iwl_mvm_ftm_put_target_common(mvm, peer, target);

	iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
			    &target->initiator_ap_flags);

	/*
	 * TODO: Beacon interval is currently unknown, so use the common value
	 * of 100 TUs.
	 */
	target->beacon_interval = cpu_to_le16(100);
	return 0;
}

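/* Send the range request and convert a non-zero firmware status to an errno */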
static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
{
	u32 status;
	int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);

	if (!err && status) {
		IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
			status);
		err = iwl_ftm_range_request_status_to_err(status);
	}

	return err;
}

static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v5 cmd_v5;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd_v5,
		.len[0] = sizeof(cmd_v5),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);

	for (i = 0; i < cmd_v5.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v7 cmd_v7;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd_v7,
		.len[0] = sizeof(cmd_v7),
	};
	u8 i;
	int err;

	/*
	 * Versions 7 and 8 have the same structure except for the responders
	 * list, so iwl_mvm_ftm_cmd_v8() can be used for version 7 too.
	 */
	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req);

	for (i = 0; i < cmd_v7.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v8 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v9 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

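/*
 * Key iterator used for secured ranging with an associated AP: copy the TK
 * and cipher of the key installed for the target BSSID.
 */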
static void iter(struct ieee80211_hw *hw,
		 struct ieee80211_vif *vif,
		 struct ieee80211_sta *sta,
		 struct ieee80211_key_conf *key,
		 void *data)
{
	struct iwl_mvm_ftm_iter_data *target = data;

	if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
		return;

	WARN_ON(!sta->mfp);

	if (WARN_ON(key->keylen > sizeof(target->tk)))
		return;

	memcpy(target->tk, key->key, key->keylen);
	*target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
	WARN_ON(*target->cipher == IWL_LOCATION_CIPHER_INVALID);
}

static void
iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				u8 *bssid, u8 *cipher, u8 *hltk, u8 *tk,
				u8 *rx_pn, u8 *tx_pn, __le32 *flags)
{
	struct iwl_mvm_ftm_pasn_entry *entry;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (mvmvif->ftm_unprotected)
		return;
#endif

	if (!(le32_to_cpu(*flags) & (IWL_INITIATOR_AP_FLAGS_NON_TB |
				     IWL_INITIATOR_AP_FLAGS_TB)))
		return;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
		if (memcmp(entry->addr, bssid, sizeof(entry->addr)))
			continue;

		*cipher = entry->cipher;

		if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK)
			memcpy(hltk, entry->hltk, sizeof(entry->hltk));
		else
			memset(hltk, 0, sizeof(entry->hltk));

		if (vif->cfg.assoc &&
		    !memcmp(vif->bss_conf.bssid, bssid, ETH_ALEN)) {
			struct iwl_mvm_ftm_iter_data target;

			target.cipher = cipher;
			target.bssid = bssid;
			target.tk = tk;
			ieee80211_iter_keys(mvm->hw, vif, iter, &target);
		} else {
			memcpy(tk, entry->tk, sizeof(entry->tk));
		}

		memcpy(rx_pn, entry->rx_pn, sizeof(entry->rx_pn));
		memcpy(tx_pn, entry->tx_pn, sizeof(entry->tx_pn));

		FTM_SET_FLAG(SECURED);
		return;
	}
}

static int
iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v7 *target)
{
	int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);

	if (err)
		return err;

	iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
					&target->cipher, target->hltk,
					target->tk, target->rx_pn,
					target->tx_pn,
					&target->initiator_ap_flags);
	return err;
}

static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v11 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

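/*
 * Fill the NDP ranging parameters (max LTF repetitions, max space-time
 * streams and max total LTFs) for both the R2I and I2R directions.
 */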
static void
iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm,
			   struct iwl_tof_range_req_ap_entry_v8 *target)
{
	/* Only 2 STS are supported on Tx */
	u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
		IWL_MVM_FTM_I2R_MAX_STS;

	target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
		(IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS);
	target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
		(i2r_max_sts << IWL_LOCATION_MAX_STS_POS);
	target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF;
	target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF;
}

static int
iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v8 *target)
{
	u32 flags;
	int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);

	if (ret)
		return ret;

	iwl_mvm_ftm_set_ndp_params(mvm, target);

	/*
	 * If secure LTF is turned off, replace the flag with PMF only
	 */
	flags = le32_to_cpu(target->initiator_ap_flags);
	if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
		if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
			flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;

		flags |= IWL_INITIATOR_AP_FLAGS_PMF;
		target->initiator_ap_flags = cpu_to_le32(flags);
	}

	return 0;
}

static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v12 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v13 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target);
		if (err)
			return err;

		if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
			target->bss_color = peer->ftm.bss_color;

		if (peer->ftm.non_trigger_based) {
			target->min_time_between_msr =
				cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
			target->burst_period =
				cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
		} else {
			target->min_time_between_msr = cpu_to_le16(0);
		}

		target->band =
			iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

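/*
 * Unlike earlier versions, the v10 target entry is filled directly rather
 * than through iwl_mvm_ftm_put_target_common(), including the secured
 * ranging and NDP parameters.
 */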
static int
iwl_mvm_ftm_put_target_v10(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			   struct cfg80211_pmsr_request_peer *peer,
			   struct iwl_tof_range_req_ap_entry_v10 *target)
{
	u32 i2r_max_sts, flags;
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
	iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
			    &target->initiator_ap_flags);
	iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
					&target->cipher, target->hltk,
					target->tk, target->rx_pn,
					target->tx_pn,
					&target->initiator_ap_flags);

	i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
		IWL_MVM_FTM_I2R_MAX_STS;

	target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
		(IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) |
		(IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
	target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
		(i2r_max_sts << IWL_LOCATION_MAX_STS_POS) |
		(IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);

	if (peer->ftm.non_trigger_based) {
		target->min_time_between_msr =
			cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
		target->burst_period =
			cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
	} else {
		target->min_time_between_msr = cpu_to_le16(0);
	}

	target->band =
		iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);

	/*
	 * TODO: Beacon interval is currently unknown, so use the common value
	 * of 100 TUs.
	 */
	target->beacon_interval = cpu_to_le16(100);

	/*
	 * If secure LTF is turned off, replace the flag with PMF only
	 */
	flags = le32_to_cpu(target->initiator_ap_flags);
	if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
		if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
			flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;

		flags |= IWL_INITIATOR_AP_FLAGS_PMF;
		target->initiator_ap_flags = cpu_to_le32(flags);
	}

	return 0;
}

static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v14 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v10 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v10(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

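/*
 * Entry point for starting an FTM request: pick the range request command
 * version advertised by the firmware and build the matching command.
 */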
int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		      struct cfg80211_pmsr_request *req)
{
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
	int err;

	lockdep_assert_held(&mvm->mutex);

	if (mvm->ftm_initiator.req)
		return -EBUSY;

	if (new_api) {
		u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
						   WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
						   IWL_FW_CMD_VER_UNKNOWN);

		switch (cmd_ver) {
		case 14:
			err = iwl_mvm_ftm_start_v14(mvm, vif, req);
			break;
		case 13:
			err = iwl_mvm_ftm_start_v13(mvm, vif, req);
			break;
		case 12:
			err = iwl_mvm_ftm_start_v12(mvm, vif, req);
			break;
		case 11:
			err = iwl_mvm_ftm_start_v11(mvm, vif, req);
			break;
		case 9:
		case 10:
			err = iwl_mvm_ftm_start_v9(mvm, vif, req);
			break;
		case 8:
			err = iwl_mvm_ftm_start_v8(mvm, vif, req);
			break;
		default:
			err = iwl_mvm_ftm_start_v7(mvm, vif, req);
			break;
		}
	} else {
		err = iwl_mvm_ftm_start_v5(mvm, vif, req);
	}

	if (!err) {
		mvm->ftm_initiator.req = req;
		mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
	}

	return err;
}

void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_abort_cmd cmd = {
		.request_id = req->cookie,
	};

	lockdep_assert_held(&mvm->mutex);

	if (req != mvm->ftm_initiator.req)
		return;

	iwl_mvm_ftm_reset(mvm);

	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
				 0, sizeof(cmd), &cmd))
		IWL_ERR(mvm, "failed to abort FTM process\n");
}

static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req,
				 const u8 *addr)
{
	int i;

	for (i = 0; i < req->n_peers; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		if (ether_addr_equal_unaligned(peer->addr, addr))
			return i;
	}

	return -ENOENT;
}

static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
{
	u32 gp2_ts = le32_to_cpu(fw_gp2_ts);
	u32 curr_gp2, diff;
	u64 now_from_boot_ns;

	iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
			      &now_from_boot_ns, NULL);

	if (curr_gp2 >= gp2_ts)
		diff = curr_gp2 - gp2_ts;
	else
		diff = curr_gp2 + (U32_MAX - gp2_ts + 1);

	return now_from_boot_ns - (u64)diff * 1000;
}

static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm,
				      struct cfg80211_pmsr_result *res)
{
	struct iwl_mvm_loc_entry *entry;

	list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) {
		if (!ether_addr_equal_unaligned(res->addr, entry->addr))
			continue;

		if (entry->lci_len) {
			res->ftm.lci_len = entry->lci_len;
			res->ftm.lci = entry->buf;
		}

		if (entry->civic_len) {
			res->ftm.civicloc_len = entry->civic_len;
			res->ftm.civicloc = entry->buf + entry->lci_len;
		}

		/* we found the entry we needed */
		break;
	}
}

static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
					u8 num_of_aps)
{
	lockdep_assert_held(&mvm->mutex);

	if (request_id != (u8)mvm->ftm_initiator.req->cookie) {
		IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n",
			request_id, (u8)mvm->ftm_initiator.req->cookie);
		return -EINVAL;
	}

	if (num_of_aps > mvm->ftm_initiator.req->n_peers) {
		IWL_ERR(mvm, "FTM range response invalid\n");
		return -EINVAL;
	}

	return 0;
}

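/*
 * Smooth the reported RTT with a per-responder exponentially weighted
 * average; the reported value is only replaced when it deviates from the
 * average by more than the configured under/overshoot thresholds.
 */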
static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
				      struct cfg80211_pmsr_result *res)
{
	struct iwl_mvm_smooth_entry *resp = NULL, *iter;
	s64 rtt_avg, rtt = res->ftm.rtt_avg;
	u32 undershoot, overshoot;
	u8 alpha;

	if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH)
		return;

	WARN_ON(rtt < 0);

	if (res->status != NL80211_PMSR_STATUS_SUCCESS) {
		IWL_DEBUG_INFO(mvm,
			       ": %pM: ignore failed measurement. Status=%u\n",
			       res->addr, res->status);
		return;
	}

	list_for_each_entry(iter, &mvm->ftm_initiator.smooth.resp, list) {
		if (!memcmp(res->addr, iter->addr, ETH_ALEN)) {
			resp = iter;
			break;
		}
	}

	if (!resp) {
		resp = kzalloc(sizeof(*resp), GFP_KERNEL);
		if (!resp)
			return;

		memcpy(resp->addr, res->addr, ETH_ALEN);
		list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp);

		resp->rtt_avg = rtt;

		IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n",
			       resp->addr, resp->rtt_avg);
		goto update_time;
	}

	if (res->host_time - resp->host_time >
	    IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) {
		resp->rtt_avg = rtt;

		IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n",
			       resp->addr, resp->rtt_avg);
		goto update_time;
	}

	/* Smooth the results based on the tracked RTT average */
	undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT;
	overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
	alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;

	rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100);

	IWL_DEBUG_INFO(mvm,
		       "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
		       resp->addr, resp->rtt_avg, rtt_avg, rtt);

	/*
	 * update the responder's average RTT results regardless of
	 * the under/over shoot logic below
	 */
	resp->rtt_avg = rtt_avg;

	/* smooth the results */
	if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) {
		res->ftm.rtt_avg = rtt_avg;

		IWL_DEBUG_INFO(mvm,
			       "undershoot: val=%lld\n",
			       (rtt_avg - rtt));
	} else if (rtt_avg < rtt && (rtt - rtt_avg) >
		   overshoot) {
		res->ftm.rtt_avg = rtt_avg;
		IWL_DEBUG_INFO(mvm,
			       "overshoot: val=%lld\n",
			       (rtt - rtt_avg));
	}

update_time:
	resp->host_time = res->host_time;
}

static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
				     struct cfg80211_pmsr_result *res)
{
	s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);

	IWL_DEBUG_INFO(mvm, "entry %d\n", index);
	IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
	IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
	IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
	IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index);
	IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
	IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
	IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread);
	IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
	IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
	IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
	IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg);
}

static void
iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
			   struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap)
{
	struct iwl_mvm_ftm_pasn_entry *entry;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
		if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr)))
			continue;

		memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn));
		memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn));
		return;
	}
}

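/* Determine which version of the range response notification the firmware uses */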
static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm)
{
	if (!fw_has_api(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ))
		return 5;

	/* Starting from version 8, the FW advertises the version */
	if (mvm->cmd_ver.range_resp >= 8)
		return mvm->cmd_ver.range_resp;
	else if (fw_has_api(&mvm->fw->ucode_capa,
			    IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
		return 7;

	/* The first version of the new range request API */
	return 6;
}

static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
{
	switch (ver) {
	case 9:
	case 8:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8);
	case 7:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7);
	case 6:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6);
	case 5:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5);
	default:
		WARN_ONCE(1, "FTM: unsupported range response version %u", ver);
		return false;
	}
}

void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data;
	int i;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
	u8 num_of_aps, last_in_batch;
	u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm);

	lockdep_assert_held(&mvm->mutex);

	if (!mvm->ftm_initiator.req)
		return;

	if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len)))
		return;

	if (new_api) {
		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id,
						 fw_resp_v8->num_of_aps))
			return;

		num_of_aps = fw_resp_v8->num_of_aps;
		last_in_batch = fw_resp_v8->last_report;
	} else {
		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
						 fw_resp_v5->num_of_aps))
			return;

		num_of_aps = fw_resp_v5->num_of_aps;
		last_in_batch = fw_resp_v5->last_in_batch;
	}

	IWL_DEBUG_INFO(mvm, "Range response received\n");
	IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n",
		       mvm->ftm_initiator.req->cookie, num_of_aps);

	for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
		struct cfg80211_pmsr_result result = {};
		struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap;
		int peer_idx;

		if (new_api) {
			if (notif_ver >= 8) {
				fw_ap = &fw_resp_v8->ap[i];
				iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
			} else if (notif_ver == 7) {
				fw_ap = (void *)&fw_resp_v7->ap[i];
			} else {
				fw_ap = (void *)&fw_resp_v6->ap[i];
			}

			result.final = fw_ap->last_burst;
			result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
			result.ap_tsf_valid = 1;
		} else {
			/* the first part is the same for old and new APIs */
			fw_ap = (void *)&fw_resp_v5->ap[i];
			/*
			 * FIXME: the firmware needs to report this, we don't
			 * even know the number of bursts the responder picked
			 * (if we asked it to)
			 */
			result.final = 0;
		}

		peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req,
						 fw_ap->bssid);
		if (peer_idx < 0) {
			IWL_WARN(mvm,
				 "Unknown address (%pM, target #%d) in FTM response\n",
				 fw_ap->bssid, i);
			continue;
		}

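		/* Translate the firmware's per-entry status to a PMSR status */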
		switch (fw_ap->measure_status) {
		case IWL_TOF_ENTRY_SUCCESS:
			result.status = NL80211_PMSR_STATUS_SUCCESS;
			break;
		case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
			result.status = NL80211_PMSR_STATUS_TIMEOUT;
			break;
		case IWL_TOF_ENTRY_NO_RESPONSE:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
			break;
		case IWL_TOF_ENTRY_REQUEST_REJECTED:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
			result.ftm.busy_retry_time = fw_ap->refusal_period;
			break;
		default:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
			break;
		}
		memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
		result.host_time = iwl_mvm_ftm_get_host_time(mvm,
							     fw_ap->timestamp);
		result.type = NL80211_PMSR_TYPE_FTM;
		result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx];
		mvm->ftm_initiator.responses[peer_idx]++;
		result.ftm.rssi_avg = fw_ap->rssi;
		result.ftm.rssi_avg_valid = 1;
		result.ftm.rssi_spread = fw_ap->rssi_spread;
		result.ftm.rssi_spread_valid = 1;
		result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
		result.ftm.rtt_avg_valid = 1;
		result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
		result.ftm.rtt_variance_valid = 1;
		result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
		result.ftm.rtt_spread_valid = 1;

		iwl_mvm_ftm_get_lci_civic(mvm, &result);

		iwl_mvm_ftm_rtt_smoothing(mvm, &result);

		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
				     mvm->ftm_initiator.req,
				     &result, GFP_KERNEL);

		if (fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
			IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n",
				       fw_ap->rttConfidence);

		iwl_mvm_debug_range_resp(mvm, i, &result);
	}

	if (last_in_batch) {
		cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
				       mvm->ftm_initiator.req,
				       GFP_KERNEL);
		iwl_mvm_ftm_reset(mvm);
	}
}

void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	const struct ieee80211_mgmt *mgmt = (void *)pkt->data;
	size_t len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mvm_loc_entry *entry;
	const u8 *ies, *lci, *civic, *msr_ie;
	size_t ies_len, lci_len = 0, civic_len = 0;
	size_t baselen = IEEE80211_MIN_ACTION_SIZE +
			 sizeof(mgmt->u.action.u.ftm);
	static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI;
	static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC;

	if (len <= baselen)
		return;

	lockdep_assert_held(&mvm->mutex);

	ies = mgmt->u.action.u.ftm.variable;
	ies_len = len - baselen;

	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
					&rprt_type_lci, 1, 4);
	if (msr_ie) {
		lci = msr_ie + 2;
		lci_len = msr_ie[1];
	}

	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
					&rprt_type_civic, 1, 4);
	if (msr_ie) {
		civic = msr_ie + 2;
		civic_len = msr_ie[1];
	}

	entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL);
	if (!entry)
		return;

	memcpy(entry->addr, mgmt->bssid, ETH_ALEN);

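	/* The LCI data is stored first in the buffer, followed by the civic data */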
	entry->lci_len = lci_len;
	if (lci_len)
		memcpy(entry->buf, lci, lci_len);

	entry->civic_len = civic_len;
	if (civic_len)
		memcpy(entry->buf + lci_len, civic, civic_len);

	list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list);
}