1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* 3 * Copyright (C) 2015-2017 Intel Deutschland GmbH 4 * Copyright (C) 2018-2024 Intel Corporation 5 */ 6 #include <linux/etherdevice.h> 7 #include <linux/math64.h> 8 #include <net/cfg80211.h> 9 #include "mvm.h" 10 #include "iwl-io.h" 11 #include "iwl-prph.h" 12 #include "constants.h" 13 14 struct iwl_mvm_loc_entry { 15 struct list_head list; 16 u8 addr[ETH_ALEN]; 17 u8 lci_len, civic_len; 18 u8 buf[]; 19 }; 20 21 struct iwl_mvm_smooth_entry { 22 struct list_head list; 23 u8 addr[ETH_ALEN]; 24 s64 rtt_avg; 25 u64 host_time; 26 }; 27 28 enum iwl_mvm_pasn_flags { 29 IWL_MVM_PASN_FLAG_HAS_HLTK = BIT(0), 30 }; 31 32 struct iwl_mvm_ftm_pasn_entry { 33 struct list_head list; 34 u8 addr[ETH_ALEN]; 35 u8 hltk[HLTK_11AZ_LEN]; 36 u8 tk[TK_11AZ_LEN]; 37 u8 cipher; 38 u8 tx_pn[IEEE80211_CCMP_PN_LEN]; 39 u8 rx_pn[IEEE80211_CCMP_PN_LEN]; 40 u32 flags; 41 }; 42 43 int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 44 u8 *addr, u32 cipher, u8 *tk, u32 tk_len, 45 u8 *hltk, u32 hltk_len) 46 { 47 struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn), 48 GFP_KERNEL); 49 u32 expected_tk_len; 50 51 lockdep_assert_held(&mvm->mutex); 52 53 if (!pasn) 54 return -ENOBUFS; 55 56 iwl_mvm_ftm_remove_pasn_sta(mvm, addr); 57 58 pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher); 59 60 switch (pasn->cipher) { 61 case IWL_LOCATION_CIPHER_CCMP_128: 62 case IWL_LOCATION_CIPHER_GCMP_128: 63 expected_tk_len = WLAN_KEY_LEN_CCMP; 64 break; 65 case IWL_LOCATION_CIPHER_GCMP_256: 66 expected_tk_len = WLAN_KEY_LEN_GCMP_256; 67 break; 68 default: 69 goto out; 70 } 71 72 /* 73 * If associated to this AP and already have security context, 74 * the TK is already configured for this station, so it 75 * shouldn't be set again here. 76 */ 77 if (vif->cfg.assoc) { 78 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 79 struct ieee80211_bss_conf *link_conf; 80 unsigned int link_id; 81 struct ieee80211_sta *sta; 82 u8 sta_id; 83 84 rcu_read_lock(); 85 for_each_vif_active_link(vif, link_conf, link_id) { 86 if (memcmp(addr, link_conf->bssid, ETH_ALEN)) 87 continue; 88 89 sta_id = mvmvif->link[link_id]->ap_sta_id; 90 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 91 if (!IS_ERR_OR_NULL(sta) && sta->mfp) 92 expected_tk_len = 0; 93 break; 94 } 95 rcu_read_unlock(); 96 } 97 98 if (tk_len != expected_tk_len || 99 (hltk_len && hltk_len != sizeof(pasn->hltk))) { 100 IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n", 101 tk_len, hltk_len); 102 goto out; 103 } 104 105 if (!expected_tk_len && !hltk_len) { 106 IWL_ERR(mvm, "TK and HLTK not set\n"); 107 goto out; 108 } 109 110 memcpy(pasn->addr, addr, sizeof(pasn->addr)); 111 112 if (hltk_len) { 113 memcpy(pasn->hltk, hltk, sizeof(pasn->hltk)); 114 pasn->flags |= IWL_MVM_PASN_FLAG_HAS_HLTK; 115 } 116 117 if (tk && tk_len) 118 memcpy(pasn->tk, tk, sizeof(pasn->tk)); 119 120 list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list); 121 return 0; 122 out: 123 kfree(pasn); 124 return -EINVAL; 125 } 126 127 void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr) 128 { 129 struct iwl_mvm_ftm_pasn_entry *entry, *prev; 130 131 lockdep_assert_held(&mvm->mutex); 132 133 list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list, 134 list) { 135 if (memcmp(entry->addr, addr, sizeof(entry->addr))) 136 continue; 137 138 list_del(&entry->list); 139 kfree(entry); 140 return; 141 } 142 } 143 144 static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm) 145 { 146 struct iwl_mvm_loc_entry *e, *t; 147 148 mvm->ftm_initiator.req = NULL; 149 mvm->ftm_initiator.req_wdev = NULL; 150 memset(mvm->ftm_initiator.responses, 0, 151 sizeof(mvm->ftm_initiator.responses)); 152 153 list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) { 154 list_del(&e->list); 155 kfree(e); 156 } 157 } 158 159 void iwl_mvm_ftm_restart(struct iwl_mvm *mvm) 160 { 161 struct cfg80211_pmsr_result result = { 162 .status = NL80211_PMSR_STATUS_FAILURE, 163 .final = 1, 164 .host_time = ktime_get_boottime_ns(), 165 .type = NL80211_PMSR_TYPE_FTM, 166 }; 167 int i; 168 169 lockdep_assert_held(&mvm->mutex); 170 171 if (!mvm->ftm_initiator.req) 172 return; 173 174 for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) { 175 memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr, 176 ETH_ALEN); 177 result.ftm.burst_index = mvm->ftm_initiator.responses[i]; 178 179 cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev, 180 mvm->ftm_initiator.req, 181 &result, GFP_KERNEL); 182 } 183 184 cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev, 185 mvm->ftm_initiator.req, GFP_KERNEL); 186 iwl_mvm_ftm_reset(mvm); 187 } 188 189 void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm) 190 { 191 INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp); 192 193 IWL_DEBUG_INFO(mvm, 194 "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n", 195 IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH, 196 IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA, 197 IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ, 198 IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT, 199 IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT); 200 } 201 202 void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm) 203 { 204 struct iwl_mvm_smooth_entry *se, *st; 205 206 list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp, 207 list) { 208 list_del(&se->list); 209 kfree(se); 210 } 211 } 212 213 static int 214 iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s) 215 { 216 switch (s) { 217 case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS: 218 return 0; 219 case IWL_TOF_RANGE_REQUEST_STATUS_BUSY: 220 return -EBUSY; 221 default: 222 WARN_ON_ONCE(1); 223 return -EIO; 224 } 225 } 226 227 static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 228 struct iwl_tof_range_req_cmd_v5 *cmd, 229 struct cfg80211_pmsr_request *req) 230 { 231 int i; 232 233 cmd->request_id = req->cookie; 234 cmd->num_of_ap = req->n_peers; 235 236 /* use maximum for "no timeout" or bigger than what we can do */ 237 if (!req->timeout || req->timeout > 255 * 100) 238 cmd->req_timeout = 255; 239 else 240 cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100); 241 242 /* 243 * We treat it always as random, since if not we'll 244 * have filled our local address there instead. 245 */ 246 cmd->macaddr_random = 1; 247 memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN); 248 for (i = 0; i < ETH_ALEN; i++) 249 cmd->macaddr_mask[i] = ~req->mac_addr_mask[i]; 250 251 if (vif->cfg.assoc) 252 memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN); 253 else 254 eth_broadcast_addr(cmd->range_req_bssid); 255 } 256 257 static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm, 258 struct ieee80211_vif *vif, 259 struct iwl_tof_range_req_cmd_v9 *cmd, 260 struct cfg80211_pmsr_request *req) 261 { 262 int i; 263 264 cmd->initiator_flags = 265 cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM | 266 IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT); 267 cmd->request_id = req->cookie; 268 cmd->num_of_ap = req->n_peers; 269 270 /* 271 * Use a large value for "no timeout". Don't use the maximum value 272 * because of fw limitations. 273 */ 274 if (req->timeout) 275 cmd->req_timeout_ms = cpu_to_le32(req->timeout); 276 else 277 cmd->req_timeout_ms = cpu_to_le32(0xfffff); 278 279 memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN); 280 for (i = 0; i < ETH_ALEN; i++) 281 cmd->macaddr_mask[i] = ~req->mac_addr_mask[i]; 282 283 if (vif->cfg.assoc) { 284 memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN); 285 286 /* AP's TSF is only relevant if associated */ 287 for (i = 0; i < req->n_peers; i++) { 288 if (req->peers[i].report_ap_tsf) { 289 struct iwl_mvm_vif *mvmvif = 290 iwl_mvm_vif_from_mac80211(vif); 291 292 cmd->tsf_mac_id = cpu_to_le32(mvmvif->id); 293 return; 294 } 295 } 296 } else { 297 eth_broadcast_addr(cmd->range_req_bssid); 298 } 299 300 /* Don't report AP's TSF */ 301 cmd->tsf_mac_id = cpu_to_le32(0xff); 302 } 303 304 static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 305 struct iwl_tof_range_req_cmd_v8 *cmd, 306 struct cfg80211_pmsr_request *req) 307 { 308 iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req); 309 } 310 311 static int 312 iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm, 313 struct cfg80211_pmsr_request_peer *peer, 314 u8 *channel, u8 *bandwidth, 315 u8 *ctrl_ch_position) 316 { 317 u32 freq = peer->chandef.chan->center_freq; 318 319 *channel = ieee80211_frequency_to_channel(freq); 320 321 switch (peer->chandef.width) { 322 case NL80211_CHAN_WIDTH_20_NOHT: 323 *bandwidth = IWL_TOF_BW_20_LEGACY; 324 break; 325 case NL80211_CHAN_WIDTH_20: 326 *bandwidth = IWL_TOF_BW_20_HT; 327 break; 328 case NL80211_CHAN_WIDTH_40: 329 *bandwidth = IWL_TOF_BW_40; 330 break; 331 case NL80211_CHAN_WIDTH_80: 332 *bandwidth = IWL_TOF_BW_80; 333 break; 334 default: 335 IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n", 336 peer->chandef.width); 337 return -EINVAL; 338 } 339 340 *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ? 341 iwl_mvm_get_ctrl_pos(&peer->chandef) : 0; 342 343 return 0; 344 } 345 346 static int 347 iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm, 348 struct cfg80211_pmsr_request_peer *peer, 349 u8 *channel, u8 *format_bw, 350 u8 *ctrl_ch_position) 351 { 352 u32 freq = peer->chandef.chan->center_freq; 353 u8 cmd_ver; 354 355 *channel = ieee80211_frequency_to_channel(freq); 356 357 switch (peer->chandef.width) { 358 case NL80211_CHAN_WIDTH_20_NOHT: 359 *format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY; 360 *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS; 361 break; 362 case NL80211_CHAN_WIDTH_20: 363 *format_bw = IWL_LOCATION_FRAME_FORMAT_HT; 364 *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS; 365 break; 366 case NL80211_CHAN_WIDTH_40: 367 *format_bw = IWL_LOCATION_FRAME_FORMAT_HT; 368 *format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS; 369 break; 370 case NL80211_CHAN_WIDTH_80: 371 *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT; 372 *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS; 373 break; 374 case NL80211_CHAN_WIDTH_160: 375 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, 376 WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), 377 IWL_FW_CMD_VER_UNKNOWN); 378 379 if (cmd_ver >= 13) { 380 *format_bw = IWL_LOCATION_FRAME_FORMAT_HE; 381 *format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS; 382 break; 383 } 384 fallthrough; 385 default: 386 IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n", 387 peer->chandef.width); 388 return -EINVAL; 389 } 390 391 /* non EDCA based measurement must use HE preamble */ 392 if (peer->ftm.trigger_based || peer->ftm.non_trigger_based) 393 *format_bw |= IWL_LOCATION_FRAME_FORMAT_HE; 394 395 *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ? 396 iwl_mvm_get_ctrl_pos(&peer->chandef) : 0; 397 398 return 0; 399 } 400 401 static int 402 iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm, 403 struct cfg80211_pmsr_request_peer *peer, 404 struct iwl_tof_range_req_ap_entry_v2 *target) 405 { 406 int ret; 407 408 ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num, 409 &target->bandwidth, 410 &target->ctrl_ch_position); 411 if (ret) 412 return ret; 413 414 memcpy(target->bssid, peer->addr, ETH_ALEN); 415 target->burst_period = 416 cpu_to_le16(peer->ftm.burst_period); 417 target->samples_per_burst = peer->ftm.ftms_per_burst; 418 target->num_of_bursts = peer->ftm.num_bursts_exp; 419 target->measure_type = 0; /* regular two-sided FTM */ 420 target->retries_per_sample = peer->ftm.ftmr_retries; 421 target->asap_mode = peer->ftm.asap; 422 target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK; 423 424 if (peer->ftm.request_lci) 425 target->location_req |= IWL_TOF_LOC_LCI; 426 if (peer->ftm.request_civicloc) 427 target->location_req |= IWL_TOF_LOC_CIVIC; 428 429 target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO; 430 431 return 0; 432 } 433 434 #define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \ 435 cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag)) 436 437 static void 438 iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm, 439 struct cfg80211_pmsr_request_peer *peer, 440 struct iwl_tof_range_req_ap_entry_v6 *target) 441 { 442 memcpy(target->bssid, peer->addr, ETH_ALEN); 443 target->burst_period = 444 cpu_to_le16(peer->ftm.burst_period); 445 target->samples_per_burst = peer->ftm.ftms_per_burst; 446 target->num_of_bursts = peer->ftm.num_bursts_exp; 447 target->ftmr_max_retries = peer->ftm.ftmr_retries; 448 target->initiator_ap_flags = cpu_to_le32(0); 449 450 if (peer->ftm.asap) 451 FTM_PUT_FLAG(ASAP); 452 453 if (peer->ftm.request_lci) 454 FTM_PUT_FLAG(LCI_REQUEST); 455 456 if (peer->ftm.request_civicloc) 457 FTM_PUT_FLAG(CIVIC_REQUEST); 458 459 if (IWL_MVM_FTM_INITIATOR_DYNACK) 460 FTM_PUT_FLAG(DYN_ACK); 461 462 if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG) 463 FTM_PUT_FLAG(ALGO_LR); 464 else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT) 465 FTM_PUT_FLAG(ALGO_FFT); 466 467 if (peer->ftm.trigger_based) 468 FTM_PUT_FLAG(TB); 469 else if (peer->ftm.non_trigger_based) 470 FTM_PUT_FLAG(NON_TB); 471 472 if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) && 473 peer->ftm.lmr_feedback) 474 FTM_PUT_FLAG(LMR_FEEDBACK); 475 } 476 477 static int 478 iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm, 479 struct cfg80211_pmsr_request_peer *peer, 480 struct iwl_tof_range_req_ap_entry_v3 *target) 481 { 482 int ret; 483 484 ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num, 485 &target->bandwidth, 486 &target->ctrl_ch_position); 487 if (ret) 488 return ret; 489 490 /* 491 * Versions 3 and 4 has some common fields, so 492 * iwl_mvm_ftm_put_target_common() can be used for version 7 too. 493 */ 494 iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target); 495 496 return 0; 497 } 498 499 static int 500 iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm, 501 struct cfg80211_pmsr_request_peer *peer, 502 struct iwl_tof_range_req_ap_entry_v4 *target) 503 { 504 int ret; 505 506 ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num, 507 &target->format_bw, 508 &target->ctrl_ch_position); 509 if (ret) 510 return ret; 511 512 iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target); 513 514 return 0; 515 } 516 517 static int 518 iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 519 struct cfg80211_pmsr_request_peer *peer, 520 struct iwl_tof_range_req_ap_entry_v6 *target) 521 { 522 int ret; 523 524 ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num, 525 &target->format_bw, 526 &target->ctrl_ch_position); 527 if (ret) 528 return ret; 529 530 iwl_mvm_ftm_put_target_common(mvm, peer, target); 531 532 if (vif->cfg.assoc) { 533 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 534 struct ieee80211_sta *sta; 535 struct ieee80211_bss_conf *link_conf; 536 unsigned int link_id; 537 538 rcu_read_lock(); 539 for_each_vif_active_link(vif, link_conf, link_id) { 540 if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN)) 541 continue; 542 543 target->sta_id = mvmvif->link[link_id]->ap_sta_id; 544 sta = rcu_dereference(mvm->fw_id_to_mac_id[target->sta_id]); 545 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { 546 rcu_read_unlock(); 547 return PTR_ERR_OR_ZERO(sta); 548 } 549 550 if (sta->mfp && (peer->ftm.trigger_based || 551 peer->ftm.non_trigger_based)) 552 FTM_PUT_FLAG(PMF); 553 break; 554 } 555 rcu_read_unlock(); 556 557 #ifdef CONFIG_IWLWIFI_DEBUGFS 558 if (mvmvif->ftm_unprotected) { 559 target->sta_id = IWL_MVM_INVALID_STA; 560 target->initiator_ap_flags &= 561 ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF); 562 } 563 564 #endif 565 } else { 566 target->sta_id = IWL_MVM_INVALID_STA; 567 } 568 569 /* 570 * TODO: Beacon interval is currently unknown, so use the common value 571 * of 100 TUs. 572 */ 573 target->beacon_interval = cpu_to_le16(100); 574 return 0; 575 } 576 577 static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd) 578 { 579 u32 status; 580 int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status); 581 582 if (!err && status) { 583 IWL_ERR(mvm, "FTM range request command failure, status: %u\n", 584 status); 585 err = iwl_ftm_range_request_status_to_err(status); 586 } 587 588 return err; 589 } 590 591 static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 592 struct cfg80211_pmsr_request *req) 593 { 594 struct iwl_tof_range_req_cmd_v5 cmd_v5; 595 struct iwl_host_cmd hcmd = { 596 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), 597 .dataflags[0] = IWL_HCMD_DFL_DUP, 598 .data[0] = &cmd_v5, 599 .len[0] = sizeof(cmd_v5), 600 }; 601 u8 i; 602 int err; 603 604 iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req); 605 606 for (i = 0; i < cmd_v5.num_of_ap; i++) { 607 struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; 608 609 err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]); 610 if (err) 611 return err; 612 } 613 614 return iwl_mvm_ftm_send_cmd(mvm, &hcmd); 615 } 616 617 static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 618 struct cfg80211_pmsr_request *req) 619 { 620 struct iwl_tof_range_req_cmd_v7 cmd_v7; 621 struct iwl_host_cmd hcmd = { 622 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), 623 .dataflags[0] = IWL_HCMD_DFL_DUP, 624 .data[0] = &cmd_v7, 625 .len[0] = sizeof(cmd_v7), 626 }; 627 u8 i; 628 int err; 629 630 /* 631 * Versions 7 and 8 has the same structure except from the responders 632 * list, so iwl_mvm_ftm_cmd() can be used for version 7 too. 633 */ 634 iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req); 635 636 for (i = 0; i < cmd_v7.num_of_ap; i++) { 637 struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; 638 639 err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]); 640 if (err) 641 return err; 642 } 643 644 return iwl_mvm_ftm_send_cmd(mvm, &hcmd); 645 } 646 647 static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 648 struct cfg80211_pmsr_request *req) 649 { 650 struct iwl_tof_range_req_cmd_v8 cmd; 651 struct iwl_host_cmd hcmd = { 652 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), 653 .dataflags[0] = IWL_HCMD_DFL_DUP, 654 .data[0] = &cmd, 655 .len[0] = sizeof(cmd), 656 }; 657 u8 i; 658 int err; 659 660 iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req); 661 662 for (i = 0; i < cmd.num_of_ap; i++) { 663 struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; 664 665 err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]); 666 if (err) 667 return err; 668 } 669 670 return iwl_mvm_ftm_send_cmd(mvm, &hcmd); 671 } 672 673 static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 674 struct cfg80211_pmsr_request *req) 675 { 676 struct iwl_tof_range_req_cmd_v9 cmd; 677 struct iwl_host_cmd hcmd = { 678 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), 679 .dataflags[0] = IWL_HCMD_DFL_DUP, 680 .data[0] = &cmd, 681 .len[0] = sizeof(cmd), 682 }; 683 u8 i; 684 int err; 685 686 iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req); 687 688 for (i = 0; i < cmd.num_of_ap; i++) { 689 struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; 690 struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i]; 691 692 err = iwl_mvm_ftm_put_target(mvm, vif, peer, target); 693 if (err) 694 return err; 695 } 696 697 return iwl_mvm_ftm_send_cmd(mvm, &hcmd); 698 } 699 700 static void iter(struct ieee80211_hw *hw, 701 struct ieee80211_vif *vif, 702 struct ieee80211_sta *sta, 703 struct ieee80211_key_conf *key, 704 void *data) 705 { 706 struct iwl_tof_range_req_ap_entry_v6 *target = data; 707 708 if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN)) 709 return; 710 711 WARN_ON(!sta->mfp); 712 713 if (WARN_ON(key->keylen > sizeof(target->tk))) 714 return; 715 716 memcpy(target->tk, key->key, key->keylen); 717 target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher); 718 WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID); 719 } 720 721 static void 722 iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 723 struct iwl_tof_range_req_ap_entry_v7 *target) 724 { 725 struct iwl_mvm_ftm_pasn_entry *entry; 726 u32 flags = le32_to_cpu(target->initiator_ap_flags); 727 #ifdef CONFIG_IWLWIFI_DEBUGFS 728 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 729 730 if (mvmvif->ftm_unprotected) 731 return; 732 #endif 733 734 if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB | 735 IWL_INITIATOR_AP_FLAGS_TB))) 736 return; 737 738 lockdep_assert_held(&mvm->mutex); 739 740 list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) { 741 if (memcmp(entry->addr, target->bssid, sizeof(entry->addr))) 742 continue; 743 744 target->cipher = entry->cipher; 745 746 if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK) 747 memcpy(target->hltk, entry->hltk, sizeof(target->hltk)); 748 else 749 memset(target->hltk, 0, sizeof(target->hltk)); 750 751 if (vif->cfg.assoc && 752 !memcmp(vif->bss_conf.bssid, target->bssid, 753 sizeof(target->bssid))) 754 ieee80211_iter_keys(mvm->hw, vif, iter, target); 755 else 756 memcpy(target->tk, entry->tk, sizeof(target->tk)); 757 758 memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn)); 759 memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn)); 760 761 target->initiator_ap_flags |= 762 cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED); 763 return; 764 } 765 } 766 767 static int 768 iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 769 struct cfg80211_pmsr_request_peer *peer, 770 struct iwl_tof_range_req_ap_entry_v7 *target) 771 { 772 int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target); 773 if (err) 774 return err; 775 776 iwl_mvm_ftm_set_secured_ranging(mvm, vif, target); 777 return err; 778 } 779 780 static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm, 781 struct ieee80211_vif *vif, 782 struct cfg80211_pmsr_request *req) 783 { 784 struct iwl_tof_range_req_cmd_v11 cmd; 785 struct iwl_host_cmd hcmd = { 786 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), 787 .dataflags[0] = IWL_HCMD_DFL_DUP, 788 .data[0] = &cmd, 789 .len[0] = sizeof(cmd), 790 }; 791 u8 i; 792 int err; 793 794 iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req); 795 796 for (i = 0; i < cmd.num_of_ap; i++) { 797 struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; 798 struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i]; 799 800 err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target); 801 if (err) 802 return err; 803 } 804 805 return iwl_mvm_ftm_send_cmd(mvm, &hcmd); 806 } 807 808 static void 809 iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm, 810 struct iwl_tof_range_req_ap_entry_v8 *target) 811 { 812 /* Only 2 STS are supported on Tx */ 813 u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 : 814 IWL_MVM_FTM_I2R_MAX_STS; 815 816 target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP | 817 (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS); 818 target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP | 819 (i2r_max_sts << IWL_LOCATION_MAX_STS_POS); 820 target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF; 821 target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF; 822 } 823 824 static int 825 iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 826 struct cfg80211_pmsr_request_peer *peer, 827 struct iwl_tof_range_req_ap_entry_v8 *target) 828 { 829 u32 flags; 830 int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target); 831 832 if (ret) 833 return ret; 834 835 iwl_mvm_ftm_set_ndp_params(mvm, target); 836 837 /* 838 * If secure LTF is turned off, replace the flag with PMF only 839 */ 840 flags = le32_to_cpu(target->initiator_ap_flags); 841 if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) { 842 if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF) 843 flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED; 844 845 flags |= IWL_INITIATOR_AP_FLAGS_PMF; 846 target->initiator_ap_flags = cpu_to_le32(flags); 847 } 848 849 return 0; 850 } 851 852 static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm, 853 struct ieee80211_vif *vif, 854 struct cfg80211_pmsr_request *req) 855 { 856 struct iwl_tof_range_req_cmd_v12 cmd; 857 struct iwl_host_cmd hcmd = { 858 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), 859 .dataflags[0] = IWL_HCMD_DFL_DUP, 860 .data[0] = &cmd, 861 .len[0] = sizeof(cmd), 862 }; 863 u8 i; 864 int err; 865 866 iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req); 867 868 for (i = 0; i < cmd.num_of_ap; i++) { 869 struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; 870 struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i]; 871 872 err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target); 873 if (err) 874 return err; 875 } 876 877 return iwl_mvm_ftm_send_cmd(mvm, &hcmd); 878 } 879 880 static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm, 881 struct ieee80211_vif *vif, 882 struct cfg80211_pmsr_request *req) 883 { 884 struct iwl_tof_range_req_cmd_v13 cmd; 885 struct iwl_host_cmd hcmd = { 886 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), 887 .dataflags[0] = IWL_HCMD_DFL_DUP, 888 .data[0] = &cmd, 889 .len[0] = sizeof(cmd), 890 }; 891 u8 i; 892 int err; 893 894 iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req); 895 896 for (i = 0; i < cmd.num_of_ap; i++) { 897 struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; 898 struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i]; 899 900 err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target); 901 if (err) 902 return err; 903 904 if (peer->ftm.trigger_based || peer->ftm.non_trigger_based) 905 target->bss_color = peer->ftm.bss_color; 906 907 if (peer->ftm.non_trigger_based) { 908 target->min_time_between_msr = 909 cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR); 910 target->burst_period = 911 cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR); 912 } else { 913 target->min_time_between_msr = cpu_to_le16(0); 914 } 915 916 target->band = 917 iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band); 918 } 919 920 return iwl_mvm_ftm_send_cmd(mvm, &hcmd); 921 } 922 923 int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 924 struct cfg80211_pmsr_request *req) 925 { 926 bool new_api = fw_has_api(&mvm->fw->ucode_capa, 927 IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ); 928 int err; 929 930 lockdep_assert_held(&mvm->mutex); 931 932 if (mvm->ftm_initiator.req) 933 return -EBUSY; 934 935 if (new_api) { 936 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, 937 WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), 938 IWL_FW_CMD_VER_UNKNOWN); 939 940 switch (cmd_ver) { 941 case 13: 942 err = iwl_mvm_ftm_start_v13(mvm, vif, req); 943 break; 944 case 12: 945 err = iwl_mvm_ftm_start_v12(mvm, vif, req); 946 break; 947 case 11: 948 err = iwl_mvm_ftm_start_v11(mvm, vif, req); 949 break; 950 case 9: 951 case 10: 952 err = iwl_mvm_ftm_start_v9(mvm, vif, req); 953 break; 954 case 8: 955 err = iwl_mvm_ftm_start_v8(mvm, vif, req); 956 break; 957 default: 958 err = iwl_mvm_ftm_start_v7(mvm, vif, req); 959 break; 960 } 961 } else { 962 err = iwl_mvm_ftm_start_v5(mvm, vif, req); 963 } 964 965 if (!err) { 966 mvm->ftm_initiator.req = req; 967 mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif); 968 } 969 970 return err; 971 } 972 973 void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req) 974 { 975 struct iwl_tof_range_abort_cmd cmd = { 976 .request_id = req->cookie, 977 }; 978 979 lockdep_assert_held(&mvm->mutex); 980 981 if (req != mvm->ftm_initiator.req) 982 return; 983 984 iwl_mvm_ftm_reset(mvm); 985 986 if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD), 987 0, sizeof(cmd), &cmd)) 988 IWL_ERR(mvm, "failed to abort FTM process\n"); 989 } 990 991 static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req, 992 const u8 *addr) 993 { 994 int i; 995 996 for (i = 0; i < req->n_peers; i++) { 997 struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; 998 999 if (ether_addr_equal_unaligned(peer->addr, addr)) 1000 return i; 1001 } 1002 1003 return -ENOENT; 1004 } 1005 1006 static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts) 1007 { 1008 u32 gp2_ts = le32_to_cpu(fw_gp2_ts); 1009 u32 curr_gp2, diff; 1010 u64 now_from_boot_ns; 1011 1012 iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2, 1013 &now_from_boot_ns, NULL); 1014 1015 if (curr_gp2 >= gp2_ts) 1016 diff = curr_gp2 - gp2_ts; 1017 else 1018 diff = curr_gp2 + (U32_MAX - gp2_ts + 1); 1019 1020 return now_from_boot_ns - (u64)diff * 1000; 1021 } 1022 1023 static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm, 1024 struct cfg80211_pmsr_result *res) 1025 { 1026 struct iwl_mvm_loc_entry *entry; 1027 1028 list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) { 1029 if (!ether_addr_equal_unaligned(res->addr, entry->addr)) 1030 continue; 1031 1032 if (entry->lci_len) { 1033 res->ftm.lci_len = entry->lci_len; 1034 res->ftm.lci = entry->buf; 1035 } 1036 1037 if (entry->civic_len) { 1038 res->ftm.civicloc_len = entry->civic_len; 1039 res->ftm.civicloc = entry->buf + entry->lci_len; 1040 } 1041 1042 /* we found the entry we needed */ 1043 break; 1044 } 1045 } 1046 1047 static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id, 1048 u8 num_of_aps) 1049 { 1050 lockdep_assert_held(&mvm->mutex); 1051 1052 if (request_id != (u8)mvm->ftm_initiator.req->cookie) { 1053 IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n", 1054 request_id, (u8)mvm->ftm_initiator.req->cookie); 1055 return -EINVAL; 1056 } 1057 1058 if (num_of_aps > mvm->ftm_initiator.req->n_peers) { 1059 IWL_ERR(mvm, "FTM range response invalid\n"); 1060 return -EINVAL; 1061 } 1062 1063 return 0; 1064 } 1065 1066 static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm, 1067 struct cfg80211_pmsr_result *res) 1068 { 1069 struct iwl_mvm_smooth_entry *resp = NULL, *iter; 1070 s64 rtt_avg, rtt = res->ftm.rtt_avg; 1071 u32 undershoot, overshoot; 1072 u8 alpha; 1073 1074 if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH) 1075 return; 1076 1077 WARN_ON(rtt < 0); 1078 1079 if (res->status != NL80211_PMSR_STATUS_SUCCESS) { 1080 IWL_DEBUG_INFO(mvm, 1081 ": %pM: ignore failed measurement. Status=%u\n", 1082 res->addr, res->status); 1083 return; 1084 } 1085 1086 list_for_each_entry(iter, &mvm->ftm_initiator.smooth.resp, list) { 1087 if (!memcmp(res->addr, iter->addr, ETH_ALEN)) { 1088 resp = iter; 1089 break; 1090 } 1091 } 1092 1093 if (!resp) { 1094 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 1095 if (!resp) 1096 return; 1097 1098 memcpy(resp->addr, res->addr, ETH_ALEN); 1099 list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp); 1100 1101 resp->rtt_avg = rtt; 1102 1103 IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n", 1104 resp->addr, resp->rtt_avg); 1105 goto update_time; 1106 } 1107 1108 if (res->host_time - resp->host_time > 1109 IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) { 1110 resp->rtt_avg = rtt; 1111 1112 IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n", 1113 resp->addr, resp->rtt_avg); 1114 goto update_time; 1115 } 1116 1117 /* Smooth the results based on the tracked RTT average */ 1118 undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT; 1119 overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT; 1120 alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA; 1121 1122 rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100); 1123 1124 IWL_DEBUG_INFO(mvm, 1125 "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n", 1126 resp->addr, resp->rtt_avg, rtt_avg, rtt); 1127 1128 /* 1129 * update the responder's average RTT results regardless of 1130 * the under/over shoot logic below 1131 */ 1132 resp->rtt_avg = rtt_avg; 1133 1134 /* smooth the results */ 1135 if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) { 1136 res->ftm.rtt_avg = rtt_avg; 1137 1138 IWL_DEBUG_INFO(mvm, 1139 "undershoot: val=%lld\n", 1140 (rtt_avg - rtt)); 1141 } else if (rtt_avg < rtt && (rtt - rtt_avg) > 1142 overshoot) { 1143 res->ftm.rtt_avg = rtt_avg; 1144 IWL_DEBUG_INFO(mvm, 1145 "overshoot: val=%lld\n", 1146 (rtt - rtt_avg)); 1147 } 1148 1149 update_time: 1150 resp->host_time = res->host_time; 1151 } 1152 1153 static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index, 1154 struct cfg80211_pmsr_result *res) 1155 { 1156 s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666); 1157 1158 IWL_DEBUG_INFO(mvm, "entry %d\n", index); 1159 IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status); 1160 IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr); 1161 IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time); 1162 IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index); 1163 IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes); 1164 IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg); 1165 IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread); 1166 IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg); 1167 IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance); 1168 IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread); 1169 IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg); 1170 } 1171 1172 static void 1173 iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm, 1174 struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap) 1175 { 1176 struct iwl_mvm_ftm_pasn_entry *entry; 1177 1178 lockdep_assert_held(&mvm->mutex); 1179 1180 list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) { 1181 if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr))) 1182 continue; 1183 1184 memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn)); 1185 memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn)); 1186 return; 1187 } 1188 } 1189 1190 static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm) 1191 { 1192 if (!fw_has_api(&mvm->fw->ucode_capa, 1193 IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ)) 1194 return 5; 1195 1196 /* Starting from version 8, the FW advertises the version */ 1197 if (mvm->cmd_ver.range_resp >= 8) 1198 return mvm->cmd_ver.range_resp; 1199 else if (fw_has_api(&mvm->fw->ucode_capa, 1200 IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) 1201 return 7; 1202 1203 /* The first version of the new range request API */ 1204 return 6; 1205 } 1206 1207 static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len) 1208 { 1209 switch (ver) { 1210 case 9: 1211 case 8: 1212 return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8); 1213 case 7: 1214 return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7); 1215 case 6: 1216 return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6); 1217 case 5: 1218 return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5); 1219 default: 1220 WARN_ONCE(1, "FTM: unsupported range response version %u", ver); 1221 return false; 1222 } 1223 } 1224 1225 void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) 1226 { 1227 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1228 unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); 1229 struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data; 1230 struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data; 1231 struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data; 1232 struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data; 1233 int i; 1234 bool new_api = fw_has_api(&mvm->fw->ucode_capa, 1235 IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ); 1236 u8 num_of_aps, last_in_batch; 1237 u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm); 1238 1239 lockdep_assert_held(&mvm->mutex); 1240 1241 if (!mvm->ftm_initiator.req) { 1242 return; 1243 } 1244 1245 if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len))) 1246 return; 1247 1248 if (new_api) { 1249 if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id, 1250 fw_resp_v8->num_of_aps)) 1251 return; 1252 1253 num_of_aps = fw_resp_v8->num_of_aps; 1254 last_in_batch = fw_resp_v8->last_report; 1255 } else { 1256 if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id, 1257 fw_resp_v5->num_of_aps)) 1258 return; 1259 1260 num_of_aps = fw_resp_v5->num_of_aps; 1261 last_in_batch = fw_resp_v5->last_in_batch; 1262 } 1263 1264 IWL_DEBUG_INFO(mvm, "Range response received\n"); 1265 IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n", 1266 mvm->ftm_initiator.req->cookie, num_of_aps); 1267 1268 for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) { 1269 struct cfg80211_pmsr_result result = {}; 1270 struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap; 1271 int peer_idx; 1272 1273 if (new_api) { 1274 if (notif_ver >= 8) { 1275 fw_ap = &fw_resp_v8->ap[i]; 1276 iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap); 1277 } else if (notif_ver == 7) { 1278 fw_ap = (void *)&fw_resp_v7->ap[i]; 1279 } else { 1280 fw_ap = (void *)&fw_resp_v6->ap[i]; 1281 } 1282 1283 result.final = fw_ap->last_burst; 1284 result.ap_tsf = le32_to_cpu(fw_ap->start_tsf); 1285 result.ap_tsf_valid = 1; 1286 } else { 1287 /* the first part is the same for old and new APIs */ 1288 fw_ap = (void *)&fw_resp_v5->ap[i]; 1289 /* 1290 * FIXME: the firmware needs to report this, we don't 1291 * even know the number of bursts the responder picked 1292 * (if we asked it to) 1293 */ 1294 result.final = 0; 1295 } 1296 1297 peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req, 1298 fw_ap->bssid); 1299 if (peer_idx < 0) { 1300 IWL_WARN(mvm, 1301 "Unknown address (%pM, target #%d) in FTM response\n", 1302 fw_ap->bssid, i); 1303 continue; 1304 } 1305 1306 switch (fw_ap->measure_status) { 1307 case IWL_TOF_ENTRY_SUCCESS: 1308 result.status = NL80211_PMSR_STATUS_SUCCESS; 1309 break; 1310 case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT: 1311 result.status = NL80211_PMSR_STATUS_TIMEOUT; 1312 break; 1313 case IWL_TOF_ENTRY_NO_RESPONSE: 1314 result.status = NL80211_PMSR_STATUS_FAILURE; 1315 result.ftm.failure_reason = 1316 NL80211_PMSR_FTM_FAILURE_NO_RESPONSE; 1317 break; 1318 case IWL_TOF_ENTRY_REQUEST_REJECTED: 1319 result.status = NL80211_PMSR_STATUS_FAILURE; 1320 result.ftm.failure_reason = 1321 NL80211_PMSR_FTM_FAILURE_PEER_BUSY; 1322 result.ftm.busy_retry_time = fw_ap->refusal_period; 1323 break; 1324 default: 1325 result.status = NL80211_PMSR_STATUS_FAILURE; 1326 result.ftm.failure_reason = 1327 NL80211_PMSR_FTM_FAILURE_UNSPECIFIED; 1328 break; 1329 } 1330 memcpy(result.addr, fw_ap->bssid, ETH_ALEN); 1331 result.host_time = iwl_mvm_ftm_get_host_time(mvm, 1332 fw_ap->timestamp); 1333 result.type = NL80211_PMSR_TYPE_FTM; 1334 result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx]; 1335 mvm->ftm_initiator.responses[peer_idx]++; 1336 result.ftm.rssi_avg = fw_ap->rssi; 1337 result.ftm.rssi_avg_valid = 1; 1338 result.ftm.rssi_spread = fw_ap->rssi_spread; 1339 result.ftm.rssi_spread_valid = 1; 1340 result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt); 1341 result.ftm.rtt_avg_valid = 1; 1342 result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance); 1343 result.ftm.rtt_variance_valid = 1; 1344 result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread); 1345 result.ftm.rtt_spread_valid = 1; 1346 1347 iwl_mvm_ftm_get_lci_civic(mvm, &result); 1348 1349 iwl_mvm_ftm_rtt_smoothing(mvm, &result); 1350 1351 cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev, 1352 mvm->ftm_initiator.req, 1353 &result, GFP_KERNEL); 1354 1355 if (fw_has_api(&mvm->fw->ucode_capa, 1356 IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) 1357 IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n", 1358 fw_ap->rttConfidence); 1359 1360 iwl_mvm_debug_range_resp(mvm, i, &result); 1361 } 1362 1363 if (last_in_batch) { 1364 cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev, 1365 mvm->ftm_initiator.req, 1366 GFP_KERNEL); 1367 iwl_mvm_ftm_reset(mvm); 1368 } 1369 } 1370 1371 void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) 1372 { 1373 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1374 const struct ieee80211_mgmt *mgmt = (void *)pkt->data; 1375 size_t len = iwl_rx_packet_payload_len(pkt); 1376 struct iwl_mvm_loc_entry *entry; 1377 const u8 *ies, *lci, *civic, *msr_ie; 1378 size_t ies_len, lci_len = 0, civic_len = 0; 1379 size_t baselen = IEEE80211_MIN_ACTION_SIZE + 1380 sizeof(mgmt->u.action.u.ftm); 1381 static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI; 1382 static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC; 1383 1384 if (len <= baselen) 1385 return; 1386 1387 lockdep_assert_held(&mvm->mutex); 1388 1389 ies = mgmt->u.action.u.ftm.variable; 1390 ies_len = len - baselen; 1391 1392 msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len, 1393 &rprt_type_lci, 1, 4); 1394 if (msr_ie) { 1395 lci = msr_ie + 2; 1396 lci_len = msr_ie[1]; 1397 } 1398 1399 msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len, 1400 &rprt_type_civic, 1, 4); 1401 if (msr_ie) { 1402 civic = msr_ie + 2; 1403 civic_len = msr_ie[1]; 1404 } 1405 1406 entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL); 1407 if (!entry) 1408 return; 1409 1410 memcpy(entry->addr, mgmt->bssid, ETH_ALEN); 1411 1412 entry->lci_len = lci_len; 1413 if (lci_len) 1414 memcpy(entry->buf, lci, lci_len); 1415 1416 entry->civic_len = civic_len; 1417 if (civic_len) 1418 memcpy(entry->buf + lci_len, civic, civic_len); 1419 1420 list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list); 1421 } 1422