1 /****************************************************************************** 2 * 3 * This file is provided under a dual BSD/GPLv2 license. When using or 4 * redistributing this file, you may do so under either license. 5 * 6 * GPL LICENSE SUMMARY 7 * 8 * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of version 2 of the GNU General Public License as 14 * published by the Free Software Foundation. 15 * 16 * This program is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * General Public License for more details. 20 * 21 * The full GNU General Public License is included in this distribution 22 * in the file called COPYING. 23 * 24 * Contact Information: 25 * Intel Linux Wireless <linuxwifi@intel.com> 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 27 * 28 * BSD LICENSE 29 * 30 * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. 31 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 32 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 33 * All rights reserved. 34 * 35 * Redistribution and use in source and binary forms, with or without 36 * modification, are permitted provided that the following conditions 37 * are met: 38 * 39 * * Redistributions of source code must retain the above copyright 40 * notice, this list of conditions and the following disclaimer. 41 * * Redistributions in binary form must reproduce the above copyright 42 * notice, this list of conditions and the following disclaimer in 43 * the documentation and/or other materials provided with the 44 * distribution. 45 * * Neither the name Intel Corporation nor the names of its 46 * contributors may be used to endorse or promote products derived 47 * from this software without specific prior written permission. 48 * 49 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 50 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 51 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 52 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 53 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 54 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 55 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 56 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 57 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 58 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 59 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * 61 *****************************************************************************/ 62 63 #include <linux/etherdevice.h> 64 #include <linux/ip.h> 65 #include <linux/fs.h> 66 #include <net/cfg80211.h> 67 #include <net/ipv6.h> 68 #include <net/tcp.h> 69 #include <net/addrconf.h> 70 #include "iwl-modparams.h" 71 #include "fw-api.h" 72 #include "mvm.h" 73 74 void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, 75 struct ieee80211_vif *vif, 76 struct cfg80211_gtk_rekey_data *data) 77 { 78 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 79 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 80 81 mutex_lock(&mvm->mutex); 82 83 memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN); 84 memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN); 85 mvmvif->rekey_data.replay_ctr = 86 cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr)); 87 mvmvif->rekey_data.valid = true; 88 89 mutex_unlock(&mvm->mutex); 90 } 91 92 #if IS_ENABLED(CONFIG_IPV6) 93 void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, 94 struct ieee80211_vif *vif, 95 struct inet6_dev *idev) 96 { 97 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 98 struct inet6_ifaddr *ifa; 99 int idx = 0; 100 101 memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs)); 102 103 read_lock_bh(&idev->lock); 104 list_for_each_entry(ifa, &idev->addr_list, if_list) { 105 mvmvif->target_ipv6_addrs[idx] = ifa->addr; 106 if (ifa->flags & IFA_F_TENTATIVE) 107 __set_bit(idx, mvmvif->tentative_addrs); 108 idx++; 109 if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX) 110 break; 111 } 112 read_unlock_bh(&idev->lock); 113 114 mvmvif->num_target_ipv6_addrs = idx; 115 } 116 #endif 117 118 void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, 119 struct ieee80211_vif *vif, int idx) 120 { 121 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 122 123 mvmvif->tx_key_idx = idx; 124 } 125 126 static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out) 127 { 128 int i; 129 130 for (i = 0; i < IWL_P1K_SIZE; i++) 131 out[i] = cpu_to_le16(p1k[i]); 132 } 133 134 static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key, 135 struct iwl_mvm_key_pn *ptk_pn, 136 struct ieee80211_key_seq *seq, 137 int tid, int queues) 138 { 139 const u8 *ret = seq->ccmp.pn; 140 int i; 141 142 /* get the PN from mac80211, used on the default queue */ 143 ieee80211_get_key_rx_seq(key, tid, seq); 144 145 /* and use the internal data for the other queues */ 146 for (i = 1; i < queues; i++) { 147 const u8 *tmp = ptk_pn->q[i].pn[tid]; 148 149 if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0) 150 ret = tmp; 151 } 152 153 return ret; 154 } 155 156 struct wowlan_key_data { 157 struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc; 158 struct iwl_wowlan_tkip_params_cmd *tkip; 159 bool error, use_rsc_tsc, use_tkip, configure_keys; 160 int wep_key_idx; 161 }; 162 163 static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, 164 struct ieee80211_vif *vif, 165 struct ieee80211_sta *sta, 166 struct ieee80211_key_conf *key, 167 void *_data) 168 { 169 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 170 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 171 struct wowlan_key_data *data = _data; 172 struct aes_sc *aes_sc, *aes_tx_sc = NULL; 173 struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; 174 struct iwl_p1k_cache *rx_p1ks; 175 u8 *rx_mic_key; 176 struct ieee80211_key_seq seq; 177 u32 cur_rx_iv32 = 0; 178 u16 p1k[IWL_P1K_SIZE]; 179 int ret, i; 180 181 switch (key->cipher) { 182 case WLAN_CIPHER_SUITE_WEP40: 183 case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */ 184 struct { 185 struct iwl_mvm_wep_key_cmd wep_key_cmd; 186 struct iwl_mvm_wep_key wep_key; 187 } __packed wkc = { 188 .wep_key_cmd.mac_id_n_color = 189 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 190 mvmvif->color)), 191 .wep_key_cmd.num_keys = 1, 192 /* firmware sets STA_KEY_FLG_WEP_13BYTES */ 193 .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP, 194 .wep_key.key_index = key->keyidx, 195 .wep_key.key_size = key->keylen, 196 }; 197 198 /* 199 * This will fail -- the key functions don't set support 200 * pairwise WEP keys. However, that's better than silently 201 * failing WoWLAN. Or maybe not? 202 */ 203 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 204 break; 205 206 memcpy(&wkc.wep_key.key[3], key->key, key->keylen); 207 if (key->keyidx == mvmvif->tx_key_idx) { 208 /* TX key must be at offset 0 */ 209 wkc.wep_key.key_offset = 0; 210 } else { 211 /* others start at 1 */ 212 data->wep_key_idx++; 213 wkc.wep_key.key_offset = data->wep_key_idx; 214 } 215 216 if (data->configure_keys) { 217 mutex_lock(&mvm->mutex); 218 ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, 219 sizeof(wkc), &wkc); 220 data->error = ret != 0; 221 222 mvm->ptk_ivlen = key->iv_len; 223 mvm->ptk_icvlen = key->icv_len; 224 mvm->gtk_ivlen = key->iv_len; 225 mvm->gtk_icvlen = key->icv_len; 226 mutex_unlock(&mvm->mutex); 227 } 228 229 /* don't upload key again */ 230 return; 231 } 232 default: 233 data->error = true; 234 return; 235 case WLAN_CIPHER_SUITE_AES_CMAC: 236 /* 237 * Ignore CMAC keys -- the WoWLAN firmware doesn't support them 238 * but we also shouldn't abort suspend due to that. It does have 239 * support for the IGTK key renewal, but doesn't really use the 240 * IGTK for anything. This means we could spuriously wake up or 241 * be deauthenticated, but that was considered acceptable. 242 */ 243 return; 244 case WLAN_CIPHER_SUITE_TKIP: 245 if (sta) { 246 u64 pn64; 247 248 tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; 249 tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; 250 251 rx_p1ks = data->tkip->rx_uni; 252 253 pn64 = atomic64_read(&key->tx_pn); 254 tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64)); 255 tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64)); 256 257 ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64), 258 p1k); 259 iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k); 260 261 memcpy(data->tkip->mic_keys.tx, 262 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], 263 IWL_MIC_KEY_SIZE); 264 265 rx_mic_key = data->tkip->mic_keys.rx_unicast; 266 } else { 267 tkip_sc = 268 data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc; 269 rx_p1ks = data->tkip->rx_multi; 270 rx_mic_key = data->tkip->mic_keys.rx_mcast; 271 } 272 273 /* 274 * For non-QoS this relies on the fact that both the uCode and 275 * mac80211 use TID 0 (as they need to to avoid replay attacks) 276 * for checking the IV in the frames. 277 */ 278 for (i = 0; i < IWL_NUM_RSC; i++) { 279 ieee80211_get_key_rx_seq(key, i, &seq); 280 tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); 281 tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); 282 /* wrapping isn't allowed, AP must rekey */ 283 if (seq.tkip.iv32 > cur_rx_iv32) 284 cur_rx_iv32 = seq.tkip.iv32; 285 } 286 287 ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, 288 cur_rx_iv32, p1k); 289 iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k); 290 ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, 291 cur_rx_iv32 + 1, p1k); 292 iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k); 293 294 memcpy(rx_mic_key, 295 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], 296 IWL_MIC_KEY_SIZE); 297 298 data->use_tkip = true; 299 data->use_rsc_tsc = true; 300 break; 301 case WLAN_CIPHER_SUITE_CCMP: 302 if (sta) { 303 u64 pn64; 304 305 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; 306 aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; 307 308 pn64 = atomic64_read(&key->tx_pn); 309 aes_tx_sc->pn = cpu_to_le64(pn64); 310 } else { 311 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; 312 } 313 314 /* 315 * For non-QoS this relies on the fact that both the uCode and 316 * mac80211/our RX code use TID 0 for checking the PN. 317 */ 318 if (sta && iwl_mvm_has_new_rx_api(mvm)) { 319 struct iwl_mvm_sta *mvmsta; 320 struct iwl_mvm_key_pn *ptk_pn; 321 const u8 *pn; 322 323 mvmsta = iwl_mvm_sta_from_mac80211(sta); 324 ptk_pn = rcu_dereference_protected( 325 mvmsta->ptk_pn[key->keyidx], 326 lockdep_is_held(&mvm->mutex)); 327 if (WARN_ON(!ptk_pn)) 328 break; 329 330 for (i = 0; i < IWL_MAX_TID_COUNT; i++) { 331 pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, 332 mvm->trans->num_rx_queues); 333 aes_sc[i].pn = cpu_to_le64((u64)pn[5] | 334 ((u64)pn[4] << 8) | 335 ((u64)pn[3] << 16) | 336 ((u64)pn[2] << 24) | 337 ((u64)pn[1] << 32) | 338 ((u64)pn[0] << 40)); 339 } 340 } else { 341 for (i = 0; i < IWL_NUM_RSC; i++) { 342 u8 *pn = seq.ccmp.pn; 343 344 ieee80211_get_key_rx_seq(key, i, &seq); 345 aes_sc[i].pn = cpu_to_le64((u64)pn[5] | 346 ((u64)pn[4] << 8) | 347 ((u64)pn[3] << 16) | 348 ((u64)pn[2] << 24) | 349 ((u64)pn[1] << 32) | 350 ((u64)pn[0] << 40)); 351 } 352 } 353 data->use_rsc_tsc = true; 354 break; 355 } 356 357 if (data->configure_keys) { 358 mutex_lock(&mvm->mutex); 359 /* 360 * The D3 firmware hardcodes the key offset 0 as the key it 361 * uses to transmit packets to the AP, i.e. the PTK. 362 */ 363 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { 364 mvm->ptk_ivlen = key->iv_len; 365 mvm->ptk_icvlen = key->icv_len; 366 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); 367 } else { 368 /* 369 * firmware only supports TSC/RSC for a single key, 370 * so if there are multiple keep overwriting them 371 * with new ones -- this relies on mac80211 doing 372 * list_add_tail(). 373 */ 374 mvm->gtk_ivlen = key->iv_len; 375 mvm->gtk_icvlen = key->icv_len; 376 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); 377 } 378 mutex_unlock(&mvm->mutex); 379 data->error = ret != 0; 380 } 381 } 382 383 static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm, 384 struct cfg80211_wowlan *wowlan) 385 { 386 struct iwl_wowlan_patterns_cmd_v1 *pattern_cmd; 387 struct iwl_host_cmd cmd = { 388 .id = WOWLAN_PATTERNS, 389 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 390 }; 391 int i, err; 392 393 if (!wowlan->n_patterns) 394 return 0; 395 396 cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns); 397 398 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); 399 if (!pattern_cmd) 400 return -ENOMEM; 401 402 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); 403 404 for (i = 0; i < wowlan->n_patterns; i++) { 405 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); 406 407 memcpy(&pattern_cmd->patterns[i].mask, 408 wowlan->patterns[i].mask, mask_len); 409 memcpy(&pattern_cmd->patterns[i].pattern, 410 wowlan->patterns[i].pattern, 411 wowlan->patterns[i].pattern_len); 412 pattern_cmd->patterns[i].mask_size = mask_len; 413 pattern_cmd->patterns[i].pattern_size = 414 wowlan->patterns[i].pattern_len; 415 } 416 417 cmd.data[0] = pattern_cmd; 418 err = iwl_mvm_send_cmd(mvm, &cmd); 419 kfree(pattern_cmd); 420 return err; 421 } 422 423 static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, 424 struct cfg80211_wowlan *wowlan) 425 { 426 struct iwl_wowlan_patterns_cmd *pattern_cmd; 427 struct iwl_host_cmd cmd = { 428 .id = WOWLAN_PATTERNS, 429 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 430 }; 431 int i, err; 432 433 if (!wowlan->n_patterns) 434 return 0; 435 436 cmd.len[0] = sizeof(*pattern_cmd) + 437 wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2); 438 439 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); 440 if (!pattern_cmd) 441 return -ENOMEM; 442 443 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); 444 445 for (i = 0; i < wowlan->n_patterns; i++) { 446 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); 447 448 pattern_cmd->patterns[i].pattern_type = 449 WOWLAN_PATTERN_TYPE_BITMASK; 450 451 memcpy(&pattern_cmd->patterns[i].u.bitmask.mask, 452 wowlan->patterns[i].mask, mask_len); 453 memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern, 454 wowlan->patterns[i].pattern, 455 wowlan->patterns[i].pattern_len); 456 pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len; 457 pattern_cmd->patterns[i].u.bitmask.pattern_size = 458 wowlan->patterns[i].pattern_len; 459 } 460 461 cmd.data[0] = pattern_cmd; 462 err = iwl_mvm_send_cmd(mvm, &cmd); 463 kfree(pattern_cmd); 464 return err; 465 } 466 467 static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 468 struct ieee80211_sta *ap_sta) 469 { 470 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 471 struct ieee80211_chanctx_conf *ctx; 472 u8 chains_static, chains_dynamic; 473 struct cfg80211_chan_def chandef; 474 int ret, i; 475 struct iwl_binding_cmd_v1 binding_cmd = {}; 476 struct iwl_time_quota_cmd quota_cmd = {}; 477 struct iwl_time_quota_data *quota; 478 u32 status; 479 480 if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm))) 481 return -EINVAL; 482 483 /* add back the PHY */ 484 if (WARN_ON(!mvmvif->phy_ctxt)) 485 return -EINVAL; 486 487 rcu_read_lock(); 488 ctx = rcu_dereference(vif->chanctx_conf); 489 if (WARN_ON(!ctx)) { 490 rcu_read_unlock(); 491 return -EINVAL; 492 } 493 chandef = ctx->def; 494 chains_static = ctx->rx_chains_static; 495 chains_dynamic = ctx->rx_chains_dynamic; 496 rcu_read_unlock(); 497 498 ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef, 499 chains_static, chains_dynamic); 500 if (ret) 501 return ret; 502 503 /* add back the MAC */ 504 mvmvif->uploaded = false; 505 506 if (WARN_ON(!vif->bss_conf.assoc)) 507 return -EINVAL; 508 509 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 510 if (ret) 511 return ret; 512 513 /* add back binding - XXX refactor? */ 514 binding_cmd.id_and_color = 515 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, 516 mvmvif->phy_ctxt->color)); 517 binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); 518 binding_cmd.phy = 519 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, 520 mvmvif->phy_ctxt->color)); 521 binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 522 mvmvif->color)); 523 for (i = 1; i < MAX_MACS_IN_BINDING; i++) 524 binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID); 525 526 status = 0; 527 ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, 528 IWL_BINDING_CMD_SIZE_V1, &binding_cmd, 529 &status); 530 if (ret) { 531 IWL_ERR(mvm, "Failed to add binding: %d\n", ret); 532 return ret; 533 } 534 535 if (status) { 536 IWL_ERR(mvm, "Binding command failed: %u\n", status); 537 return -EIO; 538 } 539 540 ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0); 541 if (ret) 542 return ret; 543 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); 544 545 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 546 if (ret) 547 return ret; 548 549 /* and some quota */ 550 quota = iwl_mvm_quota_cmd_get_quota(mvm, "a_cmd, 0); 551 quota->id_and_color = 552 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, 553 mvmvif->phy_ctxt->color)); 554 quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA); 555 quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA); 556 557 for (i = 1; i < MAX_BINDINGS; i++) { 558 quota = iwl_mvm_quota_cmd_get_quota(mvm, "a_cmd, i); 559 quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID); 560 } 561 562 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, 563 iwl_mvm_quota_cmd_size(mvm), "a_cmd); 564 if (ret) 565 IWL_ERR(mvm, "Failed to send quota: %d\n", ret); 566 567 if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm)) 568 IWL_ERR(mvm, "Failed to initialize D3 LAR information\n"); 569 570 return 0; 571 } 572 573 static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm, 574 struct ieee80211_vif *vif) 575 { 576 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 577 struct iwl_nonqos_seq_query_cmd query_cmd = { 578 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET), 579 .mac_id_n_color = 580 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 581 mvmvif->color)), 582 }; 583 struct iwl_host_cmd cmd = { 584 .id = NON_QOS_TX_COUNTER_CMD, 585 .flags = CMD_WANT_SKB, 586 }; 587 int err; 588 u32 size; 589 590 cmd.data[0] = &query_cmd; 591 cmd.len[0] = sizeof(query_cmd); 592 593 err = iwl_mvm_send_cmd(mvm, &cmd); 594 if (err) 595 return err; 596 597 size = iwl_rx_packet_payload_len(cmd.resp_pkt); 598 if (size < sizeof(__le16)) { 599 err = -EINVAL; 600 } else { 601 err = le16_to_cpup((__le16 *)cmd.resp_pkt->data); 602 /* firmware returns next, not last-used seqno */ 603 err = (u16) (err - 0x10); 604 } 605 606 iwl_free_resp(&cmd); 607 return err; 608 } 609 610 void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 611 { 612 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 613 struct iwl_nonqos_seq_query_cmd query_cmd = { 614 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET), 615 .mac_id_n_color = 616 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 617 mvmvif->color)), 618 .value = cpu_to_le16(mvmvif->seqno), 619 }; 620 621 /* return if called during restart, not resume from D3 */ 622 if (!mvmvif->seqno_valid) 623 return; 624 625 mvmvif->seqno_valid = false; 626 627 if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0, 628 sizeof(query_cmd), &query_cmd)) 629 IWL_ERR(mvm, "failed to set non-QoS seqno\n"); 630 } 631 632 static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) 633 { 634 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); 635 636 iwl_mvm_stop_device(mvm); 637 /* 638 * Set the HW restart bit -- this is mostly true as we're 639 * going to load new firmware and reprogram that, though 640 * the reprogramming is going to be manual to avoid adding 641 * all the MACs that aren't support. 642 * We don't have to clear up everything though because the 643 * reprogramming is manual. When we resume, we'll actually 644 * go through a proper restart sequence again to switch 645 * back to the runtime firmware image. 646 */ 647 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 648 649 /* the fw is reset, so all the keys are cleared */ 650 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); 651 652 mvm->ptk_ivlen = 0; 653 mvm->ptk_icvlen = 0; 654 mvm->ptk_ivlen = 0; 655 mvm->ptk_icvlen = 0; 656 657 return iwl_mvm_load_d3_fw(mvm); 658 } 659 660 static int 661 iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, 662 struct cfg80211_wowlan *wowlan, 663 struct iwl_wowlan_config_cmd *wowlan_config_cmd, 664 struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, 665 struct ieee80211_sta *ap_sta) 666 { 667 int ret; 668 struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta); 669 670 /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */ 671 672 wowlan_config_cmd->is_11n_connection = 673 ap_sta->ht_cap.ht_supported; 674 wowlan_config_cmd->flags = ENABLE_L3_FILTERING | 675 ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING; 676 677 /* Query the last used seqno and set it */ 678 ret = iwl_mvm_get_last_nonqos_seq(mvm, vif); 679 if (ret < 0) 680 return ret; 681 682 wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret); 683 684 iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd); 685 686 if (wowlan->disconnect) 687 wowlan_config_cmd->wakeup_filter |= 688 cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | 689 IWL_WOWLAN_WAKEUP_LINK_CHANGE); 690 if (wowlan->magic_pkt) 691 wowlan_config_cmd->wakeup_filter |= 692 cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET); 693 if (wowlan->gtk_rekey_failure) 694 wowlan_config_cmd->wakeup_filter |= 695 cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL); 696 if (wowlan->eap_identity_req) 697 wowlan_config_cmd->wakeup_filter |= 698 cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ); 699 if (wowlan->four_way_handshake) 700 wowlan_config_cmd->wakeup_filter |= 701 cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE); 702 if (wowlan->n_patterns) 703 wowlan_config_cmd->wakeup_filter |= 704 cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH); 705 706 if (wowlan->rfkill_release) 707 wowlan_config_cmd->wakeup_filter |= 708 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); 709 710 if (wowlan->tcp) { 711 /* 712 * Set the "link change" (really "link lost") flag as well 713 * since that implies losing the TCP connection. 714 */ 715 wowlan_config_cmd->wakeup_filter |= 716 cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS | 717 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE | 718 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET | 719 IWL_WOWLAN_WAKEUP_LINK_CHANGE); 720 } 721 722 if (wowlan->any) { 723 wowlan_config_cmd->wakeup_filter |= 724 cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | 725 IWL_WOWLAN_WAKEUP_LINK_CHANGE | 726 IWL_WOWLAN_WAKEUP_RX_FRAME | 727 IWL_WOWLAN_WAKEUP_BCN_FILTERING); 728 } 729 730 return 0; 731 } 732 733 static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, 734 struct ieee80211_vif *vif, 735 u32 cmd_flags) 736 { 737 struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; 738 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; 739 bool unified = fw_has_capa(&mvm->fw->ucode_capa, 740 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 741 struct wowlan_key_data key_data = { 742 .configure_keys = !unified, 743 .use_rsc_tsc = false, 744 .tkip = &tkip_cmd, 745 .use_tkip = false, 746 }; 747 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 748 int ret; 749 750 key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); 751 if (!key_data.rsc_tsc) 752 return -ENOMEM; 753 754 /* 755 * if we have to configure keys, call ieee80211_iter_keys(), 756 * as we need non-atomic context in order to take the 757 * required locks. 758 */ 759 /* 760 * Note that currently we don't propagate cmd_flags 761 * to the iterator. In case of key_data.configure_keys, 762 * all the configured commands are SYNC, and 763 * iwl_mvm_wowlan_program_keys() will take care of 764 * locking/unlocking mvm->mutex. 765 */ 766 ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys, 767 &key_data); 768 769 if (key_data.error) { 770 ret = -EIO; 771 goto out; 772 } 773 774 if (key_data.use_rsc_tsc) { 775 ret = iwl_mvm_send_cmd_pdu(mvm, 776 WOWLAN_TSC_RSC_PARAM, cmd_flags, 777 sizeof(*key_data.rsc_tsc), 778 key_data.rsc_tsc); 779 if (ret) 780 goto out; 781 } 782 783 if (key_data.use_tkip && 784 !fw_has_api(&mvm->fw->ucode_capa, 785 IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) { 786 ret = iwl_mvm_send_cmd_pdu(mvm, 787 WOWLAN_TKIP_PARAM, 788 cmd_flags, sizeof(tkip_cmd), 789 &tkip_cmd); 790 if (ret) 791 goto out; 792 } 793 794 /* configure rekey data only if offloaded rekey is supported (d3) */ 795 if (mvmvif->rekey_data.valid) { 796 memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd)); 797 memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck, 798 NL80211_KCK_LEN); 799 kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN); 800 memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek, 801 NL80211_KEK_LEN); 802 kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); 803 kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr; 804 805 ret = iwl_mvm_send_cmd_pdu(mvm, 806 WOWLAN_KEK_KCK_MATERIAL, cmd_flags, 807 sizeof(kek_kck_cmd), 808 &kek_kck_cmd); 809 if (ret) 810 goto out; 811 } 812 ret = 0; 813 out: 814 kfree(key_data.rsc_tsc); 815 return ret; 816 } 817 818 static int 819 iwl_mvm_wowlan_config(struct iwl_mvm *mvm, 820 struct cfg80211_wowlan *wowlan, 821 struct iwl_wowlan_config_cmd *wowlan_config_cmd, 822 struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, 823 struct ieee80211_sta *ap_sta) 824 { 825 int ret; 826 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 827 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 828 829 mvm->offload_tid = wowlan_config_cmd->offloading_tid; 830 831 if (!unified_image) { 832 ret = iwl_mvm_switch_to_d3(mvm); 833 if (ret) 834 return ret; 835 836 ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); 837 if (ret) 838 return ret; 839 } 840 841 /* 842 * This needs to be unlocked due to lock ordering 843 * constraints. Since we're in the suspend path 844 * that isn't really a problem though. 845 */ 846 mutex_unlock(&mvm->mutex); 847 ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC); 848 mutex_lock(&mvm->mutex); 849 if (ret) 850 return ret; 851 852 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, 853 sizeof(*wowlan_config_cmd), 854 wowlan_config_cmd); 855 if (ret) 856 return ret; 857 858 if (fw_has_api(&mvm->fw->ucode_capa, 859 IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE)) 860 ret = iwl_mvm_send_patterns(mvm, wowlan); 861 else 862 ret = iwl_mvm_send_patterns_v1(mvm, wowlan); 863 if (ret) 864 return ret; 865 866 return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0); 867 } 868 869 static int 870 iwl_mvm_netdetect_config(struct iwl_mvm *mvm, 871 struct cfg80211_wowlan *wowlan, 872 struct cfg80211_sched_scan_request *nd_config, 873 struct ieee80211_vif *vif) 874 { 875 struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; 876 int ret; 877 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 878 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 879 880 if (!unified_image) { 881 ret = iwl_mvm_switch_to_d3(mvm); 882 if (ret) 883 return ret; 884 } else { 885 /* In theory, we wouldn't have to stop a running sched 886 * scan in order to start another one (for 887 * net-detect). But in practice this doesn't seem to 888 * work properly, so stop any running sched_scan now. 889 */ 890 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); 891 if (ret) 892 return ret; 893 } 894 895 /* rfkill release can be either for wowlan or netdetect */ 896 if (wowlan->rfkill_release) 897 wowlan_config_cmd.wakeup_filter |= 898 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); 899 900 wowlan_config_cmd.sta_id = mvm->aux_sta.sta_id; 901 902 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, 903 sizeof(wowlan_config_cmd), 904 &wowlan_config_cmd); 905 if (ret) 906 return ret; 907 908 ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies, 909 IWL_MVM_SCAN_NETDETECT); 910 if (ret) 911 return ret; 912 913 if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels)) 914 return -EBUSY; 915 916 /* save the sched scan matchsets... */ 917 if (nd_config->n_match_sets) { 918 mvm->nd_match_sets = kmemdup(nd_config->match_sets, 919 sizeof(*nd_config->match_sets) * 920 nd_config->n_match_sets, 921 GFP_KERNEL); 922 if (mvm->nd_match_sets) 923 mvm->n_nd_match_sets = nd_config->n_match_sets; 924 } 925 926 /* ...and the sched scan channels for later reporting */ 927 mvm->nd_channels = kmemdup(nd_config->channels, 928 sizeof(*nd_config->channels) * 929 nd_config->n_channels, 930 GFP_KERNEL); 931 if (mvm->nd_channels) 932 mvm->n_nd_channels = nd_config->n_channels; 933 934 return 0; 935 } 936 937 static void iwl_mvm_free_nd(struct iwl_mvm *mvm) 938 { 939 kfree(mvm->nd_match_sets); 940 mvm->nd_match_sets = NULL; 941 mvm->n_nd_match_sets = 0; 942 kfree(mvm->nd_channels); 943 mvm->nd_channels = NULL; 944 mvm->n_nd_channels = 0; 945 } 946 947 static int __iwl_mvm_suspend(struct ieee80211_hw *hw, 948 struct cfg80211_wowlan *wowlan, 949 bool test) 950 { 951 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 952 struct ieee80211_vif *vif = NULL; 953 struct iwl_mvm_vif *mvmvif = NULL; 954 struct ieee80211_sta *ap_sta = NULL; 955 struct iwl_d3_manager_config d3_cfg_cmd_data = { 956 /* 957 * Program the minimum sleep time to 10 seconds, as many 958 * platforms have issues processing a wakeup signal while 959 * still being in the process of suspending. 960 */ 961 .min_sleep_time = cpu_to_le32(10 * 1000 * 1000), 962 }; 963 struct iwl_host_cmd d3_cfg_cmd = { 964 .id = D3_CONFIG_CMD, 965 .flags = CMD_WANT_SKB, 966 .data[0] = &d3_cfg_cmd_data, 967 .len[0] = sizeof(d3_cfg_cmd_data), 968 }; 969 int ret; 970 int len __maybe_unused; 971 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 972 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 973 974 if (!wowlan) { 975 /* 976 * mac80211 shouldn't get here, but for D3 test 977 * it doesn't warrant a warning 978 */ 979 WARN_ON(!test); 980 return -EINVAL; 981 } 982 983 mutex_lock(&mvm->mutex); 984 985 set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); 986 987 vif = iwl_mvm_get_bss_vif(mvm); 988 if (IS_ERR_OR_NULL(vif)) { 989 ret = 1; 990 goto out_noreset; 991 } 992 993 mvmvif = iwl_mvm_vif_from_mac80211(vif); 994 995 if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) { 996 /* if we're not associated, this must be netdetect */ 997 if (!wowlan->nd_config) { 998 ret = 1; 999 goto out_noreset; 1000 } 1001 1002 ret = iwl_mvm_netdetect_config( 1003 mvm, wowlan, wowlan->nd_config, vif); 1004 if (ret) 1005 goto out; 1006 1007 mvm->net_detect = true; 1008 } else { 1009 struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; 1010 1011 wowlan_config_cmd.sta_id = mvmvif->ap_sta_id; 1012 1013 ap_sta = rcu_dereference_protected( 1014 mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], 1015 lockdep_is_held(&mvm->mutex)); 1016 if (IS_ERR_OR_NULL(ap_sta)) { 1017 ret = -EINVAL; 1018 goto out_noreset; 1019 } 1020 1021 ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd, 1022 vif, mvmvif, ap_sta); 1023 if (ret) 1024 goto out_noreset; 1025 ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd, 1026 vif, mvmvif, ap_sta); 1027 if (ret) 1028 goto out; 1029 1030 mvm->net_detect = false; 1031 } 1032 1033 ret = iwl_mvm_power_update_device(mvm); 1034 if (ret) 1035 goto out; 1036 1037 ret = iwl_mvm_power_update_mac(mvm); 1038 if (ret) 1039 goto out; 1040 1041 #ifdef CONFIG_IWLWIFI_DEBUGFS 1042 if (mvm->d3_wake_sysassert) 1043 d3_cfg_cmd_data.wakeup_flags |= 1044 cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR); 1045 #endif 1046 1047 /* 1048 * Prior to 9000 device family the driver needs to stop the dbg 1049 * recording before entering D3. In later devices the FW stops the 1050 * recording automatically. 1051 */ 1052 if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) 1053 iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true); 1054 1055 /* must be last -- this switches firmware state */ 1056 ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd); 1057 if (ret) 1058 goto out; 1059 #ifdef CONFIG_IWLWIFI_DEBUGFS 1060 len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt); 1061 if (len >= sizeof(u32)) { 1062 mvm->d3_test_pme_ptr = 1063 le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data); 1064 } 1065 #endif 1066 iwl_free_resp(&d3_cfg_cmd); 1067 1068 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1069 1070 ret = iwl_trans_d3_suspend(mvm->trans, test, !unified_image); 1071 out: 1072 if (ret < 0) { 1073 iwl_mvm_free_nd(mvm); 1074 1075 if (!unified_image) { 1076 if (mvm->fw_restart > 0) { 1077 mvm->fw_restart--; 1078 ieee80211_restart_hw(mvm->hw); 1079 } 1080 } 1081 1082 clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); 1083 } 1084 out_noreset: 1085 mutex_unlock(&mvm->mutex); 1086 1087 return ret; 1088 } 1089 1090 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) 1091 { 1092 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1093 struct iwl_trans *trans = mvm->trans; 1094 int ret; 1095 1096 iwl_mvm_pause_tcm(mvm, true); 1097 1098 iwl_fw_runtime_suspend(&mvm->fwrt); 1099 1100 ret = iwl_trans_suspend(trans); 1101 if (ret) 1102 return ret; 1103 1104 trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; 1105 1106 return __iwl_mvm_suspend(hw, wowlan, false); 1107 } 1108 1109 /* converted data from the different status responses */ 1110 struct iwl_wowlan_status_data { 1111 u16 pattern_number; 1112 u16 qos_seq_ctr[8]; 1113 u32 wakeup_reasons; 1114 u32 wake_packet_length; 1115 u32 wake_packet_bufsize; 1116 const u8 *wake_packet; 1117 }; 1118 1119 static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, 1120 struct ieee80211_vif *vif, 1121 struct iwl_wowlan_status_data *status) 1122 { 1123 struct sk_buff *pkt = NULL; 1124 struct cfg80211_wowlan_wakeup wakeup = { 1125 .pattern_idx = -1, 1126 }; 1127 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; 1128 u32 reasons = status->wakeup_reasons; 1129 1130 if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { 1131 wakeup_report = NULL; 1132 goto report; 1133 } 1134 1135 pm_wakeup_event(mvm->dev, 0); 1136 1137 if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) 1138 wakeup.magic_pkt = true; 1139 1140 if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) 1141 wakeup.pattern_idx = 1142 status->pattern_number; 1143 1144 if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | 1145 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) 1146 wakeup.disconnect = true; 1147 1148 if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) 1149 wakeup.gtk_rekey_failure = true; 1150 1151 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) 1152 wakeup.rfkill_release = true; 1153 1154 if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) 1155 wakeup.eap_identity_req = true; 1156 1157 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) 1158 wakeup.four_way_handshake = true; 1159 1160 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS) 1161 wakeup.tcp_connlost = true; 1162 1163 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE) 1164 wakeup.tcp_nomoretokens = true; 1165 1166 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET) 1167 wakeup.tcp_match = true; 1168 1169 if (status->wake_packet_bufsize) { 1170 int pktsize = status->wake_packet_bufsize; 1171 int pktlen = status->wake_packet_length; 1172 const u8 *pktdata = status->wake_packet; 1173 struct ieee80211_hdr *hdr = (void *)pktdata; 1174 int truncated = pktlen - pktsize; 1175 1176 /* this would be a firmware bug */ 1177 if (WARN_ON_ONCE(truncated < 0)) 1178 truncated = 0; 1179 1180 if (ieee80211_is_data(hdr->frame_control)) { 1181 int hdrlen = ieee80211_hdrlen(hdr->frame_control); 1182 int ivlen = 0, icvlen = 4; /* also FCS */ 1183 1184 pkt = alloc_skb(pktsize, GFP_KERNEL); 1185 if (!pkt) 1186 goto report; 1187 1188 skb_put_data(pkt, pktdata, hdrlen); 1189 pktdata += hdrlen; 1190 pktsize -= hdrlen; 1191 1192 if (ieee80211_has_protected(hdr->frame_control)) { 1193 /* 1194 * This is unlocked and using gtk_i(c)vlen, 1195 * but since everything is under RTNL still 1196 * that's not really a problem - changing 1197 * it would be difficult. 1198 */ 1199 if (is_multicast_ether_addr(hdr->addr1)) { 1200 ivlen = mvm->gtk_ivlen; 1201 icvlen += mvm->gtk_icvlen; 1202 } else { 1203 ivlen = mvm->ptk_ivlen; 1204 icvlen += mvm->ptk_icvlen; 1205 } 1206 } 1207 1208 /* if truncated, FCS/ICV is (partially) gone */ 1209 if (truncated >= icvlen) { 1210 icvlen = 0; 1211 truncated -= icvlen; 1212 } else { 1213 icvlen -= truncated; 1214 truncated = 0; 1215 } 1216 1217 pktsize -= ivlen + icvlen; 1218 pktdata += ivlen; 1219 1220 skb_put_data(pkt, pktdata, pktsize); 1221 1222 if (ieee80211_data_to_8023(pkt, vif->addr, vif->type)) 1223 goto report; 1224 wakeup.packet = pkt->data; 1225 wakeup.packet_present_len = pkt->len; 1226 wakeup.packet_len = pkt->len - truncated; 1227 wakeup.packet_80211 = false; 1228 } else { 1229 int fcslen = 4; 1230 1231 if (truncated >= 4) { 1232 truncated -= 4; 1233 fcslen = 0; 1234 } else { 1235 fcslen -= truncated; 1236 truncated = 0; 1237 } 1238 pktsize -= fcslen; 1239 wakeup.packet = status->wake_packet; 1240 wakeup.packet_present_len = pktsize; 1241 wakeup.packet_len = pktlen - truncated; 1242 wakeup.packet_80211 = true; 1243 } 1244 } 1245 1246 report: 1247 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); 1248 kfree_skb(pkt); 1249 } 1250 1251 static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc, 1252 struct ieee80211_key_seq *seq) 1253 { 1254 u64 pn; 1255 1256 pn = le64_to_cpu(sc->pn); 1257 seq->ccmp.pn[0] = pn >> 40; 1258 seq->ccmp.pn[1] = pn >> 32; 1259 seq->ccmp.pn[2] = pn >> 24; 1260 seq->ccmp.pn[3] = pn >> 16; 1261 seq->ccmp.pn[4] = pn >> 8; 1262 seq->ccmp.pn[5] = pn; 1263 } 1264 1265 static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc, 1266 struct ieee80211_key_seq *seq) 1267 { 1268 seq->tkip.iv32 = le32_to_cpu(sc->iv32); 1269 seq->tkip.iv16 = le16_to_cpu(sc->iv16); 1270 } 1271 1272 static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs, 1273 struct ieee80211_sta *sta, 1274 struct ieee80211_key_conf *key) 1275 { 1276 int tid; 1277 1278 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS); 1279 1280 if (sta && iwl_mvm_has_new_rx_api(mvm)) { 1281 struct iwl_mvm_sta *mvmsta; 1282 struct iwl_mvm_key_pn *ptk_pn; 1283 1284 mvmsta = iwl_mvm_sta_from_mac80211(sta); 1285 1286 ptk_pn = rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx], 1287 lockdep_is_held(&mvm->mutex)); 1288 if (WARN_ON(!ptk_pn)) 1289 return; 1290 1291 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { 1292 struct ieee80211_key_seq seq = {}; 1293 int i; 1294 1295 iwl_mvm_aes_sc_to_seq(&scs[tid], &seq); 1296 ieee80211_set_key_rx_seq(key, tid, &seq); 1297 for (i = 1; i < mvm->trans->num_rx_queues; i++) 1298 memcpy(ptk_pn->q[i].pn[tid], 1299 seq.ccmp.pn, IEEE80211_CCMP_PN_LEN); 1300 } 1301 } else { 1302 for (tid = 0; tid < IWL_NUM_RSC; tid++) { 1303 struct ieee80211_key_seq seq = {}; 1304 1305 iwl_mvm_aes_sc_to_seq(&scs[tid], &seq); 1306 ieee80211_set_key_rx_seq(key, tid, &seq); 1307 } 1308 } 1309 } 1310 1311 static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs, 1312 struct ieee80211_key_conf *key) 1313 { 1314 int tid; 1315 1316 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS); 1317 1318 for (tid = 0; tid < IWL_NUM_RSC; tid++) { 1319 struct ieee80211_key_seq seq = {}; 1320 1321 iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq); 1322 ieee80211_set_key_rx_seq(key, tid, &seq); 1323 } 1324 } 1325 1326 static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm, 1327 struct ieee80211_key_conf *key, 1328 struct iwl_wowlan_status *status) 1329 { 1330 union iwl_all_tsc_rsc *rsc = &status->gtk[0].rsc.all_tsc_rsc; 1331 1332 switch (key->cipher) { 1333 case WLAN_CIPHER_SUITE_CCMP: 1334 iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key); 1335 break; 1336 case WLAN_CIPHER_SUITE_TKIP: 1337 iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key); 1338 break; 1339 default: 1340 WARN_ON(1); 1341 } 1342 } 1343 1344 struct iwl_mvm_d3_gtk_iter_data { 1345 struct iwl_mvm *mvm; 1346 struct iwl_wowlan_status *status; 1347 void *last_gtk; 1348 u32 cipher; 1349 bool find_phase, unhandled_cipher; 1350 int num_keys; 1351 }; 1352 1353 static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, 1354 struct ieee80211_vif *vif, 1355 struct ieee80211_sta *sta, 1356 struct ieee80211_key_conf *key, 1357 void *_data) 1358 { 1359 struct iwl_mvm_d3_gtk_iter_data *data = _data; 1360 1361 if (data->unhandled_cipher) 1362 return; 1363 1364 switch (key->cipher) { 1365 case WLAN_CIPHER_SUITE_WEP40: 1366 case WLAN_CIPHER_SUITE_WEP104: 1367 /* ignore WEP completely, nothing to do */ 1368 return; 1369 case WLAN_CIPHER_SUITE_CCMP: 1370 case WLAN_CIPHER_SUITE_TKIP: 1371 /* we support these */ 1372 break; 1373 default: 1374 /* everything else (even CMAC for MFP) - disconnect from AP */ 1375 data->unhandled_cipher = true; 1376 return; 1377 } 1378 1379 data->num_keys++; 1380 1381 /* 1382 * pairwise key - update sequence counters only; 1383 * note that this assumes no TDLS sessions are active 1384 */ 1385 if (sta) { 1386 struct ieee80211_key_seq seq = {}; 1387 union iwl_all_tsc_rsc *sc = 1388 &data->status->gtk[0].rsc.all_tsc_rsc; 1389 1390 if (data->find_phase) 1391 return; 1392 1393 switch (key->cipher) { 1394 case WLAN_CIPHER_SUITE_CCMP: 1395 iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc, 1396 sta, key); 1397 atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn)); 1398 break; 1399 case WLAN_CIPHER_SUITE_TKIP: 1400 iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq); 1401 iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key); 1402 atomic64_set(&key->tx_pn, 1403 (u64)seq.tkip.iv16 | 1404 ((u64)seq.tkip.iv32 << 16)); 1405 break; 1406 } 1407 1408 /* that's it for this key */ 1409 return; 1410 } 1411 1412 if (data->find_phase) { 1413 data->last_gtk = key; 1414 data->cipher = key->cipher; 1415 return; 1416 } 1417 1418 if (data->status->num_of_gtk_rekeys) 1419 ieee80211_remove_key(key); 1420 else if (data->last_gtk == key) 1421 iwl_mvm_set_key_rx_seq(data->mvm, key, data->status); 1422 } 1423 1424 static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, 1425 struct ieee80211_vif *vif, 1426 struct iwl_wowlan_status *status) 1427 { 1428 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1429 struct iwl_mvm_d3_gtk_iter_data gtkdata = { 1430 .mvm = mvm, 1431 .status = status, 1432 }; 1433 u32 disconnection_reasons = 1434 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | 1435 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; 1436 1437 if (!status || !vif->bss_conf.bssid) 1438 return false; 1439 1440 if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons) 1441 return false; 1442 1443 /* find last GTK that we used initially, if any */ 1444 gtkdata.find_phase = true; 1445 ieee80211_iter_keys(mvm->hw, vif, 1446 iwl_mvm_d3_update_keys, >kdata); 1447 /* not trying to keep connections with MFP/unhandled ciphers */ 1448 if (gtkdata.unhandled_cipher) 1449 return false; 1450 if (!gtkdata.num_keys) 1451 goto out; 1452 if (!gtkdata.last_gtk) 1453 return false; 1454 1455 /* 1456 * invalidate all other GTKs that might still exist and update 1457 * the one that we used 1458 */ 1459 gtkdata.find_phase = false; 1460 ieee80211_iter_keys(mvm->hw, vif, 1461 iwl_mvm_d3_update_keys, >kdata); 1462 1463 if (status->num_of_gtk_rekeys) { 1464 struct ieee80211_key_conf *key; 1465 struct { 1466 struct ieee80211_key_conf conf; 1467 u8 key[32]; 1468 } conf = { 1469 .conf.cipher = gtkdata.cipher, 1470 .conf.keyidx = 1471 iwlmvm_wowlan_gtk_idx(&status->gtk[0]), 1472 }; 1473 __be64 replay_ctr; 1474 1475 switch (gtkdata.cipher) { 1476 case WLAN_CIPHER_SUITE_CCMP: 1477 conf.conf.keylen = WLAN_KEY_LEN_CCMP; 1478 memcpy(conf.conf.key, status->gtk[0].key, 1479 WLAN_KEY_LEN_CCMP); 1480 break; 1481 case WLAN_CIPHER_SUITE_TKIP: 1482 conf.conf.keylen = WLAN_KEY_LEN_TKIP; 1483 memcpy(conf.conf.key, status->gtk[0].key, 16); 1484 /* leave TX MIC key zeroed, we don't use it anyway */ 1485 memcpy(conf.conf.key + 1486 NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, 1487 status->gtk[0].tkip_mic_key, 8); 1488 break; 1489 } 1490 1491 key = ieee80211_gtk_rekey_add(vif, &conf.conf); 1492 if (IS_ERR(key)) 1493 return false; 1494 iwl_mvm_set_key_rx_seq(mvm, key, status); 1495 1496 replay_ctr = 1497 cpu_to_be64(le64_to_cpu(status->replay_ctr)); 1498 1499 ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, 1500 (void *)&replay_ctr, GFP_KERNEL); 1501 } 1502 1503 out: 1504 mvmvif->seqno_valid = true; 1505 /* +0x10 because the set API expects next-to-use, not last-used */ 1506 mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10; 1507 1508 return true; 1509 } 1510 1511 struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm) 1512 { 1513 struct iwl_wowlan_status_v7 *v7; 1514 struct iwl_wowlan_status *status; 1515 struct iwl_host_cmd cmd = { 1516 .id = WOWLAN_GET_STATUSES, 1517 .flags = CMD_WANT_SKB, 1518 }; 1519 int ret, len, status_size, data_size; 1520 u8 notif_ver; 1521 1522 lockdep_assert_held(&mvm->mutex); 1523 1524 ret = iwl_mvm_send_cmd(mvm, &cmd); 1525 if (ret) { 1526 IWL_ERR(mvm, "failed to query wakeup status (%d)\n", ret); 1527 return ERR_PTR(ret); 1528 } 1529 1530 len = iwl_rx_packet_payload_len(cmd.resp_pkt); 1531 if (!fw_has_api(&mvm->fw->ucode_capa, 1532 IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) { 1533 struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data; 1534 1535 status_size = sizeof(*v6); 1536 1537 if (len < status_size) { 1538 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); 1539 status = ERR_PTR(-EIO); 1540 goto out_free_resp; 1541 } 1542 1543 data_size = ALIGN(le32_to_cpu(v6->wake_packet_bufsize), 4); 1544 1545 if (len != (status_size + data_size)) { 1546 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); 1547 status = ERR_PTR(-EIO); 1548 goto out_free_resp; 1549 } 1550 1551 status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); 1552 if (!status) 1553 goto out_free_resp; 1554 1555 BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) > 1556 sizeof(status->gtk[0].key)); 1557 BUILD_BUG_ON(sizeof(v6->gtk.tkip_mic_key) > 1558 sizeof(status->gtk[0].tkip_mic_key)); 1559 1560 /* copy GTK info to the right place */ 1561 memcpy(status->gtk[0].key, v6->gtk.decrypt_key, 1562 sizeof(v6->gtk.decrypt_key)); 1563 memcpy(status->gtk[0].tkip_mic_key, v6->gtk.tkip_mic_key, 1564 sizeof(v6->gtk.tkip_mic_key)); 1565 memcpy(&status->gtk[0].rsc, &v6->gtk.rsc, 1566 sizeof(status->gtk[0].rsc)); 1567 1568 /* hardcode the key length to 16 since v6 only supports 16 */ 1569 status->gtk[0].key_len = 16; 1570 1571 /* 1572 * The key index only uses 2 bits (values 0 to 3) and 1573 * we always set bit 7 which means this is the 1574 * currently used key. 1575 */ 1576 status->gtk[0].key_flags = v6->gtk.key_index | BIT(7); 1577 1578 status->replay_ctr = v6->replay_ctr; 1579 1580 /* everything starting from pattern_number is identical */ 1581 memcpy(&status->pattern_number, &v6->pattern_number, 1582 offsetof(struct iwl_wowlan_status, wake_packet) - 1583 offsetof(struct iwl_wowlan_status, pattern_number) + 1584 data_size); 1585 1586 goto out_free_resp; 1587 } 1588 1589 v7 = (void *)cmd.resp_pkt->data; 1590 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, 1591 WOWLAN_GET_STATUSES, 0); 1592 1593 status_size = sizeof(*status); 1594 1595 if (notif_ver == IWL_FW_CMD_VER_UNKNOWN || notif_ver < 9) 1596 status_size = sizeof(*v7); 1597 1598 if (len < status_size) { 1599 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); 1600 status = ERR_PTR(-EIO); 1601 goto out_free_resp; 1602 } 1603 data_size = ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4); 1604 1605 if (len != (status_size + data_size)) { 1606 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); 1607 status = ERR_PTR(-EIO); 1608 goto out_free_resp; 1609 } 1610 1611 status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); 1612 if (!status) 1613 goto out_free_resp; 1614 1615 memcpy(status, v7, status_size); 1616 memcpy(status->wake_packet, (u8 *)v7 + status_size, data_size); 1617 1618 out_free_resp: 1619 iwl_free_resp(&cmd); 1620 return status; 1621 } 1622 1623 static struct iwl_wowlan_status * 1624 iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm) 1625 { 1626 int ret; 1627 1628 /* only for tracing for now */ 1629 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL); 1630 if (ret) 1631 IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); 1632 1633 return iwl_mvm_send_wowlan_get_status(mvm); 1634 } 1635 1636 /* releases the MVM mutex */ 1637 static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, 1638 struct ieee80211_vif *vif) 1639 { 1640 struct iwl_wowlan_status_data status; 1641 struct iwl_wowlan_status *fw_status; 1642 int i; 1643 bool keep; 1644 struct iwl_mvm_sta *mvm_ap_sta; 1645 1646 fw_status = iwl_mvm_get_wakeup_status(mvm); 1647 if (IS_ERR_OR_NULL(fw_status)) 1648 goto out_unlock; 1649 1650 status.pattern_number = le16_to_cpu(fw_status->pattern_number); 1651 for (i = 0; i < 8; i++) 1652 status.qos_seq_ctr[i] = 1653 le16_to_cpu(fw_status->qos_seq_ctr[i]); 1654 status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons); 1655 status.wake_packet_length = 1656 le32_to_cpu(fw_status->wake_packet_length); 1657 status.wake_packet_bufsize = 1658 le32_to_cpu(fw_status->wake_packet_bufsize); 1659 status.wake_packet = fw_status->wake_packet; 1660 1661 /* still at hard-coded place 0 for D3 image */ 1662 mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0); 1663 if (!mvm_ap_sta) 1664 goto out_free; 1665 1666 for (i = 0; i < IWL_MAX_TID_COUNT; i++) { 1667 u16 seq = status.qos_seq_ctr[i]; 1668 /* firmware stores last-used value, we store next value */ 1669 seq += 0x10; 1670 mvm_ap_sta->tid_data[i].seq_number = seq; 1671 } 1672 1673 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { 1674 i = mvm->offload_tid; 1675 iwl_trans_set_q_ptrs(mvm->trans, 1676 mvm_ap_sta->tid_data[i].txq_id, 1677 mvm_ap_sta->tid_data[i].seq_number >> 4); 1678 } 1679 1680 /* now we have all the data we need, unlock to avoid mac80211 issues */ 1681 mutex_unlock(&mvm->mutex); 1682 1683 iwl_mvm_report_wakeup_reasons(mvm, vif, &status); 1684 1685 keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status); 1686 1687 kfree(fw_status); 1688 return keep; 1689 1690 out_free: 1691 kfree(fw_status); 1692 out_unlock: 1693 mutex_unlock(&mvm->mutex); 1694 return false; 1695 } 1696 1697 #define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \ 1698 IWL_SCAN_MAX_PROFILES) 1699 1700 struct iwl_mvm_nd_query_results { 1701 u32 matched_profiles; 1702 u8 matches[ND_QUERY_BUF_LEN]; 1703 }; 1704 1705 static int 1706 iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, 1707 struct iwl_mvm_nd_query_results *results) 1708 { 1709 struct iwl_scan_offload_profiles_query *query; 1710 struct iwl_host_cmd cmd = { 1711 .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD, 1712 .flags = CMD_WANT_SKB, 1713 }; 1714 int ret, len; 1715 size_t query_len, matches_len; 1716 int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw); 1717 1718 ret = iwl_mvm_send_cmd(mvm, &cmd); 1719 if (ret) { 1720 IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret); 1721 return ret; 1722 } 1723 1724 if (fw_has_api(&mvm->fw->ucode_capa, 1725 IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { 1726 query_len = sizeof(struct iwl_scan_offload_profiles_query); 1727 matches_len = sizeof(struct iwl_scan_offload_profile_match) * 1728 max_profiles; 1729 } else { 1730 query_len = sizeof(struct iwl_scan_offload_profiles_query_v1); 1731 matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) * 1732 max_profiles; 1733 } 1734 1735 len = iwl_rx_packet_payload_len(cmd.resp_pkt); 1736 if (len < query_len) { 1737 IWL_ERR(mvm, "Invalid scan offload profiles query response!\n"); 1738 ret = -EIO; 1739 goto out_free_resp; 1740 } 1741 1742 query = (void *)cmd.resp_pkt->data; 1743 1744 results->matched_profiles = le32_to_cpu(query->matched_profiles); 1745 memcpy(results->matches, query->matches, matches_len); 1746 1747 #ifdef CONFIG_IWLWIFI_DEBUGFS 1748 mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done); 1749 #endif 1750 1751 out_free_resp: 1752 iwl_free_resp(&cmd); 1753 return ret; 1754 } 1755 1756 static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm, 1757 struct iwl_mvm_nd_query_results *query, 1758 int idx) 1759 { 1760 int n_chans = 0, i; 1761 1762 if (fw_has_api(&mvm->fw->ucode_capa, 1763 IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { 1764 struct iwl_scan_offload_profile_match *matches = 1765 (struct iwl_scan_offload_profile_match *)query->matches; 1766 1767 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++) 1768 n_chans += hweight8(matches[idx].matching_channels[i]); 1769 } else { 1770 struct iwl_scan_offload_profile_match_v1 *matches = 1771 (struct iwl_scan_offload_profile_match_v1 *)query->matches; 1772 1773 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++) 1774 n_chans += hweight8(matches[idx].matching_channels[i]); 1775 } 1776 1777 return n_chans; 1778 } 1779 1780 static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm, 1781 struct iwl_mvm_nd_query_results *query, 1782 struct cfg80211_wowlan_nd_match *match, 1783 int idx) 1784 { 1785 int i; 1786 1787 if (fw_has_api(&mvm->fw->ucode_capa, 1788 IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { 1789 struct iwl_scan_offload_profile_match *matches = 1790 (struct iwl_scan_offload_profile_match *)query->matches; 1791 1792 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++) 1793 if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) 1794 match->channels[match->n_channels++] = 1795 mvm->nd_channels[i]->center_freq; 1796 } else { 1797 struct iwl_scan_offload_profile_match_v1 *matches = 1798 (struct iwl_scan_offload_profile_match_v1 *)query->matches; 1799 1800 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++) 1801 if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) 1802 match->channels[match->n_channels++] = 1803 mvm->nd_channels[i]->center_freq; 1804 } 1805 } 1806 1807 static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, 1808 struct ieee80211_vif *vif) 1809 { 1810 struct cfg80211_wowlan_nd_info *net_detect = NULL; 1811 struct cfg80211_wowlan_wakeup wakeup = { 1812 .pattern_idx = -1, 1813 }; 1814 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; 1815 struct iwl_mvm_nd_query_results query; 1816 struct iwl_wowlan_status *fw_status; 1817 unsigned long matched_profiles; 1818 u32 reasons = 0; 1819 int i, n_matches, ret; 1820 1821 fw_status = iwl_mvm_get_wakeup_status(mvm); 1822 if (!IS_ERR_OR_NULL(fw_status)) { 1823 reasons = le32_to_cpu(fw_status->wakeup_reasons); 1824 kfree(fw_status); 1825 } 1826 1827 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) 1828 wakeup.rfkill_release = true; 1829 1830 if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) 1831 goto out; 1832 1833 ret = iwl_mvm_netdetect_query_results(mvm, &query); 1834 if (ret || !query.matched_profiles) { 1835 wakeup_report = NULL; 1836 goto out; 1837 } 1838 1839 matched_profiles = query.matched_profiles; 1840 if (mvm->n_nd_match_sets) { 1841 n_matches = hweight_long(matched_profiles); 1842 } else { 1843 IWL_ERR(mvm, "no net detect match information available\n"); 1844 n_matches = 0; 1845 } 1846 1847 net_detect = kzalloc(struct_size(net_detect, matches, n_matches), 1848 GFP_KERNEL); 1849 if (!net_detect || !n_matches) 1850 goto out_report_nd; 1851 1852 for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) { 1853 struct cfg80211_wowlan_nd_match *match; 1854 int idx, n_channels = 0; 1855 1856 n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i); 1857 1858 match = kzalloc(struct_size(match, channels, n_channels), 1859 GFP_KERNEL); 1860 if (!match) 1861 goto out_report_nd; 1862 1863 net_detect->matches[net_detect->n_matches++] = match; 1864 1865 /* We inverted the order of the SSIDs in the scan 1866 * request, so invert the index here. 1867 */ 1868 idx = mvm->n_nd_match_sets - i - 1; 1869 match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len; 1870 memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid, 1871 match->ssid.ssid_len); 1872 1873 if (mvm->n_nd_channels < n_channels) 1874 continue; 1875 1876 iwl_mvm_query_set_freqs(mvm, &query, match, i); 1877 } 1878 1879 out_report_nd: 1880 wakeup.net_detect = net_detect; 1881 out: 1882 iwl_mvm_free_nd(mvm); 1883 1884 mutex_unlock(&mvm->mutex); 1885 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); 1886 1887 if (net_detect) { 1888 for (i = 0; i < net_detect->n_matches; i++) 1889 kfree(net_detect->matches[i]); 1890 kfree(net_detect); 1891 } 1892 } 1893 1894 static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, 1895 struct ieee80211_vif *vif) 1896 { 1897 /* skip the one we keep connection on */ 1898 if (data == vif) 1899 return; 1900 1901 if (vif->type == NL80211_IFTYPE_STATION) 1902 ieee80211_resume_disconnect(vif); 1903 } 1904 1905 static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id) 1906 { 1907 struct error_table_start { 1908 /* cf. struct iwl_error_event_table */ 1909 u32 valid; 1910 __le32 err_id; 1911 } err_info; 1912 1913 if (!base) 1914 return false; 1915 1916 iwl_trans_read_mem_bytes(trans, base, 1917 &err_info, sizeof(err_info)); 1918 if (err_info.valid && err_id) 1919 *err_id = le32_to_cpu(err_info.err_id); 1920 1921 return !!err_info.valid; 1922 } 1923 1924 static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm, 1925 struct ieee80211_vif *vif) 1926 { 1927 u32 err_id; 1928 1929 /* check for lmac1 error */ 1930 if (iwl_mvm_rt_status(mvm->trans, 1931 mvm->trans->dbg.lmac_error_event_table[0], 1932 &err_id)) { 1933 if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) { 1934 struct cfg80211_wowlan_wakeup wakeup = { 1935 .rfkill_release = true, 1936 }; 1937 ieee80211_report_wowlan_wakeup(vif, &wakeup, 1938 GFP_KERNEL); 1939 } 1940 return true; 1941 } 1942 1943 /* check if we have lmac2 set and check for error */ 1944 if (iwl_mvm_rt_status(mvm->trans, 1945 mvm->trans->dbg.lmac_error_event_table[1], NULL)) 1946 return true; 1947 1948 /* check for umac error */ 1949 if (iwl_mvm_rt_status(mvm->trans, 1950 mvm->trans->dbg.umac_error_event_table, NULL)) 1951 return true; 1952 1953 return false; 1954 } 1955 1956 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) 1957 { 1958 struct ieee80211_vif *vif = NULL; 1959 int ret = 1; 1960 enum iwl_d3_status d3_status; 1961 bool keep = false; 1962 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 1963 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 1964 bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa, 1965 IWL_UCODE_TLV_CAPA_D0I3_END_FIRST); 1966 1967 mutex_lock(&mvm->mutex); 1968 1969 clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); 1970 1971 /* get the BSS vif pointer again */ 1972 vif = iwl_mvm_get_bss_vif(mvm); 1973 if (IS_ERR_OR_NULL(vif)) 1974 goto err; 1975 1976 iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); 1977 1978 if (iwl_mvm_check_rt_status(mvm, vif)) { 1979 set_bit(STATUS_FW_ERROR, &mvm->trans->status); 1980 iwl_mvm_dump_nic_error_log(mvm); 1981 iwl_dbg_tlv_time_point(&mvm->fwrt, 1982 IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL); 1983 iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, 1984 false, 0); 1985 ret = 1; 1986 goto err; 1987 } 1988 1989 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_END, 1990 NULL); 1991 1992 ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image); 1993 if (ret) 1994 goto err; 1995 1996 if (d3_status != IWL_D3_STATUS_ALIVE) { 1997 IWL_INFO(mvm, "Device was reset during suspend\n"); 1998 goto err; 1999 } 2000 2001 if (d0i3_first) { 2002 struct iwl_host_cmd cmd = { 2003 .id = D0I3_END_CMD, 2004 .flags = CMD_WANT_SKB, 2005 }; 2006 int len; 2007 2008 ret = iwl_mvm_send_cmd(mvm, &cmd); 2009 if (ret < 0) { 2010 IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n", 2011 ret); 2012 goto err; 2013 } 2014 switch (mvm->cmd_ver.d0i3_resp) { 2015 case 0: 2016 break; 2017 case 1: 2018 len = iwl_rx_packet_payload_len(cmd.resp_pkt); 2019 if (len != sizeof(u32)) { 2020 IWL_ERR(mvm, 2021 "Error with D0I3_END_CMD response size (%d)\n", 2022 len); 2023 goto err; 2024 } 2025 if (IWL_D0I3_RESET_REQUIRE & 2026 le32_to_cpu(*(__le32 *)cmd.resp_pkt->data)) { 2027 iwl_write32(mvm->trans, CSR_RESET, 2028 CSR_RESET_REG_FLAG_FORCE_NMI); 2029 iwl_free_resp(&cmd); 2030 } 2031 break; 2032 default: 2033 WARN_ON(1); 2034 } 2035 } 2036 2037 /* 2038 * Query the current location and source from the D3 firmware so we 2039 * can play it back when we re-intiailize the D0 firmware 2040 */ 2041 iwl_mvm_update_changed_regdom(mvm); 2042 2043 /* Re-configure PPAG settings */ 2044 iwl_mvm_ppag_send_cmd(mvm); 2045 2046 if (!unified_image) 2047 /* Re-configure default SAR profile */ 2048 iwl_mvm_sar_select_profile(mvm, 1, 1); 2049 2050 if (mvm->net_detect) { 2051 /* If this is a non-unified image, we restart the FW, 2052 * so no need to stop the netdetect scan. If that 2053 * fails, continue and try to get the wake-up reasons, 2054 * but trigger a HW restart by keeping a failure code 2055 * in ret. 2056 */ 2057 if (unified_image) 2058 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT, 2059 false); 2060 2061 iwl_mvm_query_netdetect_reasons(mvm, vif); 2062 /* has unlocked the mutex, so skip that */ 2063 goto out; 2064 } else { 2065 keep = iwl_mvm_query_wakeup_reasons(mvm, vif); 2066 #ifdef CONFIG_IWLWIFI_DEBUGFS 2067 if (keep) 2068 mvm->keep_vif = vif; 2069 #endif 2070 /* has unlocked the mutex, so skip that */ 2071 goto out_iterate; 2072 } 2073 2074 err: 2075 iwl_mvm_free_nd(mvm); 2076 mutex_unlock(&mvm->mutex); 2077 2078 out_iterate: 2079 if (!test) 2080 ieee80211_iterate_active_interfaces_rtnl(mvm->hw, 2081 IEEE80211_IFACE_ITER_NORMAL, 2082 iwl_mvm_d3_disconnect_iter, keep ? vif : NULL); 2083 2084 out: 2085 /* no need to reset the device in unified images, if successful */ 2086 if (unified_image && !ret) { 2087 /* nothing else to do if we already sent D0I3_END_CMD */ 2088 if (d0i3_first) 2089 return 0; 2090 2091 ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL); 2092 if (!ret) 2093 return 0; 2094 } 2095 2096 /* 2097 * Reconfigure the device in one of the following cases: 2098 * 1. We are not using a unified image 2099 * 2. We are using a unified image but had an error while exiting D3 2100 */ 2101 set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); 2102 2103 return 1; 2104 } 2105 2106 static int iwl_mvm_resume_d3(struct iwl_mvm *mvm) 2107 { 2108 iwl_trans_resume(mvm->trans); 2109 2110 return __iwl_mvm_resume(mvm, false); 2111 } 2112 2113 int iwl_mvm_resume(struct ieee80211_hw *hw) 2114 { 2115 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2116 int ret; 2117 2118 ret = iwl_mvm_resume_d3(mvm); 2119 2120 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; 2121 2122 iwl_mvm_resume_tcm(mvm); 2123 2124 iwl_fw_runtime_resume(&mvm->fwrt); 2125 2126 return ret; 2127 } 2128 2129 void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled) 2130 { 2131 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2132 2133 device_set_wakeup_enable(mvm->trans->dev, enabled); 2134 } 2135 2136 #ifdef CONFIG_IWLWIFI_DEBUGFS 2137 static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) 2138 { 2139 struct iwl_mvm *mvm = inode->i_private; 2140 int err; 2141 2142 if (mvm->d3_test_active) 2143 return -EBUSY; 2144 2145 file->private_data = inode->i_private; 2146 2147 synchronize_net(); 2148 2149 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; 2150 2151 iwl_mvm_pause_tcm(mvm, true); 2152 2153 iwl_fw_runtime_suspend(&mvm->fwrt); 2154 2155 /* start pseudo D3 */ 2156 rtnl_lock(); 2157 err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); 2158 rtnl_unlock(); 2159 if (err > 0) 2160 err = -EINVAL; 2161 if (err) 2162 return err; 2163 2164 mvm->d3_test_active = true; 2165 mvm->keep_vif = NULL; 2166 return 0; 2167 } 2168 2169 static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf, 2170 size_t count, loff_t *ppos) 2171 { 2172 struct iwl_mvm *mvm = file->private_data; 2173 u32 pme_asserted; 2174 2175 while (true) { 2176 /* read pme_ptr if available */ 2177 if (mvm->d3_test_pme_ptr) { 2178 pme_asserted = iwl_trans_read_mem32(mvm->trans, 2179 mvm->d3_test_pme_ptr); 2180 if (pme_asserted) 2181 break; 2182 } 2183 2184 if (msleep_interruptible(100)) 2185 break; 2186 } 2187 2188 return 0; 2189 } 2190 2191 static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac, 2192 struct ieee80211_vif *vif) 2193 { 2194 /* skip the one we keep connection on */ 2195 if (_data == vif) 2196 return; 2197 2198 if (vif->type == NL80211_IFTYPE_STATION) 2199 ieee80211_connection_loss(vif); 2200 } 2201 2202 static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) 2203 { 2204 struct iwl_mvm *mvm = inode->i_private; 2205 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 2206 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 2207 2208 mvm->d3_test_active = false; 2209 2210 iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); 2211 2212 rtnl_lock(); 2213 __iwl_mvm_resume(mvm, true); 2214 rtnl_unlock(); 2215 2216 iwl_mvm_resume_tcm(mvm); 2217 2218 iwl_fw_runtime_resume(&mvm->fwrt); 2219 2220 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; 2221 2222 iwl_abort_notification_waits(&mvm->notif_wait); 2223 if (!unified_image) { 2224 int remaining_time = 10; 2225 2226 ieee80211_restart_hw(mvm->hw); 2227 2228 /* wait for restart and disconnect all interfaces */ 2229 while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2230 remaining_time > 0) { 2231 remaining_time--; 2232 msleep(1000); 2233 } 2234 2235 if (remaining_time == 0) 2236 IWL_ERR(mvm, "Timed out waiting for HW restart!\n"); 2237 } 2238 2239 ieee80211_iterate_active_interfaces_atomic( 2240 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 2241 iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif); 2242 2243 return 0; 2244 } 2245 2246 const struct file_operations iwl_dbgfs_d3_test_ops = { 2247 .llseek = no_llseek, 2248 .open = iwl_mvm_d3_test_open, 2249 .read = iwl_mvm_d3_test_read, 2250 .release = iwl_mvm_d3_test_release, 2251 }; 2252 #endif 2253