// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2020 The Linux Foundation. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include <linux/delay.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <net/if_inet6.h>
#include <net/ipv6.h>

#include "mac.h"

#include <net/mac80211.h>
#include "core.h"
#include "hif.h"
#include "debug.h"
#include "wmi.h"
#include "wow.h"

/* Baseline WoWLAN capabilities advertised to cfg80211; ath12k_wow_init()
 * copies and may adjust this per-radio (pattern sizes, net-detect).
 */
static const struct wiphy_wowlan_support ath12k_wowlan_support = {
	.flags = WIPHY_WOWLAN_DISCONNECT |
		 WIPHY_WOWLAN_MAGIC_PKT |
		 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
		 WIPHY_WOWLAN_GTK_REKEY_FAILURE,
	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};

/* Return true if the vif is any P2P vdev subtype (device, client or GO);
 * such vdevs are skipped for WoW wakeup/NLO configuration.
 */
static inline bool ath12k_wow_is_p2p_vdev(struct ath12k_vif *ahvif)
{
	return (ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_DEVICE ||
		ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_CLIENT ||
		ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO);
}

/* Ask firmware to enter WoW mode and wait for the HTC suspend handshake.
 *
 * Returns 0 on success, -ETIMEDOUT if the completion never arrives or all
 * retries are exhausted, or a negative error from the WMI command.
 */
int ath12k_wow_enable(struct ath12k *ar)
{
	struct ath12k_base *ab = ar->ab;
	int i, ret;

	clear_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);

	/* The firmware might be busy and it can not enter WoW immediately.
	 * In that case firmware notifies host with
	 * ATH12K_HTC_MSG_NACK_SUSPEND message, asking host to try again
	 * later. Per the firmware team there could be up to 10 loops.
	 */
	for (i = 0; i < ATH12K_WOW_RETRY_NUM; i++) {
		reinit_completion(&ab->htc_suspend);

		ret = ath12k_wmi_wow_enable(ar);
		if (ret) {
			ath12k_warn(ab, "failed to issue wow enable: %d\n", ret);
			return ret;
		}

		ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
		if (ret == 0) {
			ath12k_warn(ab,
				    "timed out while waiting for htc suspend completion\n");
			return -ETIMEDOUT;
		}

		if (test_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
			/* success, suspend complete received */
			return 0;

		/* NACK received: firmware busy, back off and retry */
		ath12k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
			    i);
		msleep(ATH12K_WOW_RETRY_WAIT_MS);
	}

	ath12k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);

	return -ETIMEDOUT;
}

/* Send the host wakeup indication and wait for firmware to confirm that it
 * has left WoW mode. Returns 0 on success or a negative error.
 */
int ath12k_wow_wakeup(struct ath12k *ar)
{
	struct ath12k_base *ab = ar->ab;
	int ret;

	reinit_completion(&ab->wow.wakeup_completed);

	ret = ath12k_wmi_wow_host_wakeup_ind(ar);
	if (ret) {
		ath12k_warn(ab, "failed to send wow wakeup indication: %d\n",
			    ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
	if (ret == 0) {
		ath12k_warn(ab, "timed out while waiting for wow wakeup completion\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/* Disable every wakeup event and delete every wakeup pattern previously
 * installed on this vdev, returning the first error encountered.
 */
static int ath12k_wow_vif_cleanup(struct ath12k_link_vif *arvif)
{
	struct ath12k *ar = arvif->ar;
	int i, ret;

	/* disable all wakeup events (enable = 0) */
	for (i = 0; i < WOW_EVENT_MAX; i++) {
		ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
		if (ret) {
			ath12k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	/* delete every pattern slot, installed or not */
	for (i = 0; i < ar->wow.max_num_patterns; i++) {
		ret = ath12k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
		if (ret) {
			ath12k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
				    i, arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

/* Run ath12k_wow_vif_cleanup() on the default link of every vif on this
 * radio. Caller must hold the wiphy lock.
 */
static int ath12k_wow_cleanup(struct ath12k *ar)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		/* only the default link carries WoW state */
		if (arvif != &arvif->ahvif->deflink)
			continue;

		ret = ath12k_wow_vif_cleanup(arvif);
		if (ret) {
			ath12k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

/* Convert a 802.3 format to a 802.11 format.
 *         +------------+-----------+--------+----------------+
 * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
 *         +------------+-----------+--------+----------------+
 *                |__         |_______        |____________  |________
 *                   |                |                    |          |
 *         +--+------------+----+-----------+---------------+-----------+
 * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)|     8B        |type(2B)| body... |
 *         +--+------------+----+-----------+---------------+-----------+
 *
 * Remaps an ethernet-frame wakeup pattern (and its pkt_offset) onto the
 * equivalent byte positions of an 802.11 3-addr header + RFC1042 SNAP
 * header, so firmware in native-wifi decap mode can match it. The three
 * outer branches select by where the pattern starts (dest mac / src mac /
 * type-or-body); the inner branches handle patterns that straddle field
 * boundaries and must be split across the non-contiguous 802.11 layout.
 */
static void
ath12k_wow_convert_8023_to_80211(struct ath12k *ar,
				 const struct cfg80211_pkt_pattern *eth_pattern,
				 struct ath12k_pkt_pattern *i80211_pattern)
{
	size_t r1042_eth_ofs = offsetof(struct rfc1042_hdr, eth_type);
	size_t a1_ofs = offsetof(struct ieee80211_hdr_3addr, addr1);
	size_t a3_ofs = offsetof(struct ieee80211_hdr_3addr, addr3);
	size_t i80211_hdr_len = sizeof(struct ieee80211_hdr_3addr);
	size_t prot_ofs = offsetof(struct ethhdr, h_proto);
	size_t src_ofs = offsetof(struct ethhdr, h_source);
	u8 eth_bytemask[WOW_MAX_PATTERN_SIZE] = {};
	const u8 *eth_pat = eth_pattern->pattern;
	size_t eth_pat_len = eth_pattern->pattern_len;
	size_t eth_pkt_ofs = eth_pattern->pkt_offset;
	u8 *bytemask = i80211_pattern->bytemask;
	u8 *pat = i80211_pattern->pattern;
	size_t pat_len = 0;
	size_t pkt_ofs = 0;
	size_t delta;
	int i;

	/* convert bitmask to bytemask */
	for (i = 0; i < eth_pat_len; i++)
		if (eth_pattern->mask[i / 8] & BIT(i % 8))
			eth_bytemask[i] = 0xff;

	if (eth_pkt_ofs < ETH_ALEN) {
		/* pattern starts inside the dest mac -> 802.11 addr1 */
		pkt_ofs = eth_pkt_ofs + a1_ofs;

		if (size_add(eth_pkt_ofs, eth_pat_len) < ETH_ALEN) {
			/* fully contained in dest mac */
			memcpy(pat, eth_pat, eth_pat_len);
			memcpy(bytemask, eth_bytemask, eth_pat_len);

			pat_len = eth_pat_len;
		} else if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) {
			/* spills from dest mac into src mac (addr3) */
			memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
			memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);

			delta = eth_pkt_ofs + eth_pat_len - src_ofs;
			memcpy(pat + a3_ofs - pkt_ofs,
			       eth_pat + ETH_ALEN - eth_pkt_ofs,
			       delta);
			memcpy(bytemask + a3_ofs - pkt_ofs,
			       eth_bytemask + ETH_ALEN - eth_pkt_ofs,
			       delta);

			pat_len = a3_ofs - pkt_ofs + delta;
		} else {
			/* spans dest mac, src mac and ethertype/body */
			memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
			memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);

			memcpy(pat + a3_ofs - pkt_ofs,
			       eth_pat + ETH_ALEN - eth_pkt_ofs,
			       ETH_ALEN);
			memcpy(bytemask + a3_ofs - pkt_ofs,
			       eth_bytemask + ETH_ALEN - eth_pkt_ofs,
			       ETH_ALEN);

			delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
			memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_pat + prot_ofs - eth_pkt_ofs,
			       delta);
			memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_bytemask + prot_ofs - eth_pkt_ofs,
			       delta);

			pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
		}
	} else if (eth_pkt_ofs < prot_ofs) {
		/* pattern starts inside the src mac -> 802.11 addr3 */
		pkt_ofs = eth_pkt_ofs - ETH_ALEN + a3_ofs;

		if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) {
			/* fully contained in src mac */
			memcpy(pat, eth_pat, eth_pat_len);
			memcpy(bytemask, eth_bytemask, eth_pat_len);

			pat_len = eth_pat_len;
		} else {
			/* spills from src mac into ethertype/body */
			memcpy(pat, eth_pat, prot_ofs - eth_pkt_ofs);
			memcpy(bytemask, eth_bytemask, prot_ofs - eth_pkt_ofs);

			delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
			memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_pat + prot_ofs - eth_pkt_ofs,
			       delta);
			memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_bytemask + prot_ofs - eth_pkt_ofs,
			       delta);

			pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
		}
	} else {
		/* pattern starts at or after the ethertype: only the
		 * offset shifts, the bytes stay contiguous
		 */
		pkt_ofs = eth_pkt_ofs - prot_ofs + i80211_hdr_len + r1042_eth_ofs;

		memcpy(pat, eth_pat, eth_pat_len);
		memcpy(bytemask, eth_bytemask, eth_pat_len);

		pat_len = eth_pat_len;
	}

	i80211_pattern->pattern_len = pat_len;
	i80211_pattern->pkt_offset = pkt_ofs;
}

/* Validate a cfg80211 net-detect (sched scan) request and translate it into
 * a WMI PNO scan request. Returns 0 on success or -EINVAL when the match
 * set / channel / SSID counts exceed firmware limits.
 */
static int
ath12k_wow_pno_check_and_convert(struct ath12k *ar, u32 vdev_id,
				 const struct cfg80211_sched_scan_request *nd_config,
				 struct wmi_pno_scan_req_arg *pno)
{
	int i, j;
	u8 ssid_len;

	pno->enable = 1;
	pno->vdev_id = vdev_id;
	pno->uc_networks_count = nd_config->n_match_sets;

	if (!pno->uc_networks_count ||
	    pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
		return -EINVAL;

	if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
		return -EINVAL;

	/* Filling per profile params */
	for (i = 0; i < pno->uc_networks_count; i++) {
		ssid_len = nd_config->match_sets[i].ssid.ssid_len;

		if (ssid_len == 0 || ssid_len > 32)
			return -EINVAL;

		pno->a_networks[i].ssid.ssid_len = ssid_len;

		memcpy(pno->a_networks[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid,
		       ssid_len);
		pno->a_networks[i].authentication = 0;
		pno->a_networks[i].encryption = 0;
		pno->a_networks[i].bcast_nw_type = 0;

		/* Copying list of valid channel into request */
		pno->a_networks[i].channel_count = nd_config->n_channels;
		pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;

		for (j = 0; j < nd_config->n_channels; j++) {
			pno->a_networks[i].channels[j] =
					nd_config->channels[j]->center_freq;
		}
	}

	/* set scan to passive if no SSIDs are specified in the request */
	if (nd_config->n_ssids == 0)
		pno->do_passive_scan = true;
	else
		pno->do_passive_scan = false;

	/* SSIDs listed for active probing mark the matching network as a
	 * hidden (broadcast) network
	 */
	for (i = 0; i < nd_config->n_ssids; i++) {
		for (j = 0; j < pno->uc_networks_count; j++) {
			if (pno->a_networks[j].ssid.ssid_len ==
			    nd_config->ssids[i].ssid_len &&
			    !memcmp(pno->a_networks[j].ssid.ssid,
				    nd_config->ssids[i].ssid,
				    pno->a_networks[j].ssid.ssid_len)) {
				pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
				break;
			}
		}
	}

	if (nd_config->n_scan_plans == 2) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
		pno->slow_scan_period =
			nd_config->scan_plans[1].interval * MSEC_PER_SEC;
	} else if (nd_config->n_scan_plans == 1) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = 1;
		pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
	} else {
		/* NOTE(review): invalid plan counts are only warned about,
		 * not rejected — scan periods stay zeroed in that case
		 */
		ath12k_warn(ar->ab, "Invalid number of PNO scan plans: %d",
			    nd_config->n_scan_plans);
	}

	if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		/* enable mac randomization */
		pno->enable_pno_scan_randomization = 1;
		memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
		memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
	}

	pno->delay_start_time = nd_config->delay;

	/* Current FW does not support min-max range for dwell time */
	pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
	pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;

	return 0;
}

/* Program the requested WoW triggers on one vdev: wakeup events keyed by
 * vdev type and user request, wakeup patterns (converted to 802.11 layout
 * in native-wifi decap mode), and PNO/net-detect for stations.
 */
static int ath12k_wow_vif_set_wakeups(struct ath12k_link_vif *arvif,
				      struct cfg80211_wowlan *wowlan)
{
	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
	struct ath12k *ar = arvif->ar;
	unsigned long wow_mask = 0;
	int pattern_id = 0;
	int ret, i, j;

	/* Setup requested WOW features */
	switch (arvif->ahvif->vdev_type) {
	case WMI_VDEV_TYPE_IBSS:
		__set_bit(WOW_BEACON_EVENT, &wow_mask);
		fallthrough;
	case WMI_VDEV_TYPE_AP:
		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
		__set_bit(WOW_HTT_EVENT, &wow_mask);
		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
		break;
	case WMI_VDEV_TYPE_STA:
		if (wowlan->disconnect) {
			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_BMISS_EVENT, &wow_mask);
			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
		}

		if (wowlan->magic_pkt)
			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);

		if (wowlan->nd_config) {
			struct wmi_pno_scan_req_arg *pno;
			/* NOTE(review): this inner ret shadows the outer
			 * one; PNO setup failure is deliberately
			 * best-effort and does not fail suspend
			 */
			int ret;

			pno = kzalloc_obj(*pno);
			if (!pno)
				return -ENOMEM;

			ar->nlo_enabled = true;

			ret = ath12k_wow_pno_check_and_convert(ar, arvif->vdev_id,
							       wowlan->nd_config, pno);
			if (!ret) {
				ath12k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
				__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
			}

			kfree(pno);
		}
		break;
	default:
		break;
	}

	for (i = 0; i < wowlan->n_patterns; i++) {
		const struct cfg80211_pkt_pattern *eth_pattern = &patterns[i];
		struct ath12k_pkt_pattern new_pattern = {};

		if (WARN_ON(eth_pattern->pattern_len > WOW_MAX_PATTERN_SIZE))
			return -EINVAL;

		if (ar->ab->wow.wmi_conf_rx_decap_mode ==
		    ATH12K_HW_TXRX_NATIVE_WIFI) {
			ath12k_wow_convert_8023_to_80211(ar, eth_pattern,
							 &new_pattern);

			if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
				return -EINVAL;
		} else {
			memcpy(new_pattern.pattern, eth_pattern->pattern,
			       eth_pattern->pattern_len);

			/* convert bitmask to bytemask */
			for (j = 0; j < eth_pattern->pattern_len; j++)
				if (eth_pattern->mask[j / 8] & BIT(j % 8))
					new_pattern.bytemask[j] = 0xff;

			new_pattern.pattern_len = eth_pattern->pattern_len;
			new_pattern.pkt_offset = eth_pattern->pkt_offset;
		}

		ret = ath12k_wmi_wow_add_pattern(ar, arvif->vdev_id,
						 pattern_id,
						 new_pattern.pattern,
						 new_pattern.bytemask,
						 new_pattern.pattern_len,
						 new_pattern.pkt_offset);
		if (ret) {
			ath12k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
				    pattern_id,
				    arvif->vdev_id, ret);
			return ret;
		}

		pattern_id++;
		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
	}

	/* enable every wakeup event accumulated above */
	for (i = 0; i < WOW_EVENT_MAX; i++) {
		if (!test_bit(i, &wow_mask))
			continue;
		ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
		if (ret) {
			ath12k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

/* Apply the user's WoW configuration to the default link of every non-P2P
 * vif on this radio. Caller must hold the wiphy lock.
 */
static int ath12k_wow_set_wakeups(struct ath12k *ar,
				  struct cfg80211_wowlan *wowlan)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif != &arvif->ahvif->deflink)
			continue;

		if (ath12k_wow_is_p2p_vdev(arvif->ahvif))
			continue;

		ret = ath12k_wow_vif_set_wakeups(arvif, wowlan);
		if (ret) {
			ath12k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

/* Disable PNO on the given vdev if it was enabled during suspend; clears
 * ar->nlo_enabled on success. No-op (returns 0) when PNO is not active.
 */
static int ath12k_wow_vdev_clean_nlo(struct ath12k *ar, u32 vdev_id)
{
	struct wmi_pno_scan_req_arg *pno;
	int ret;

	if (!ar->nlo_enabled)
		return 0;

	pno = kzalloc_obj(*pno);
	if (!pno)
		return -ENOMEM;

	pno->enable = 0;
	ret = ath12k_wmi_wow_config_pno(ar, vdev_id, pno);
	if (ret) {
		ath12k_warn(ar->ab, "failed to disable PNO: %d", ret);
		goto out;
	}

	ar->nlo_enabled = false;

out:
	kfree(pno);
	return ret;
}

/* Clean up PNO state for one vif; only station vdevs carry PNO. */
static int ath12k_wow_vif_clean_nlo(struct ath12k_link_vif *arvif)
{
	struct ath12k *ar = arvif->ar;

	switch (arvif->ahvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		return ath12k_wow_vdev_clean_nlo(ar, arvif->vdev_id);
	default:
		return 0;
	}
}

/* On resume, clear net-detect state on the default link of every non-P2P
 * vif. Caller must hold the wiphy lock.
 */
static int ath12k_wow_nlo_cleanup(struct ath12k *ar)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif != &arvif->ahvif->deflink)
			continue;

		if (ath12k_wow_is_p2p_vdev(arvif->ahvif))
			continue;

		ret = ath12k_wow_vif_clean_nlo(arvif);
		if (ret) {
			ath12k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

/* During suspend, tell firmware to drop all non-ICMPv6 multicast on every
 * station vdev so that multicast noise does not wake the host.
 */
static int ath12k_wow_set_hw_filter(struct ath12k *ar)
{
	struct wmi_hw_data_filter_arg arg;
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
			continue;

		arg.vdev_id = arvif->vdev_id;
		arg.enable = true;
		arg.hw_filter_bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC;
		ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);
		if (ret) {
			ath12k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

/* On resume, remove the suspend-time hardware data filter from every
 * station vdev.
 */
static int ath12k_wow_clear_hw_filter(struct ath12k *ar)
{
	struct wmi_hw_data_filter_arg arg;
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
			continue;

		arg.vdev_id = arvif->vdev_id;
		arg.enable = false;
		arg.hw_filter_bitmap = 0;
		ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);

		if (ret) {
			ath12k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

/* Derive the IPv6 solicited-node multicast address (ff02::1:ffXX:XXXX,
 * RFC 4291) for each configured unicast/anycast address so firmware can
 * answer neighbor solicitations while the host sleeps.
 */
static void ath12k_wow_generate_ns_mc_addr(struct ath12k_base *ab,
					   struct wmi_arp_ns_offload_arg *offload)
{
	int i;

	for (i = 0; i < offload->ipv6_count; i++) {
		offload->self_ipv6_addr[i][0] = 0xff;
		offload->self_ipv6_addr[i][1] = 0x02;
		offload->self_ipv6_addr[i][11] = 0x01;
		offload->self_ipv6_addr[i][12] = 0xff;
		/* last 24 bits are copied from the target address */
		offload->self_ipv6_addr[i][13] =
					offload->ipv6_addr[i][13];
		offload->self_ipv6_addr[i][14] =
					offload->ipv6_addr[i][14];
		offload->self_ipv6_addr[i][15] =
					offload->ipv6_addr[i][15];
		ath12k_dbg(ab, ATH12K_DBG_WOW, "NS solicited addr %pI6\n",
			   offload->self_ipv6_addr[i]);
	}
}

/* Collect the vif's link-local/global IPv6 unicast and anycast addresses
 * (up to WMI_IPV6_MAX_COUNT) into the NS offload argument, then generate
 * the matching solicited-node addresses. Silently does nothing when the
 * netdev or its inet6 device is absent.
 */
static void ath12k_wow_prepare_ns_offload(struct ath12k_link_vif *arvif,
					  struct wmi_arp_ns_offload_arg *offload)
{
	struct net_device *ndev = ieee80211_vif_to_wdev(arvif->ahvif->vif)->netdev;
	struct ath12k_base *ab = arvif->ar->ab;
	struct inet6_ifaddr *ifa6;
	struct ifacaddr6 *ifaca6;
	struct inet6_dev *idev;
	u32 count = 0, scope;

	if (!ndev)
		return;

	idev = in6_dev_get(ndev);
	if (!idev)
		return;

	ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare ns offload\n");

	read_lock_bh(&idev->lock);

	/* get unicast address */
	list_for_each_entry(ifa6, &idev->addr_list, if_list) {
		if (count >= WMI_IPV6_MAX_COUNT)
			goto unlock;

		/* skip addresses that failed duplicate address detection */
		if (ifa6->flags & IFA_F_DADFAILED)
			continue;

		scope = ipv6_addr_src_scope(&ifa6->addr);
		if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
		    scope != IPV6_ADDR_SCOPE_GLOBAL) {
			ath12k_dbg(ab, ATH12K_DBG_WOW,
				   "Unsupported ipv6 scope: %d\n", scope);
			continue;
		}

		memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr,
		       sizeof(ifa6->addr.s6_addr));
		offload->ipv6_type[count] = WMI_IPV6_UC_TYPE;
		ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 uc %pI6 scope %d\n",
			   count, offload->ipv6_addr[count],
			   scope);
		count++;
	}

	/* get anycast address */
	rcu_read_lock();

	for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
	     ifaca6 = rcu_dereference(ifaca6->aca_next)) {
		if (count >= WMI_IPV6_MAX_COUNT) {
			rcu_read_unlock();
			goto unlock;
		}

		scope = ipv6_addr_src_scope(&ifaca6->aca_addr);
		if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
		    scope != IPV6_ADDR_SCOPE_GLOBAL) {
			ath12k_dbg(ab, ATH12K_DBG_WOW,
				   "Unsupported ipv scope: %d\n", scope);
			continue;
		}

		memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr,
		       sizeof(ifaca6->aca_addr));
		offload->ipv6_type[count] = WMI_IPV6_AC_TYPE;
		ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 ac %pI6 scope %d\n",
			   count, offload->ipv6_addr[count],
			   scope);
		count++;
	}

	rcu_read_unlock();

unlock:
	read_unlock_bh(&idev->lock);

	in6_dev_put(idev);

	offload->ipv6_count = count;
	ath12k_wow_generate_ns_mc_addr(ab, offload);
}

/* Copy the vif's configured IPv4 addresses (capped at WMI_IPV4_MAX_COUNT)
 * into the ARP offload argument so firmware can answer ARP requests while
 * the host sleeps.
 */
static void ath12k_wow_prepare_arp_offload(struct ath12k_link_vif *arvif,
					   struct wmi_arp_ns_offload_arg *offload)
{
	struct ieee80211_vif *vif = arvif->ahvif->vif;
	struct ieee80211_vif_cfg vif_cfg = vif->cfg;
	struct ath12k_base *ab = arvif->ar->ab;
	u32 ipv4_cnt;

	ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare arp offload\n");

	ipv4_cnt = min(vif_cfg.arp_addr_cnt, WMI_IPV4_MAX_COUNT);
	memcpy(offload->ipv4_addr, vif_cfg.arp_addr_list, ipv4_cnt * sizeof(u32));
	offload->ipv4_count = ipv4_cnt;

	ath12k_dbg(ab, ATH12K_DBG_WOW,
		   "wow arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
		   vif_cfg.arp_addr_cnt, vif->addr, offload->ipv4_addr);
}

/* Enable or disable ARP + NS offload in firmware for the default link of
 * every station vif on this radio. Caller must hold the wiphy lock.
 */
static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable)
{
	struct wmi_arp_ns_offload_arg *offload;
	struct ath12k_link_vif *arvif;
	struct ath12k_vif *ahvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	offload = kmalloc_obj(*offload);
	if (!offload)
		return -ENOMEM;

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ahvif = arvif->ahvif;

		if (arvif != &ahvif->deflink)
			continue;

		if (ahvif->vdev_type != WMI_VDEV_TYPE_STA)
			continue;

		/* kmalloc'ed buffer is reused per vif, so clear it first */
		memset(offload, 0, sizeof(*offload));

		memcpy(offload->mac_addr, ahvif->vif->addr, ETH_ALEN);
		ath12k_wow_prepare_ns_offload(arvif, offload);
		ath12k_wow_prepare_arp_offload(arvif, offload);

		ret = ath12k_wmi_arp_ns_offload(ar, arvif, offload, enable);
		if (ret) {
			ath12k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
				    arvif->vdev_id, enable, ret);
			kfree(offload);
			return ret;
		}
	}

	kfree(offload);

	return 0;
}

/* Enable or disable GTK rekey offload on every associated station vif that
 * has rekey data. When disabling, the current replay counters are fetched
 * from firmware first so mac80211 state stays in sync.
 */
static int ath12k_gtk_rekey_offload(struct ath12k *ar, bool enable)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif != &arvif->ahvif->deflink)
			continue;

		if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA ||
		    !arvif->is_up ||
		    !arvif->rekey_data.enable_offload)
			continue;

		/* get rekey info before disable rekey offload */
		if (!enable) {
			ret = ath12k_wmi_gtk_rekey_getinfo(ar, arvif);
			if (ret) {
				ath12k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
					    arvif->vdev_id, ret);
				return ret;
			}
		}

		ret = ath12k_wmi_gtk_rekey_offload(ar, arvif, enable);

		if (ret) {
			ath12k_warn(ar->ab, "failed to offload gtk reky vdev %i: enable %d, ret %d\n",
				    arvif->vdev_id, enable, ret);
			return ret;
		}
	}

	return 0;
}

/* Toggle both protocol offloads (ARP/NS and GTK rekey) together; used on
 * the suspend (enable) and resume (disable) paths.
 */
static int ath12k_wow_protocol_offload(struct ath12k *ar, bool enable)
{
	int ret;

	ret = ath12k_wow_arp_ns_offload(ar, enable);
	if (ret) {
		ath12k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
			    enable, ret);
		return ret;
	}

	ret = ath12k_gtk_rekey_offload(ar, enable);
	if (ret) {
		ath12k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
			    enable, ret);
		return ret;
	}

	return 0;
}

/* Configure firmware keepalive on every link vif so the association is
 * held alive (suspend) or the keepalive is disabled again (resume).
 */
static int ath12k_wow_set_keepalive(struct ath12k *ar,
				    enum wmi_sta_keepalive_method method,
				    u32 interval)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath12k_mac_vif_set_keepalive(arvif, method, interval);
		if (ret)
			return ret;
	}

	return 0;
}

/* mac80211 suspend handler: clean stale WoW state, program wakeups and
 * offloads, drain tx, enter firmware WoW mode and suspend the HIF.
 *
 * Returns 0 on success or 1 on failure (mac80211's convention: non-zero
 * means "suspend refused, keep the device running").
 */
int ath12k_wow_op_suspend(struct ieee80211_hw *hw,
			  struct cfg80211_wowlan *wowlan)
{
	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
	struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
	int ret;

	lockdep_assert_wiphy(hw->wiphy);

	ret = ath12k_wow_cleanup(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath12k_wow_set_wakeups(ar, wowlan);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath12k_wow_protocol_offload(ar, true);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath12k_mac_wait_tx_complete(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
		goto cleanup;
	}

	ret = ath12k_wow_set_hw_filter(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set hw filter: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath12k_wow_set_keepalive(ar,
				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				       WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
	if (ret) {
		ath12k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
		goto cleanup;
	}

	ret = ath12k_wow_enable(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to start wow: %d\n", ret);
		goto cleanup;
	}

	ath12k_hif_irq_disable(ar->ab);
	ath12k_hif_ce_irq_disable(ar->ab);

	ret = ath12k_hif_suspend(ar->ab);
	if (ret) {
		ath12k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
		goto wakeup;
	}

	goto exit;

wakeup:
	/* hif suspend failed after wow was entered: wake firmware back up */
	ath12k_wow_wakeup(ar);

cleanup:
	ath12k_wow_cleanup(ar);

exit:
	return ret ? 1 : 0;
}
EXPORT_SYMBOL(ath12k_wow_op_suspend);

/* mac80211 set_wakeup handler: propagate the wakeup enable flag to the
 * underlying device.
 */
void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
	struct ath12k *ar = ath12k_ah_to_ar(ah, 0);

	lockdep_assert_wiphy(hw->wiphy);

	device_set_wakeup_enable(ar->ab->dev, enabled);
}
EXPORT_SYMBOL(ath12k_wow_op_set_wakeup);

/* mac80211 resume handler: resume the HIF, wake firmware out of WoW and
 * undo the suspend-time configuration (NLO, hw filter, offloads,
 * keepalive).
 *
 * Returns 0 on success, 1 to ask mac80211 for a reconfig/restart when the
 * hw was in the ON state, or a negative error when recovery is impossible.
 */
int ath12k_wow_op_resume(struct ieee80211_hw *hw)
{
	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
	struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
	int ret;

	lockdep_assert_wiphy(hw->wiphy);

	ret = ath12k_hif_resume(ar->ab);
	if (ret) {
		ath12k_warn(ar->ab, "failed to resume hif: %d\n", ret);
		goto exit;
	}

	ath12k_hif_ce_irq_enable(ar->ab);
	ath12k_hif_irq_enable(ar->ab);

	ret = ath12k_wow_wakeup(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
		goto exit;
	}

	ret = ath12k_wow_nlo_cleanup(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
		goto exit;
	}

	ret = ath12k_wow_clear_hw_filter(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
		goto exit;
	}

	ret = ath12k_wow_protocol_offload(ar, false);
	if (ret) {
		ath12k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath12k_wow_set_keepalive(ar,
				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				       WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
	if (ret) {
		ath12k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
		goto exit;
	}

exit:
	if (ret) {
		switch (ah->state) {
		case ATH12K_HW_STATE_ON:
			/* resume failed but hw was up: request a restart */
			ah->state = ATH12K_HW_STATE_RESTARTING;
			ret = 1;
			break;
		case ATH12K_HW_STATE_OFF:
		case ATH12K_HW_STATE_RESTARTING:
		case ATH12K_HW_STATE_RESTARTED:
		case ATH12K_HW_STATE_WEDGED:
		case ATH12K_HW_STATE_TM:
			ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
				    ah->state);
			ret = -EIO;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL(ath12k_wow_op_resume);

/* Advertise WoWLAN support for this radio based on firmware service bits:
 * adjusts pattern limits for native-wifi decap, adds net-detect when NLO
 * is available, and marks the device wakeup-capable. Returns 0 always
 * (silently succeeds when the firmware lacks the WOW service).
 */
int ath12k_wow_init(struct ath12k *ar)
{
	if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
		return 0;

	ar->wow.wowlan_support = ath12k_wowlan_support;

	if (ar->ab->wow.wmi_conf_rx_decap_mode == ATH12K_HW_TXRX_NATIVE_WIFI) {
		/* 802.11 conversion consumes part of the pattern budget */
		ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
		ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
	}

	if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
		ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
		ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
	}

	ar->wow.max_num_patterns = ATH12K_WOW_PATTERNS;
	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
	ar->ah->hw->wiphy->wowlan = &ar->wow.wowlan_support;

	device_set_wakeup_capable(ar->ab->dev, true);

	return 0;
}