/*
 * Copyright (c) 2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "opt_ah.h"

#include "ah.h"
#include "ah_internal.h"

#include "ar9300/ar9300.h"
#include "ar9300/ar9300reg.h"

#if ATH_WOW_OFFLOAD
void ar9300_wowoffload_prep(struct ath_hal *ah)
{
    struct ath_hal_9300 *ahp = AH9300(ah);

    ahp->ah_mcast_filter_l32_set = 0;
    ahp->ah_mcast_filter_u32_set = 0;
}

void ar9300_wowoffload_post(struct ath_hal *ah)
{
    struct ath_hal_9300 *ahp = AH9300(ah);
    u_int32_t val;

    if (ahp->ah_mcast_filter_l32_set != 0) {
        val = OS_REG_READ(ah, AR_MCAST_FIL0);
        val &= ~ahp->ah_mcast_filter_l32_set;
        OS_REG_WRITE(ah, AR_MCAST_FIL0, val);
    }
    if (ahp->ah_mcast_filter_u32_set != 0) {
        val = OS_REG_READ(ah, AR_MCAST_FIL1);
        val &= ~ahp->ah_mcast_filter_u32_set;
        OS_REG_WRITE(ah, AR_MCAST_FIL1, val);
    }

    ahp->ah_mcast_filter_l32_set = 0;
    ahp->ah_mcast_filter_u32_set = 0;
}

static void ar9300_wowoffload_add_mcast_filter(struct ath_hal *ah, u_int8_t *mc_addr)
{
    struct ath_hal_9300 *ahp = AH9300(ah);
    u_int32_t reg, val;
    u_int8_t pos, high32;

    memcpy((u_int8_t *) &val, &mc_addr[0], 3);
    pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
    memcpy((u_int8_t *) &val, &mc_addr[3], 3);
    pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
    high32 = pos & 0x20;
    reg = high32 ? AR_MCAST_FIL1 : AR_MCAST_FIL0;
    pos &= 0x1F;

    val = OS_REG_READ(ah, reg);
    if ((val & (1 << pos)) == 0) {
        val |= (1 << pos);
        if (high32) {
            ahp->ah_mcast_filter_u32_set |= (1 << pos);
        } else {
            ahp->ah_mcast_filter_l32_set |= (1 << pos);
        }
        OS_REG_WRITE(ah, reg, val);
    }
}
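/*
 * Note on the hash used above: each 24-bit half of the multicast address is
 * folded down by XOR-ing its four 6-bit groups, and the two results are
 * XOR-ed into a 6-bit index.  Bit 5 of that index selects AR_MCAST_FIL1 vs.
 * AR_MCAST_FIL0 and bits 0-4 select the filter bit within that register.
 * ah_mcast_filter_{l,u}32_set track the bits this code added so that
 * ar9300_wowoffload_post() can clear them again.
 */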
/*
 * DeviceID SWAR - EV91928
 *
 * During SW WOW, 0x4004[13] is set to allow the BT eCPU to access WLAN MAC
 * registers. Setting 0x4004[13] prevents the eeprom state machine from
 * loading the customizable PCIE configuration registers, which leaves the
 * PCIE device ID at its default value 0xABCD. The SWAR is to have the BT
 * eCPU write the PCIE registers as soon as it detects that PCIE reset is
 * deasserted.
 */
void ar9300_wowoffload_download_devid_swar(struct ath_hal *ah)
{
    u_int32_t addr = AR_WOW_OFFLOAD_WLAN_REGSET_NUM;

    OS_REG_WRITE(ah, addr, 8);
    addr += 4;
    OS_REG_WRITE(ah, addr, 0x5000);
    addr += 4;
    HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) pcie_000 = %08x\n",
             AH_PRIVATE(ah)->ah_config.ath_hal_pcie_000);
    OS_REG_WRITE(ah, addr, AH_PRIVATE(ah)->ah_config.ath_hal_pcie_000);
    addr += 4;
    OS_REG_WRITE(ah, addr, 0x5008);
    addr += 4;
    HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) pcie_008 = %08x\n",
             AH_PRIVATE(ah)->ah_config.ath_hal_pcie_008);
    OS_REG_WRITE(ah, addr, AH_PRIVATE(ah)->ah_config.ath_hal_pcie_008);
    addr += 4;
    OS_REG_WRITE(ah, addr, 0x502c);
    addr += 4;
    HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) pcie_02c = %08x\n",
             AH_PRIVATE(ah)->ah_config.ath_hal_pcie_02c);
    OS_REG_WRITE(ah, addr, AH_PRIVATE(ah)->ah_config.ath_hal_pcie_02c);
    addr += 4;
    OS_REG_WRITE(ah, addr, 0x18c00);
    addr += 4;
    OS_REG_WRITE(ah, addr, 0x18212ede);
    addr += 4;
    OS_REG_WRITE(ah, addr, 0x18c04);
    addr += 4;
    OS_REG_WRITE(ah, addr, 0x008001d8);
    addr += 4;
    OS_REG_WRITE(ah, addr, 0x18c08);
    addr += 4;
    OS_REG_WRITE(ah, addr, 0x0003580c);
    addr += 4;
    OS_REG_WRITE(ah, addr, 0x570c);
    addr += 4;
    HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) pcie_70c = %08x\n",
             AH_PRIVATE(ah)->ah_config.ath_hal_pcie_70c);
    OS_REG_WRITE(ah, addr, AH_PRIVATE(ah)->ah_config.ath_hal_pcie_70c);
    addr += 4;
    OS_REG_WRITE(ah, addr, 0x5040);
    addr += 4;
    HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) pcie_040 = %08x\n",
             AH_PRIVATE(ah)->ah_config.ath_hal_pcie_040);
    OS_REG_WRITE(ah, addr, AH_PRIVATE(ah)->ah_config.ath_hal_pcie_040);
    addr += 4;
    /*
    A_SOC_REG_WRITE(0x45000, 0x0034168c);
    A_SOC_REG_WRITE(0x45008, 0x02800001);
    A_SOC_REG_WRITE(0x4502c, 0x3117168c);
    A_SOC_REG_WRITE(0x58c00, 0x18212ede);
    A_SOC_REG_WRITE(0x58c04, 0x000801d8);
    A_SOC_REG_WRITE(0x58c08, 0x0003580c);
    A_SOC_REG_WRITE(0x4570c, 0x275f3f01);
    A_SOC_REG_WRITE(0x45040, 0xffc25001);
    */
}
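/*
 * Layout note for the block written above: the first word at
 * AR_WOW_OFFLOAD_WLAN_REGSET_NUM holds the number of <address, value> pairs
 * (8 here), followed by the pairs themselves.  The BT eCPU replays these
 * writes once it sees PCIE reset deassert; the commented-out
 * A_SOC_REG_WRITE() list shows the corresponding direct SoC register writes.
 */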
/* Retrieve updated information from the MAC PCU buffer.
 * The embedded CPU would have written the value before exiting WoW.
 */
void ar9300_wowoffload_retrieve_data(struct ath_hal *ah, void *buf, u_int32_t param)
{
    u_int32_t rc_lower, rc_upper;

    if (param == WOW_PARAM_REPLAY_CNTR) {
        rc_lower = OS_REG_READ(ah, AR_WOW_TXBUF(0));
        rc_upper = OS_REG_READ(ah, AR_WOW_TXBUF(1));
        *(u_int64_t *)buf = rc_lower + ((u_int64_t)rc_upper << 32);
    }
    else if (param == WOW_PARAM_KEY_TSC) {
        rc_lower = OS_REG_READ(ah, AR_WOW_TXBUF(2));
        rc_upper = OS_REG_READ(ah, AR_WOW_TXBUF(3));
        *(u_int64_t *)buf = rc_lower + ((u_int64_t)rc_upper << 32);
    }
    else if (param == WOW_PARAM_TX_SEQNUM) {
        *(u_int32_t *)buf = OS_REG_READ(ah, AR_WOW_TXBUF(4));
    }

}

/* Download GTK rekey related information to the embedded CPU */
u_int32_t ar9300_wowoffload_download_rekey_data(struct ath_hal *ah, u_int32_t *data, u_int32_t bytes)
{
    int i;
    int mbox_status = OS_REG_READ(ah, AR_MBOX_CTRL_STATUS);
    u_int32_t gtk_data_start;

    HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) %s, bytes=%d\n", __func__, bytes);
    if (AR_SREV_JUPITER(ah) &&
        (bytes > (AR_WOW_OFFLOAD_GTK_DATA_WORDS_JUPITER * 4)))
    {
        bytes = AR_WOW_OFFLOAD_GTK_DATA_WORDS_JUPITER * 4;
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) bytes truncated to %d\n", bytes);
    }
    /* Check if mailbox is busy */
    if (mbox_status != 0) {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "%s: Mailbox register busy! Reg = 0x%x", __func__, mbox_status);
        return 1;
    }

    /* Clear status */
    OS_REG_WRITE(ah, AR_EMB_CPU_WOW_STATUS, 0x0);
    OS_REG_WRITE(ah, AR_WLAN_WOW_ENABLE, 0);
    OS_REG_WRITE(ah, AR_WLAN_WOW_STATUS, 0xFFFFFFFF);

    if (AR_SREV_JUPITER(ah)) {
        gtk_data_start = AR_WOW_OFFLOAD_GTK_DATA_START_JUPITER;
    } else {
        gtk_data_start = AR_WOW_OFFLOAD_GTK_DATA_START;
    }
    for (i = 0; i < bytes / 4; i++) {
        OS_REG_WRITE(ah, gtk_data_start + i * 4, data[i]);
    }

    return 0;
}

void ar9300_wowoffload_download_acer_magic( struct ath_hal *ah,
                                            HAL_BOOL       valid,
                                            u_int8_t*      datap,
                                            u_int32_t      bytes)
{
    u_int32_t *p32 = (u_int32_t *) datap;
    u_int32_t l = 0, u = 0;

    if (valid) {
        l = *p32;
        p32++;
        u = *(u_int16_t *) p32;
    }

    OS_REG_WRITE(ah, AR_WOW_OFFLOAD_ACER_MAGIC_START, l);
    OS_REG_WRITE(ah, AR_WOW_OFFLOAD_ACER_MAGIC_START + 4, u);

    HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
        "%s: Acer Magic: %02x-%02x-%02x-%02x-%02x-%02x\n", __func__,
        datap[0], datap[1], datap[2], datap[3], datap[4], datap[5]);
}

void ar9300_wowoffload_download_acer_swka( struct ath_hal *ah,
                                           u_int32_t      id,
                                           HAL_BOOL       valid,
                                           u_int32_t      period,
                                           u_int32_t      size,
                                           u_int32_t*     datap)
{
    u_int32_t ka_period[2] = {
        AR_WOW_OFFLOAD_ACER_KA0_PERIOD_MS,
        AR_WOW_OFFLOAD_ACER_KA1_PERIOD_MS
    };
    u_int32_t ka_size[2] = {
        AR_WOW_OFFLOAD_ACER_KA0_SIZE,
        AR_WOW_OFFLOAD_ACER_KA1_SIZE
    };
    u_int32_t ka_data[2] = {
        AR_WOW_OFFLOAD_ACER_KA0_DATA,
        AR_WOW_OFFLOAD_ACER_KA1_DATA
    };
    u_int32_t n_data = AR_WOW_OFFLOAD_ACER_KA0_DATA_WORDS;
    int i;

    if (id >= 2) {
        return;
    }

    if (valid) {
        OS_REG_WRITE(ah, ka_period[id], period);
        OS_REG_WRITE(ah, ka_size[id], size);
    } else {
        OS_REG_WRITE(ah, ka_period[id], 0);
        OS_REG_WRITE(ah, ka_size[id], 0);
    }
    HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "%s: id=%d, period=%d ms, size=%d bytes\n",
             __func__, id, period, size);

    if (size < (n_data * 4)) {
        n_data = (size + 3) / 4;
    }
    for (i = 0; i < n_data * 4; i += 4) {
        OS_REG_WRITE(ah, ka_data[id] + i, *datap);
        /*HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) %08x\n", *datap);*/
        datap++;
    }
}
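/*
 * Note on the software keep-alive slots used above: there are two slots
 * (id 0 and 1).  When 'valid' is false the slot's period and size are
 * zeroed; otherwise the period is in milliseconds and the payload is copied
 * into the slot's data area one 32-bit word at a time, rounded up to a
 * whole number of words.
 */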
void ar9300_wowoffload_download_arp_info(struct ath_hal *ah, u_int32_t id, u_int32_t *data)
{
    u_int32_t addr;
    struct hal_wow_offload_arp_info *p_info = (struct hal_wow_offload_arp_info *) data;

    if (id == 0) {
        addr = AR_WOW_OFFLOAD_ARP0_VALID;
    } else if (id == 1) {
        addr = AR_WOW_OFFLOAD_ARP1_VALID;
    } else {
        return;
    }

    if (p_info->valid) {
        OS_REG_WRITE(ah, addr, 0x1);
        addr += 4;
        OS_REG_WRITE(ah, addr, p_info->RemoteIPv4Address.u32);
        addr += 4;
        OS_REG_WRITE(ah, addr, p_info->HostIPv4Address.u32);
        addr += 4;
        OS_REG_WRITE(ah, addr, p_info->MacAddress.u32[0]);
        addr += 4;
        OS_REG_WRITE(ah, addr, p_info->MacAddress.u32[1]);
    } else {
        OS_REG_WRITE(ah, addr, 0x0);
    }
}

#define WOW_WRITE_NS_IPV6_ADDRESS(_ah, _buf_addr, _p_ipv6_addr)         \
    {                                                                   \
        u_int32_t offset = (_buf_addr);                                 \
        u_int32_t *p_ipv6_addr = (u_int32_t *) (_p_ipv6_addr);          \
        int i;                                                          \
        for (i = 0; i < 4; i++) {                                       \
            OS_REG_WRITE((_ah), offset, *p_ipv6_addr);                  \
            offset += 4;                                                \
            p_ipv6_addr++;                                              \
        }                                                               \
    }

void ar9300_wowoffload_download_ns_info(struct ath_hal *ah, u_int32_t id, u_int32_t *data)
{
    u_int32_t addr;
    struct hal_wow_offload_ns_info *p_info = (struct hal_wow_offload_ns_info *) data;
    u_int8_t mc_addr[6];

    if (id == 0) {
        addr = AR_WOW_OFFLOAD_NS0_VALID;
    } else if (id == 1) {
        addr = AR_WOW_OFFLOAD_NS1_VALID;
    } else {
        return;
    }

    if (p_info->valid) {
        OS_REG_WRITE(ah, addr, 0x1);
        addr += 4;
        WOW_WRITE_NS_IPV6_ADDRESS(ah, addr, &p_info->RemoteIPv6Address.u32[0]);
        addr += 4 * 4;
        WOW_WRITE_NS_IPV6_ADDRESS(ah, addr, &p_info->SolicitedNodeIPv6Address.u32[0]);
        addr += 4 * 4;
        OS_REG_WRITE(ah, addr, p_info->MacAddress.u32[0]);
        addr += 4;
        OS_REG_WRITE(ah, addr, p_info->MacAddress.u32[1]);
        addr += 4;
        WOW_WRITE_NS_IPV6_ADDRESS(ah, addr, &p_info->TargetIPv6Addresses[0].u32[0]);
        addr += 4 * 4;
        WOW_WRITE_NS_IPV6_ADDRESS(ah, addr, &p_info->TargetIPv6Addresses[1].u32[0]);

        mc_addr[0] = 0x33;
        mc_addr[1] = 0x33;
        mc_addr[2] = 0xFF;
        mc_addr[3] = p_info->SolicitedNodeIPv6Address.u8[13];
        mc_addr[4] = p_info->SolicitedNodeIPv6Address.u8[14];
        mc_addr[5] = p_info->SolicitedNodeIPv6Address.u8[15];
        ar9300_wowoffload_add_mcast_filter(ah, mc_addr);
    } else {
        OS_REG_WRITE(ah, addr, 0x0);
    }
}
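/*
 * The NS record written above consists of a valid flag, the remote IPv6
 * address, the solicited-node IPv6 address, the station MAC address and two
 * target IPv6 addresses, each IPv6 address occupying four 32-bit words.
 * The 33:33:FF:xx:xx:xx entry added to the multicast filter is the
 * solicited-node multicast group, so Neighbor Solicitations addressed to it
 * are still received while the host is asleep.
 */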
/* Download transmit parameters for the GTK response frame sent during WoW
 * offload */
u_int32_t ar9300_wow_offload_download_hal_params(struct ath_hal *ah)
{
    u_int32_t tpc = 0x3f;               /* Transmit Power Control */
    u_int32_t tx_tries_series = 7;
    u_int32_t tx_rate_series, transmit_rate;
    u_int32_t gtk_txdesc_param_start;

    if (AH_PRIVATE(ah)->ah_curchan->channel_flags & CHANNEL_CCK) {
        transmit_rate = 0x1B;    /* CCK_1M */
    } else {
        transmit_rate = 0xB;     /* OFDM_6M */
    }

    /* Use a single rate for now. Change later as the need arises. */
    tx_rate_series = transmit_rate;
    tx_tries_series = 7;

    if (AR_SREV_JUPITER(ah)) {
        gtk_txdesc_param_start = AR_WOW_OFFLOAD_GTK_TXDESC_PARAM_START_JUPITER;
    } else {
        gtk_txdesc_param_start = AR_WOW_OFFLOAD_GTK_TXDESC_PARAM_START;
    }
#define AR_WOW_OFFLOAD_GTK_TXDESC_PARAM(x)  (gtk_txdesc_param_start + ((x) * 4))

    /* Do not change the data order unless the firmware code on the embedded
     * CPU is changed correspondingly */
    OS_REG_WRITE(ah, AR_WOW_OFFLOAD_GTK_TXDESC_PARAM(0), tx_rate_series);
    OS_REG_WRITE(ah, AR_WOW_OFFLOAD_GTK_TXDESC_PARAM(1), tx_tries_series);
    OS_REG_WRITE(ah, AR_WOW_OFFLOAD_GTK_TXDESC_PARAM(2), AH9300(ah)->ah_tx_chainmask);
    OS_REG_WRITE(ah, AR_WOW_OFFLOAD_GTK_TXDESC_PARAM(3), tpc);

    return 0;
}

/* Indicate to the embedded CPU that the host is ready to enter WoW mode.
 * The embedded CPU will copy relevant information from the MAC PCU buffer.
 */
u_int32_t ar9300_wow_offload_handshake(struct ath_hal *ah, u_int32_t pattern_enable)
{
    int val;
    int mbox_status = OS_REG_READ(ah, AR_MBOX_CTRL_STATUS);
#if ATH_WOW_OFFLOAD
    u_int32_t bt_handshake_timeout_us = HAL_WOW_CTRL_WAIT_BT_TO(ah) * 100000;

#define AH_DEFAULT_BT_WAIT_TIMEOUT 3000000  /* 3 sec */
    if (bt_handshake_timeout_us == 0) {
        bt_handshake_timeout_us = AH_DEFAULT_BT_WAIT_TIMEOUT;
    }
    HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) TIMEOUT: %d us\n", bt_handshake_timeout_us);
#endif /* ATH_WOW_OFFLOAD */

    if (mbox_status & AR_MBOX_WOW_REQ) {
        /* A WOW mode request handshake is already in progress.
         * Do nothing. */
        return 0;
    }

    /* Clear status */
    OS_REG_WRITE(ah, AR_MBOX_CTRL_STATUS, 0);
    OS_REG_WRITE(ah, AR_EMB_CPU_WOW_STATUS, 0x0);
    OS_REG_WRITE(ah, AR_WLAN_WOW_ENABLE, 0);
    OS_REG_WRITE(ah, AR_WLAN_WOW_STATUS, 0xFFFFFFFF);

    OS_REG_WRITE(ah, AR_RIMT, 0);
    OS_REG_WRITE(ah, AR_TIMT, 0);

    val = 0;
    if (pattern_enable & AH_WOW_USER_PATTERN_EN) {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - User pattern\n");
        val |= AR_EMB_CPU_WOW_ENABLE_PATTERN_MATCH;
    }
    else {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - User pattern\n");
    }
    if ((pattern_enable & AH_WOW_MAGIC_PATTERN_EN)
#if ATH_WOW_OFFLOAD
        || (pattern_enable & AH_WOW_ACER_MAGIC_EN)
#endif
        )
    {
        val |= AR_EMB_CPU_WOW_ENABLE_MAGIC_PATTERN;
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - Magic pattern\n");
    }
    else {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - Magic pattern\n");
    }
    if ((pattern_enable & AH_WOW_LINK_CHANGE)
#if ATH_WOW_OFFLOAD
        || HAL_WOW_CTRL(ah, HAL_WOW_OFFLOAD_KAFAIL_ENABLE)
#endif
        )
    {
        val |= AR_EMB_CPU_WOW_ENABLE_KEEP_ALIVE_FAIL;
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - Keep alive fail\n");
    }
    else {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - Keep alive fail\n");
    }
    if (pattern_enable & AH_WOW_BEACON_MISS) {
        val |= AR_EMB_CPU_WOW_ENABLE_BEACON_MISS;
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - Beacon Miss\n");
    }
    else {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - Beacon Miss\n");
    }

    OS_REG_WRITE(ah, AR_EMB_CPU_WOW_ENABLE, val);

    OS_REG_CLR_BIT(ah, AR_MBOX_CTRL_STATUS, AR_MBOX_WOW_CONF);
    OS_REG_SET_BIT(ah, AR_MBOX_CTRL_STATUS, AR_MBOX_WOW_REQ);
    OS_REG_SET_BIT(ah, AR_MBOX_CTRL_STATUS, AR_MBOX_INT_EMB_CPU);

    if (!ath_hal_wait(ah, AR_MBOX_CTRL_STATUS, AR_MBOX_WOW_CONF,
                      AR_MBOX_WOW_CONF, bt_handshake_timeout_us)) {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "%s: WoW offload handshake failed", __func__);
        return 0;
    }
    else {
        OS_REG_CLR_BIT(ah, AR_MBOX_CTRL_STATUS, AR_MBOX_WOW_CONF);
        HALDEBUG(ah, HAL_DEBUG_POWER_MGMT, "%s: WoW offload handshake successful", __func__);
    }
    return 1;
}
#endif /* ATH_WOW_OFFLOAD */

/*
 * Notify that Power Mgt is enabled in self-generated frames.
 * If requested, force the chip awake.
 *
 * Returns AH_TRUE if the chip is awake or successfully forced awake.
 *
 * WARNING WARNING WARNING
 * There is a problem with the chip where sometimes it will not wake up.
 */
HAL_BOOL
ar9300_set_power_mode_awake(struct ath_hal *ah, int set_chip)
{
    struct ath_hal_9300 *ahp = AH9300(ah);
#define POWER_UP_TIME   10000
    u_int32_t val;
    int i;

    /* Set Bits 14 and 17 of AR_WA before powering on the chip. */
    OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_WA), ahp->ah_wa_reg_val);
    OS_DELAY(10); /* delay to allow the write to take effect. */

    if (set_chip) {
        /* Do a Power-On-Reset if MAC is shutdown */
        if ((OS_REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_SHUTDOWN)) {
            if (ar9300_set_reset_reg(ah, HAL_RESET_POWER_ON) != AH_TRUE) {
                HALASSERT(0);
                return AH_FALSE;
            }
        }

        OS_REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);

        OS_DELAY(50);

        for (i = POWER_UP_TIME / 50; i > 0; i--) {
            val = OS_REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
            if (val == AR_RTC_STATUS_ON) {
                break;
            }
            OS_DELAY(50);
            OS_REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
        }
        if (i == 0) {
            HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "%s: Failed to wakeup in %uus\n",
                     __func__, POWER_UP_TIME);
            return AH_FALSE;
        }

    }

    OS_REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
    return AH_TRUE;
#undef POWER_UP_TIME
}
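/*
 * Illustrative sketch (not compiled): how a caller might drive the
 * power-mode entry points in this file around a register access while the
 * chip may be asleep.  The wrapper name example_access_while_asleep() is
 * hypothetical and not part of the HAL API.
 */
#if 0
static u_int32_t
example_access_while_asleep(struct ath_hal *ah, u_int32_t reg)
{
    u_int32_t val;

    /* Force the chip awake (set_chip = 1 actually touches the RTC). */
    if (!ar9300_set_power_mode(ah, HAL_PM_AWAKE, 1)) {
        return 0xffffffff;
    }
    val = OS_REG_READ(ah, reg);
    /* Drop back to network sleep once the access is done. */
    ar9300_set_power_mode(ah, HAL_PM_NETWORK_SLEEP, 1);
    return val;
}
#endif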
/*
 * Notify that Power Mgt is disabled in self-generated frames.
 * If requested, force the chip to sleep.
 */
static void
ar9300_set_power_mode_sleep(struct ath_hal *ah, int set_chip)
{
    struct ath_hal_9300 *ahp = AH9300(ah);

    OS_REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
    if (set_chip) {
        if (AR_SREV_JUPITER(ah) || AR_SREV_APHRODITE(ah)) {
            OS_REG_WRITE(ah, AR_TIMER_MODE,
                OS_REG_READ(ah, AR_TIMER_MODE) & 0xFFFFFF00);
            OS_REG_WRITE(ah, AR_GEN_TIMERS2_MODE,
                OS_REG_READ(ah, AR_GEN_TIMERS2_MODE) & 0xFFFFFF00);
            OS_REG_WRITE(ah, AR_SLP32_INC,
                OS_REG_READ(ah, AR_SLP32_INC) & 0xFFF00000);
            OS_REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
            OS_DELAY(100);
        }
        /* Clear the RTC force wake bit to allow the mac to go to sleep */
        OS_REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);

        if (AR_SREV_JUPITER(ah) || AR_SREV_APHRODITE(ah)) {
            /*
             * In Jupiter, after entering sleep mode, the hardware will send
             * a SYS_SLEEPING message through the MCI interface. Add a few
             * us of delay to make sure the message can reach the BT side.
             */
            OS_DELAY(100);
        }

        if (!AR_SREV_JUPITER_10(ah)) {
            /* Shutdown chip. Active low */
            OS_REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN);
            /* Settle time */
            OS_DELAY(2);
        }
    }

#if ATH_WOW_OFFLOAD
    if (!AR_SREV_JUPITER(ah) || !HAL_WOW_CTRL(ah, HAL_WOW_OFFLOAD_SET_4004_BIT14))
#endif /* ATH_WOW_OFFLOAD */
    {
        /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_WA),
            ahp->ah_wa_reg_val & ~AR_WA_D3_TO_L1_DISABLE);
    }
}

/*
 * Notify that Power Management is enabled in self-generated
 * frames. If requested, set the power mode of the chip to
 * auto/normal. Duration in units of 128us (1/8 TU).
 */
static void
ar9300_set_power_mode_network_sleep(struct ath_hal *ah, int set_chip)
{
    struct ath_hal_9300 *ahp = AH9300(ah);

    OS_REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
    if (set_chip) {
        HAL_CAPABILITIES *p_cap = &AH_PRIVATE(ah)->ah_caps;

        if (! p_cap->halAutoSleepSupport) {
            /* Set wake_on_interrupt bit; clear force_wake bit */
            OS_REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
        }
        else {
            /*
             * When the chip goes into network sleep, it could be woken up by
             * the MCI_INT interrupt caused by BT's HW messages (LNA_xxx,
             * CONT_xxx), which can arrive at a very fast rate (~100us). This
             * will cause the chip to leave and re-enter network sleep mode
             * frequently, which in consequence will have the WLAN MCI HW
             * generate lots of SYS_WAKING and SYS_SLEEPING messages and make
             * the BT CPU too busy to process them.
             */
            if (AR_SREV_JUPITER(ah) || AR_SREV_APHRODITE(ah)) {
                OS_REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
                    OS_REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_EN) &
                    ~AR_MCI_INTERRUPT_RX_HW_MSG_MASK);
            }

            /* Clear the RTC force wake bit to allow the mac to go to sleep */
            OS_REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);

            if (AR_SREV_JUPITER(ah) || AR_SREV_APHRODITE(ah)) {
                /*
                 * In Jupiter, after entering sleep mode, the hardware will
                 * send a SYS_SLEEPING message through the MCI interface. Add
                 * a few us of delay to make sure the message can reach the
                 * BT side.
                 */
                OS_DELAY(30);
            }
        }
    }

#if ATH_WOW_OFFLOAD
    if (!AR_SREV_JUPITER(ah) || !HAL_WOW_CTRL(ah, HAL_WOW_OFFLOAD_SET_4004_BIT14))
#endif /* ATH_WOW_OFFLOAD */
    {
        /* Clear Bit 14 of AR_WA after putting chip into Sleep mode. */
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_WA),
            ahp->ah_wa_reg_val & ~AR_WA_D3_TO_L1_DISABLE);
    }
}

/*
 * Set power mgt to the requested mode, and conditionally set
 * the chip as well
 */
HAL_BOOL
ar9300_set_power_mode(struct ath_hal *ah, HAL_POWER_MODE mode, int set_chip)
{
    struct ath_hal_9300 *ahp = AH9300(ah);
#if defined(AH_DEBUG) || defined(AH_PRINT_FILTER)
    static const char* modes[] = {
        "AWAKE",
        "FULL-SLEEP",
        "NETWORK SLEEP",
        "UNDEFINED"
    };
#endif
    int status = AH_TRUE;

    HALDEBUG(ah, HAL_DEBUG_POWER_MGMT, "%s: %s -> %s (%s)\n", __func__,
        modes[ar9300_get_power_mode(ah)], modes[mode],
        set_chip ? "set chip " : "");

    switch (mode) {
    case HAL_PM_AWAKE:
        status = ar9300_set_power_mode_awake(ah, set_chip);
#if ATH_SUPPORT_MCI
        if (AH_PRIVATE(ah)->ah_caps.halMciSupport) {
            OS_REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
        }
#endif
        break;
    case HAL_PM_FULL_SLEEP:
#if ATH_SUPPORT_MCI
        if (AH_PRIVATE(ah)->ah_caps.halMciSupport) {
            if (ar9300_get_power_mode(ah) == HAL_PM_AWAKE) {
                if ((ar9300_mci_state(ah, HAL_MCI_STATE_ENABLE, NULL) != 0) &&
                    (ahp->ah_mci_bt_state != MCI_BT_SLEEP) &&
                    !ahp->ah_mci_halted_bt_gpm)
                {
                    HALDEBUG(ah, HAL_DEBUG_BT_COEX,
                        "(MCI) %s: HALT BT GPM (full_sleep)\n", __func__);
                    ar9300_mci_send_coex_halt_bt_gpm(ah, AH_TRUE, AH_TRUE);
                }
            }
            ahp->ah_mci_ready = AH_FALSE;
        }
#endif
#if ATH_SUPPORT_MCI
        if (AH_PRIVATE(ah)->ah_caps.halMciSupport) {
            OS_REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
        }
#endif
        ar9300_set_power_mode_sleep(ah, set_chip);
        ahp->ah_chip_full_sleep = AH_TRUE;
        break;
    case HAL_PM_NETWORK_SLEEP:
#if ATH_SUPPORT_MCI
        if (AH_PRIVATE(ah)->ah_caps.halMciSupport) {
            OS_REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
        }
#endif
        ar9300_set_power_mode_network_sleep(ah, set_chip);
        break;
    default:
        HALDEBUG(ah, HAL_DEBUG_POWER_MGMT,
            "%s: unknown power mode %u\n", __func__, mode);
        return AH_FALSE;
    }
    return status;
}

/*
 * Return the current sleep mode of the chip
 */
HAL_POWER_MODE
ar9300_get_power_mode(struct ath_hal *ah)
{
    int mode = OS_REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;

    switch (mode) {
    case AR_RTC_STATUS_ON:
    case AR_RTC_STATUS_WAKEUP:
        return HAL_PM_AWAKE;
        break;
    case AR_RTC_STATUS_SLEEP:
        return HAL_PM_NETWORK_SLEEP;
        break;
    case AR_RTC_STATUS_SHUTDOWN:
        return HAL_PM_FULL_SLEEP;
        break;
    default:
        HALDEBUG(ah, HAL_DEBUG_POWER_MGMT,
            "%s: unknown power mode 0x%x\n", __func__, mode);
        return HAL_PM_UNDEFINED;
    }
}

/*
 * Set SM power save mode
 */
void
ar9300_set_sm_power_mode(struct ath_hal *ah, HAL_SMPS_MODE mode)
{
    int regval;
    struct ath_hal_9300 *ahp = AH9300(ah);

    if (ar9300_get_capability(ah, HAL_CAP_DYNAMIC_SMPS, 0, AH_NULL) != HAL_OK) {
        return;
    }

    /* Program low & high power chainmask settings and enable MAC control */
    regval = SM(AR_PCU_SMPS_LPWR_CHNMSK_VAL, AR_PCU_SMPS_LPWR_CHNMSK) |
             SM(ahp->ah_rx_chainmask, AR_PCU_SMPS_HPWR_CHNMSK) |
             AR_PCU_SMPS_MAC_CHAINMASK;

    /* Program registers according to required SM power mode.*/
    switch (mode) {
    case HAL_SMPS_SW_CTRL_LOW_PWR:
        OS_REG_WRITE(ah, AR_PCU_SMPS, regval);
        break;
    case HAL_SMPS_SW_CTRL_HIGH_PWR:
        OS_REG_WRITE(ah, AR_PCU_SMPS, regval | AR_PCU_SMPS_SW_CTRL_HPWR);
        break;
    case HAL_SMPS_HW_CTRL:
        OS_REG_WRITE(ah, AR_PCU_SMPS, regval | AR_PCU_SMPS_HW_CTRL_EN);
        break;
    case HAL_SMPS_DEFAULT:
        OS_REG_WRITE(ah, AR_PCU_SMPS, 0);
        break;
    default:
        break;
    }
    ahp->ah_sm_power_mode = mode;
}

#if ATH_WOW
#if NOT_NEEDED_FOR_OSPREY /* not compiled for darwin */
/*
 * This routine is called to configure the SerDes register for the
 * Merlin 2.0 and above chips during WOW sleep.
 */
static void
ar9280_config_ser_des__wow_sleep(struct ath_hal *ah)
{
    int i;
    struct ath_hal_9300 *ahp = AH9300(ah);

    /*
     * For WOW sleep, we reprogram the SerDes so that the PLL and CHK REQ
     * are both enabled. This uses more power but the Maverick team reported
     * that otherwise WOW sleep is unstable and the chip may disappear.
     */
    for (i = 0; i < ahp->ah_ini_pcie_serdes_wow.ia_rows; i++) {
        OS_REG_WRITE(ah,
            INI_RA(&ahp->ah_ini_pcie_serdes_wow, i, 0),
            INI_RA(&ahp->ah_ini_pcie_serdes_wow, i, 1));
    }
    OS_DELAY(1000);
}
#endif /* if NOT_NEEDED_FOR_OSPREY */

static HAL_BOOL
ar9300_wow_create_keep_alive_pattern(struct ath_hal *ah)
{
    struct ath_hal_9300 *ahp = AH9300(ah);
    u_int32_t frame_len = 28;
    u_int32_t tpc = 0x3f;
    u_int32_t transmit_rate;
    u_int32_t frame_type = 0x2;     /* Frame Type -> Data */
    u_int32_t sub_type = 0x4;       /* Subtype -> Null Data */
    u_int32_t to_ds = 1;
    u_int32_t duration_id = 0x3d;
    u_int8_t *sta_mac_addr, *ap_mac_addr;
    u_int8_t *addr1, *addr2, *addr3;
    u_int32_t ctl[13] = { 0, };
#define NUM_KA_DATA_WORDS 6
    u_int32_t data_word[NUM_KA_DATA_WORDS];
    u_int32_t i;
    u_int32_t wow_ka_dataword0;

    sta_mac_addr = (u_int8_t *)ahp->ah_macaddr;
    ap_mac_addr = (u_int8_t *)ahp->ah_bssid;
    addr2 = sta_mac_addr;
    addr1 = addr3 = ap_mac_addr;

    if (AH_PRIVATE(ah)->ah_curchan->channel_flags & CHANNEL_CCK) {
        transmit_rate = 0x1B;    /* CCK_1M */
    } else {
        transmit_rate = 0xB;     /* OFDM_6M */
    }

    /* Set the Transmit Buffer. */
    ctl[0] = (frame_len | (tpc << 16));
    ctl[1] = 0;
    ctl[2] = (0x7 << 16);  /* tx_tries0 */
    ctl[3] = transmit_rate;
    ctl[4] = 0;
    ctl[7] = ahp->ah_tx_chainmask << 2;

    for (i = 0; i < 13; i++) {
        OS_REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
    }

    data_word[0] =
        (frame_type << 2) |
        (sub_type << 4) |
        (to_ds << 8) |
        (duration_id << 16);
    data_word[1] = (((u_int32_t)addr1[3] << 24) | ((u_int32_t)addr1[2] << 16) |
                    ((u_int32_t)addr1[1]) << 8 | ((u_int32_t)addr1[0]));
    data_word[2] = (((u_int32_t)addr2[1] << 24) | ((u_int32_t)addr2[0] << 16) |
                    ((u_int32_t)addr1[5]) << 8 | ((u_int32_t)addr1[4]));
    data_word[3] = (((u_int32_t)addr2[5] << 24) | ((u_int32_t)addr2[4] << 16) |
                    ((u_int32_t)addr2[3]) << 8 | ((u_int32_t)addr2[2]));
    data_word[4] = (((u_int32_t)addr3[3] << 24) | ((u_int32_t)addr3[2] << 16) |
                    ((u_int32_t)addr3[1]) << 8 | (u_int32_t)addr3[0]);
    data_word[5] = (((u_int32_t)addr3[5]) << 8 | ((u_int32_t)addr3[4]));

    if (AR_SREV_JUPITER_20_OR_LATER(ah) || AR_SREV_APHRODITE(ah)) {
        /* Jupiter 2.0 has an extra descriptor word (Time based
         * discard) compared to other chips */
        OS_REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + 12 * 4), 0);
        wow_ka_dataword0 = AR_WOW_TXBUF(13);
    }
    else {
        wow_ka_dataword0 = AR_WOW_TXBUF(12);
    }

    for (i = 0; i < NUM_KA_DATA_WORDS; i++) {
        OS_REG_WRITE(ah, (wow_ka_dataword0 + i * 4), data_word[i]);
    }

    return AH_TRUE;
}
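/*
 * Layout note for the keep-alive buffer written above: ctl[0..12] are the
 * transmit descriptor control words (frame length, TPC, tx tries, rate and
 * chainmask), while data_word[0..5] hold the null-data MPDU itself: word 0
 * carries the 802.11 frame control (type Data, subtype Null, To DS) in its
 * low 16 bits and the duration/ID in the high 16 bits, and words 1-5 carry
 * Address1/Address2/Address3 packed as little-endian 32-bit words.
 */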
/* TBD: Should query the HAL for the hardware capability */
#define MAX_PATTERN_SIZE       256
#define MAX_PATTERN_MASK_SIZE   32
#define MAX_NUM_USER_PATTERN     6  /* Deducting the disassoc/deauth packets */

void
ar9300_wow_apply_pattern(
    struct ath_hal *ah,
    u_int8_t *p_ath_pattern,
    u_int8_t *p_ath_mask,
    int32_t pattern_count,
    u_int32_t ath_pattern_len)
{
    int i;
    u_int32_t reg_pat[] = {
        AR_WOW_TB_PATTERN0,
        AR_WOW_TB_PATTERN1,
        AR_WOW_TB_PATTERN2,
        AR_WOW_TB_PATTERN3,
        AR_WOW_TB_PATTERN4,
        AR_WOW_TB_PATTERN5,
        AR_WOW_TB_PATTERN6,
        AR_WOW_TB_PATTERN7
    };
    u_int32_t reg_mask[] = {
        AR_WOW_TB_MASK0,
        AR_WOW_TB_MASK1,
        AR_WOW_TB_MASK2,
        AR_WOW_TB_MASK3,
        AR_WOW_TB_MASK4,
        AR_WOW_TB_MASK5,
        AR_WOW_TB_MASK6,
        AR_WOW_TB_MASK7
    };
    u_int32_t pattern_val;
    u_int32_t mask_val;
    u_int32_t val;
    u_int8_t mask_bit = 0x1;
    u_int8_t pattern;

    /* TBD: should check count by querying the hardware capability */
    if (pattern_count >= MAX_NUM_USER_PATTERN) {
        return;
    }

    pattern = (u_int8_t)OS_REG_READ(ah, AR_WOW_PATTERN_REG);
    pattern = pattern | (mask_bit << pattern_count);
    OS_REG_WRITE(ah, AR_WOW_PATTERN_REG, pattern);

    /* Set the registers for pattern */
    for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
        pattern_val = (((u_int32_t)p_ath_pattern[i + 0]) |
                       ((u_int32_t)p_ath_pattern[i + 1] << 8) |
                       ((u_int32_t)p_ath_pattern[i + 2] << 16) |
                       ((u_int32_t)p_ath_pattern[i + 3] << 24));
        OS_REG_WRITE(ah, (reg_pat[pattern_count] + i), pattern_val);
    }

    /* Set the registers for mask */
    for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
        mask_val = (((u_int32_t)p_ath_mask[i + 0]) |
                    ((u_int32_t)p_ath_mask[i + 1] << 8) |
                    ((u_int32_t)p_ath_mask[i + 2] << 16) |
                    ((u_int32_t)p_ath_mask[i + 3] << 24));
        OS_REG_WRITE(ah, (reg_mask[pattern_count] + i), mask_val);
    }

    /* XXX */
    /* Set the pattern length to be matched */
    if (pattern_count < 4) {
        /* Patterns 0-3 use the AR_WOW_LENGTH1_REG register */
        val = OS_REG_READ(ah, AR_WOW_LENGTH1_REG);
        val = ((val & (~AR_WOW_LENGTH1_MASK(pattern_count))) |
               ((ath_pattern_len & AR_WOW_LENGTH_MAX) <<
                AR_WOW_LENGTH1_SHIFT(pattern_count)));
        OS_REG_WRITE(ah, AR_WOW_LENGTH1_REG, val);
    } else {
        /* Patterns 4-7 use the AR_WOW_LENGTH2_REG register */
        val = OS_REG_READ(ah, AR_WOW_LENGTH2_REG);
        val = ((val & (~AR_WOW_LENGTH2_MASK(pattern_count))) |
               ((ath_pattern_len & AR_WOW_LENGTH_MAX) <<
                AR_WOW_LENGTH2_SHIFT(pattern_count)));
        OS_REG_WRITE(ah, AR_WOW_LENGTH2_REG, val);
    }

    AH_PRIVATE(ah)->ah_wow_event_mask |=
        (1 << (pattern_count + AR_WOW_PATTERN_FOUND_SHIFT));

    return;
}

HAL_BOOL
ar9300_set_power_mode_wow_sleep(struct ath_hal *ah)
{
    OS_REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);

    OS_REG_WRITE(ah, AR_CR, AR_CR_RXD);    /* Set receive disable bit */
    if (!ath_hal_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
        HALDEBUG(ah, HAL_DEBUG_POWER_MGMT, "%s: dma failed to stop in 10ms\n"
                 "AR_CR=0x%08x\nAR_DIAG_SW=0x%08x\n", __func__,
                 OS_REG_READ(ah, AR_CR), OS_REG_READ(ah, AR_DIAG_SW));
        return AH_FALSE;
    } else {
#if 0
        OS_REG_WRITE(ah, AR_RXDP, 0x0);
#endif

        HALDEBUG(AH_NULL, HAL_DEBUG_UNMASKABLE,
            "%s: TODO How to disable RXDP!!\n", __func__);

#if ATH_SUPPORT_MCI
        if (AH_PRIVATE(ah)->ah_caps.halMciSupport) {
            OS_REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
        }
#endif
        OS_REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);

        return AH_TRUE;
    }
}
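/*
 * Illustrative sketch (not compiled): how a caller might install one user
 * wake pattern and then arm WoW before suspend.  The buffer names and the
 * wrapper example_arm_wow() are hypothetical; they are not part of the HAL
 * API.
 */
#if 0
static void
example_arm_wow(struct ath_hal *ah)
{
    u_int8_t pattern[MAX_PATTERN_SIZE] = { 0 };      /* bytes to match */
    u_int8_t mask[MAX_PATTERN_MASK_SIZE] = { 0xff }; /* one mask bit per pattern byte */

    /* Install the pattern into slot 0 with a 64-byte match length. */
    ar9300_wow_apply_pattern(ah, pattern, mask, 0, 64);

    /* Arm magic-packet and user-pattern wake; no test timer, keep BSSID. */
    ar9300_wow_enable(ah, AH_WOW_MAGIC_PATTERN_EN | AH_WOW_USER_PATTERN_EN,
        0, 0, AH_FALSE);
}
#endif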
HAL_BOOL
ar9300_wow_enable(
    struct ath_hal *ah,
    u_int32_t pattern_enable,
    u_int32_t timeout_in_seconds,
    int clearbssid,
    HAL_BOOL offloadEnable)
{
    uint32_t init_val, val, rval = 0;
    const int ka_delay = 4;  /* Delay of 4 millisec between two keep_alive's */
    uint32_t wow_event_mask;
#if ATH_WOW_OFFLOAD
    uint32_t wow_feature_enable =
        //AR_WOW_OFFLOAD_ENA_GTK |
        //AR_WOW_OFFLOAD_ENA_ARP_OFFLOAD |
        //AR_WOW_OFFLOAD_ENA_NS_OFFLOAD |
        //AR_WOW_OFFLOAD_ENA_ACER_MAGIC |
        //AR_WOW_OFFLOAD_ENA_STD_MAGIC |
        //AR_WOW_OFFLOAD_ENA_4WAY_WAKE |
        //AR_WOW_OFFLOAD_ENA_SWKA |
        //AR_WOW_OFFLOAD_ENA_BT_SLEEP |
        AR_WOW_OFFLOAD_ENA_SW_NULL;
#endif

    /*
     * ah_wow_event_mask is a mask to the AR_WOW_PATTERN_REG register to
     * indicate which WOW events we have enabled. The WOW events come
     * from the pattern_enable in this function and the pattern_count of
     * ar9300_wow_apply_pattern()
     */
    wow_event_mask = AH_PRIVATE(ah)->ah_wow_event_mask;

    HALDEBUG(AH_NULL, HAL_DEBUG_UNMASKABLE,
        "%s: offload: %d, pattern: %08x, event_mask: %08x\n",
        __func__, offloadEnable, pattern_enable, wow_event_mask);

    /*
     * Untie Power-On-Reset from the PCI-E Reset. When we are in WOW sleep,
     * we do not want the Reset from the PCI-E to disturb our hw state.
     */
    if (AH_PRIVATE(ah)->ah_is_pci_express == AH_TRUE) {

        u_int32_t wa_reg_val;
        /*
         * We need to untie the internal POR (power-on-reset) from the
         * external PCI-E reset. We also need to tie the PCI-E Phy reset to
         * the PCI-E reset.
         */
        HALDEBUG(AH_NULL, HAL_DEBUG_UNMASKABLE,
            "%s: Untie POR and PCIE reset\n", __func__);
        wa_reg_val = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_WA));
        wa_reg_val = wa_reg_val & ~(AR_WA_UNTIE_RESET_EN);
        wa_reg_val = wa_reg_val | AR_WA_RESET_EN | AR_WA_POR_SHORT;
        /*
         * This bit bypasses the EEPROM/OTP state machine (by clearing its
         * busy state while PCIE_rst is asserted), to allow the BT embedded
         * CPU to access WLAN registers. Otherwise the eCPU access will be
         * stalled as eeprom_sm is held in the busy state.
         *
         * EV91928: when this bit is set, after host wakeup and PCIE_rst
         * deassertion, the PCIE configuration registers are reset and the
         * DeviceID, SubsystemID etc. registers will differ from their values
         * before entering sleep. This will cause Windows to detect a device
         * removal.
         *
         * For HW WOW, this bit should be kept clear.
         */
        if (offloadEnable) {
            HALDEBUG(AH_NULL, HAL_DEBUG_UNMASKABLE,
                "%s: Set AR_WA.13 COLD_RESET_OVERRIDE\n", __func__);
            wa_reg_val = wa_reg_val | AR_WA_COLD_RESET_OVERRIDE;

#if ATH_WOW_OFFLOAD
            if (AR_SREV_JUPITER(ah)) {
                wa_reg_val = wa_reg_val | AR_WA_D3_TO_L1_DISABLE;
            }
#endif
        }
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_WA), wa_reg_val);
    }

    /*
     * Set the power states appropriately and enable PME.
     */
    val = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_PCIE_PM_CTRL));
    val |=
        AR_PMCTRL_HOST_PME_EN |
        AR_PMCTRL_PWR_PM_CTRL_ENA |
        AR_PMCTRL_AUX_PWR_DET;

    /*
     * Set and clear WOW_PME_CLEAR registers for the chip to generate the
     * next wow signal.
     */
    val |= AR_PMCTRL_WOW_PME_CLR;
    OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_PCIE_PM_CTRL), val);
    val &= ~AR_PMCTRL_WOW_PME_CLR;
    OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_PCIE_PM_CTRL), val);

    /*
     * Set up for:
     *     - beacon misses
     *     - magic pattern
     *     - keep alive timeout
     *     - pattern matching
     */

    /*
     * Program some default values for keep-alives, beacon misses, etc.
     */
    init_val = OS_REG_READ(ah, AR_WOW_PATTERN_REG);
    val = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF) | init_val;
    OS_REG_WRITE(ah, AR_WOW_PATTERN_REG, val);
    rval = OS_REG_READ(ah, AR_WOW_PATTERN_REG);

    val =
        AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
        AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
        AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT);
    OS_REG_WRITE(ah, AR_WOW_COUNT_REG, val);
    rval = OS_REG_READ(ah, AR_WOW_COUNT_REG);

    if (pattern_enable & AH_WOW_BEACON_MISS) {
        val = AR_WOW_BEACON_TIMO;
    } else {
        /* We are not using the beacon miss. Program a large value. */
        val = AR_WOW_BEACON_TIMO_MAX;
    }
    OS_REG_WRITE(ah, AR_WOW_BCN_TIMO_REG, val);
    rval = OS_REG_READ(ah, AR_WOW_BCN_TIMO_REG);

    /*
     * Keep Alive Timo in ms.
     */
    if (pattern_enable == 0) {
        val = AR_WOW_KEEP_ALIVE_NEVER;
    } else {
        val = AH_PRIVATE(ah)->ah_config.ath_hal_keep_alive_timeout * 32;
    }
    OS_REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO_REG, val);
    rval = OS_REG_READ(ah, AR_WOW_KEEP_ALIVE_TIMO_REG);

    /*
     * Keep Alive delay in us.
     */
    val = ka_delay * 1000;
    OS_REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY_REG, val);
    rval = OS_REG_READ(ah, AR_WOW_KEEP_ALIVE_DELAY_REG);

    /*
     * Create the keep_alive pattern to respond to beacons.
     */
    ar9300_wow_create_keep_alive_pattern(ah);

    /*
     * Configure MAC WoW Registers.
     */

    val = OS_REG_READ(ah, AR_WOW_KEEP_ALIVE_REG);

    /*
     * Send keep alive timeouts anyway.
     */
    val &= ~AR_WOW_KEEP_ALIVE_AUTO_DIS;

    if (pattern_enable & AH_WOW_LINK_CHANGE) {
        val &= ~AR_WOW_KEEP_ALIVE_FAIL_DIS;
        wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
    } else {
        val |= AR_WOW_KEEP_ALIVE_FAIL_DIS;
    }
#if ATH_WOW_OFFLOAD
    if (offloadEnable) {
        /* Don't enable KA frames yet. The BT CPU is not
         * yet ready. */
    }
    else
#endif /* ATH_WOW_OFFLOAD */
    {
        OS_REG_WRITE(ah, AR_WOW_KEEP_ALIVE_REG, val);
        val = OS_REG_READ(ah, AR_WOW_KEEP_ALIVE_REG);
    }


    /*
     * We are relying on a bmiss failure. Ensure we have enough
     * threshold to prevent false positives.
     */
    OS_REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
        AR_WOW_BMISSTHRESHOLD);

    val = OS_REG_READ(ah, AR_WOW_BCN_EN_REG);
    if (pattern_enable & AH_WOW_BEACON_MISS) {
        val |= AR_WOW_BEACON_FAIL_EN;
        wow_event_mask |= AR_WOW_BEACON_FAIL;
    } else {
        val &= ~AR_WOW_BEACON_FAIL_EN;
    }
    OS_REG_WRITE(ah, AR_WOW_BCN_EN_REG, val);
    val = OS_REG_READ(ah, AR_WOW_BCN_EN_REG);

    /*
     * Enable the magic packet registers.
     */
    val = OS_REG_READ(ah, AR_WOW_PATTERN_REG);
    if ((pattern_enable & AH_WOW_MAGIC_PATTERN_EN)
#if ATH_WOW_OFFLOAD
        || (pattern_enable & AH_WOW_ACER_MAGIC_EN)
#endif
        )
    {
        val |= AR_WOW_MAGIC_EN;
        wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
    } else {
        val &= ~AR_WOW_MAGIC_EN;
    }
    val |= AR_WOW_MAC_INTR_EN;
    OS_REG_WRITE(ah, AR_WOW_PATTERN_REG, val);
    val = OS_REG_READ(ah, AR_WOW_PATTERN_REG);

#if ATH_WOW_OFFLOAD
    if (HAL_WOW_CTRL(ah, HAL_WOW_OFFLOAD_FORCE_BT_SLEEP)) {
        wow_feature_enable |= AR_WOW_OFFLOAD_ENA_BT_SLEEP;
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - BT SLEEP\n");
    } else {
        wow_feature_enable &= ~AR_WOW_OFFLOAD_ENA_BT_SLEEP;
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - BT SLEEP\n");
    }

    if (HAL_WOW_CTRL(ah, HAL_WOW_OFFLOAD_SW_NULL_DISABLE)) {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - SW NULL\n");
        wow_feature_enable &= ~AR_WOW_OFFLOAD_ENA_SW_NULL;
    } else {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - SW NULL\n");
        wow_feature_enable |= AR_WOW_OFFLOAD_ENA_SW_NULL;
    }

    if (HAL_WOW_CTRL(ah, HAL_WOW_OFFLOAD_DEVID_SWAR_DISABLE)) {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - DevID SWAR\n");
        wow_feature_enable &= ~AR_WOW_OFFLOAD_ENA_DEVID_SWAR;
    } else {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - DevID SWAR\n");
        wow_feature_enable |= AR_WOW_OFFLOAD_ENA_DEVID_SWAR;
    }

    if (pattern_enable & AH_WOW_ACER_KEEP_ALIVE_EN) {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - Acer SWKA\n");
        wow_feature_enable |= AR_WOW_OFFLOAD_ENA_SWKA;
    } else {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - Acer SWKA\n");
        wow_feature_enable &= ~AR_WOW_OFFLOAD_ENA_SWKA;
    }

    if (pattern_enable & AH_WOW_ACER_MAGIC_EN) {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - Standard Magic\n");
        wow_feature_enable &= ~AR_WOW_OFFLOAD_ENA_STD_MAGIC;
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - Acer Magic\n");
        wow_feature_enable |= AR_WOW_OFFLOAD_ENA_ACER_MAGIC;
    } else {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - Standard Magic\n");
        wow_feature_enable |= AR_WOW_OFFLOAD_ENA_STD_MAGIC;
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - Acer Magic\n");
        wow_feature_enable &= ~AR_WOW_OFFLOAD_ENA_ACER_MAGIC;
    }

    if ((pattern_enable & AH_WOW_4WAY_HANDSHAKE_EN) ||
        HAL_WOW_CTRL(ah, HAL_WOW_OFFLOAD_FORCE_4WAY_HS_WAKE)) {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - 4Way Handshake\n");
        wow_feature_enable |= AR_WOW_OFFLOAD_ENA_4WAY_WAKE;
    } else {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - 4Way Handshake\n");
        wow_feature_enable &= ~AR_WOW_OFFLOAD_ENA_4WAY_WAKE;
    }

    if ((pattern_enable & AH_WOW_AP_ASSOCIATION_LOST_EN) ||
        HAL_WOW_CTRL(ah, HAL_WOW_OFFLOAD_FORCE_AP_LOSS_WAKE))
    {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - AP loss wake\n");
        wow_feature_enable |= AR_WOW_OFFLOAD_ENA_AP_LOSS_WAKE;
    } else {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - AP loss wake\n");
        wow_feature_enable &= ~AR_WOW_OFFLOAD_ENA_AP_LOSS_WAKE;
    }

    if ((pattern_enable & AH_WOW_GTK_HANDSHAKE_ERROR_EN) ||
        HAL_WOW_CTRL(ah, HAL_WOW_OFFLOAD_FORCE_GTK_ERR_WAKE))
    {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - GTK error wake\n");
        wow_feature_enable |= AR_WOW_OFFLOAD_ENA_GTK_ERROR_WAKE;
    } else {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - GTK error wake\n");
        wow_feature_enable &= ~AR_WOW_OFFLOAD_ENA_GTK_ERROR_WAKE;
    }

    if (pattern_enable & AH_WOW_GTK_OFFLOAD_EN) {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - GTK offload\n");
        wow_feature_enable |= AR_WOW_OFFLOAD_ENA_GTK;
    } else {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - GTK offload\n");
        wow_feature_enable &= ~AR_WOW_OFFLOAD_ENA_GTK;
    }

    if (pattern_enable & AH_WOW_ARP_OFFLOAD_EN) {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - ARP offload\n");
        wow_feature_enable |= AR_WOW_OFFLOAD_ENA_ARP_OFFLOAD;
    } else {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - ARP offload\n");
        wow_feature_enable &= ~AR_WOW_OFFLOAD_ENA_ARP_OFFLOAD;
    }

    if (pattern_enable & AH_WOW_NS_OFFLOAD_EN) {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) ENA - NS offload\n");
        wow_feature_enable |= AR_WOW_OFFLOAD_ENA_NS_OFFLOAD;
    } else {
        HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) DIS - NS offload\n");
        wow_feature_enable &= ~AR_WOW_OFFLOAD_ENA_NS_OFFLOAD;
    }

#endif /* ATH_WOW_OFFLOAD */

    /* For Kite and later versions of the chips,
     * enable wow pattern match for packets less than
     * 256 bytes for all patterns.
     */
    /* XXX */
    OS_REG_WRITE(
        ah, AR_WOW_PATTERN_MATCH_LT_256B_REG, AR_WOW_PATTERN_SUPPORTED);

    /*
     * Set the power states appropriately and enable PME.
     */
    val = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_PCIE_PM_CTRL));
    val |=
        AR_PMCTRL_PWR_STATE_D1D3 |
        AR_PMCTRL_HOST_PME_EN |
        AR_PMCTRL_PWR_PM_CTRL_ENA;
    val &= ~AR_PCIE_PM_CTRL_ENA;
    OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_PCIE_PM_CTRL), val);

    /* Wake on Timer Interrupt. Test mode only. Used in Manufacturing line. */
    if (timeout_in_seconds) {
        /* convert Timeout to u_secs */
        OS_REG_WRITE(ah, AR_NEXT_NDP_TIMER,
            OS_REG_READ(ah, AR_TSF_L32) + timeout_in_seconds * 1000000);
        /* timer_period = 30 seconds always */
        OS_REG_WRITE(ah, AR_NDP_PERIOD, 30 * 1000000);
        OS_REG_WRITE(ah, AR_TIMER_MODE, OS_REG_READ(ah, AR_TIMER_MODE) | 0x80);
        OS_REG_WRITE(ah, AR_IMR_S5, OS_REG_READ(ah, AR_IMR_S5) | 0x80);
        OS_REG_WRITE(ah, AR_IMR, OS_REG_READ(ah, AR_IMR) | AR_IMR_GENTMR);
        if (clearbssid) {
            OS_REG_WRITE(ah, AR_BSS_ID0, 0);
            OS_REG_WRITE(ah, AR_BSS_ID1, 0);
        }
    }

    /* Enable Seq# generation when asleep. */
    OS_REG_WRITE(ah, AR_STA_ID1,
        OS_REG_READ(ah, AR_STA_ID1) & ~AR_STA_ID1_PRESERVE_SEQNUM);

    AH_PRIVATE(ah)->ah_wow_event_mask = wow_event_mask;

#if ATH_WOW_OFFLOAD
    if (offloadEnable) {
        /* Force MAC awake before entering SW WoW mode */
        OS_REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
#if ATH_SUPPORT_MCI
        if (AH_PRIVATE(ah)->ah_caps.halMciSupport) {
            OS_REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
        }
#endif

        OS_REG_WRITE(ah, AR_WOW_OFFLOAD_COMMAND_JUPITER, wow_feature_enable);
        OS_REG_WRITE(ah, AR_WOW_OFFLOAD_STATUS_JUPITER, 0x0);
        if (wow_feature_enable & AR_WOW_OFFLOAD_ENA_SW_NULL) {
            OS_REG_WRITE(ah, AR_WOW_SW_NULL_PARAMETER,
                ((1000) |
                 (4 << AR_WOW_SW_NULL_SHORT_PERIOD_MASK_S)));
        }

        if (wow_feature_enable & AR_WOW_OFFLOAD_ENA_DEVID_SWAR) {
            ar9300_wowoffload_download_devid_swar(ah);
        }

        ar9300_wow_offload_download_hal_params(ah);
        ar9300_wow_offload_handshake(ah, pattern_enable);
        AH9300(ah)->ah_chip_full_sleep = AH_FALSE;

        //OS_REG_SET_BIT(ah, AR_SW_WOW_CONTROL, AR_HW_WOW_DISABLE);
    }
    else
#endif /* ATH_WOW_OFFLOAD */
    {
#if ATH_SUPPORT_MCI
        if (AH_PRIVATE(ah)->ah_caps.halMciSupport) {
            OS_REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
        }
#endif
        ar9300_set_power_mode_wow_sleep(ah);
        AH9300(ah)->ah_chip_full_sleep = AH_TRUE;
    }

    return (AH_TRUE);
}

u_int32_t
//ar9300_wow_wake_up(struct ath_hal *ah, u_int8_t *chipPatternBytes)
ar9300_wow_wake_up(struct ath_hal *ah, HAL_BOOL offloadEnabled)
{
    uint32_t wow_status = 0;
    uint32_t val = 0, rval;

    OS_REG_CLR_BIT(ah, AR_SW_WOW_CONTROL, AR_HW_WOW_DISABLE);
    OS_REG_CLR_BIT(ah, AR_SW_WOW_CONTROL, AR_SW_WOW_ENABLE);

#if ATH_WOW_OFFLOAD
    /* If WoW was offloaded to the embedded CPU, use the global
     * shared register to learn the wakeup reason */
    if (offloadEnabled) {
        val = OS_REG_READ(ah, AR_EMB_CPU_WOW_STATUS);
        if (val) {
            if (val & AR_EMB_CPU_WOW_STATUS_MAGIC_PATTERN) {
                HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) SW MAGIC_PATTERN\n");
                wow_status |= AH_WOW_MAGIC_PATTERN_EN;
            }
            if (val & AR_EMB_CPU_WOW_STATUS_PATTERN_MATCH) {
                HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) SW USER_PATTERN\n");
                wow_status |= AH_WOW_USER_PATTERN_EN;
            }
            if (val & AR_EMB_CPU_WOW_STATUS_KEEP_ALIVE_FAIL) {
                HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) SW KEEP_ALIVE_FAIL\n");
                wow_status |= AH_WOW_LINK_CHANGE;
            }
            if (val & AR_EMB_CPU_WOW_STATUS_BEACON_MISS) {
                HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) SW BEACON_FAIL\n");
                wow_status |= AH_WOW_BEACON_MISS;
            }
        }

        /* Clear status and mask registers */
        OS_REG_WRITE(ah, AR_EMB_CPU_WOW_STATUS, 0x0);
        OS_REG_WRITE(ah, AR_EMB_CPU_WOW_ENABLE, 0);
        OS_REG_WRITE(ah, AR_MBOX_CTRL_STATUS, 0);

    }
    else
#endif /* ATH_WOW_OFFLOAD */
    {
        /*
         * Read the WOW Status register to learn the wakeup reason.
         */
        rval = OS_REG_READ(ah, AR_WOW_PATTERN_REG);
        val = AR_WOW_STATUS(rval);

        /*
         * Mask only the WOW events that we have enabled.
         * Sometimes we see spurious WOW events in the AR_WOW_PATTERN_REG
         * register. This mask will clean them up.
         */
        val &= AH_PRIVATE(ah)->ah_wow_event_mask;

        if (val) {
            if (val & AR_WOW_MAGIC_PAT_FOUND) {
                HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) HW MAGIC_PATTERN\n");
                wow_status |= AH_WOW_MAGIC_PATTERN_EN;
            }
            if (AR_WOW_PATTERN_FOUND(val)) {
                //int i, offset;
                //offset = OS_REG_READ(ah, AR_WOW_RXBUF_START_ADDR);
                //// Read matched pattern for wake packet detection indication.
                //for (i = 0; i < MAX_PATTERN_SIZE / 4; i += 4)
                //{
                //    // RX FIFO is only 8K wrapping.
                //    if (offset >= 8 * 1024 / 4) offset = 0;
                //    *(u_int32_t*)(chipPatternBytes + i) = OS_REG_READ(ah, offset);
                //    offset++;
                //}
                wow_status |= AH_WOW_USER_PATTERN_EN;
                HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) HW USER_PATTERN\n");
            }
            if (val & AR_WOW_KEEP_ALIVE_FAIL) {
                HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) HW KEEP_ALIVE_FAIL\n");
                wow_status |= AH_WOW_LINK_CHANGE;
            }
            if (val & AR_WOW_BEACON_FAIL) {
                HALDEBUG(ah, HAL_DEBUG_UNMASKABLE, "(WOW) HW BEACON_FAIL\n");
                wow_status |= AH_WOW_BEACON_MISS;
            }
        }
    }

    /*
     * Set and clear WOW_PME_CLEAR registers for the chip to generate the
     * next wow signal.
     * Disable D3 before accessing other registers?
     */
    val = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_PCIE_PM_CTRL));
    /* Check the bit value 0x01000000 (7-10)? */
    val &= ~AR_PMCTRL_PWR_STATE_D1D3;
    val |= AR_PMCTRL_WOW_PME_CLR;
    OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_PCIE_PM_CTRL), val);

    /*
     * Clear all events.
     */
    OS_REG_WRITE(ah, AR_WOW_PATTERN_REG,
        AR_WOW_CLEAR_EVENTS(OS_REG_READ(ah, AR_WOW_PATTERN_REG)));

    //HALDEBUG(AH_NULL, HAL_DEBUG_UNMASKABLE,
    //    "%s: Skip PCIE WA programming\n", __func__);
#if 0
    /*
     * Tie the reset register back.
     * FIXME: Per David Quan, not tying it back might have some repercussions.
     */
    /* XXX */
    OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_WA), OS_REG_READ(ah, AR_WA) |
        AR_WA_UNTIE_RESET_EN | AR_WA_POR_SHORT | AR_WA_RESET_EN);
#endif

    /* Restore the Beacon Threshold to its init value */
    OS_REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR, INIT_RSSI_THR);

    /*
     * Restore the way the PCI-E Reset, Power-On-Reset and external
     * PCIE_POR_SHORT pins are tied to their original values. Just before WOW
     * sleep we untied the PCI-E Reset from our chip's Power-On-Reset so that
     * any PCI-E reset from the bus would not reset our chip.
     */
    HALDEBUG(AH_NULL, HAL_DEBUG_UNMASKABLE, "%s: restore AR_WA\n", __func__);
    if (AH_PRIVATE(ah)->ah_is_pci_express == AH_TRUE) {
        ar9300_config_pci_power_save(ah, 0, 0);
    }

    AH_PRIVATE(ah)->ah_wow_event_mask = 0;
    HALDEBUG(AH_NULL, HAL_DEBUG_UNMASKABLE,
        "(WOW) wow_status=%08x\n", wow_status);

    return (wow_status);
}

void
ar9300_wow_set_gpio_reset_low(struct ath_hal *ah)
{
    uint32_t val;

    val = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_GPIO_OE_OUT));
    val |= (1 << (2 * 2));
    OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_GPIO_OE_OUT), val);
    val = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_GPIO_OE_OUT));
    /* val = OS_REG_READ(ah, AR_GPIO_IN_OUT); */
}
#endif /* ATH_WOW */