/*
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/export.h>
#include "ath9k.h"
#include "reg.h"
#include "hw-ops.h"

const char *ath9k_hw_wow_event_to_string(u32 wow_event)
{
        if (wow_event & AH_WOW_MAGIC_PATTERN_EN)
                return "Magic pattern";
        if (wow_event & AH_WOW_USER_PATTERN_EN)
                return "User pattern";
        if (wow_event & AH_WOW_LINK_CHANGE)
                return "Link change";
        if (wow_event & AH_WOW_BEACON_MISS)
                return "Beacon miss";

        return "unknown reason";
}
EXPORT_SYMBOL(ath9k_hw_wow_event_to_string);

static void ath9k_hw_config_serdes_wow_sleep(struct ath_hw *ah)
{
        int i;

        for (i = 0; i < ah->iniPcieSerdesWow.ia_rows; i++)
                REG_WRITE(ah, INI_RA(&ah->iniPcieSerdesWow, i, 0),
                          INI_RA(&ah->iniPcieSerdesWow, i, 1));

        usleep_range(1000, 1500);
}

static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
{
        struct ath_common *common = ath9k_hw_common(ah);

        REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);

        /* set rx disable bit */
        REG_WRITE(ah, AR_CR, AR_CR_RXD);

        if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
                ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
                        REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
                return;
        } else {
                if (!AR_SREV_9300_20_OR_LATER(ah))
                        REG_WRITE(ah, AR_RXDP, 0x0);
        }

        /* AR9280 WoW has a sleep issue, do not set it to sleep */
        if (AR_SREV_9280_20(ah))
                return;

        REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
}

static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
{
        struct ath_common *common = ath9k_hw_common(ah);
        u8 sta_mac_addr[ETH_ALEN], ap_mac_addr[ETH_ALEN];
        u32 ctl[13] = {0};
        u32 data_word[KAL_NUM_DATA_WORDS];
        u8 i;
        u32 wow_ka_data_word0;

        memcpy(sta_mac_addr, common->macaddr, ETH_ALEN);
        memcpy(ap_mac_addr, common->curbssid, ETH_ALEN);

        /* set the transmit buffer */
        ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));

        if (!(AR_SREV_9300_20_OR_LATER(ah)))
                ctl[0] += (KAL_ANTENNA_MODE << 25);

        ctl[1] = 0;
        ctl[3] = 0xb;   /* OFDM_6M hardware value for this rate */
        ctl[4] = 0;
        ctl[7] = (ah->txchainmask) << 2;

        if (AR_SREV_9300_20_OR_LATER(ah))
                ctl[2] = 0xf << 16; /* tx_tries 0 */
        else
                ctl[2] = 0x7 << 16; /* tx_tries 0 */

        for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
                REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);

        /* the AR9300 family uses 13 descriptor words */
        if (AR_SREV_9300_20_OR_LATER(ah))
                REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);

        data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
                       (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
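        /*
         * 802.11 header of the keep-alive frame, packed as little-endian
         * 32-bit words: addr1/addr3 = AP (BSSID), addr2 = STA.
         */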
        data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
                       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
        data_word[2] = (sta_mac_addr[1] << 24) | (sta_mac_addr[0] << 16) |
                       (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
        data_word[3] = (sta_mac_addr[5] << 24) | (sta_mac_addr[4] << 16) |
                       (sta_mac_addr[3] << 8) | (sta_mac_addr[2]);
        data_word[4] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
                       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
        data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);

        if (AR_SREV_9462_20(ah)) {
                /* AR9462 2.0 has an extra descriptor word (time based
                 * discard) compared to other chips */
                REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
                wow_ka_data_word0 = AR_WOW_TXBUF(13);
        } else {
                wow_ka_data_word0 = AR_WOW_TXBUF(12);
        }

        for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
                REG_WRITE(ah, (wow_ka_data_word0 + i * 4), data_word[i]);
}

void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
                                u8 *user_mask, int pattern_count,
                                int pattern_len)
{
        int i;
        u32 pattern_val, mask_val;
        u32 set, clr;

        /* FIXME: should check count by querying the hardware capability */
        if (pattern_count >= MAX_NUM_PATTERN)
                return;

        REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));

        /* set the registers for pattern */
        for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
                memcpy(&pattern_val, user_pattern, 4);
                REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
                          pattern_val);
                user_pattern += 4;
        }

        /* set the registers for mask */
        for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
                memcpy(&mask_val, user_mask, 4);
                REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
                user_mask += 4;
        }

        /* set the pattern length to be matched
         *
         * AR_WOW_LENGTH1_REG1
         * bit 31:24 pattern 0 length
         * bit 23:16 pattern 1 length
         * bit 15:8  pattern 2 length
         * bit 7:0   pattern 3 length
         *
         * AR_WOW_LENGTH1_REG2
         * bit 31:24 pattern 4 length
         * bit 23:16 pattern 5 length
         * bit 15:8  pattern 6 length
         * bit 7:0   pattern 7 length
         *
         * the logic below writes out the new pattern length for the
         * corresponding pattern_count, while masking out the other fields
         */

        ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);

        if (!AR_SREV_9285_12_OR_LATER(ah))
                return;

        if (pattern_count < 4) {
                /* Patterns 0-3 use the AR_WOW_LENGTH1 register */
                set = (pattern_len & AR_WOW_LENGTH_MAX) <<
                      AR_WOW_LEN1_SHIFT(pattern_count);
                clr = AR_WOW_LENGTH1_MASK(pattern_count);
                REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
        } else {
                /* Patterns 4-7 use the AR_WOW_LENGTH2 register */
                set = (pattern_len & AR_WOW_LENGTH_MAX) <<
                      AR_WOW_LEN2_SHIFT(pattern_count);
                clr = AR_WOW_LENGTH2_MASK(pattern_count);
                REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
        }
}
EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);

u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
{
        u32 wow_status = 0;
        u32 val = 0, rval;

        /*
         * read the WoW status register to know
         * the wakeup reason
         */
        rval = REG_READ(ah, AR_WOW_PATTERN);
        val = AR_WOW_STATUS(rval);

        /*
         * mask only the WoW events that we have enabled. Sometimes
         * we have spurious WoW events from the AR_WOW_PATTERN
         * register. This mask will clean them up.
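         * (ah->wow_event_mask is populated by ath9k_hw_wow_enable() and
         * ath9k_hw_wow_apply_pattern().)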
         */
        val &= ah->wow_event_mask;

        if (val) {
                if (val & AR_WOW_MAGIC_PAT_FOUND)
                        wow_status |= AH_WOW_MAGIC_PATTERN_EN;

                if (AR_WOW_PATTERN_FOUND(val))
                        wow_status |= AH_WOW_USER_PATTERN_EN;

                if (val & AR_WOW_KEEP_ALIVE_FAIL)
                        wow_status |= AH_WOW_LINK_CHANGE;

                if (val & AR_WOW_BEACON_FAIL)
                        wow_status |= AH_WOW_BEACON_MISS;
        }

        /*
         * set and clear WOW_PME_CLEAR registers for the chip to
         * generate the next wow signal.
         * disable D3 before accessing other registers?
         */

        /* do we need to check the bit value 0x01000000 (7-10) ?? */
        REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR,
                AR_PMCTRL_PWR_STATE_D1D3);

        /*
         * clear all events
         */
        REG_WRITE(ah, AR_WOW_PATTERN,
                  AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));

        /*
         * tie the reset register back for the AR9002 family of chipsets.
         * NB: not tying it back might have some repercussions.
         */
        if (!AR_SREV_9300_20_OR_LATER(ah)) {
                REG_SET_BIT(ah, AR_WA, AR_WA_UNTIE_RESET_EN |
                            AR_WA_POR_SHORT | AR_WA_RESET_EN);
        }

        /*
         * restore the beacon threshold to the init value
         */
        REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);

        /*
         * Restore the way the PCI-E reset, Power-On-Reset and external
         * PCIE_POR_SHORT pins are tied to their original values.
         * Previously, just before WoW sleep, we untied the PCI-E
         * reset from our chip's Power-On-Reset so that a PCI-E
         * reset from the bus would not reset our chip
         */
        if (AR_SREV_9280_20_OR_LATER(ah) && ah->is_pciexpress)
                ath9k_hw_configpcipowersave(ah, false);

        ah->wow_event_mask = 0;

        return wow_status;
}
EXPORT_SYMBOL(ath9k_hw_wow_wakeup);

void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
{
        u32 wow_event_mask;
        u32 set, clr;

        /*
         * wow_event_mask is a mask to the AR_WOW_PATTERN register to
         * indicate which WoW events we have enabled. The WoW events
         * are from the 'pattern_enable' in this function and the
         * 'pattern_count' of ath9k_hw_wow_apply_pattern()
         */
        wow_event_mask = ah->wow_event_mask;

        /*
         * Untie Power-on-Reset from the PCI-E reset. When we are in
         * WoW sleep, we do not want a reset from the PCI-E to disturb
         * our hw state
         */
        if (ah->is_pciexpress) {
                /*
                 * we need to untie the internal POR (power-on-reset)
                 * from the external PCI-E reset. We also need to tie
                 * the PCI-E Phy reset to the PCI-E reset.
                 */
                if (AR_SREV_9300_20_OR_LATER(ah)) {
                        set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
                        clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
                        REG_RMW(ah, AR_WA, set, clr);
                } else {
                        if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
                                set = AR9285_WA_DEFAULT;
                        else
                                set = AR9280_WA_DEFAULT;

                        /*
                         * In AR9280 and AR9285, bit 14 in the WA register
                         * (disable L1) should only be set when the device
                         * enters the D3 state and be cleared when the device
                         * comes back to D0
                         */
                        if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
                                set |= AR_WA_D3_L1_DISABLE;

                        clr = AR_WA_UNTIE_RESET_EN;
                        set |= AR_WA_RESET_EN | AR_WA_POR_SHORT;
                        REG_RMW(ah, AR_WA, set, clr);

                        /*
                         * for WoW sleep, we reprogram the SerDes so that the
                         * PLL and CLK REQ are both enabled. This uses more
                         * power but otherwise WoW sleep is unstable and the
                         * chip may disappear.
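                         * (ath9k_hw_config_serdes_wow_sleep() writes the
                         * ah->iniPcieSerdesWow table for this.)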
                         */
                        if (AR_SREV_9285_12_OR_LATER(ah))
                                ath9k_hw_config_serdes_wow_sleep(ah);
                }
        }

        /*
         * set the power states appropriately and enable PME
         */
        set = AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA |
              AR_PMCTRL_AUX_PWR_DET | AR_PMCTRL_WOW_PME_CLR;

        /*
         * set and clear WOW_PME_CLEAR registers for the chip
         * to generate the next wow signal.
         */
        REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
        clr = AR_PMCTRL_WOW_PME_CLR;
        REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);

        /*
         * Setup for:
         *   - beacon misses
         *   - magic pattern
         *   - keep alive timeout
         *   - pattern matching
         */

        /*
         * Program default values for pattern backoff, aifs/slot/KAL count,
         * beacon miss timeout, KAL timeout, etc.
         */
        set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF);
        REG_SET_BIT(ah, AR_WOW_PATTERN, set);

        set = AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
              AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
              AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT);
        REG_SET_BIT(ah, AR_WOW_COUNT, set);

        if (pattern_enable & AH_WOW_BEACON_MISS)
                set = AR_WOW_BEACON_TIMO;
        /* We are not using beacon miss, program a large value */
        else
                set = AR_WOW_BEACON_TIMO_MAX;

        REG_WRITE(ah, AR_WOW_BCN_TIMO, set);

        /*
         * Keep alive timeout in ms except for AR9280
         */
        if (!pattern_enable || AR_SREV_9280(ah))
                set = AR_WOW_KEEP_ALIVE_NEVER;
        else
                set = KAL_TIMEOUT * 32;

        REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, set);

        /*
         * Keep alive delay, based on the 'power on clock' and
         * therefore in usec
         */
        set = KAL_DELAY * 1000;
        REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, set);

        /*
         * Create keep alive pattern to respond to beacons
         */
        ath9k_wow_create_keep_alive_pattern(ah);

        /*
         * Configure MAC WoW Registers
         */
        set = 0;
        /* Send keep alive timeouts anyway */
        clr = AR_WOW_KEEP_ALIVE_AUTO_DIS;

        if (pattern_enable & AH_WOW_LINK_CHANGE)
                wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
        else
                set = AR_WOW_KEEP_ALIVE_FAIL_DIS;

        /*
         * FIXME: For now disable keep alive frame
         * failure. This seems to sometimes trigger
         * unnecessary wake up with AR9485 chipsets.
         */
        set = AR_WOW_KEEP_ALIVE_FAIL_DIS;

        REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr);
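        /*
         * Note: because of the override above, keep-alive failure is always
         * disabled in hardware, even though wow_event_mask may still carry
         * AR_WOW_KEEP_ALIVE_FAIL when AH_WOW_LINK_CHANGE was requested.
         */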

        /*
         * We are relying on a bmiss failure, ensure we have
         * enough threshold to prevent false positives
         */
        REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
                      AR_WOW_BMISSTHRESHOLD);

        set = 0;
        clr = 0;

        if (pattern_enable & AH_WOW_BEACON_MISS) {
                set = AR_WOW_BEACON_FAIL_EN;
                wow_event_mask |= AR_WOW_BEACON_FAIL;
        } else {
                clr = AR_WOW_BEACON_FAIL_EN;
        }

        REG_RMW(ah, AR_WOW_BCN_EN, set, clr);

        set = 0;
        clr = 0;

        /*
         * Enable the magic packet registers
         */
        if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
                set = AR_WOW_MAGIC_EN;
                wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
        } else {
                clr = AR_WOW_MAGIC_EN;
        }

        set |= AR_WOW_MAC_INTR_EN;
        REG_RMW(ah, AR_WOW_PATTERN, set, clr);

        /*
         * For AR9285 and later versions of chipsets,
         * enable WoW pattern match for packets less
         * than 256 bytes for all patterns
         */
        if (AR_SREV_9285_12_OR_LATER(ah))
                REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
                          AR_WOW_PATTERN_SUPPORTED);

        /*
         * Set the power states appropriately and enable PME
         */
        clr = 0;
        set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN |
              AR_PMCTRL_PWR_PM_CTRL_ENA;

        /*
         * This is needed for AR9300 chipsets to wake up
         * the host.
         */
        if (AR_SREV_9300_20_OR_LATER(ah))
                clr = AR_PCIE_PM_CTRL_ENA;

        REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);

        if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                /*
                 * this is needed to prevent the chip waking up
                 * the host within 3-4 seconds with certain
                 * platforms/BIOSes. The fix is to enable
                 * D1 & D3 to match the original definition and
                 * also match the OTP value. Anyway, this
                 * is more related to SW WoW.
                 */
                clr = AR_PMCTRL_PWR_STATE_D1D3;
                REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);

                set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
                REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
        }

        REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);

        if (AR_SREV_9300_20_OR_LATER(ah)) {
                /* to bring down WoW power low margin */
                set = BIT(13);
                REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
                /* HW WoW */
                clr = BIT(5);
                REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
        }

        ath9k_hw_set_powermode_wow_sleep(ah);
        ah->wow_event_mask = wow_event_mask;
}
EXPORT_SYMBOL(ath9k_hw_wow_enable);