/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"

void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
{
	REG_WRITE(ah, reg, val);

	if (ah->config.analog_shiftreg)
		udelay(100);
}

void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
			       u32 shift, u32 val)
{
	REG_RMW(ah, reg, ((val << shift) & mask), mask);

	if (ah->config.analog_shiftreg)
		udelay(100);
}

int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
			     int16_t targetLeft, int16_t targetRight)
{
	int16_t rv;

	if (srcRight == srcLeft) {
		rv = targetLeft;
	} else {
		rv = (int16_t) (((target - srcLeft) * targetRight +
				 (srcRight - target) * targetLeft) /
				(srcRight - srcLeft));
	}
	return rv;
}

bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
				    u16 *indexL, u16 *indexR)
{
	u16 i;

	if (target <= pList[0]) {
		*indexL = *indexR = 0;
		return true;
	}
	if (target >= pList[listSize - 1]) {
		*indexL = *indexR = (u16) (listSize - 1);
		return true;
	}

	for (i = 0; i < listSize - 1; i++) {
		if (pList[i] == target) {
			*indexL = *indexR = i;
			return true;
		}
		if (target < pList[i + 1]) {
			*indexL = i;
			*indexR = (u16) (i + 1);
			return false;
		}
	}
	return false;
}

void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
				  int eep_start_loc, int size)
{
	int i = 0, j, addr;
	u32 addrdata[8];
	u32 data[8];

	for (addr = 0; addr < size; addr++) {
		addrdata[i] = AR5416_EEPROM_OFFSET +
			((addr + eep_start_loc) << AR5416_EEPROM_S);
		i++;
		if (i == 8) {
			REG_READ_MULTI(ah, addrdata, data, i);

			for (j = 0; j < i; j++) {
				*eep_data = data[j];
				eep_data++;
			}
			i = 0;
		}
	}

	if (i != 0) {
		REG_READ_MULTI(ah, addrdata, data, i);

		for (j = 0; j < i; j++) {
			*eep_data = data[j];
			eep_data++;
		}
	}
}

static bool ath9k_hw_nvram_read_array(u16 *blob, size_t blob_size,
				      off_t offset, u16 *data)
{
	if (offset >= blob_size)
		return false;

	*data = blob[offset];
	return true;
}

static bool ath9k_hw_nvram_read_firmware(const struct firmware *eeprom_blob,
					 off_t offset, u16 *data)
{
	return ath9k_hw_nvram_read_array((u16 *) eeprom_blob->data,
					 eeprom_blob->size / sizeof(u16),
					 offset, data);
}

static bool ath9k_hw_nvram_read_nvmem(struct ath_hw *ah, off_t offset,
				      u16 *data)
{
	return ath9k_hw_nvram_read_array(ah->nvmem_blob,
					 ah->nvmem_blob_len / sizeof(u16),
					 offset, data);
}

bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data)
{
	struct ath_common *common = ath9k_hw_common(ah);
	bool ret;

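	/*
	 * Pick the NVRAM backend in order of preference: an nvmem-supplied
	 * blob first, then a firmware-loaded EEPROM blob, and finally the
	 * bus-level eeprom_read op.
	 */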
	if (ah->nvmem_blob)
		ret = ath9k_hw_nvram_read_nvmem(ah, off, data);
	else if (ah->eeprom_blob)
		ret = ath9k_hw_nvram_read_firmware(ah->eeprom_blob, off, data);
	else
		ret = common->bus_ops->eeprom_read(common, off, data);

	if (!ret)
		ath_dbg(common, EEPROM,
			"unable to read eeprom region at offset %u\n", off);

	return ret;
}

int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size)
{
	u16 magic;
	u16 *eepdata;
	int i;
	bool needs_byteswap = false;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
		ath_err(common, "Reading Magic # failed\n");
		return -EIO;
	}

	if (swab16(magic) == AR5416_EEPROM_MAGIC) {
		needs_byteswap = true;
		ath_dbg(common, EEPROM,
			"EEPROM needs byte-swapping to correct endianness.\n");
	} else if (magic != AR5416_EEPROM_MAGIC) {
		if (ath9k_hw_use_flash(ah)) {
			ath_dbg(common, EEPROM,
				"Ignoring invalid EEPROM magic (0x%04x).\n",
				magic);
		} else {
			ath_err(common,
				"Invalid EEPROM magic (0x%04x).\n", magic);
			return -EINVAL;
		}
	}

	if (needs_byteswap) {
		if (ah->ah_flags & AH_NO_EEP_SWAP) {
			ath_info(common,
				 "Ignoring endianness difference in EEPROM magic bytes.\n");
		} else {
			eepdata = (u16 *)(&ah->eeprom);

			for (i = 0; i < size; i++)
				eepdata[i] = swab16(eepdata[i]);
		}
	}

	if (ah->eep_ops->get_eepmisc(ah) & AR5416_EEPMISC_BIG_ENDIAN) {
		*swap_needed = true;
		ath_dbg(common, EEPROM,
			"Big Endian EEPROM detected according to EEPMISC register.\n");
	} else {
		*swap_needed = false;
	}

	return 0;
}

bool ath9k_hw_nvram_validate_checksum(struct ath_hw *ah, int size)
{
	u32 i, sum = 0;
	u16 *eepdata = (u16 *)(&ah->eeprom);
	struct ath_common *common = ath9k_hw_common(ah);

	for (i = 0; i < size; i++)
		sum ^= eepdata[i];

	if (sum != 0xffff) {
		ath_err(common, "Bad EEPROM checksum 0x%x\n", sum);
		return false;
	}

	return true;
}

bool ath9k_hw_nvram_check_version(struct ath_hw *ah, int version, int minrev)
{
	struct ath_common *common = ath9k_hw_common(ah);

	if (ah->eep_ops->get_eeprom_ver(ah) != version ||
	    ah->eep_ops->get_eeprom_rev(ah) < minrev) {
		ath_err(common, "Bad EEPROM VER 0x%04x or REV 0x%04x\n",
			ah->eep_ops->get_eeprom_ver(ah),
			ah->eep_ops->get_eeprom_rev(ah));
		return false;
	}

	return true;
}

void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
			     u8 *pVpdList, u16 numIntercepts,
			     u8 *pRetVpdList)
{
	u16 i, k;
	u8 currPwr = pwrMin;
	u16 idxL = 0, idxR = 0;

	for (i = 0; i <= (pwrMax - pwrMin) / 2; i++) {
		ath9k_hw_get_lower_upper_index(currPwr, pPwrList,
					       numIntercepts, &(idxL),
					       &(idxR));
		if (idxR < 1)
			idxR = 1;
		if (idxL == numIntercepts - 1)
			idxL = (u16) (numIntercepts - 2);
		if (pPwrList[idxL] == pPwrList[idxR])
			k = pVpdList[idxL];
		else
			k = (u16)(((currPwr - pPwrList[idxL]) * pVpdList[idxR] +
				   (pPwrList[idxR] - currPwr) * pVpdList[idxL]) /
				  (pPwrList[idxR] - pPwrList[idxL]));
		pRetVpdList[i] = (u8) k;
		currPwr += 2;
	}
}

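/*
 * Look up the legacy (non-HT) target powers for a channel: an exact
 * calibration pier match is copied as-is, otherwise the per-rate powers
 * are interpolated between the two neighbouring piers.
 */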
void ath9k_hw_get_legacy_target_powers(struct ath_hw *ah,
				       struct ath9k_channel *chan,
				       struct cal_target_power_leg *powInfo,
				       u16 numChannels,
				       struct cal_target_power_leg *pNewPower,
				       u16 numRates,
				       bool isExtTarget)
{
	struct chan_centers centers;
	u16 clo, chi;
	int i;
	int matchIndex = -1, lowIndex = -1;
	u16 freq;

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = (isExtTarget) ? centers.ext_center : centers.ctl_center;

	if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel,
				       IS_CHAN_2GHZ(chan))) {
		matchIndex = 0;
	} else {
		for (i = 0; (i < numChannels) &&
			     (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
			if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel,
						       IS_CHAN_2GHZ(chan))) {
				matchIndex = i;
				break;
			} else if (freq < ath9k_hw_fbin2freq(powInfo[i].bChannel,
						IS_CHAN_2GHZ(chan)) && i > 0 &&
				   freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel,
						IS_CHAN_2GHZ(chan))) {
				lowIndex = i - 1;
				break;
			}
		}
		if ((matchIndex == -1) && (lowIndex == -1))
			matchIndex = i - 1;
	}

	if (matchIndex != -1) {
		*pNewPower = powInfo[matchIndex];
	} else {
		clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
					 IS_CHAN_2GHZ(chan));
		chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
					 IS_CHAN_2GHZ(chan));

		for (i = 0; i < numRates; i++) {
			pNewPower->tPow2x[i] =
				(u8)ath9k_hw_interpolate(freq, clo, chi,
						powInfo[lowIndex].tPow2x[i],
						powInfo[lowIndex + 1].tPow2x[i]);
		}
	}
}

void ath9k_hw_get_target_powers(struct ath_hw *ah,
				struct ath9k_channel *chan,
				struct cal_target_power_ht *powInfo,
				u16 numChannels,
				struct cal_target_power_ht *pNewPower,
				u16 numRates, bool isHt40Target)
{
	struct chan_centers centers;
	u16 clo, chi;
	int i;
	int matchIndex = -1, lowIndex = -1;
	u16 freq;

	ath9k_hw_get_channel_centers(ah, chan, &centers);
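	/*
	 * HT40 targets are indexed by the synthesizer centre frequency,
	 * HT20 targets by the control channel centre.
	 */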
	freq = isHt40Target ? centers.synth_center : centers.ctl_center;

	if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) {
		matchIndex = 0;
	} else {
		for (i = 0; (i < numChannels) &&
			     (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
			if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel,
						       IS_CHAN_2GHZ(chan))) {
				matchIndex = i;
				break;
			} else
				if (freq < ath9k_hw_fbin2freq(powInfo[i].bChannel,
						IS_CHAN_2GHZ(chan)) && i > 0 &&
				    freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel,
						IS_CHAN_2GHZ(chan))) {
					lowIndex = i - 1;
					break;
				}
		}
		if ((matchIndex == -1) && (lowIndex == -1))
			matchIndex = i - 1;
	}

	if (matchIndex != -1) {
		*pNewPower = powInfo[matchIndex];
	} else {
		clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
					 IS_CHAN_2GHZ(chan));
		chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
					 IS_CHAN_2GHZ(chan));

		for (i = 0; i < numRates; i++) {
			pNewPower->tPow2x[i] = (u8)ath9k_hw_interpolate(freq,
						clo, chi,
						powInfo[lowIndex].tPow2x[i],
						powInfo[lowIndex + 1].tPow2x[i]);
		}
	}
}

u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
				bool is2GHz, int num_band_edges)
{
	u16 twiceMaxEdgePower = MAX_RATE_POWER;
	int i;

	for (i = 0; (i < num_band_edges) &&
		     (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
		if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) {
			twiceMaxEdgePower = CTL_EDGE_TPOWER(pRdEdgesPower[i].ctl);
			break;
		} else if ((i > 0) &&
			   (freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
						      is2GHz))) {
			if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel,
					       is2GHz) < freq &&
			    CTL_EDGE_FLAGS(pRdEdgesPower[i - 1].ctl)) {
				twiceMaxEdgePower =
					CTL_EDGE_TPOWER(pRdEdgesPower[i - 1].ctl);
			}
			break;
		}
	}

	return twiceMaxEdgePower;
}

u16 ath9k_hw_get_scaled_power(struct ath_hw *ah, u16 power_limit,
			      u8 antenna_reduction)
{
	u16 reduction = antenna_reduction;

	/*
	 * Reduce scaled Power by number of chains active
	 * to get the per chain tx power level.
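	 * No correction is needed for a single chain; two and three
	 * active chains are reduced by POWER_CORRECTION_FOR_TWO_CHAIN
	 * and POWER_CORRECTION_FOR_THREE_CHAIN respectively.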
	 */
	switch (ar5416_get_ntxchains(ah->txchainmask)) {
	case 1:
		break;
	case 2:
		reduction += POWER_CORRECTION_FOR_TWO_CHAIN;
		break;
	case 3:
		reduction += POWER_CORRECTION_FOR_THREE_CHAIN;
		break;
	}

	if (power_limit > reduction)
		power_limit -= reduction;
	else
		power_limit = 0;

	return min_t(u16, power_limit, MAX_RATE_POWER);
}

void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);

	switch (ar5416_get_ntxchains(ah->txchainmask)) {
	case 1:
		break;
	case 2:
		regulatory->max_power_level += POWER_CORRECTION_FOR_TWO_CHAIN;
		break;
	case 3:
		regulatory->max_power_level += POWER_CORRECTION_FOR_THREE_CHAIN;
		break;
	default:
		ath_dbg(common, EEPROM, "Invalid chainmask configuration\n");
		break;
	}
}

void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
				struct ath9k_channel *chan,
				void *pRawDataSet,
				u8 *bChans, u16 availPiers,
				u16 tPdGainOverlap,
				u16 *pPdGainBoundaries, u8 *pPDADCValues,
				u16 numXpdGains)
{
	int i, j, k;
	int16_t ss;
	u16 idxL = 0, idxR = 0, numPiers;
	static u8 vpdTableL[AR5416_NUM_PD_GAINS]
		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];
	static u8 vpdTableR[AR5416_NUM_PD_GAINS]
		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];
	static u8 vpdTableI[AR5416_NUM_PD_GAINS]
		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];

	u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
	u8 minPwrT4[AR5416_NUM_PD_GAINS];
	u8 maxPwrT4[AR5416_NUM_PD_GAINS];
	int16_t vpdStep;
	int16_t tmpVal;
	u16 sizeCurrVpdTable, maxIndex, tgtIndex;
	bool match;
	int16_t minDelta = 0;
	struct chan_centers centers;
	int pdgain_boundary_default;
	struct cal_data_per_freq *data_def = pRawDataSet;
	struct cal_data_per_freq_4k *data_4k = pRawDataSet;
	struct cal_data_per_freq_ar9287 *data_9287 = pRawDataSet;
	bool eeprom_4k = AR_SREV_9285(ah) || AR_SREV_9271(ah);
	int intercepts;

	if (AR_SREV_9287(ah))
		intercepts = AR9287_PD_GAIN_ICEPTS;
	else
		intercepts = AR5416_PD_GAIN_ICEPTS;

	memset(&minPwrT4, 0, AR5416_NUM_PD_GAINS);
	ath9k_hw_get_channel_centers(ah, chan, &centers);

	for (numPiers = 0; numPiers < availPiers; numPiers++) {
		if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
			break;
	}

	match = ath9k_hw_get_lower_upper_index((u8)FREQ2FBIN(centers.synth_center,
							     IS_CHAN_2GHZ(chan)),
					       bChans, numPiers, &idxL, &idxR);

	if (match) {
		if (AR_SREV_9287(ah)) {
			for (i = 0; i < numXpdGains; i++) {
				minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
				maxPwrT4[i] = data_9287[idxL].pwrPdg[i][intercepts - 1];
				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						data_9287[idxL].pwrPdg[i],
						data_9287[idxL].vpdPdg[i],
						intercepts,
						vpdTableI[i]);
			}
		} else if (eeprom_4k) {
			for (i = 0; i < numXpdGains; i++) {
				minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
				maxPwrT4[i] = data_4k[idxL].pwrPdg[i][intercepts - 1];
				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						data_4k[idxL].pwrPdg[i],
						data_4k[idxL].vpdPdg[i],
						intercepts,
						vpdTableI[i]);
			}
		} else {
			for (i = 0; i < numXpdGains; i++) {
				minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
				maxPwrT4[i] = data_def[idxL].pwrPdg[i][intercepts - 1];
				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						data_def[idxL].pwrPdg[i],
						data_def[idxL].vpdPdg[i],
						intercepts,
						vpdTableI[i]);
			}
		}
	} else {
		for (i = 0; i < numXpdGains; i++) {
			if (AR_SREV_9287(ah)) {
				pVpdL = data_9287[idxL].vpdPdg[i];
				pPwrL = data_9287[idxL].pwrPdg[i];
				pVpdR = data_9287[idxR].vpdPdg[i];
				pPwrR = data_9287[idxR].pwrPdg[i];
			} else if (eeprom_4k) {
				pVpdL = data_4k[idxL].vpdPdg[i];
				pPwrL = data_4k[idxL].pwrPdg[i];
				pVpdR = data_4k[idxR].vpdPdg[i];
				pPwrR = data_4k[idxR].pwrPdg[i];
			} else {
				pVpdL = data_def[idxL].vpdPdg[i];
				pPwrL = data_def[idxL].pwrPdg[i];
				pVpdR = data_def[idxR].vpdPdg[i];
				pPwrR = data_def[idxR].pwrPdg[i];
			}

			minPwrT4[i] = max(pPwrL[0], pPwrR[0]);

			maxPwrT4[i] =
				min(pPwrL[intercepts - 1],
				    pPwrR[intercepts - 1]);

			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						pPwrL, pVpdL,
						intercepts,
						vpdTableL[i]);
			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						pPwrR, pVpdR,
						intercepts,
						vpdTableR[i]);

			for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
				vpdTableI[i][j] =
					(u8)(ath9k_hw_interpolate((u16)
						FREQ2FBIN(centers.
							  synth_center,
							  IS_CHAN_2GHZ
							  (chan)),
						bChans[idxL], bChans[idxR],
						vpdTableL[i][j], vpdTableR[i][j]));
			}
		}
	}

	k = 0;

	for (i = 0; i < numXpdGains; i++) {
		if (i == (numXpdGains - 1))
			pPdGainBoundaries[i] =
				(u16)(maxPwrT4[i] / 2);
		else
			pPdGainBoundaries[i] =
				(u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4);

		pPdGainBoundaries[i] =
			min((u16)MAX_RATE_POWER, pPdGainBoundaries[i]);

		minDelta = 0;

		if (i == 0) {
			if (AR_SREV_9280_20_OR_LATER(ah))
				ss = (int16_t)(0 - (minPwrT4[i] / 2));
			else
				ss = 0;
		} else {
			ss = (int16_t)((pPdGainBoundaries[i - 1] -
					(minPwrT4[i] / 2)) -
				       tPdGainOverlap + 1 + minDelta);
		}
		vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
		vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);

		while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
			tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
			pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
			ss++;
		}

		sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
		tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap -
				(minPwrT4[i] / 2));
		maxIndex = (tgtIndex < sizeCurrVpdTable) ?
			tgtIndex : sizeCurrVpdTable;

		while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
			pPDADCValues[k++] = vpdTableI[i][ss++];
		}

		vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
				    vpdTableI[i][sizeCurrVpdTable - 2]);
		vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);

		if (tgtIndex >= maxIndex) {
			while ((ss <= tgtIndex) &&
			       (k < (AR5416_NUM_PDADC_VALUES - 1))) {
				tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] +
						    (ss - maxIndex + 1) * vpdStep));
				pPDADCValues[k++] = (u8)((tmpVal > 255) ?
							 255 : tmpVal);
				ss++;
			}
		}
	}
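
	/*
	 * Pad the remaining gain boundaries with a default value and
	 * repeat the last PDADC entry until all AR5416_NUM_PDADC_VALUES
	 * slots are filled.
	 */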
	if (eeprom_4k)
		pdgain_boundary_default = 58;
	else
		pdgain_boundary_default = pPdGainBoundaries[i - 1];

	while (i < AR5416_PD_GAINS_IN_MASK) {
		pPdGainBoundaries[i] = pdgain_boundary_default;
		i++;
	}

	while (k < AR5416_NUM_PDADC_VALUES) {
		pPDADCValues[k] = pPDADCValues[k - 1];
		k++;
	}
}

int ath9k_hw_eeprom_init(struct ath_hw *ah)
{
	if (AR_SREV_9300_20_OR_LATER(ah)) {
		ah->eep_ops = &eep_ar9300_ops;
	} else if (AR_SREV_9287(ah)) {
		ah->eep_ops = &eep_ar9287_ops;
	} else if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
		ah->eep_ops = &eep_4k_ops;
	} else {
		ah->eep_ops = &eep_def_ops;
	}

	if (!ah->eep_ops->fill_eeprom(ah))
		return -EIO;

	return ah->eep_ops->check_eeprom(ah);
}