1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2018-2019 Realtek Corporation 3 */ 4 5 #include <linux/bcd.h> 6 7 #include "main.h" 8 #include "reg.h" 9 #include "fw.h" 10 #include "phy.h" 11 #include "debug.h" 12 13 struct phy_cfg_pair { 14 u32 addr; 15 u32 data; 16 }; 17 18 union phy_table_tile { 19 struct rtw_phy_cond cond; 20 struct phy_cfg_pair cfg; 21 }; 22 23 static const u32 db_invert_table[12][8] = { 24 {10, 13, 16, 20, 25 25, 32, 40, 50}, 26 {64, 80, 101, 128, 27 160, 201, 256, 318}, 28 {401, 505, 635, 800, 29 1007, 1268, 1596, 2010}, 30 {316, 398, 501, 631, 31 794, 1000, 1259, 1585}, 32 {1995, 2512, 3162, 3981, 33 5012, 6310, 7943, 10000}, 34 {12589, 15849, 19953, 25119, 35 31623, 39811, 50119, 63098}, 36 {79433, 100000, 125893, 158489, 37 199526, 251189, 316228, 398107}, 38 {501187, 630957, 794328, 1000000, 39 1258925, 1584893, 1995262, 2511886}, 40 {3162278, 3981072, 5011872, 6309573, 41 7943282, 1000000, 12589254, 15848932}, 42 {19952623, 25118864, 31622777, 39810717, 43 50118723, 63095734, 79432823, 100000000}, 44 {125892541, 158489319, 199526232, 251188643, 45 316227766, 398107171, 501187234, 630957345}, 46 {794328235, 1000000000, 1258925412, 1584893192, 47 1995262315, 2511886432U, 3162277660U, 3981071706U} 48 }; 49 50 u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M }; 51 u8 rtw_ofdm_rates[] = { 52 DESC_RATE6M, DESC_RATE9M, DESC_RATE12M, 53 DESC_RATE18M, DESC_RATE24M, DESC_RATE36M, 54 DESC_RATE48M, DESC_RATE54M 55 }; 56 u8 rtw_ht_1s_rates[] = { 57 DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2, 58 DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5, 59 DESC_RATEMCS6, DESC_RATEMCS7 60 }; 61 u8 rtw_ht_2s_rates[] = { 62 DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10, 63 DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13, 64 DESC_RATEMCS14, DESC_RATEMCS15 65 }; 66 u8 rtw_vht_1s_rates[] = { 67 DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1, 68 DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3, 69 DESC_RATEVHT1SS_MCS4, 
DESC_RATEVHT1SS_MCS5, 70 DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7, 71 DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9 72 }; 73 u8 rtw_vht_2s_rates[] = { 74 DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1, 75 DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3, 76 DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5, 77 DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7, 78 DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9 79 }; 80 u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = { 81 rtw_cck_rates, rtw_ofdm_rates, 82 rtw_ht_1s_rates, rtw_ht_2s_rates, 83 rtw_vht_1s_rates, rtw_vht_2s_rates 84 }; 85 u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = { 86 ARRAY_SIZE(rtw_cck_rates), 87 ARRAY_SIZE(rtw_ofdm_rates), 88 ARRAY_SIZE(rtw_ht_1s_rates), 89 ARRAY_SIZE(rtw_ht_2s_rates), 90 ARRAY_SIZE(rtw_vht_1s_rates), 91 ARRAY_SIZE(rtw_vht_2s_rates) 92 }; 93 static const u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates); 94 static const u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates); 95 static const u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates); 96 static const u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates); 97 static const u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates); 98 static const u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates); 99 100 enum rtw_phy_band_type { 101 PHY_BAND_2G = 0, 102 PHY_BAND_5G = 1, 103 }; 104 105 static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev) 106 { 107 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 108 u8 i, j; 109 110 for (i = 0; i <= RTW_CHANNEL_WIDTH_40; i++) { 111 for (j = 0; j < RTW_RF_PATH_MAX; j++) 112 dm_info->cck_pd_lv[i][j] = CCK_PD_LV0; 113 } 114 115 dm_info->cck_fa_avg = CCK_FA_AVG_RESET; 116 } 117 118 void rtw_phy_init(struct rtw_dev *rtwdev) 119 { 120 struct rtw_chip_info *chip = rtwdev->chip; 121 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 122 u32 addr, mask; 123 124 dm_info->fa_history[3] = 0; 125 dm_info->fa_history[2] = 0; 126 dm_info->fa_history[1] = 0; 127 dm_info->fa_history[0] = 0; 128 dm_info->igi_bitmap = 0; 129 dm_info->igi_history[3] = 0; 130 dm_info->igi_history[2] = 
0; 131 dm_info->igi_history[1] = 0; 132 133 addr = chip->dig[0].addr; 134 mask = chip->dig[0].mask; 135 dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask); 136 rtw_phy_cck_pd_init(rtwdev); 137 } 138 139 void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi) 140 { 141 struct rtw_chip_info *chip = rtwdev->chip; 142 struct rtw_hal *hal = &rtwdev->hal; 143 const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0]; 144 u32 addr, mask; 145 u8 path; 146 147 if (dig_cck) 148 rtw_write32_mask(rtwdev, dig_cck->addr, dig_cck->mask, igi >> 1); 149 150 for (path = 0; path < hal->rf_path_num; path++) { 151 addr = chip->dig[path].addr; 152 mask = chip->dig[path].mask; 153 rtw_write32_mask(rtwdev, addr, mask, igi); 154 } 155 } 156 157 static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev) 158 { 159 struct rtw_chip_info *chip = rtwdev->chip; 160 161 chip->ops->false_alarm_statistics(rtwdev); 162 } 163 164 #define RA_FLOOR_TABLE_SIZE 7 165 #define RA_FLOOR_UP_GAP 3 166 167 static u8 rtw_phy_get_rssi_level(u8 old_level, u8 rssi) 168 { 169 u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100}; 170 u8 new_level = 0; 171 int i; 172 173 for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) 174 if (i >= old_level) 175 table[i] += RA_FLOOR_UP_GAP; 176 177 for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) { 178 if (rssi < table[i]) { 179 new_level = i; 180 break; 181 } 182 } 183 184 return new_level; 185 } 186 187 struct rtw_phy_stat_iter_data { 188 struct rtw_dev *rtwdev; 189 u8 min_rssi; 190 }; 191 192 static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta) 193 { 194 struct rtw_phy_stat_iter_data *iter_data = data; 195 struct rtw_dev *rtwdev = iter_data->rtwdev; 196 struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; 197 u8 rssi; 198 199 rssi = ewma_rssi_read(&si->avg_rssi); 200 si->rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi); 201 202 rtw_fw_send_rssi_info(rtwdev, si); 203 204 iter_data->min_rssi = min_t(u8, rssi, iter_data->min_rssi); 205 } 206 
/* Gather the per-watchdog RSSI statistics: iterate all stations, refresh
 * their RSSI levels, and record the minimum RSSI (and the previous
 * minimum) for the DIG algorithm.
 */
static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	struct rtw_phy_stat_iter_data data = {};

	data.rtwdev = rtwdev;
	data.min_rssi = U8_MAX;	/* start above any real RSSI */
	rtw_iterate_stas_atomic(rtwdev, rtw_phy_stat_rssi_iter, &data);

	dm_info->pre_min_rssi = dm_info->min_rssi;
	dm_info->min_rssi = data.min_rssi;
}

/* Roll the per-watchdog packet counters: keep the last interval's counts
 * and zero the current ones for the next interval.
 */
static void rtw_phy_stat_rate_cnt(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;

	dm_info->last_pkt_count = dm_info->cur_pkt_count;
	memset(&dm_info->cur_pkt_count, 0, sizeof(dm_info->cur_pkt_count));
}

/* Collect all statistics the dynamic mechanisms consume this interval. */
static void rtw_phy_statistics(struct rtw_dev *rtwdev)
{
	rtw_phy_stat_rssi(rtwdev);
	rtw_phy_stat_false_alarm(rtwdev);
	rtw_phy_stat_rate_cnt(rtwdev);
}

/* DIG tuning constants: the PERF set applies while associated
 * (performance mode), the CVRG set while unassociated (coverage mode).
 */
#define DIG_PERF_FA_TH_LOW			250
#define DIG_PERF_FA_TH_HIGH			500
#define DIG_PERF_FA_TH_EXTRA_HIGH		750
#define DIG_PERF_MAX				0x5a
#define DIG_PERF_MID				0x40
#define DIG_CVRG_FA_TH_LOW			2000
#define DIG_CVRG_FA_TH_HIGH			4000
#define DIG_CVRG_FA_TH_EXTRA_HIGH		5000
#define DIG_CVRG_MAX				0x2a
#define DIG_CVRG_MID				0x26
#define DIG_CVRG_MIN				0x1c
#define DIG_RSSI_GAIN_OFFSET			15

/* Detect IGI oscillation ("damping"): if the recent IGI trend bounced
 * up/down while the false-alarm count flipped between high and low, the
 * DIG loop is chasing its own tail.  While damping is active the IGI is
 * frozen; the state is left once min_rssi drifts more than 3 dB away from
 * the RSSI recorded when damping began, or after 20 watchdog rounds.
 * Returns true when DIG should skip this round.
 */
static bool
rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info)
{
	u16 fa_lo = DIG_PERF_FA_TH_LOW;
	u16 fa_hi = DIG_PERF_FA_TH_HIGH;
	u16 *fa_history;
	u8 *igi_history;
	u8 damping_rssi;
	u8 min_rssi;
	u8 diff;
	u8 igi_bitmap;
	bool damping = false;

	min_rssi = dm_info->min_rssi;
	if (dm_info->damping) {
		/* already damping: stay frozen until the environment
		 * changes (RSSI moved) or the hold time expires
		 */
		damping_rssi = dm_info->damping_rssi;
		diff = min_rssi > damping_rssi ? min_rssi - damping_rssi :
						 damping_rssi - min_rssi;
		if (diff > 3 || dm_info->damping_cnt++ > 20) {
			dm_info->damping = false;
			return false;
		}

		return true;
	}

	/* igi_bitmap holds one up/down bit per round, newest in bit 0 */
	igi_history = dm_info->igi_history;
	fa_history = dm_info->fa_history;
	igi_bitmap = dm_info->igi_bitmap & 0xf;
	switch (igi_bitmap) {
	case 5:
		/* down -> up -> down -> up */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[2] > igi_history[3] &&
		    igi_history[0] - igi_history[1] >= 2 &&
		    igi_history[2] - igi_history[3] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] > fa_hi && fa_history[3] < fa_lo)
			damping = true;
		break;
	case 9:
		/* up -> down -> down -> up */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[3] > igi_history[2] &&
		    igi_history[0] - igi_history[1] >= 4 &&
		    igi_history[3] - igi_history[2] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] < fa_lo && fa_history[3] > fa_hi)
			damping = true;
		break;
	default:
		return false;
	}

	if (damping) {
		dm_info->damping = true;
		dm_info->damping_cnt = 0;
		dm_info->damping_rssi = min_rssi;
	}

	return damping;
}

/* Compute the allowed IGI range for this round.  When associated the
 * range follows the weakest peer's RSSI (performance mode); otherwise
 * the fixed coverage-mode bounds are used.
 */
static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info,
				     u8 *upper, u8 *lower, bool linked)
{
	u8 dig_max, dig_min, dig_mid;
	u8 min_rssi;

	if (linked) {
		dig_max = DIG_PERF_MAX;
		dig_mid = DIG_PERF_MID;
		/* 22B=0x1c, 22C=0x20 */
		dig_min = 0x1c;
		min_rssi = max_t(u8, dm_info->min_rssi, dig_min);
	} else {
		dig_max = DIG_CVRG_MAX;
		dig_mid = DIG_CVRG_MID;
		dig_min = DIG_CVRG_MIN;
		min_rssi = dig_min;
	}

	/* DIG MAX should be bounded by minimum RSSI with offset +15 */
	dig_max = min_t(u8, dig_max, min_rssi + DIG_RSSI_GAIN_OFFSET);

	*lower = clamp_t(u8, min_rssi, dig_min, dig_mid);
	*upper = clamp_t(u8, *lower + DIG_RSSI_GAIN_OFFSET, dig_min, dig_max);
}
337 static void rtw_phy_dig_get_threshold(struct rtw_dm_info *dm_info, 338 u16 *fa_th, u8 *step, bool linked) 339 { 340 u8 min_rssi, pre_min_rssi; 341 342 min_rssi = dm_info->min_rssi; 343 pre_min_rssi = dm_info->pre_min_rssi; 344 step[0] = 4; 345 step[1] = 3; 346 step[2] = 2; 347 348 if (linked) { 349 fa_th[0] = DIG_PERF_FA_TH_EXTRA_HIGH; 350 fa_th[1] = DIG_PERF_FA_TH_HIGH; 351 fa_th[2] = DIG_PERF_FA_TH_LOW; 352 if (pre_min_rssi > min_rssi) { 353 step[0] = 6; 354 step[1] = 4; 355 step[2] = 2; 356 } 357 } else { 358 fa_th[0] = DIG_CVRG_FA_TH_EXTRA_HIGH; 359 fa_th[1] = DIG_CVRG_FA_TH_HIGH; 360 fa_th[2] = DIG_CVRG_FA_TH_LOW; 361 } 362 } 363 364 static void rtw_phy_dig_recorder(struct rtw_dm_info *dm_info, u8 igi, u16 fa) 365 { 366 u8 *igi_history; 367 u16 *fa_history; 368 u8 igi_bitmap; 369 bool up; 370 371 igi_bitmap = dm_info->igi_bitmap << 1 & 0xfe; 372 igi_history = dm_info->igi_history; 373 fa_history = dm_info->fa_history; 374 375 up = igi > igi_history[0]; 376 igi_bitmap |= up; 377 378 igi_history[3] = igi_history[2]; 379 igi_history[2] = igi_history[1]; 380 igi_history[1] = igi_history[0]; 381 igi_history[0] = igi; 382 383 fa_history[3] = fa_history[2]; 384 fa_history[2] = fa_history[1]; 385 fa_history[1] = fa_history[0]; 386 fa_history[0] = fa; 387 388 dm_info->igi_bitmap = igi_bitmap; 389 } 390 391 static void rtw_phy_dig(struct rtw_dev *rtwdev) 392 { 393 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 394 u8 upper_bound, lower_bound; 395 u8 pre_igi, cur_igi; 396 u16 fa_th[3], fa_cnt; 397 u8 level; 398 u8 step[3]; 399 bool linked; 400 401 if (test_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags)) 402 return; 403 404 if (rtw_phy_dig_check_damping(dm_info)) 405 return; 406 407 linked = !!rtwdev->sta_cnt; 408 409 fa_cnt = dm_info->total_fa_cnt; 410 pre_igi = dm_info->igi_history[0]; 411 412 rtw_phy_dig_get_threshold(dm_info, fa_th, step, linked); 413 414 /* test the false alarm count from the highest threshold level first, 415 * and increase it by corresponding step 
size 416 * 417 * note that the step size is offset by -2, compensate it afterall 418 */ 419 cur_igi = pre_igi; 420 for (level = 0; level < 3; level++) { 421 if (fa_cnt > fa_th[level]) { 422 cur_igi += step[level]; 423 break; 424 } 425 } 426 cur_igi -= 2; 427 428 /* calculate the upper/lower bound by the minimum rssi we have among 429 * the peers connected with us, meanwhile make sure the igi value does 430 * not beyond the hardware limitation 431 */ 432 rtw_phy_dig_get_boundary(dm_info, &upper_bound, &lower_bound, linked); 433 cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound); 434 435 /* record current igi value and false alarm statistics for further 436 * damping checks, and record the trend of igi values 437 */ 438 rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt); 439 440 if (cur_igi != pre_igi) 441 rtw_phy_dig_write(rtwdev, cur_igi); 442 } 443 444 static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta) 445 { 446 struct rtw_dev *rtwdev = data; 447 struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; 448 449 rtw_update_sta_info(rtwdev, si); 450 } 451 452 static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev) 453 { 454 if (rtwdev->watch_dog_cnt & 0x3) 455 return; 456 457 rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev); 458 } 459 460 static void rtw_phy_dpk_track(struct rtw_dev *rtwdev) 461 { 462 struct rtw_chip_info *chip = rtwdev->chip; 463 464 if (chip->ops->dpk_track) 465 chip->ops->dpk_track(rtwdev); 466 } 467 468 #define CCK_PD_FA_LV1_MIN 1000 469 #define CCK_PD_FA_LV0_MAX 500 470 471 static u8 rtw_phy_cck_pd_lv_unlink(struct rtw_dev *rtwdev) 472 { 473 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 474 u32 cck_fa_avg = dm_info->cck_fa_avg; 475 476 if (cck_fa_avg > CCK_PD_FA_LV1_MIN) 477 return CCK_PD_LV1; 478 479 if (cck_fa_avg < CCK_PD_FA_LV0_MAX) 480 return CCK_PD_LV0; 481 482 return CCK_PD_LV_MAX; 483 } 484 485 #define CCK_PD_IGI_LV4_VAL 0x38 486 #define CCK_PD_IGI_LV3_VAL 0x2a 487 #define 
CCK_PD_IGI_LV2_VAL 0x24 488 #define CCK_PD_RSSI_LV4_VAL 32 489 #define CCK_PD_RSSI_LV3_VAL 32 490 #define CCK_PD_RSSI_LV2_VAL 24 491 492 static u8 rtw_phy_cck_pd_lv_link(struct rtw_dev *rtwdev) 493 { 494 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 495 u8 igi = dm_info->igi_history[0]; 496 u8 rssi = dm_info->min_rssi; 497 u32 cck_fa_avg = dm_info->cck_fa_avg; 498 499 if (igi > CCK_PD_IGI_LV4_VAL && rssi > CCK_PD_RSSI_LV4_VAL) 500 return CCK_PD_LV4; 501 if (igi > CCK_PD_IGI_LV3_VAL && rssi > CCK_PD_RSSI_LV3_VAL) 502 return CCK_PD_LV3; 503 if (igi > CCK_PD_IGI_LV2_VAL || rssi > CCK_PD_RSSI_LV2_VAL) 504 return CCK_PD_LV2; 505 if (cck_fa_avg > CCK_PD_FA_LV1_MIN) 506 return CCK_PD_LV1; 507 if (cck_fa_avg < CCK_PD_FA_LV0_MAX) 508 return CCK_PD_LV0; 509 510 return CCK_PD_LV_MAX; 511 } 512 513 static u8 rtw_phy_cck_pd_lv(struct rtw_dev *rtwdev) 514 { 515 if (!rtw_is_assoc(rtwdev)) 516 return rtw_phy_cck_pd_lv_unlink(rtwdev); 517 else 518 return rtw_phy_cck_pd_lv_link(rtwdev); 519 } 520 521 static void rtw_phy_cck_pd(struct rtw_dev *rtwdev) 522 { 523 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 524 struct rtw_chip_info *chip = rtwdev->chip; 525 u32 cck_fa = dm_info->cck_fa_cnt; 526 u8 level; 527 528 if (rtwdev->hal.current_band_type != RTW_BAND_2G) 529 return; 530 531 if (dm_info->cck_fa_avg == CCK_FA_AVG_RESET) 532 dm_info->cck_fa_avg = cck_fa; 533 else 534 dm_info->cck_fa_avg = (dm_info->cck_fa_avg * 3 + cck_fa) >> 2; 535 536 level = rtw_phy_cck_pd_lv(rtwdev); 537 538 if (level >= CCK_PD_LV_MAX) 539 return; 540 541 if (chip->ops->cck_pd_set) 542 chip->ops->cck_pd_set(rtwdev, level); 543 } 544 545 static void rtw_phy_pwr_track(struct rtw_dev *rtwdev) 546 { 547 rtwdev->chip->ops->pwr_track(rtwdev); 548 } 549 550 void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev) 551 { 552 /* for further calculation */ 553 rtw_phy_statistics(rtwdev); 554 rtw_phy_dig(rtwdev); 555 rtw_phy_cck_pd(rtwdev); 556 rtw_phy_ra_info_update(rtwdev); 557 rtw_phy_dpk_track(rtwdev); 558 
rtw_phy_pwr_track(rtwdev); 559 } 560 561 #define FRAC_BITS 3 562 563 static u8 rtw_phy_power_2_db(s8 power) 564 { 565 if (power <= -100 || power >= 20) 566 return 0; 567 else if (power >= 0) 568 return 100; 569 else 570 return 100 + power; 571 } 572 573 static u64 rtw_phy_db_2_linear(u8 power_db) 574 { 575 u8 i, j; 576 u64 linear; 577 578 if (power_db > 96) 579 power_db = 96; 580 else if (power_db < 1) 581 return 1; 582 583 /* 1dB ~ 96dB */ 584 i = (power_db - 1) >> 3; 585 j = (power_db - 1) - (i << 3); 586 587 linear = db_invert_table[i][j]; 588 linear = i > 2 ? linear << FRAC_BITS : linear; 589 590 return linear; 591 } 592 593 static u8 rtw_phy_linear_2_db(u64 linear) 594 { 595 u8 i; 596 u8 j; 597 u32 dB; 598 599 if (linear >= db_invert_table[11][7]) 600 return 96; /* maximum 96 dB */ 601 602 for (i = 0; i < 12; i++) { 603 if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7]) 604 break; 605 else if (i > 2 && linear <= db_invert_table[i][7]) 606 break; 607 } 608 609 for (j = 0; j < 8; j++) { 610 if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j]) 611 break; 612 else if (i > 2 && linear <= db_invert_table[i][j]) 613 break; 614 } 615 616 if (j == 0 && i == 0) 617 goto end; 618 619 if (j == 0) { 620 if (i != 3) { 621 if (db_invert_table[i][0] - linear > 622 linear - db_invert_table[i - 1][7]) { 623 i = i - 1; 624 j = 7; 625 } 626 } else { 627 if (db_invert_table[3][0] - linear > 628 linear - db_invert_table[2][7]) { 629 i = 2; 630 j = 7; 631 } 632 } 633 } else { 634 if (db_invert_table[i][j] - linear > 635 linear - db_invert_table[i][j - 1]) { 636 j = j - 1; 637 } 638 } 639 end: 640 dB = (i << 3) + j + 1; 641 642 return dB; 643 } 644 645 u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num) 646 { 647 s8 power; 648 u8 power_db; 649 u64 linear; 650 u64 sum = 0; 651 u8 path; 652 653 for (path = 0; path < path_num; path++) { 654 power = rf_power[path]; 655 power_db = rtw_phy_power_2_db(power); 656 linear = rtw_phy_db_2_linear(power_db); 657 sum += 
linear; 658 } 659 660 sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS; 661 switch (path_num) { 662 case 2: 663 sum >>= 1; 664 break; 665 case 3: 666 sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5; 667 break; 668 case 4: 669 sum >>= 2; 670 break; 671 default: 672 break; 673 } 674 675 return rtw_phy_linear_2_db(sum); 676 } 677 678 u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, 679 u32 addr, u32 mask) 680 { 681 struct rtw_hal *hal = &rtwdev->hal; 682 struct rtw_chip_info *chip = rtwdev->chip; 683 const u32 *base_addr = chip->rf_base_addr; 684 u32 val, direct_addr; 685 686 if (rf_path >= hal->rf_phy_num) { 687 rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path); 688 return INV_RF_DATA; 689 } 690 691 addr &= 0xff; 692 direct_addr = base_addr[rf_path] + (addr << 2); 693 mask &= RFREG_MASK; 694 695 val = rtw_read32_mask(rtwdev, direct_addr, mask); 696 697 return val; 698 } 699 700 u32 rtw_phy_read_rf_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, 701 u32 addr, u32 mask) 702 { 703 struct rtw_hal *hal = &rtwdev->hal; 704 struct rtw_chip_info *chip = rtwdev->chip; 705 const struct rtw_rf_sipi_addr *rf_sipi_addr; 706 const struct rtw_rf_sipi_addr *rf_sipi_addr_a; 707 u32 val32; 708 u32 en_pi; 709 u32 r_addr; 710 u32 shift; 711 712 if (rf_path >= hal->rf_phy_num) { 713 rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path); 714 return INV_RF_DATA; 715 } 716 717 if (!chip->rf_sipi_read_addr) { 718 rtw_err(rtwdev, "rf_sipi_read_addr isn't defined\n"); 719 return INV_RF_DATA; 720 } 721 722 rf_sipi_addr = &chip->rf_sipi_read_addr[rf_path]; 723 rf_sipi_addr_a = &chip->rf_sipi_read_addr[RF_PATH_A]; 724 725 addr &= 0xff; 726 727 val32 = rtw_read32(rtwdev, rf_sipi_addr->hssi_2); 728 val32 = (val32 & ~LSSI_READ_ADDR_MASK) | (addr << 23); 729 rtw_write32(rtwdev, rf_sipi_addr->hssi_2, val32); 730 731 /* toggle read edge of path A */ 732 val32 = rtw_read32(rtwdev, rf_sipi_addr_a->hssi_2); 733 rtw_write32(rtwdev, rf_sipi_addr_a->hssi_2, val32 & 
~LSSI_READ_EDGE_MASK); 734 rtw_write32(rtwdev, rf_sipi_addr_a->hssi_2, val32 | LSSI_READ_EDGE_MASK); 735 736 udelay(120); 737 738 en_pi = rtw_read32_mask(rtwdev, rf_sipi_addr->hssi_1, BIT(8)); 739 r_addr = en_pi ? rf_sipi_addr->lssi_read_pi : rf_sipi_addr->lssi_read; 740 741 val32 = rtw_read32_mask(rtwdev, r_addr, LSSI_READ_DATA_MASK); 742 743 shift = __ffs(mask); 744 745 return (val32 & mask) >> shift; 746 } 747 748 bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, 749 u32 addr, u32 mask, u32 data) 750 { 751 struct rtw_hal *hal = &rtwdev->hal; 752 struct rtw_chip_info *chip = rtwdev->chip; 753 u32 *sipi_addr = chip->rf_sipi_addr; 754 u32 data_and_addr; 755 u32 old_data = 0; 756 u32 shift; 757 758 if (rf_path >= hal->rf_phy_num) { 759 rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path); 760 return false; 761 } 762 763 addr &= 0xff; 764 mask &= RFREG_MASK; 765 766 if (mask != RFREG_MASK) { 767 old_data = chip->ops->read_rf(rtwdev, rf_path, addr, RFREG_MASK); 768 769 if (old_data == INV_RF_DATA) { 770 rtw_err(rtwdev, "Write fail, rf is disabled\n"); 771 return false; 772 } 773 774 shift = __ffs(mask); 775 data = ((old_data) & (~mask)) | (data << shift); 776 } 777 778 data_and_addr = ((addr << 20) | (data & 0x000fffff)) & 0x0fffffff; 779 780 rtw_write32(rtwdev, sipi_addr[rf_path], data_and_addr); 781 782 udelay(13); 783 784 return true; 785 } 786 787 bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, 788 u32 addr, u32 mask, u32 data) 789 { 790 struct rtw_hal *hal = &rtwdev->hal; 791 struct rtw_chip_info *chip = rtwdev->chip; 792 const u32 *base_addr = chip->rf_base_addr; 793 u32 direct_addr; 794 795 if (rf_path >= hal->rf_phy_num) { 796 rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path); 797 return false; 798 } 799 800 addr &= 0xff; 801 direct_addr = base_addr[rf_path] + (addr << 2); 802 mask &= RFREG_MASK; 803 804 rtw_write32_mask(rtwdev, direct_addr, mask, data); 805 806 udelay(1); 807 808 return true; 
809 } 810 811 bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, 812 u32 addr, u32 mask, u32 data) 813 { 814 if (addr != 0x00) 815 return rtw_phy_write_rf_reg(rtwdev, rf_path, addr, mask, data); 816 817 return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data); 818 } 819 820 void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg) 821 { 822 struct rtw_hal *hal = &rtwdev->hal; 823 struct rtw_efuse *efuse = &rtwdev->efuse; 824 struct rtw_phy_cond cond = {0}; 825 826 cond.cut = hal->cut_version ? hal->cut_version : 15; 827 cond.pkg = pkg ? pkg : 15; 828 cond.plat = 0x04; 829 cond.rfe = efuse->rfe_option; 830 831 switch (rtw_hci_type(rtwdev)) { 832 case RTW_HCI_TYPE_USB: 833 cond.intf = INTF_USB; 834 break; 835 case RTW_HCI_TYPE_SDIO: 836 cond.intf = INTF_SDIO; 837 break; 838 case RTW_HCI_TYPE_PCIE: 839 default: 840 cond.intf = INTF_PCIE; 841 break; 842 } 843 844 hal->phy_cond = cond; 845 846 rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond)); 847 } 848 849 static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond) 850 { 851 struct rtw_hal *hal = &rtwdev->hal; 852 struct rtw_phy_cond drv_cond = hal->phy_cond; 853 854 if (cond.cut && cond.cut != drv_cond.cut) 855 return false; 856 857 if (cond.pkg && cond.pkg != drv_cond.pkg) 858 return false; 859 860 if (cond.intf && cond.intf != drv_cond.intf) 861 return false; 862 863 if (cond.rfe != drv_cond.rfe) 864 return false; 865 866 return true; 867 } 868 869 void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl) 870 { 871 const union phy_table_tile *p = tbl->data; 872 const union phy_table_tile *end = p + tbl->size / 2; 873 struct rtw_phy_cond pos_cond = {0}; 874 bool is_matched = true, is_skipped = false; 875 876 BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair)); 877 878 for (; p < end; p++) { 879 if (p->cond.pos) { 880 switch (p->cond.branch) { 881 case BRANCH_ENDIF: 882 is_matched = true; 
883 is_skipped = false; 884 break; 885 case BRANCH_ELSE: 886 is_matched = is_skipped ? false : true; 887 break; 888 case BRANCH_IF: 889 case BRANCH_ELIF: 890 default: 891 pos_cond = p->cond; 892 break; 893 } 894 } else if (p->cond.neg) { 895 if (!is_skipped) { 896 if (check_positive(rtwdev, pos_cond)) { 897 is_matched = true; 898 is_skipped = true; 899 } else { 900 is_matched = false; 901 is_skipped = false; 902 } 903 } else { 904 is_matched = false; 905 } 906 } else if (is_matched) { 907 (*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data); 908 } 909 } 910 } 911 912 #define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8)) 913 914 static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i) 915 { 916 if (rtwdev->chip->is_pwr_by_rate_dec) 917 return bcd_to_dec_pwr_by_rate(hex, i); 918 919 return (hex >> (i * 8)) & 0xFF; 920 } 921 922 static void 923 rtw_phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev, 924 u32 addr, u32 mask, u32 val, u8 *rate, 925 u8 *pwr_by_rate, u8 *rate_num) 926 { 927 int i; 928 929 switch (addr) { 930 case 0xE00: 931 case 0x830: 932 rate[0] = DESC_RATE6M; 933 rate[1] = DESC_RATE9M; 934 rate[2] = DESC_RATE12M; 935 rate[3] = DESC_RATE18M; 936 for (i = 0; i < 4; ++i) 937 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 938 *rate_num = 4; 939 break; 940 case 0xE04: 941 case 0x834: 942 rate[0] = DESC_RATE24M; 943 rate[1] = DESC_RATE36M; 944 rate[2] = DESC_RATE48M; 945 rate[3] = DESC_RATE54M; 946 for (i = 0; i < 4; ++i) 947 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 948 *rate_num = 4; 949 break; 950 case 0xE08: 951 rate[0] = DESC_RATE1M; 952 pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1); 953 *rate_num = 1; 954 break; 955 case 0x86C: 956 if (mask == 0xffffff00) { 957 rate[0] = DESC_RATE2M; 958 rate[1] = DESC_RATE5_5M; 959 rate[2] = DESC_RATE11M; 960 for (i = 1; i < 4; ++i) 961 pwr_by_rate[i - 1] = 962 tbl_to_dec_pwr_by_rate(rtwdev, val, i); 963 *rate_num = 3; 964 } else if (mask == 0x000000ff) { 965 
rate[0] = DESC_RATE11M; 966 pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0); 967 *rate_num = 1; 968 } 969 break; 970 case 0xE10: 971 case 0x83C: 972 rate[0] = DESC_RATEMCS0; 973 rate[1] = DESC_RATEMCS1; 974 rate[2] = DESC_RATEMCS2; 975 rate[3] = DESC_RATEMCS3; 976 for (i = 0; i < 4; ++i) 977 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 978 *rate_num = 4; 979 break; 980 case 0xE14: 981 case 0x848: 982 rate[0] = DESC_RATEMCS4; 983 rate[1] = DESC_RATEMCS5; 984 rate[2] = DESC_RATEMCS6; 985 rate[3] = DESC_RATEMCS7; 986 for (i = 0; i < 4; ++i) 987 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 988 *rate_num = 4; 989 break; 990 case 0xE18: 991 case 0x84C: 992 rate[0] = DESC_RATEMCS8; 993 rate[1] = DESC_RATEMCS9; 994 rate[2] = DESC_RATEMCS10; 995 rate[3] = DESC_RATEMCS11; 996 for (i = 0; i < 4; ++i) 997 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 998 *rate_num = 4; 999 break; 1000 case 0xE1C: 1001 case 0x868: 1002 rate[0] = DESC_RATEMCS12; 1003 rate[1] = DESC_RATEMCS13; 1004 rate[2] = DESC_RATEMCS14; 1005 rate[3] = DESC_RATEMCS15; 1006 for (i = 0; i < 4; ++i) 1007 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1008 *rate_num = 4; 1009 break; 1010 case 0x838: 1011 rate[0] = DESC_RATE1M; 1012 rate[1] = DESC_RATE2M; 1013 rate[2] = DESC_RATE5_5M; 1014 for (i = 1; i < 4; ++i) 1015 pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev, 1016 val, i); 1017 *rate_num = 3; 1018 break; 1019 case 0xC20: 1020 case 0xE20: 1021 case 0x1820: 1022 case 0x1A20: 1023 rate[0] = DESC_RATE1M; 1024 rate[1] = DESC_RATE2M; 1025 rate[2] = DESC_RATE5_5M; 1026 rate[3] = DESC_RATE11M; 1027 for (i = 0; i < 4; ++i) 1028 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1029 *rate_num = 4; 1030 break; 1031 case 0xC24: 1032 case 0xE24: 1033 case 0x1824: 1034 case 0x1A24: 1035 rate[0] = DESC_RATE6M; 1036 rate[1] = DESC_RATE9M; 1037 rate[2] = DESC_RATE12M; 1038 rate[3] = DESC_RATE18M; 1039 for (i = 0; i < 4; ++i) 1040 pwr_by_rate[i] = 
tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1041 *rate_num = 4; 1042 break; 1043 case 0xC28: 1044 case 0xE28: 1045 case 0x1828: 1046 case 0x1A28: 1047 rate[0] = DESC_RATE24M; 1048 rate[1] = DESC_RATE36M; 1049 rate[2] = DESC_RATE48M; 1050 rate[3] = DESC_RATE54M; 1051 for (i = 0; i < 4; ++i) 1052 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1053 *rate_num = 4; 1054 break; 1055 case 0xC2C: 1056 case 0xE2C: 1057 case 0x182C: 1058 case 0x1A2C: 1059 rate[0] = DESC_RATEMCS0; 1060 rate[1] = DESC_RATEMCS1; 1061 rate[2] = DESC_RATEMCS2; 1062 rate[3] = DESC_RATEMCS3; 1063 for (i = 0; i < 4; ++i) 1064 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1065 *rate_num = 4; 1066 break; 1067 case 0xC30: 1068 case 0xE30: 1069 case 0x1830: 1070 case 0x1A30: 1071 rate[0] = DESC_RATEMCS4; 1072 rate[1] = DESC_RATEMCS5; 1073 rate[2] = DESC_RATEMCS6; 1074 rate[3] = DESC_RATEMCS7; 1075 for (i = 0; i < 4; ++i) 1076 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1077 *rate_num = 4; 1078 break; 1079 case 0xC34: 1080 case 0xE34: 1081 case 0x1834: 1082 case 0x1A34: 1083 rate[0] = DESC_RATEMCS8; 1084 rate[1] = DESC_RATEMCS9; 1085 rate[2] = DESC_RATEMCS10; 1086 rate[3] = DESC_RATEMCS11; 1087 for (i = 0; i < 4; ++i) 1088 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1089 *rate_num = 4; 1090 break; 1091 case 0xC38: 1092 case 0xE38: 1093 case 0x1838: 1094 case 0x1A38: 1095 rate[0] = DESC_RATEMCS12; 1096 rate[1] = DESC_RATEMCS13; 1097 rate[2] = DESC_RATEMCS14; 1098 rate[3] = DESC_RATEMCS15; 1099 for (i = 0; i < 4; ++i) 1100 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1101 *rate_num = 4; 1102 break; 1103 case 0xC3C: 1104 case 0xE3C: 1105 case 0x183C: 1106 case 0x1A3C: 1107 rate[0] = DESC_RATEVHT1SS_MCS0; 1108 rate[1] = DESC_RATEVHT1SS_MCS1; 1109 rate[2] = DESC_RATEVHT1SS_MCS2; 1110 rate[3] = DESC_RATEVHT1SS_MCS3; 1111 for (i = 0; i < 4; ++i) 1112 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1113 *rate_num = 4; 1114 break; 1115 case 0xC40: 
1116 case 0xE40: 1117 case 0x1840: 1118 case 0x1A40: 1119 rate[0] = DESC_RATEVHT1SS_MCS4; 1120 rate[1] = DESC_RATEVHT1SS_MCS5; 1121 rate[2] = DESC_RATEVHT1SS_MCS6; 1122 rate[3] = DESC_RATEVHT1SS_MCS7; 1123 for (i = 0; i < 4; ++i) 1124 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1125 *rate_num = 4; 1126 break; 1127 case 0xC44: 1128 case 0xE44: 1129 case 0x1844: 1130 case 0x1A44: 1131 rate[0] = DESC_RATEVHT1SS_MCS8; 1132 rate[1] = DESC_RATEVHT1SS_MCS9; 1133 rate[2] = DESC_RATEVHT2SS_MCS0; 1134 rate[3] = DESC_RATEVHT2SS_MCS1; 1135 for (i = 0; i < 4; ++i) 1136 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1137 *rate_num = 4; 1138 break; 1139 case 0xC48: 1140 case 0xE48: 1141 case 0x1848: 1142 case 0x1A48: 1143 rate[0] = DESC_RATEVHT2SS_MCS2; 1144 rate[1] = DESC_RATEVHT2SS_MCS3; 1145 rate[2] = DESC_RATEVHT2SS_MCS4; 1146 rate[3] = DESC_RATEVHT2SS_MCS5; 1147 for (i = 0; i < 4; ++i) 1148 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1149 *rate_num = 4; 1150 break; 1151 case 0xC4C: 1152 case 0xE4C: 1153 case 0x184C: 1154 case 0x1A4C: 1155 rate[0] = DESC_RATEVHT2SS_MCS6; 1156 rate[1] = DESC_RATEVHT2SS_MCS7; 1157 rate[2] = DESC_RATEVHT2SS_MCS8; 1158 rate[3] = DESC_RATEVHT2SS_MCS9; 1159 for (i = 0; i < 4; ++i) 1160 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1161 *rate_num = 4; 1162 break; 1163 case 0xCD8: 1164 case 0xED8: 1165 case 0x18D8: 1166 case 0x1AD8: 1167 rate[0] = DESC_RATEMCS16; 1168 rate[1] = DESC_RATEMCS17; 1169 rate[2] = DESC_RATEMCS18; 1170 rate[3] = DESC_RATEMCS19; 1171 for (i = 0; i < 4; ++i) 1172 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1173 *rate_num = 4; 1174 break; 1175 case 0xCDC: 1176 case 0xEDC: 1177 case 0x18DC: 1178 case 0x1ADC: 1179 rate[0] = DESC_RATEMCS20; 1180 rate[1] = DESC_RATEMCS21; 1181 rate[2] = DESC_RATEMCS22; 1182 rate[3] = DESC_RATEMCS23; 1183 for (i = 0; i < 4; ++i) 1184 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1185 *rate_num = 4; 1186 break; 1187 case 0xCE0: 
1188 case 0xEE0: 1189 case 0x18E0: 1190 case 0x1AE0: 1191 rate[0] = DESC_RATEVHT3SS_MCS0; 1192 rate[1] = DESC_RATEVHT3SS_MCS1; 1193 rate[2] = DESC_RATEVHT3SS_MCS2; 1194 rate[3] = DESC_RATEVHT3SS_MCS3; 1195 for (i = 0; i < 4; ++i) 1196 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1197 *rate_num = 4; 1198 break; 1199 case 0xCE4: 1200 case 0xEE4: 1201 case 0x18E4: 1202 case 0x1AE4: 1203 rate[0] = DESC_RATEVHT3SS_MCS4; 1204 rate[1] = DESC_RATEVHT3SS_MCS5; 1205 rate[2] = DESC_RATEVHT3SS_MCS6; 1206 rate[3] = DESC_RATEVHT3SS_MCS7; 1207 for (i = 0; i < 4; ++i) 1208 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1209 *rate_num = 4; 1210 break; 1211 case 0xCE8: 1212 case 0xEE8: 1213 case 0x18E8: 1214 case 0x1AE8: 1215 rate[0] = DESC_RATEVHT3SS_MCS8; 1216 rate[1] = DESC_RATEVHT3SS_MCS9; 1217 for (i = 0; i < 2; ++i) 1218 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1219 *rate_num = 2; 1220 break; 1221 default: 1222 rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr); 1223 break; 1224 } 1225 } 1226 1227 static void rtw_phy_store_tx_power_by_rate(struct rtw_dev *rtwdev, 1228 u32 band, u32 rfpath, u32 txnum, 1229 u32 regaddr, u32 bitmask, u32 data) 1230 { 1231 struct rtw_hal *hal = &rtwdev->hal; 1232 u8 rate_num = 0; 1233 u8 rate; 1234 u8 rates[RTW_RF_PATH_MAX] = {0}; 1235 s8 offset; 1236 s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0}; 1237 int i; 1238 1239 rtw_phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data, 1240 rates, pwr_by_rate, &rate_num); 1241 1242 if (WARN_ON(rfpath >= RTW_RF_PATH_MAX || 1243 (band != PHY_BAND_2G && band != PHY_BAND_5G) || 1244 rate_num > RTW_RF_PATH_MAX)) 1245 return; 1246 1247 for (i = 0; i < rate_num; i++) { 1248 offset = pwr_by_rate[i]; 1249 rate = rates[i]; 1250 if (band == PHY_BAND_2G) 1251 hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset; 1252 else if (band == PHY_BAND_5G) 1253 hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset; 1254 else 1255 continue; 1256 } 1257 } 1258 1259 void 
rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl) 1260 { 1261 const struct rtw_phy_pg_cfg_pair *p = tbl->data; 1262 const struct rtw_phy_pg_cfg_pair *end = p + tbl->size; 1263 1264 for (; p < end; p++) { 1265 if (p->addr == 0xfe || p->addr == 0xffe) { 1266 msleep(50); 1267 continue; 1268 } 1269 rtw_phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path, 1270 p->tx_num, p->addr, p->bitmask, 1271 p->data); 1272 } 1273 } 1274 1275 static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = { 1276 36, 38, 40, 42, 44, 46, 48, /* Band 1 */ 1277 52, 54, 56, 58, 60, 62, 64, /* Band 2 */ 1278 100, 102, 104, 106, 108, 110, 112, /* Band 3 */ 1279 116, 118, 120, 122, 124, 126, 128, /* Band 3 */ 1280 132, 134, 136, 138, 140, 142, 144, /* Band 3 */ 1281 149, 151, 153, 155, 157, 159, 161, /* Band 4 */ 1282 165, 167, 169, 171, 173, 175, 177}; /* Band 4 */ 1283 1284 static int rtw_channel_to_idx(u8 band, u8 channel) 1285 { 1286 int ch_idx; 1287 u8 n_channel; 1288 1289 if (band == PHY_BAND_2G) { 1290 ch_idx = channel - 1; 1291 n_channel = RTW_MAX_CHANNEL_NUM_2G; 1292 } else if (band == PHY_BAND_5G) { 1293 n_channel = RTW_MAX_CHANNEL_NUM_5G; 1294 for (ch_idx = 0; ch_idx < n_channel; ch_idx++) 1295 if (rtw_channel_idx_5g[ch_idx] == channel) 1296 break; 1297 } else { 1298 return -1; 1299 } 1300 1301 if (ch_idx >= n_channel) 1302 return -1; 1303 1304 return ch_idx; 1305 } 1306 1307 static void rtw_phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band, 1308 u8 bw, u8 rs, u8 ch, s8 pwr_limit) 1309 { 1310 struct rtw_hal *hal = &rtwdev->hal; 1311 u8 max_power_index = rtwdev->chip->max_power_index; 1312 s8 ww; 1313 int ch_idx; 1314 1315 pwr_limit = clamp_t(s8, pwr_limit, 1316 -max_power_index, max_power_index); 1317 ch_idx = rtw_channel_to_idx(band, ch); 1318 1319 if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX || 1320 rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) { 1321 WARN(1, 1322 "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, 
pwr_limit=%d\n", 1323 regd, band, bw, rs, ch_idx, pwr_limit); 1324 return; 1325 } 1326 1327 if (band == PHY_BAND_2G) { 1328 hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit; 1329 ww = hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx]; 1330 ww = min_t(s8, ww, pwr_limit); 1331 hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx] = ww; 1332 } else if (band == PHY_BAND_5G) { 1333 hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit; 1334 ww = hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx]; 1335 ww = min_t(s8, ww, pwr_limit); 1336 hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx] = ww; 1337 } 1338 } 1339 1340 /* cross-reference 5G power limits if values are not assigned */ 1341 static void 1342 rtw_xref_5g_txpwr_lmt(struct rtw_dev *rtwdev, u8 regd, 1343 u8 bw, u8 ch_idx, u8 rs_ht, u8 rs_vht) 1344 { 1345 struct rtw_hal *hal = &rtwdev->hal; 1346 u8 max_power_index = rtwdev->chip->max_power_index; 1347 s8 lmt_ht = hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx]; 1348 s8 lmt_vht = hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx]; 1349 1350 if (lmt_ht == lmt_vht) 1351 return; 1352 1353 if (lmt_ht == max_power_index) 1354 hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx] = lmt_vht; 1355 1356 else if (lmt_vht == max_power_index) 1357 hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx] = lmt_ht; 1358 } 1359 1360 /* cross-reference power limits for ht and vht */ 1361 static void 1362 rtw_xref_txpwr_lmt_by_rs(struct rtw_dev *rtwdev, u8 regd, u8 bw, u8 ch_idx) 1363 { 1364 u8 rs_idx, rs_ht, rs_vht; 1365 u8 rs_cmp[2][2] = {{RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_VHT_1S}, 1366 {RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_2S} }; 1367 1368 for (rs_idx = 0; rs_idx < 2; rs_idx++) { 1369 rs_ht = rs_cmp[rs_idx][0]; 1370 rs_vht = rs_cmp[rs_idx][1]; 1371 1372 rtw_xref_5g_txpwr_lmt(rtwdev, regd, bw, ch_idx, rs_ht, rs_vht); 1373 } 1374 } 1375 1376 /* cross-reference power limits for 5G channels */ 1377 static void 1378 rtw_xref_5g_txpwr_lmt_by_ch(struct rtw_dev *rtwdev, u8 regd, u8 bw) 1379 { 
	u8 ch_idx;

	for (ch_idx = 0; ch_idx < RTW_MAX_CHANNEL_NUM_5G; ch_idx++)
		rtw_xref_txpwr_lmt_by_rs(rtwdev, regd, bw, ch_idx);
}

/* cross-reference power limits for 20/40M bandwidth */
static void
rtw_xref_txpwr_lmt_by_bw(struct rtw_dev *rtwdev, u8 regd)
{
	u8 bw;

	for (bw = RTW_CHANNEL_WIDTH_20; bw <= RTW_CHANNEL_WIDTH_40; bw++)
		rtw_xref_5g_txpwr_lmt_by_ch(rtwdev, regd, bw);
}

/* cross-reference power limits */
static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev)
{
	u8 regd;

	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		rtw_xref_txpwr_lmt_by_bw(rtwdev, regd);
}

/* Load a TX power limit table: record every entry, then cross-reference
 * HT/VHT limits so unset sections inherit from their counterparts.
 */
void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
			     const struct rtw_table *tbl)
{
	const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;
	const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;

	for (; p < end; p++) {
		rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band,
					   p->bw, p->rs, p->ch, p->txpwr_lmt);
	}

	rtw_xref_txpwr_lmt(rtwdev);
}

/* table-load callback: MAC registers take 8-bit writes */
void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write8(rtwdev, addr, data);
}

/* table-load callback: AGC registers take 32-bit writes */
void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write32(rtwdev, addr, data);
}

/* table-load callback for BB registers; pseudo-addresses 0xf9-0xfe encode
 * delays instead of register writes
 */
void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		    u32 addr, u32 data)
{
	if (addr == 0xfe)
		msleep(50);
	else if (addr == 0xfd)
		mdelay(5);
	else if (addr == 0xfc)
		mdelay(1);
	else if (addr == 0xfb)
		usleep_range(50, 60);
	else if (addr == 0xfa)
		udelay(5);
	else if (addr == 0xf9)
		udelay(1);
	else
		rtw_write32(rtwdev, addr, data);
}

/* table-load callback for RF registers; 0xffe/0xfe are delay markers */
void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		    u32 addr, u32 data)
{
	if (addr == 0xffe) {
		msleep(50);
	} else if (addr == 0xfe) {
		usleep_range(100, 110);
	} else {
		rtw_write_rf(rtwdev, tbl->rf_path, addr, RFREG_MASK, data);
		/* settle time between consecutive RF register writes */
		udelay(1);
	}
}

/* Load the RF calibration (RFK) init table, if the chip provides one, and
 * mark DPK power as enabled afterwards.
 */
static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;

	if (!chip->rfk_init_tbl)
		return;

	/* NOTE(review): presumably these 0x1e24/0x1cd0 bit writes power up
	 * the calibration blocks before the table load — confirm against
	 * the vendor reference driver
	 */
	rtw_write32_mask(rtwdev, 0x1e24, BIT(17), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(28), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(29), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(30), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(31), 0x0);

	rtw_load_table(rtwdev, chip->rfk_init_tbl);

	dpk_info->is_dpk_pwr_on = true;
}

/* Load all chip parameter tables: MAC, BB, AGC, RFK, then one RF table
 * per RF path.
 */
void rtw_phy_load_tables(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 rf_path;

	rtw_load_table(rtwdev, chip->mac_tbl);
	rtw_load_table(rtwdev, chip->bb_tbl);
	rtw_load_table(rtwdev, chip->agc_tbl);
	rtw_load_rfk_table(rtwdev);

	for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
		const struct rtw_table *tbl;

		tbl = chip->rf_tbl[rf_path];
		rtw_load_table(rtwdev, tbl);
	}
}

/* Map a channel to its TX power base-index group; 2.4G and 5G channels
 * share the group numbering.
 */
static u8 rtw_get_channel_group(u8 channel)
{
	switch (channel) {
	default:
		WARN_ON(1);
		/* fall through */
	case 1:
	case 2:
	case 36:
	case 38:
	case 40:
	case 42:
		return 0;
	case 3:
	case 4:
	case 5:
	case 44:
	case 46:
	case 48:
	case 50:
		return 1;
	case 6:
	case 7:
	case 8:
	case 52:
	case 54:
	case 56:
	case 58:
		return 2;
	case 9:
	case 10:
	case 11:
	case 60:
	case 62:
	case 64:
		return 3;
	case 12:
	case 13:
	case 100:
	case 102:
	case 104:
	case 106:
		return 4;
	case 14:
	case 108:
	case 110:
	case
112:
	case 114:
		return 5;
	case 116:
	case 118:
	case 120:
	case 122:
		return 6;
	case 124:
	case 126:
	case 128:
	case 130:
		return 7;
	case 132:
	case 134:
	case 136:
	case 138:
		return 8;
	case 140:
	case 142:
	case 144:
		return 9;
	case 149:
	case 151:
	case 153:
	case 155:
		return 10;
	case 157:
	case 159:
	case 161:
		return 11;
	case 165:
	case 167:
	case 169:
	case 171:
		return 12;
	case 173:
	case 175:
	case 177:
		return 13;
	}
}

/* Power-index offset applied to rates on which DPD is disabled; returns 0
 * when the chip does not support en_dis_dpd.
 */
static s8 rtw_phy_get_dis_dpd_by_rate_diff(struct rtw_dev *rtwdev, u16 rate)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	s8 dpd_diff = 0;

	if (!chip->en_dis_dpd)
		return 0;

/* subtract 6 (scaled by the chip's TX gain factor) when the rate is in the
 * chip's dpd_ratemask
 */
#define RTW_DPD_RATE_CHECK(_rate)					\
	case DESC_RATE ## _rate:					\
	if (DIS_DPD_RATE ## _rate & chip->dpd_ratemask)			\
		dpd_diff = -6 * chip->txgi_factor;			\
	break

	switch (rate) {
	RTW_DPD_RATE_CHECK(6M);
	RTW_DPD_RATE_CHECK(9M);
	RTW_DPD_RATE_CHECK(MCS0);
	RTW_DPD_RATE_CHECK(MCS1);
	RTW_DPD_RATE_CHECK(MCS8);
	RTW_DPD_RATE_CHECK(MCS9);
	RTW_DPD_RATE_CHECK(VHT1SS_MCS0);
	RTW_DPD_RATE_CHECK(VHT1SS_MCS1);
	RTW_DPD_RATE_CHECK(VHT2SS_MCS0);
	RTW_DPD_RATE_CHECK(VHT2SS_MCS1);
	}
#undef RTW_DPD_RATE_CHECK

	return dpd_diff;
}

/* Compute the 2.4G efuse base TX power index for a rate/bandwidth/group:
 * start from the CCK or BW40 base and apply the per-section diffs.
 */
static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
					struct rtw_2g_txpwr_idx *pwr_idx_2g,
					enum rtw_bandwidth bandwidth,
					u8 rate, u8 group)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 tx_power;
	bool mcs_rate;
	bool above_2ss;
	u8 factor = chip->txgi_factor;

	/* CCK rates have their own base; everything else starts from bw40 */
	if (rate <= DESC_RATE11M)
		tx_power = pwr_idx_2g->cck_base[group];
	else
		tx_power = pwr_idx_2g->bw40_base[group];

	if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
		tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor;

	mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
		   (rate >= DESC_RATEVHT1SS_MCS0 &&
		    rate <= DESC_RATEVHT2SS_MCS9);
	above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
		    (rate >= DESC_RATEVHT2SS_MCS0);

	if (!mcs_rate)
		return tx_power;

	switch (bandwidth) {
	default:
		WARN_ON(1);
		/* fall through */
	case RTW_CHANNEL_WIDTH_20:
		tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
		if (above_2ss)
			tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor;
		break;
	case RTW_CHANNEL_WIDTH_40:
		/* bw40 is the base power */
		if (above_2ss)
			tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor;
		break;
	}

	return tx_power;
}

/* 5G counterpart of the above; all non-CCK rates start from the bw40 base */
static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
					struct rtw_5g_txpwr_idx *pwr_idx_5g,
					enum rtw_bandwidth bandwidth,
					u8 rate, u8 group)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 tx_power;
	u8 upper, lower;
	bool mcs_rate;
	bool above_2ss;
	u8 factor = chip->txgi_factor;

	tx_power = pwr_idx_5g->bw40_base[group];

	mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
		   (rate >= DESC_RATEVHT1SS_MCS0 &&
		    rate <= DESC_RATEVHT2SS_MCS9);
	above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
		    (rate >= DESC_RATEVHT2SS_MCS0);

	if (!mcs_rate) {
		tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor;
		return tx_power;
	}

	switch (bandwidth) {
	default:
		WARN_ON(1);
		/* fall through */
	case RTW_CHANNEL_WIDTH_20:
		tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
		if (above_2ss)
			tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor;
		break;
	case RTW_CHANNEL_WIDTH_40:
		/* bw40 is the base power */
		if (above_2ss)
			tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor;
		break;
	case
RTW_CHANNEL_WIDTH_80:
		/* the base idx of bw80 is the average of bw40+/bw40- */
		lower = pwr_idx_5g->bw40_base[group];
		upper = pwr_idx_5g->bw40_base[group + 1];

		tx_power = (lower + upper) / 2;
		tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor;
		if (above_2ss)
			tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor;
		break;
	}

	return tx_power;
}

/* Look up the regulatory TX power limit for a rate on a channel, taking the
 * minimum over every bandwidth from 20M up to @bw. Returns the chip's
 * max_power_index (i.e. "no limit") for RTW_REGD_WW-exceeding regds or on
 * invalid arguments.
 */
static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
				     enum rtw_bandwidth bw, u8 rf_path,
				     u8 rate, u8 channel, u8 regd)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 *cch_by_bw = hal->cch_by_bw;
	s8 power_limit = (s8)rtwdev->chip->max_power_index;
	u8 rs;
	int ch_idx;
	u8 cur_bw, cur_ch;
	s8 cur_lmt;

	if (regd > RTW_REGD_WW)
		return power_limit;

	/* classify the rate into its rate section */
	if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
		rs = RTW_RATE_SECTION_CCK;
	else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
		rs = RTW_RATE_SECTION_OFDM;
	else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
		rs = RTW_RATE_SECTION_HT_1S;
	else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
		rs = RTW_RATE_SECTION_HT_2S;
	else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
		rs = RTW_RATE_SECTION_VHT_1S;
	else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
		rs = RTW_RATE_SECTION_VHT_2S;
	else
		goto err;

	/* only 20M BW with cck and ofdm */
	if (rs == RTW_RATE_SECTION_CCK || rs == RTW_RATE_SECTION_OFDM)
		bw = RTW_CHANNEL_WIDTH_20;

	/* only 20/40M BW with ht */
	if (rs == RTW_RATE_SECTION_HT_1S || rs == RTW_RATE_SECTION_HT_2S)
		bw = min_t(u8, bw, RTW_CHANNEL_WIDTH_40);

	/* select min power limit among [20M BW ~ current BW] */
	for (cur_bw = RTW_CHANNEL_WIDTH_20; cur_bw <= bw; cur_bw++) {
		cur_ch = cch_by_bw[cur_bw];

		ch_idx = rtw_channel_to_idx(band, cur_ch);
		if (ch_idx < 0)
			goto err;

		/* channel number decides which band table to read */
		cur_lmt = cur_ch <= RTW_MAX_CHANNEL_NUM_2G ?
			hal->tx_pwr_limit_2g[regd][cur_bw][rs][ch_idx] :
			hal->tx_pwr_limit_5g[regd][cur_bw][rs][ch_idx];

		power_limit = min_t(s8, cur_lmt, power_limit);
	}

	return power_limit;

err:
	WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
	     band, bw, rf_path, rate, channel);
	return (s8)rtwdev->chip->max_power_index;
}

/* Fill @pwr_param with the three components of a TX power decision:
 * efuse base index, power-by-rate offset, and regulatory limit.
 */
void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
			     u8 ch, u8 regd, struct rtw_power_params *pwr_param)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_txpwr_idx *pwr_idx;
	u8 group, band;
	u8 *base = &pwr_param->pwr_base;
	s8 *offset = &pwr_param->pwr_offset;
	s8 *limit = &pwr_param->pwr_limit;

	pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
	group = rtw_get_channel_group(ch);

	/* base power index for 2.4G/5G */
	if (IS_CH_2G_BAND(ch)) {
		band = PHY_BAND_2G;
		*base = rtw_phy_get_2g_tx_power_index(rtwdev,
						      &pwr_idx->pwr_idx_2g,
						      bw, rate, group);
		*offset = hal->tx_pwr_by_rate_offset_2g[path][rate];
	} else {
		band = PHY_BAND_5G;
		*base = rtw_phy_get_5g_tx_power_index(rtwdev,
						      &pwr_idx->pwr_idx_5g,
						      bw, rate, group);
		*offset = hal->tx_pwr_by_rate_offset_5g[path][rate];
	}

	*limit = rtw_phy_get_tx_power_limit(rtwdev, band, bw, path,
					    rate, ch, regd);
}

/* Final TX power index = base + min(offset, limit) [+ DPD diff], capped at
 * the chip's max_power_index.
 */
u8
rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate,
			   enum rtw_bandwidth bandwidth, u8 channel, u8 regd)
{
	struct rtw_power_params pwr_param = {0};
	u8 tx_power;
	s8 offset;

	rtw_get_tx_power_params(rtwdev, rf_path, rate, bandwidth,
				channel, regd, &pwr_param);

	tx_power = pwr_param.pwr_base;
	offset = min_t(s8, pwr_param.pwr_offset, pwr_param.pwr_limit);

	if (rtwdev->chip->en_dis_dpd)
		offset +=
rtw_phy_get_dis_dpd_by_rate_diff(rtwdev, rate);

	tx_power += offset;

	/* never exceed the chip's maximum programmable power index */
	if (tx_power > rtwdev->chip->max_power_index)
		tx_power = rtwdev->chip->max_power_index;

	return tx_power;
}

/* Compute and cache the TX power index of every rate in one rate section
 * into hal->tx_pwr_tbl[path][rate].
 */
static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev,
					     u8 ch, u8 path, u8 rs)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 regd = rtwdev->regd.txpwr_regd;
	u8 *rates;
	u8 size;
	u8 rate;
	u8 pwr_idx;
	u8 bw;
	int i;

	if (rs >= RTW_RATE_SECTION_MAX)
		return;

	rates = rtw_rate_section[rs];
	size = rtw_rate_size[rs];
	bw = hal->current_band_width;
	for (i = 0; i < size; i++) {
		rate = rates[i];
		pwr_idx = rtw_phy_get_tx_power_index(rtwdev, path, rate,
						     bw, ch, regd);
		hal->tx_pwr_tbl[path][rate] = pwr_idx;
	}
}

/* set tx power level by path for each rate, note that the order of the rates
 * is *very* important, because 8822B/8821C combines every four bytes of tx
 * power index into a four-byte power index register, and calls set_tx_agc to
 * write these values into hardware
 */
static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev,
					       u8 ch, u8 path)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 rs;

	/* do not need cck rates if we are not in 2.4G */
	if (hal->current_band_type == RTW_BAND_2G)
		rs = RTW_RATE_SECTION_CCK;
	else
		rs = RTW_RATE_SECTION_OFDM;

	for (; rs < RTW_RATE_SECTION_MAX; rs++)
		rtw_phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
}

/* Recompute all cached TX power indexes for @channel and push them to the
 * hardware through the chip's set_tx_power_index op; serialized by
 * tx_power_mutex.
 */
void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_hal *hal = &rtwdev->hal;
	u8 path;

	mutex_lock(&hal->tx_power_mutex);

	for (path = 0; path < hal->rf_path_num; path++)
		rtw_phy_set_tx_power_level_by_path(rtwdev, channel, path);

	chip->ops->set_tx_power_index(rtwdev);
	mutex_unlock(&hal->tx_power_mutex);
}

/* Rebase one rate section's power-by-rate values: remember the section's
 * base (taken from its highest rate; for VHT the MCS7 slot at size - 3) and
 * turn every entry into an offset relative to that base.
 */
static void
rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
					u8 rs, u8 size, u8 *rates)
{
	u8 rate;
	u8 base_idx, rate_idx;
	s8 base_2g, base_5g;

	if (rs >= RTW_RATE_SECTION_VHT_1S)
		base_idx = rates[size - 3];
	else
		base_idx = rates[size - 1];
	base_2g = hal->tx_pwr_by_rate_offset_2g[path][base_idx];
	base_5g = hal->tx_pwr_by_rate_offset_5g[path][base_idx];
	hal->tx_pwr_by_rate_base_2g[path][rs] = base_2g;
	hal->tx_pwr_by_rate_base_5g[path][rs] = base_5g;
	for (rate = 0; rate < size; rate++) {
		rate_idx = rates[rate];
		hal->tx_pwr_by_rate_offset_2g[path][rate_idx] -= base_2g;
		hal->tx_pwr_by_rate_offset_5g[path][rate_idx] -= base_5g;
	}
}

/* Rebase power-by-rate values for every path and rate section */
void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
{
	u8 path;

	for (path = 0; path < RTW_RF_PATH_MAX; path++) {
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
							RTW_RATE_SECTION_CCK,
							rtw_cck_size, rtw_cck_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
							RTW_RATE_SECTION_OFDM,
							rtw_ofdm_size, rtw_ofdm_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
							RTW_RATE_SECTION_HT_1S,
							rtw_ht_1s_size, rtw_ht_1s_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
							RTW_RATE_SECTION_HT_2S,
							rtw_ht_2s_size, rtw_ht_2s_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
							RTW_RATE_SECTION_VHT_1S,
							rtw_vht_1s_size, rtw_vht_1s_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
							RTW_RATE_SECTION_VHT_2S,
							rtw_vht_2s_size, rtw_vht_2s_rates);
	}
}

/* Make stored power limits relative to path-A's power-by-rate base for the
 * given regd/bw/rate-section.
 */
static void
__rtw_phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
{
	s8 base;
	u8 ch;

	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) {
		base =
hal->tx_pwr_by_rate_base_2g[0][rs];
		hal->tx_pwr_limit_2g[regd][bw][rs][ch] -= base;
	}

	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++) {
		base = hal->tx_pwr_by_rate_base_5g[0][rs];
		hal->tx_pwr_limit_5g[regd][bw][rs][ch] -= base;
	}
}

/* Rebase every stored power limit against the power-by-rate bases */
void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
{
	u8 regd, bw, rs;

	/* default at channel 1 */
	hal->cch_by_bw[RTW_CHANNEL_WIDTH_20] = 1;

	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
			for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
				__rtw_phy_tx_power_limit_config(hal, regd, bw, rs);
}

/* Reset all power limits of one regd/bw/rate-section to the chip maximum
 * ("no limit").
 */
static void rtw_phy_init_tx_power_limit(struct rtw_dev *rtwdev,
					u8 regd, u8 bw, u8 rs)
{
	struct rtw_hal *hal = &rtwdev->hal;
	s8 max_power_index = (s8)rtwdev->chip->max_power_index;
	u8 ch;

	/* 2.4G channels */
	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
		hal->tx_pwr_limit_2g[regd][bw][rs][ch] = max_power_index;

	/* 5G channels */
	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
		hal->tx_pwr_limit_5g[regd][bw][rs][ch] = max_power_index;
}

/* Zero the power-by-rate offsets and reset every power limit; run before
 * the tables are parsed.
 */
void rtw_phy_init_tx_power(struct rtw_dev *rtwdev)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 regd, path, rate, rs, bw;

	/* init tx power by rate offset */
	for (path = 0; path < RTW_RF_PATH_MAX; path++) {
		for (rate = 0; rate < DESC_RATE_MAX; rate++) {
			hal->tx_pwr_by_rate_offset_2g[path][rate] = 0;
			hal->tx_pwr_by_rate_offset_5g[path][rate] = 0;
		}
	}

	/* init tx power limit */
	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
			for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
				rtw_phy_init_tx_power_limit(rtwdev, regd, bw,
							    rs);
}

/* Select the power-tracking swing tables (positive/negative direction, per
 * RF path) that match the current band/sub-band and, on 2.4G, whether the
 * current TX rate is CCK.
 */
void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
				struct rtw_swing_table *swing_table)
{
	const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl;
	u8 channel = rtwdev->hal.current_channel;

	if (IS_CH_2G_BAND(channel)) {
		if (rtwdev->dm_info.tx_rate <= DESC_RATE11M) {
			swing_table->p[RF_PATH_A] = tbl->pwrtrk_2g_ccka_p;
			swing_table->n[RF_PATH_A] = tbl->pwrtrk_2g_ccka_n;
			swing_table->p[RF_PATH_B] = tbl->pwrtrk_2g_cckb_p;
			swing_table->n[RF_PATH_B] = tbl->pwrtrk_2g_cckb_n;
		} else {
			swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
			swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
			swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
			swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
		}
	} else if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) {
		swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_1];
		swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_1];
		swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_1];
		swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_1];
	} else if (IS_CH_5G_BAND_3(channel)) {
		swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_2];
		swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_2];
		swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_2];
		swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_2];
	} else if (IS_CH_5G_BAND_4(channel)) {
		swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_3];
		swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_3];
		swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_3];
		swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_3];
	} else {
		/* unknown channel: fall back to the 2.4G tables */
		swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
		swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
		swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
		swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
	}
}

/* Feed one thermal sample into the per-path EWMA and cache the new average */
void rtw_phy_pwrtrack_avg(struct rtw_dev *rtwdev, u8 thermal, u8 path)
{
	struct rtw_dm_info
*dm_info = &rtwdev->dm_info; 2061 2062 ewma_thermal_add(&dm_info->avg_thermal[path], thermal); 2063 dm_info->thermal_avg[path] = 2064 ewma_thermal_read(&dm_info->avg_thermal[path]); 2065 } 2066 2067 bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev *rtwdev, u8 thermal, 2068 u8 path) 2069 { 2070 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 2071 u8 avg = ewma_thermal_read(&dm_info->avg_thermal[path]); 2072 2073 if (avg == thermal) 2074 return false; 2075 2076 return true; 2077 } 2078 2079 u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path) 2080 { 2081 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 2082 u8 therm_avg, therm_efuse, therm_delta; 2083 2084 therm_avg = dm_info->thermal_avg[path]; 2085 therm_efuse = rtwdev->efuse.thermal_meter[path]; 2086 therm_delta = abs(therm_avg - therm_efuse); 2087 2088 return min_t(u8, therm_delta, RTW_PWR_TRK_TBL_SZ - 1); 2089 } 2090 2091 s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev, 2092 struct rtw_swing_table *swing_table, 2093 u8 tbl_path, u8 therm_path, u8 delta) 2094 { 2095 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 2096 const u8 *delta_swing_table_idx_pos; 2097 const u8 *delta_swing_table_idx_neg; 2098 2099 if (delta >= RTW_PWR_TRK_TBL_SZ) { 2100 rtw_warn(rtwdev, "power track table overflow\n"); 2101 return 0; 2102 } 2103 2104 if (!swing_table) { 2105 rtw_warn(rtwdev, "swing table not configured\n"); 2106 return 0; 2107 } 2108 2109 delta_swing_table_idx_pos = swing_table->p[tbl_path]; 2110 delta_swing_table_idx_neg = swing_table->n[tbl_path]; 2111 2112 if (!delta_swing_table_idx_pos || !delta_swing_table_idx_neg) { 2113 rtw_warn(rtwdev, "invalid swing table index\n"); 2114 return 0; 2115 } 2116 2117 if (dm_info->thermal_avg[therm_path] > 2118 rtwdev->efuse.thermal_meter[therm_path]) 2119 return delta_swing_table_idx_pos[delta]; 2120 else 2121 return -delta_swing_table_idx_neg[delta]; 2122 } 2123 2124 bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev) 2125 { 2126 struct rtw_dm_info *dm_info = 
&rtwdev->dm_info; 2127 u8 delta_iqk; 2128 2129 delta_iqk = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_k); 2130 if (delta_iqk >= rtwdev->chip->iqk_threshold) { 2131 dm_info->thermal_meter_k = dm_info->thermal_avg[0]; 2132 return true; 2133 } 2134 return false; 2135 } 2136