1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2022 Realtek Corporation 3 */ 4 5 #include "coex.h" 6 #include "debug.h" 7 #include "mac.h" 8 #include "phy.h" 9 #include "reg.h" 10 #include "rtw8852b.h" 11 #include "rtw8852b_common.h" 12 #include "rtw8852b_rfk.h" 13 #include "rtw8852b_rfk_table.h" 14 #include "rtw8852b_table.h" 15 16 #define RTW8852B_RXDCK_VER 0x1 17 #define RTW8852B_IQK_VER 0x2a 18 #define RTW8852B_IQK_SS 2 19 #define RTW8852B_RXK_GROUP_NR 4 20 #define RTW8852B_TSSI_PATH_NR 2 21 #define RTW8852B_RF_REL_VERSION 34 22 #define RTW8852B_DPK_VER 0x0d 23 #define RTW8852B_DPK_RF_PATH 2 24 #define RTW8852B_DPK_KIP_REG_NUM 3 25 26 #define _TSSI_DE_MASK GENMASK(21, 12) 27 #define ADDC_T_AVG 100 28 #define DPK_TXAGC_LOWER 0x2e 29 #define DPK_TXAGC_UPPER 0x3f 30 #define DPK_TXAGC_INVAL 0xff 31 #define RFREG_MASKRXBB 0x003e0 32 #define RFREG_MASKMODE 0xf0000 33 34 enum rtw8852b_dpk_id { 35 LBK_RXIQK = 0x06, 36 SYNC = 0x10, 37 MDPK_IDL = 0x11, 38 MDPK_MPA = 0x12, 39 GAIN_LOSS = 0x13, 40 GAIN_CAL = 0x14, 41 DPK_RXAGC = 0x15, 42 KIP_PRESET = 0x16, 43 KIP_RESTORE = 0x17, 44 DPK_TXAGC = 0x19, 45 D_KIP_PRESET = 0x28, 46 D_TXAGC = 0x29, 47 D_RXAGC = 0x2a, 48 D_SYNC = 0x2b, 49 D_GAIN_LOSS = 0x2c, 50 D_MDPK_IDL = 0x2d, 51 D_GAIN_NORM = 0x2f, 52 D_KIP_THERMAL = 0x30, 53 D_KIP_RESTORE = 0x31 54 }; 55 56 enum dpk_agc_step { 57 DPK_AGC_STEP_SYNC_DGAIN, 58 DPK_AGC_STEP_GAIN_ADJ, 59 DPK_AGC_STEP_GAIN_LOSS_IDX, 60 DPK_AGC_STEP_GL_GT_CRITERION, 61 DPK_AGC_STEP_GL_LT_CRITERION, 62 DPK_AGC_STEP_SET_TX_GAIN, 63 }; 64 65 enum rtw8852b_iqk_type { 66 ID_TXAGC = 0x0, 67 ID_FLOK_COARSE = 0x1, 68 ID_FLOK_FINE = 0x2, 69 ID_TXK = 0x3, 70 ID_RXAGC = 0x4, 71 ID_RXK = 0x5, 72 ID_NBTXK = 0x6, 73 ID_NBRXK = 0x7, 74 ID_FLOK_VBUFFER = 0x8, 75 ID_A_FLOK_COARSE = 0x9, 76 ID_G_FLOK_COARSE = 0xa, 77 ID_A_FLOK_FINE = 0xb, 78 ID_G_FLOK_FINE = 0xc, 79 ID_IQK_RESTORE = 0x10, 80 }; 81 82 static const u32 _tssi_trigger[RTW8852B_TSSI_PATH_NR] = {0x5820, 0x7820}; 83 static const u32 _tssi_cw_rpt_addr[RTW8852B_TSSI_PATH_NR] = {0x1c18, 0x3c18}; 84 static const u32 _tssi_cw_default_addr[RTW8852B_TSSI_PATH_NR][4] = { 85 {0x5634, 0x5630, 0x5630, 0x5630}, 86 {0x7634, 0x7630, 0x7630, 0x7630} }; 87 static const u32 _tssi_cw_default_mask[4] = { 88 0x000003ff, 0x3ff00000, 0x000ffc00, 0x000003ff}; 89 static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852B] = {0x5858, 0x7858}; 90 static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852B] = {0x5860, 0x7860}; 91 static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852B] = {0x5838, 0x7838}; 92 static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852B] = {0x5840, 0x7840}; 93 static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852B] = {0x5848, 0x7848}; 94 static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852B] = {0x5850, 0x7850}; 95 static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852B] = {0x5828, 0x7828}; 96 static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852B] = {0x5830, 0x7830}; 97 static const u32 _a_idxrxgain[RTW8852B_RXK_GROUP_NR] = {0x190, 0x198, 0x350, 0x352}; 98 static const u32 _a_idxattc2[RTW8852B_RXK_GROUP_NR] = {0x0f, 0x0f, 0x3f, 0x7f}; 99 static const u32 _a_idxattc1[RTW8852B_RXK_GROUP_NR] = {0x3, 0x1, 0x0, 0x0}; 100 static const u32 _g_idxrxgain[RTW8852B_RXK_GROUP_NR] = {0x212, 0x21c, 0x350, 0x360}; 101 static const u32 _g_idxattc2[RTW8852B_RXK_GROUP_NR] = {0x00, 0x00, 0x28, 0x5f}; 102 static const u32 _g_idxattc1[RTW8852B_RXK_GROUP_NR] = {0x3, 0x3, 0x2, 0x1}; 103 static const u32 _a_power_range[RTW8852B_RXK_GROUP_NR] = {0x0, 0x0, 0x0, 0x0}; 104 static const u32 _a_track_range[RTW8852B_RXK_GROUP_NR] = {0x3, 0x3, 0x6, 0x6}; 105 static const u32 _a_gain_bb[RTW8852B_RXK_GROUP_NR] = {0x08, 0x0e, 0x06, 0x0e}; 106 static const u32 _a_itqt[RTW8852B_RXK_GROUP_NR] = {0x12, 0x12, 0x12, 0x1b}; 107 static const u32 _g_power_range[RTW8852B_RXK_GROUP_NR] = {0x0, 0x0, 0x0, 0x0}; 108 static const u32 _g_track_range[RTW8852B_RXK_GROUP_NR] = {0x4, 0x4, 0x6, 0x6}; 109 static const u32 _g_gain_bb[RTW8852B_RXK_GROUP_NR] = {0x08, 0x0e, 0x06, 0x0e}; 110 static const u32 _g_itqt[RTW8852B_RXK_GROUP_NR] = {0x09, 0x12, 0x1b, 0x24}; 111 112 static const u32 rtw8852b_backup_bb_regs[] = {0x2344, 0x5800, 0x7800}; 113 static const u32 rtw8852b_backup_rf_regs[] = { 114 0xde, 0xdf, 0x8b, 0x90, 0x97, 0x85, 0x1e, 0x0, 0x2, 0x5, 0x10005 115 }; 116 117 #define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852b_backup_bb_regs) 118 #define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852b_backup_rf_regs) 119 120 static const struct rtw89_reg3_def rtw8852b_set_nondbcc_path01[] = { 121 {0x20fc, 0xffff0000, 0x0303}, 122 {0x5864, 0x18000000, 0x3}, 123 {0x7864, 0x18000000, 0x3}, 124 {0x12b8, 0x40000000, 0x1}, 125 {0x32b8, 0x40000000, 0x1}, 126 {0x030c, 0xff000000, 0x13}, 127 {0x032c, 0xffff0000, 0x0041}, 128 {0x12b8, 0x10000000, 0x1}, 129 {0x58c8, 0x01000000, 0x1}, 130 {0x78c8, 0x01000000, 0x1}, 131 {0x5864, 0xc0000000, 0x3}, 132 {0x7864, 0xc0000000, 0x3}, 133 {0x2008, 0x01ffffff, 0x1ffffff}, 134 {0x0c1c, 0x00000004, 0x1}, 135 {0x0700, 0x08000000, 0x1}, 136 {0x0c70, 0x000003ff, 0x3ff}, 137 {0x0c60, 0x00000003, 0x3}, 138 {0x0c6c, 0x00000001, 0x1}, 139 {0x58ac, 0x08000000, 0x1}, 140 {0x78ac, 0x08000000, 0x1}, 141 {0x0c3c, 0x00000200, 0x1}, 142 {0x2344, 0x80000000, 0x1}, 143 {0x4490, 0x80000000, 0x1}, 144 {0x12a0, 0x00007000, 0x7}, 145 {0x12a0, 0x00008000, 0x1}, 146 {0x12a0, 0x00070000, 0x3}, 147 {0x12a0, 0x00080000, 0x1}, 148 {0x32a0, 0x00070000, 0x3}, 149 {0x32a0, 0x00080000, 0x1}, 150 {0x0700, 0x01000000, 0x1}, 151 {0x0700, 0x06000000, 0x2}, 152 {0x20fc, 0xffff0000, 0x3333}, 153 }; 154 155 static const struct rtw89_reg3_def rtw8852b_restore_nondbcc_path01[] = { 156 {0x20fc, 0xffff0000, 0x0303}, 157 {0x12b8, 0x40000000, 0x0}, 158 {0x32b8, 0x40000000, 0x0}, 159 {0x5864, 0xc0000000, 0x0}, 160 {0x7864, 0xc0000000, 0x0}, 161 {0x2008, 0x01ffffff, 0x0000000}, 162 {0x0c1c, 0x00000004, 0x0}, 163 {0x0700, 0x08000000, 0x0}, 164 {0x0c70, 0x0000001f, 0x03}, 165 {0x0c70, 0x000003e0, 0x03}, 166 {0x12a0, 0x000ff000, 0x00}, 167 {0x32a0, 0x000ff000, 0x00}, 168 {0x0700, 0x07000000, 0x0}, 169 {0x20fc, 0xffff0000, 0x0000}, 170 {0x58c8, 0x01000000, 0x0}, 171 {0x78c8, 0x01000000, 0x0}, 172 {0x0c3c, 0x00000200, 0x0}, 173 {0x2344, 0x80000000, 0x0}, 174 }; 175 176 static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[]) 177 { 178 u32 i; 179 180 for (i = 0; i < BACKUP_BB_REGS_NR; i++) { 181 backup_bb_reg_val[i] = 182 rtw89_phy_read32_mask(rtwdev, rtw8852b_backup_bb_regs[i], 183 MASKDWORD); 184 rtw89_debug(rtwdev, RTW89_DBG_RFK, 185 "[RFK]backup bb reg : %x, value =%x\n", 186 rtw8852b_backup_bb_regs[i], backup_bb_reg_val[i]); 187 } 188 } 189 190 static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], 191 u8 rf_path) 192 { 193 u32 i; 194 195 for (i = 0; i < BACKUP_RF_REGS_NR; i++) { 196 backup_rf_reg_val[i] = 197 rtw89_read_rf(rtwdev, rf_path, 198 rtw8852b_backup_rf_regs[i], RFREG_MASK); 199 rtw89_debug(rtwdev, RTW89_DBG_RFK, 200 "[RFK]backup rf S%d reg : %x, value =%x\n", rf_path, 201 rtw8852b_backup_rf_regs[i], backup_rf_reg_val[i]); 202 } 203 } 204 205 static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev, 206 const u32 backup_bb_reg_val[]) 207 { 208 u32 i; 209 210 for (i = 0; i < BACKUP_BB_REGS_NR; i++) { 211 rtw89_phy_write32_mask(rtwdev, rtw8852b_backup_bb_regs[i], 212 MASKDWORD, backup_bb_reg_val[i]); 213 rtw89_debug(rtwdev, RTW89_DBG_RFK, 214 "[RFK]restore bb reg : %x, value =%x\n", 215 rtw8852b_backup_bb_regs[i], backup_bb_reg_val[i]); 216 } 217 } 218 219 static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, 220 const u32 backup_rf_reg_val[], u8 rf_path) 221 { 222 u32 i; 223 224 for (i = 0; i < BACKUP_RF_REGS_NR; i++) { 225 rtw89_write_rf(rtwdev, rf_path, rtw8852b_backup_rf_regs[i], 226 RFREG_MASK, backup_rf_reg_val[i]); 227 228 rtw89_debug(rtwdev, RTW89_DBG_RFK, 229 "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path, 230 rtw8852b_backup_rf_regs[i], backup_rf_reg_val[i]); 231 } 232 } 233 234 static void _rfk_rf_direct_cntrl(struct rtw89_dev *rtwdev, 235 enum rtw89_rf_path path, bool is_bybb) 236 { 237 if (is_bybb) 238 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); 239 else 240 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 241 } 242 243 static void _rfk_drf_direct_cntrl(struct rtw89_dev *rtwdev, 244 enum rtw89_rf_path path, bool is_bybb) 245 { 246 if (is_bybb) 247 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1); 248 else 249 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0); 250 } 251 252 static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path) 253 { 254 bool fail = true; 255 u32 val; 256 int ret; 257 258 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 259 1, 8200, false, rtwdev, 0xbff8, MASKBYTE0); 260 if (ret) 261 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL1 IQK timeout!!!\n"); 262 263 udelay(200); 264 265 if (!ret) 266 fail = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 267 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0); 268 269 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret); 270 val = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD); 271 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8008 = 0x%x\n", path, val); 272 273 return fail; 274 } 275 276 static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 277 { 278 u8 val; 279 280 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,PHY%d\n", 281 rtwdev->dbcc_en, phy_idx); 282 283 if (!rtwdev->dbcc_en) { 284 val = RF_AB; 285 } else { 286 if (phy_idx == RTW89_PHY_0) 287 val = RF_A; 288 else 289 val = RF_B; 290 } 291 return val; 292 } 293 294 static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 295 enum rtw89_rf_path path) 296 { 297 rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0); 298 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); 299 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1); 300 mdelay(1); 301 } 302 303 static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 304 { 305 u8 path, dck_tune; 306 u32 rf_reg5; 307 308 rtw89_debug(rtwdev, RTW89_DBG_RFK, 309 "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, CV : 0x%x) ******\n", 310 RTW8852B_RXDCK_VER, rtwdev->hal.cv); 311 312 for (path = 0; path < RF_PATH_NUM_8852B; path++) { 313 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); 314 dck_tune = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE); 315 316 if (rtwdev->is_tssi_mode[path]) 317 rtw89_phy_write32_mask(rtwdev, 318 R_P0_TSSI_TRK + (path << 13), 319 B_P0_TSSI_TRK_EN, 0x1); 320 321 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 322 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0); 323 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); 324 _set_rx_dck(rtwdev, phy, path); 325 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune); 326 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); 327 328 if (rtwdev->is_tssi_mode[path]) 329 rtw89_phy_write32_mask(rtwdev, 330 R_P0_TSSI_TRK + (path << 13), 331 B_P0_TSSI_TRK_EN, 0x0); 332 } 333 } 334 335 static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 336 { 337 u32 rf_reg5; 338 u32 rck_val; 339 u32 val; 340 int ret; 341 342 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path); 343 344 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); 345 346 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 347 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); 348 349 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n", 350 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); 351 352 /* RCK trigger */ 353 rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240); 354 355 ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30, 356 false, rtwdev, path, RR_RCKS, BIT(3)); 357 358 rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA); 359 360 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n", 361 rck_val, ret); 362 363 rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val); 364 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); 365 366 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n", 367 rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK)); 368 } 369 370 static void _afe_init(struct rtw89_dev *rtwdev) 371 { 372 rtw89_write32(rtwdev, R_AX_PHYREG_SET, 0xf); 373 374 rtw89_rfk_parser(rtwdev, &rtw8852b_afe_init_defs_tbl); 375 } 376 377 static void _drck(struct rtw89_dev *rtwdev) 378 { 379 u32 rck_d; 380 u32 val; 381 int ret; 382 383 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n"); 384 rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_KICK, 0x1); 385 386 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000, 387 false, rtwdev, R_DRCK_RS, B_DRCK_RS_DONE); 388 if (ret) 389 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n"); 390 391 rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_KICK, 0x0); 392 rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1); 393 udelay(1); 394 rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0); 395 rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RS, B_DRCK_RS_LPS); 396 rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_SEL, 0x0); 397 rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_CV, rck_d); 398 399 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0cc = 0x%x\n", 400 rtw89_phy_read32_mask(rtwdev, R_DRCK_V1, MASKDWORD)); 401 } 402 403 static void _addck_backup(struct rtw89_dev *rtwdev) 404 { 405 struct rtw89_dack_info *dack = &rtwdev->dack; 406 407 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0); 408 dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0); 409 dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1); 410 411 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0); 412 dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A0); 413 dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A1); 414 } 415 416 static void _addck_reload(struct rtw89_dev *rtwdev) 417 { 418 struct rtw89_dack_info *dack = &rtwdev->dack; 419 420 /* S0 */ 421 rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK0D_VAL, dack->addck_d[0][0]); 422 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_VAL, dack->addck_d[0][1] >> 6); 423 rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK0D_VAL2, dack->addck_d[0][1] & 0x3f); 424 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x3); 425 426 /* S1 */ 427 rtw89_phy_write32_mask(rtwdev, R_ADDCK1D, B_ADDCK1D_VAL, dack->addck_d[1][0]); 428 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK0_VAL, dack->addck_d[1][1] >> 6); 429 rtw89_phy_write32_mask(rtwdev, R_ADDCK1D, B_ADDCK1D_VAL2, dack->addck_d[1][1] & 0x3f); 430 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_MAN, 0x3); 431 } 432 433 static void _dack_backup_s0(struct rtw89_dev *rtwdev) 434 { 435 struct rtw89_dack_info *dack = &rtwdev->dack; 436 u8 i; 437 438 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); 439 440 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 441 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i); 442 dack->msbk_d[0][0][i] = 443 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0); 444 rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i); 445 dack->msbk_d[0][1][i] = 446 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1); 447 } 448 449 dack->biask_d[0][0] = 450 rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00); 451 dack->biask_d[0][1] = 452 rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01); 453 454 dack->dadck_d[0][0] = 455 rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00); 456 dack->dadck_d[0][1] = 457 rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01); 458 } 459 460 static void _dack_backup_s1(struct rtw89_dev *rtwdev) 461 { 462 struct rtw89_dack_info *dack = &rtwdev->dack; 463 u8 i; 464 465 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); 466 467 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 468 rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i); 469 dack->msbk_d[1][0][i] = 470 rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK10S); 471 rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i); 472 dack->msbk_d[1][1][i] = 473 rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK11S); 474 } 475 476 dack->biask_d[1][0] = 477 rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, B_DACK_BIAS10); 478 dack->biask_d[1][1] = 479 rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, B_DACK_BIAS11); 480 481 dack->dadck_d[1][0] = 482 rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, B_DACK_DADCK10); 483 dack->dadck_d[1][1] = 484 rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, B_DACK_DADCK11); 485 } 486 487 static void _check_addc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 488 { 489 s32 dc_re = 0, dc_im = 0; 490 u32 tmp; 491 u32 i; 492 493 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 494 &rtw8852b_check_addc_defs_a_tbl, 495 &rtw8852b_check_addc_defs_b_tbl); 496 497 for (i = 0; i < ADDC_T_AVG; i++) { 498 tmp = rtw89_phy_read32_mask(rtwdev, R_DBG32_D, MASKDWORD); 499 dc_re += sign_extend32(FIELD_GET(0xfff000, tmp), 11); 500 dc_im += sign_extend32(FIELD_GET(0xfff, tmp), 11); 501 } 502 503 dc_re /= ADDC_T_AVG; 504 dc_im /= ADDC_T_AVG; 505 506 rtw89_debug(rtwdev, RTW89_DBG_RFK, 507 "[DACK]S%d,dc_re = 0x%x,dc_im =0x%x\n", path, dc_re, dc_im); 508 } 509 510 static void _addck(struct rtw89_dev *rtwdev) 511 { 512 struct rtw89_dack_info *dack = &rtwdev->dack; 513 u32 val; 514 int ret; 515 516 /* S0 */ 517 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x0); 518 rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x30, 0x0); 519 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); 520 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0); 521 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0); 522 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1); 523 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf); 524 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0); 525 rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(1), 0x1); 526 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3); 527 528 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S0 ADDCK\n"); 529 _check_addc(rtwdev, RF_PATH_A); 530 531 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x1); 532 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x0); 533 udelay(1); 534 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1); 535 536 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000, 537 false, rtwdev, R_ADDCKR0, BIT(0)); 538 if (ret) { 539 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n"); 540 dack->addck_timeout[0] = true; 541 } 542 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret); 543 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 ADDCK\n"); 544 _check_addc(rtwdev, RF_PATH_A); 545 546 rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(1), 0x0); 547 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1); 548 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc); 549 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1); 550 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0); 551 552 /* S1 */ 553 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); 554 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0); 555 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0); 556 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1); 557 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf); 558 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0); 559 rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(1), 0x1); 560 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3); 561 562 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S1 ADDCK\n"); 563 _check_addc(rtwdev, RF_PATH_B); 564 565 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x1); 566 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x0); 567 udelay(1); 568 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1); 569 570 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000, 571 false, rtwdev, R_ADDCKR1, BIT(0)); 572 if (ret) { 573 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n"); 574 dack->addck_timeout[1] = true; 575 } 576 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret); 577 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 ADDCK\n"); 578 _check_addc(rtwdev, RF_PATH_B); 579 580 rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(1), 0x0); 581 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1); 582 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc); 583 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1); 584 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0); 585 } 586 587 static void _check_dadc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 588 { 589 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 590 &rtw8852b_check_dadc_en_defs_a_tbl, 591 &rtw8852b_check_dadc_en_defs_b_tbl); 592 593 _check_addc(rtwdev, path); 594 595 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 596 &rtw8852b_check_dadc_dis_defs_a_tbl, 597 &rtw8852b_check_dadc_dis_defs_b_tbl); 598 } 599 600 static bool _dack_s0_check_done(struct rtw89_dev *rtwdev, bool part1) 601 { 602 if (part1) { 603 if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 || 604 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0) 605 return false; 606 } else { 607 if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 || 608 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0) 609 return false; 610 } 611 612 return true; 613 } 614 615 static void _dack_s0(struct rtw89_dev *rtwdev) 616 { 617 struct rtw89_dack_info *dack = &rtwdev->dack; 618 bool done; 619 int ret; 620 621 rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_1_defs_tbl); 622 623 ret = read_poll_timeout_atomic(_dack_s0_check_done, done, done, 1, 10000, 624 false, rtwdev, true); 625 if (ret) { 626 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK timeout\n"); 627 dack->msbk_timeout[0] = true; 628 } 629 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret); 630 631 rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_2_defs_tbl); 632 633 ret = read_poll_timeout_atomic(_dack_s0_check_done, done, done, 1, 10000, 634 false, rtwdev, false); 635 if (ret) { 636 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADCK timeout\n"); 637 dack->dadck_timeout[0] = true; 638 } 639 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret); 640 641 rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_3_defs_tbl); 642 643 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n"); 644 645 _dack_backup_s0(rtwdev); 646 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0); 647 } 648 649 static bool _dack_s1_check_done(struct rtw89_dev *rtwdev, bool part1) 650 { 651 if (part1) { 652 if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 && 653 rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0) 654 return false; 655 } else { 656 if (rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK_S1P2_OK) == 0 && 657 rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK_S1P3_OK) == 0) 658 return false; 659 } 660 661 return true; 662 } 663 664 static void _dack_s1(struct rtw89_dev *rtwdev) 665 { 666 struct rtw89_dack_info *dack = &rtwdev->dack; 667 bool done; 668 int ret; 669 670 rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_1_defs_tbl); 671 672 ret = read_poll_timeout_atomic(_dack_s1_check_done, done, done, 1, 10000, 673 false, rtwdev, true); 674 if (ret) { 675 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK timeout\n"); 676 dack->msbk_timeout[1] = true; 677 } 678 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret); 679 680 rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_2_defs_tbl); 681 682 ret = read_poll_timeout_atomic(_dack_s1_check_done, done, done, 1, 10000, 683 false, rtwdev, false); 684 if (ret) { 685 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DADCK timeout\n"); 686 dack->dadck_timeout[1] = true; 687 } 688 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret); 689 690 rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_3_defs_tbl); 691 692 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n"); 693 694 _check_dadc(rtwdev, RF_PATH_B); 695 _dack_backup_s1(rtwdev); 696 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0); 697 } 698 699 static void _dack(struct rtw89_dev *rtwdev) 700 { 701 _dack_s0(rtwdev); 702 _dack_s1(rtwdev); 703 } 704 705 static void _dack_dump(struct rtw89_dev *rtwdev) 706 { 707 struct rtw89_dack_info *dack = &rtwdev->dack; 708 u8 i; 709 u8 t; 710 711 rtw89_debug(rtwdev, RTW89_DBG_RFK, 712 "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n", 713 dack->addck_d[0][0], dack->addck_d[0][1]); 714 rtw89_debug(rtwdev, RTW89_DBG_RFK, 715 "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n", 716 dack->addck_d[1][0], dack->addck_d[1][1]); 717 rtw89_debug(rtwdev, RTW89_DBG_RFK, 718 "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n", 719 dack->dadck_d[0][0], dack->dadck_d[0][1]); 720 rtw89_debug(rtwdev, RTW89_DBG_RFK, 721 "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n", 722 dack->dadck_d[1][0], dack->dadck_d[1][1]); 723 rtw89_debug(rtwdev, RTW89_DBG_RFK, 724 "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n", 725 dack->biask_d[0][0], dack->biask_d[0][1]); 726 rtw89_debug(rtwdev, RTW89_DBG_RFK, 727 "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n", 728 dack->biask_d[1][0], dack->biask_d[1][1]); 729 730 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n"); 731 for (i = 0; i < 0x10; i++) { 732 t = dack->msbk_d[0][0][i]; 733 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); 734 } 735 736 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n"); 737 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 738 t = dack->msbk_d[0][1][i]; 739 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); 740 } 741 742 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n"); 743 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 744 t = dack->msbk_d[1][0][i]; 745 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); 746 } 747 748 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n"); 749 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 750 t = dack->msbk_d[1][1][i]; 751 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); 752 } 753 } 754 755 static void _dac_cal(struct rtw89_dev *rtwdev, bool force) 756 { 757 struct rtw89_dack_info *dack = &rtwdev->dack; 758 u32 rf0_0, rf1_0; 759 760 dack->dack_done = false; 761 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK 0x1\n"); 762 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n"); 763 764 rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK); 765 rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK); 766 _afe_init(rtwdev); 767 _drck(rtwdev); 768 769 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0); 770 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0); 771 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1); 772 rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1); 773 _addck(rtwdev); 774 _addck_backup(rtwdev); 775 _addck_reload(rtwdev); 776 777 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0); 778 rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0); 779 _dack(rtwdev); 780 _dack_dump(rtwdev); 781 dack->dack_done = true; 782 783 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0); 784 rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0); 785 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1); 786 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1); 787 dack->dack_cnt++; 788 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n"); 789 } 790 791 static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path) 792 { 793 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 794 u32 tmp; 795 796 switch (iqk_info->iqk_band[path]) { 797 case RTW89_BAND_2G: 798 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); 799 rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1); 800 tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); 801 rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp); 802 break; 803 case RTW89_BAND_5G: 804 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); 805 rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1); 806 tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); 807 rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp); 808 break; 809 default: 810 break; 811 } 812 } 813 814 static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 815 u8 path, u8 ktype) 816 { 817 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 818 u32 iqk_cmd; 819 bool fail; 820 821 switch (ktype) { 822 case ID_FLOK_COARSE: 823 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); 824 iqk_cmd = 0x108 | (1 << (4 + path)); 825 break; 826 case ID_FLOK_FINE: 827 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); 828 iqk_cmd = 0x208 | (1 << (4 + path)); 829 break; 830 case ID_FLOK_VBUFFER: 831 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); 832 iqk_cmd = 0x308 | (1 << (4 + path)); 833 break; 834 case ID_TXK: 835 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); 836 iqk_cmd = 0x008 | (1 << (path + 4)) | 837 (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8); 838 break; 839 case ID_RXAGC: 840 iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1); 841 break; 842 case ID_RXK: 843 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); 844 iqk_cmd = 0x008 | (1 << (path + 4)) | 845 (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8); 846 break; 847 case ID_NBTXK: 848 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); 849 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x011); 850 iqk_cmd = 0x408 | (1 << (4 + path)); 851 break; 852 case ID_NBRXK: 853 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); 854 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); 855 iqk_cmd = 0x608 | (1 << (4 + path)); 856 break; 857 default: 858 return false; 859 } 860 861 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1); 862 udelay(1); 863 fail = _iqk_check_cal(rtwdev, path); 864 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); 865 866 return fail; 867 } 868 869 static bool _rxk_group_sel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 870 u8 path) 871 { 872 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 873 bool kfail = false; 874 bool fail; 875 u8 gp; 876 877 for (gp = 0; gp < RTW8852B_RXK_GROUP_NR; gp++) { 878 switch (iqk_info->iqk_band[path]) { 879 case RTW89_BAND_2G: 880 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, 881 _g_idxrxgain[gp]); 882 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, 883 _g_idxattc2[gp]); 884 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, 885 _g_idxattc1[gp]); 886 break; 887 case RTW89_BAND_5G: 888 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, 889 _a_idxrxgain[gp]); 890 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT, 891 _a_idxattc2[gp]); 892 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2, 893 _a_idxattc1[gp]); 894 break; 895 default: 896 break; 897 } 898 899 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 900 B_CFIR_LUT_SEL, 0x1); 901 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 902 B_CFIR_LUT_SET, 0x0); 903 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 904 B_CFIR_LUT_GP_V1, gp); 905 fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); 906 rtw89_phy_write32_mask(rtwdev, R_IQKINF, 907 BIT(16 + gp + path * 4), fail); 908 kfail |= fail; 909 } 910 rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0); 911 912 if (kfail) { 913 iqk_info->nb_rxcfir[path] = 0x40000002; 914 rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), 915 B_IQK_RES_RXCFIR, 0x0); 916 iqk_info->is_wb_rxiqk[path] = false; 917 } else { 918 iqk_info->nb_rxcfir[path] = 0x40000000; 919 rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), 920 B_IQK_RES_RXCFIR, 0x5); 921 iqk_info->is_wb_rxiqk[path] = true; 922 } 923 924 return kfail; 925 } 926 927 static bool _iqk_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 928 u8 path) 929 { 930 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 931 const u8 gp = 0x3; 932 bool kfail = false; 933 bool fail; 934 935 switch (iqk_info->iqk_band[path]) { 936 case RTW89_BAND_2G: 937 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, 938 _g_idxrxgain[gp]); 939 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, 940 _g_idxattc2[gp]); 941 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, 942 _g_idxattc1[gp]); 943 break; 944 case RTW89_BAND_5G: 945 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, 946 _a_idxrxgain[gp]); 947 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT, 948 _a_idxattc2[gp]); 949 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2, 950 _a_idxattc1[gp]); 951 break; 952 default: 953 break; 954 } 955 956 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1); 957 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0); 958 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp); 959 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); 960 udelay(1); 961 962 fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); 963 rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail); 964 kfail |= fail; 965 rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0); 966 967 if (!kfail) 968 iqk_info->nb_rxcfir[path] = 969 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD) | 0x2; 970 else 971 iqk_info->nb_rxcfir[path] = 0x40000002; 972 973 return kfail; 974 } 975 976 static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path) 977 { 978 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 979 980 if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) { 981 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); 982 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); 983 udelay(1); 984 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f); 985 udelay(1); 986 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03); 987 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001); 988 udelay(1); 989 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041); 990 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_VAL, 0x2); 991 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ON, 0x1); 992 rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_VAL, 0x2); 993 rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_ON, 0x1); 994 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1); 995 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x1); 996 } else { 997 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); 998 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); 999 udelay(1); 1000 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f); 1001 udelay(1); 1002 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03); 1003 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001); 1004 udelay(1); 1005 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041); 1006 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_VAL, 0x1); 1007 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ON, 0x1); 1008 rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_VAL, 0x1); 1009 rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_ON, 0x1); 1010 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1); 1011 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x0); 1012 } 1013 } 1014 1015 static bool _txk_group_sel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) 1016 { 1017 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1018 bool kfail = false; 1019 bool fail; 1020 u8 gp; 1021 1022 for (gp = 0x0; gp < RTW8852B_RXK_GROUP_NR; gp++) { 1023 switch (iqk_info->iqk_band[path]) { 1024 case RTW89_BAND_2G: 1025 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 1026 _g_power_range[gp]); 1027 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 1028 _g_track_range[gp]); 1029 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 1030 _g_gain_bb[gp]); 1031 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1032 MASKDWORD, _g_itqt[gp]); 1033 break; 1034 case RTW89_BAND_5G: 1035 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 1036 _a_power_range[gp]); 1037 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 1038 _a_track_range[gp]); 1039 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 1040 _a_gain_bb[gp]); 1041 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1042 MASKDWORD, _a_itqt[gp]); 1043 break; 1044 default: 1045 break; 1046 } 1047 1048 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 1049 B_CFIR_LUT_SEL, 0x1); 1050 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 1051 B_CFIR_LUT_SET, 0x1); 1052 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 1053 B_CFIR_LUT_G2, 0x0); 1054 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 1055 B_CFIR_LUT_GP, gp); 1056 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1057 fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); 1058 rtw89_phy_write32_mask(rtwdev, R_IQKINF, 1059 BIT(8 + gp + path * 4), fail); 1060 kfail |= fail; 1061 } 1062 1063 if (kfail) { 1064 iqk_info->nb_txcfir[path] = 0x40000002; 1065 rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), 1066 B_IQK_RES_TXCFIR, 0x0); 1067 iqk_info->is_wb_txiqk[path] = false; 1068 } else { 1069 iqk_info->nb_txcfir[path] = 0x40000000; 1070 rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), 1071 B_IQK_RES_TXCFIR, 0x5); 1072 iqk_info->is_wb_txiqk[path] = true; 1073 } 1074 1075 return kfail; 1076 } 1077 1078 static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) 1079 { 1080 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1081 bool kfail; 1082 u8 gp = 0x2; 1083 1084 switch (iqk_info->iqk_band[path]) { 1085 case RTW89_BAND_2G: 1086 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 1087 _g_power_range[gp]); 1088 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 1089 _g_track_range[gp]); 1090 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 1091 _g_gain_bb[gp]); 1092 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1093 MASKDWORD, _g_itqt[gp]); 1094 break; 1095 case RTW89_BAND_5G: 1096 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 1097 _a_power_range[gp]); 1098 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 1099 _a_track_range[gp]); 1100 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 1101 _a_gain_bb[gp]); 1102 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1103 MASKDWORD, _a_itqt[gp]); 1104 break; 1105 default: 1106 break; 1107 } 1108 1109 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1); 1110 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1); 1111 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0); 1112 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp); 1113 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1114 kfail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1115 1116 if (!kfail) 1117 iqk_info->nb_txcfir[path] = 1118 rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), 1119 MASKDWORD) | 0x2; 1120 else 1121 iqk_info->nb_txcfir[path] = 0x40000002; 1122 1123 return kfail; 1124 } 1125 1126 static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias) 1127 { 1128 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1129 1130 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1131 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ibias = %x\n", path, ibias); 1132 1133 rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x2); 1134 if (iqk_info->iqk_band[path] == RTW89_BAND_2G) 1135 rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x0); 1136 else 1137 rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x1); 1138 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ibias); 1139 rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0); 1140 rtw89_write_rf(rtwdev, path, RR_TXVBUF, RR_TXVBUF_DACEN, 0x1); 1141 1142 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x7c = %x\n", path, 1143 rtw89_read_rf(rtwdev, path, RR_TXVBUF, RFREG_MASK)); 1144 } 1145 1146 static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path) 1147 { 1148 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1149 bool is_fail1, is_fail2; 1150 u32 vbuff_i; 1151 u32 vbuff_q; 1152 u32 core_i; 1153 u32 core_q; 1154 u32 tmp; 1155 u8 ch; 1156 1157 tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK); 1158 core_i = FIELD_GET(RR_TXMO_COI, tmp); 1159 core_q = FIELD_GET(RR_TXMO_COQ, tmp); 1160 ch = (iqk_info->iqk_times / 2) % RTW89_IQK_CHS_NR; 1161 1162 if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d) 1163 is_fail1 = true; 1164 else 1165 is_fail1 = false; 1166 1167 iqk_info->lok_idac[ch][path] = tmp; 1168 1169 tmp = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK); 1170 vbuff_i = FIELD_GET(RR_LOKVB_COI, tmp); 1171 vbuff_q = FIELD_GET(RR_LOKVB_COQ, tmp); 1172 1173 if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d) 1174 is_fail2 = true; 1175 else 1176 is_fail2 = false; 1177 1178 iqk_info->lok_vbuf[ch][path] = tmp; 1179 1180 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1181 "[IQK]S%x, lok_idac[%x][%x] = 0x%x\n", path, ch, path, 1182 iqk_info->lok_idac[ch][path]); 1183 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1184 "[IQK]S%x, lok_vbuf[%x][%x] = 0x%x\n", path, ch, path, 1185 iqk_info->lok_vbuf[ch][path]); 1186 1187 return is_fail1 | is_fail2; 1188 } 1189 1190 static bool _iqk_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) 1191 { 1192 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1193 bool tmp; 1194 1195 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021); 1196 1197 switch (iqk_info->iqk_band[path]) { 1198 case RTW89_BAND_2G: 1199 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); 1200 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6); 1201 break; 1202 case RTW89_BAND_5G: 1203 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); 1204 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x4); 1205 break; 1206 default: 1207 break; 1208 } 1209 1210 switch (iqk_info->iqk_band[path]) { 1211 case RTW89_BAND_2G: 1212 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0); 1213 break; 1214 case RTW89_BAND_5G: 1215 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0); 1216 break; 1217 default: 1218 break; 1219 } 1220 1221 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x9); 1222 tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE); 1223 iqk_info->lok_cor_fail[0][path] = tmp; 1224 1225 switch (iqk_info->iqk_band[path]) { 1226 case RTW89_BAND_2G: 1227 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); 1228 break; 1229 case RTW89_BAND_5G: 1230 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); 1231 break; 1232 default: 1233 break; 1234 } 1235 1236 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x24); 1237 tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER); 1238 1239 switch (iqk_info->iqk_band[path]) { 1240 case RTW89_BAND_2G: 1241 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0); 1242 break; 1243 case RTW89_BAND_5G: 1244 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0); 1245 break; 1246 default: 1247 break; 1248 } 1249 1250 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x9); 1251 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021); 1252 tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE); 1253 iqk_info->lok_fin_fail[0][path] = tmp; 1254 1255 switch (iqk_info->iqk_band[path]) { 1256 case RTW89_BAND_2G: 1257 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); 1258 break; 1259 case RTW89_BAND_5G: 1260 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); 1261 break; 1262 default: 1263 break; 1264 } 1265 1266 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x24); 1267 _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER); 1268 1269 return _lok_finetune_check(rtwdev, path); 1270 } 1271 1272 static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path) 1273 { 1274 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1275 1276 switch (iqk_info->iqk_band[path]) { 1277 case RTW89_BAND_2G: 1278 rtw89_write_rf(rtwdev, path, RR_XALNA2, RR_XALNA2_SW2, 0x00); 1279 rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0); 1280 rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0); 1281 rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1); 1282 rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0); 1283 rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); 1284 rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x00); 1285 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e); 1286 udelay(1); 1287 break; 1288 case RTW89_BAND_5G: 1289 rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x00); 1290 rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x1); 1291 rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0); 1292 rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); 1293 rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x80); 1294 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e); 1295 udelay(1); 1296 break; 1297 default: 1298 break; 1299 } 1300 } 1301 1302 static void _iqk_txclk_setting(struct rtw89_dev *rtwdev, u8 path) 1303 { 1304 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); 1305 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); 1306 udelay(1); 1307 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f); 1308 udelay(1); 1309 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13); 1310 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001); 1311 udelay(1); 1312 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041); 1313 } 1314 1315 static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) 1316 { 1317 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1318 u32 tmp; 1319 bool flag; 1320 1321 flag = iqk_info->lok_cor_fail[0][path]; 1322 rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag); 1323 flag = iqk_info->lok_fin_fail[0][path]; 1324 rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag); 1325 flag = iqk_info->iqk_tx_fail[0][path]; 1326 rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag); 1327 flag = iqk_info->iqk_rx_fail[0][path]; 1328 rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag); 1329 1330 tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD); 1331 iqk_info->bp_iqkenable[path] = tmp; 1332 tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD); 1333 iqk_info->bp_txkresult[path] = tmp; 1334 tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD); 1335 iqk_info->bp_rxkresult[path] = tmp; 1336 1337 rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT, iqk_info->iqk_times); 1338 1339 tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4)); 1340 if (tmp) 1341 iqk_info->iqk_fail_cnt++; 1342 rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4), 1343 iqk_info->iqk_fail_cnt); 1344 } 1345 1346 static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) 1347 { 1348 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1349 bool lok_is_fail = false; 1350 const int try = 3; 1351 u8 ibias = 0x1; 1352 u8 i; 1353 1354 _iqk_txclk_setting(rtwdev, path); 1355 1356 /* LOK */ 1357 for (i = 0; i < try; i++) { 1358 _lok_res_table(rtwdev, path, ibias++); 1359 _iqk_txk_setting(rtwdev, path); 1360 lok_is_fail = _iqk_lok(rtwdev, phy_idx, path); 1361 if (!lok_is_fail) 1362 break; 1363 } 1364 1365 if (lok_is_fail) 1366 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] LOK (%d) fail\n", path); 1367 1368 /* TXK */ 1369 if (iqk_info->is_nbiqk) 1370 iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path); 1371 else 1372 iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path); 1373 1374 /* RX */ 1375 _iqk_rxclk_setting(rtwdev, path); 1376 _iqk_rxk_setting(rtwdev, path); 1377 if (iqk_info->is_nbiqk) 1378 iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path); 1379 else 1380 iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path); 1381 1382 _iqk_info_iqk(rtwdev, phy_idx, path); 1383 } 1384 1385 static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path) 1386 { 1387 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1388 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1389 u32 reg_rf18; 1390 u32 reg_35c; 1391 u8 idx; 1392 u8 get_empty_table = false; 1393 1394 for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) { 1395 if (iqk_info->iqk_mcc_ch[idx][path] == 0) { 1396 get_empty_table = true; 1397 break; 1398 } 1399 } 1400 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (1)idx = %x\n", idx); 1401 1402 if (!get_empty_table) { 1403 idx = iqk_info->iqk_table_idx[path] + 1; 1404 if (idx > 1) 1405 idx = 0; 1406 } 1407 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (2)idx = %x\n", idx); 1408 1409 reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); 1410 reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN); 1411 1412 iqk_info->iqk_band[path] = chan->band_type; 1413 iqk_info->iqk_bw[path] = chan->band_width; 1414 iqk_info->iqk_ch[path] = chan->channel; 1415 iqk_info->iqk_mcc_ch[idx][path] = chan->channel; 1416 iqk_info->iqk_table_idx[path] = idx; 1417 1418 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x, idx = %x\n", 1419 path, reg_rf18, idx); 1420 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x\n", 1421 path, reg_rf18); 1422 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n", 1423 iqk_info->iqk_times, idx); 1424 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_mcc_ch[%x][%x] = 0x%x\n", 1425 idx, path, iqk_info->iqk_mcc_ch[idx][path]); 1426 1427 if (reg_35c == 0x01) 1428 iqk_info->syn1to2 = 0x1; 1429 else 1430 iqk_info->syn1to2 = 0x0; 1431 1432 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1433 "[IQK]S%x, iqk_info->syn1to2= 0x%x\n", path, 1434 iqk_info->syn1to2); 1435 1436 rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852B_IQK_VER); 1437 /* 2GHz/5GHz/6GHz = 0/1/2 */ 1438 rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16), 1439 iqk_info->iqk_band[path]); 1440 /* 20/40/80 = 0/1/2 */ 1441 rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16), 1442 iqk_info->iqk_bw[path]); 1443 rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16), 1444 iqk_info->iqk_ch[path]); 1445 } 1446 1447 static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) 1448 { 1449 _iqk_by_path(rtwdev, phy_idx, path); 1450 } 1451 1452 static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path) 1453 { 1454 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1455 bool fail; 1456 1457 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 1458 iqk_info->nb_txcfir[path]); 1459 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 1460 iqk_info->nb_rxcfir[path]); 1461 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 1462 0x00000e19 + (path << 4)); 1463 fail = _iqk_check_cal(rtwdev, path); 1464 1465 rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s result =%x\n", __func__, fail); 1466 1467 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1468 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000); 1469 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); 1470 rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS, B_IQK_RES_K, 0x0); 1471 rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K1, 0x0); 1472 rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K2, 0x0); 1473 rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0); 1474 rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0); 1475 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0x3); 1476 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); 1477 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1); 1478 } 1479 1480 static void _iqk_afebb_restore(struct rtw89_dev *rtwdev, 1481 enum rtw89_phy_idx phy_idx, u8 path) 1482 { 1483 const struct rtw89_reg3_def *def; 1484 int size; 1485 u8 kpath; 1486 int i; 1487 1488 rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__); 1489 1490 kpath = _kpath(rtwdev, phy_idx); 1491 1492 switch (kpath) { 1493 case RF_A: 1494 case RF_B: 1495 return; 1496 default: 1497 size = ARRAY_SIZE(rtw8852b_restore_nondbcc_path01); 1498 def = rtw8852b_restore_nondbcc_path01; 1499 break; 1500 } 1501 1502 for (i = 0; i < size; i++, def++) 1503 rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data); 1504 } 1505 1506 static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path) 1507 { 1508 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1509 u8 idx; 1510 1511 idx = iqk_info->iqk_table_idx[path]; 1512 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (3)idx = %x\n", idx); 1513 1514 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx); 1515 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx); 1516 1517 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 1518 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0); 1519 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); 1520 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a); 1521 1522 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK](1)S%x, 0x8%x54 = 0x%x\n", path, 1 << path, 1523 rtw89_phy_read32_mask(rtwdev, R_CFIR_LUT + (path << 8), MASKDWORD)); 1524 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK](1)S%x, 0x8%x04 = 0x%x\n", path, 1 << path, 1525 rtw89_phy_read32_mask(rtwdev, R_COEF_SEL + (path << 8), MASKDWORD)); 1526 } 1527 1528 static void _iqk_macbb_setting(struct rtw89_dev *rtwdev, 1529 enum rtw89_phy_idx phy_idx, u8 path) 1530 { 1531 const struct rtw89_reg3_def *def; 1532 int size; 1533 u8 kpath; 1534 int i; 1535 1536 kpath = _kpath(rtwdev, phy_idx); 1537 1538 switch (kpath) { 1539 case RF_A: 1540 case RF_B: 1541 return; 1542 default: 1543 size = ARRAY_SIZE(rtw8852b_set_nondbcc_path01); 1544 def = rtw8852b_set_nondbcc_path01; 1545 break; 1546 } 1547 1548 for (i = 0; i < size; i++, def++) 1549 rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data); 1550 } 1551 1552 static void _iqk_init(struct rtw89_dev *rtwdev) 1553 { 1554 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1555 u8 idx, path; 1556 1557 rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0); 1558 if (iqk_info->is_iqk_init) 1559 return; 1560 1561 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1562 iqk_info->is_iqk_init = true; 1563 iqk_info->is_nbiqk = false; 1564 iqk_info->iqk_fft_en = false; 1565 iqk_info->iqk_sram_en = false; 1566 iqk_info->iqk_cfir_en = false; 1567 iqk_info->iqk_xym_en = false; 1568 iqk_info->iqk_times = 0x0; 1569 1570 for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) { 1571 iqk_info->iqk_channel[idx] = 0x0; 1572 for (path = 0; path < RTW8852B_IQK_SS; path++) { 1573 iqk_info->lok_cor_fail[idx][path] = false; 1574 iqk_info->lok_fin_fail[idx][path] = false; 1575 iqk_info->iqk_tx_fail[idx][path] = false; 1576 iqk_info->iqk_rx_fail[idx][path] = false; 1577 iqk_info->iqk_mcc_ch[idx][path] = 0x0; 1578 iqk_info->iqk_table_idx[path] = 0x0; 1579 } 1580 } 1581 } 1582 1583 static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath) 1584 { 1585 u32 rf_mode; 1586 u8 path; 1587 int ret; 1588 1589 for (path = 0; path < RF_PATH_MAX; path++) { 1590 if (!(kpath & BIT(path))) 1591 continue; 1592 1593 ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, 1594 rf_mode != 2, 2, 5000, false, 1595 rtwdev, path, RR_MOD, RR_MOD_MASK); 1596 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1597 "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", path, ret); 1598 } 1599 } 1600 1601 static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx, 1602 bool is_pause) 1603 { 1604 if (!is_pause) 1605 return; 1606 1607 _wait_rx_mode(rtwdev, _kpath(rtwdev, band_idx)); 1608 } 1609 1610 static void _doiqk(struct rtw89_dev *rtwdev, bool force, 1611 enum rtw89_phy_idx phy_idx, u8 path) 1612 { 1613 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1614 u32 backup_bb_val[BACKUP_BB_REGS_NR]; 1615 u32 backup_rf_val[RTW8852B_IQK_SS][BACKUP_RF_REGS_NR]; 1616 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB); 1617 1618 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); 1619 1620 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1621 "[IQK]==========IQK start!!!!!==========\n"); 1622 iqk_info->iqk_times++; 1623 iqk_info->version = RTW8852B_IQK_VER; 1624 1625 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version); 1626 _iqk_get_ch_info(rtwdev, phy_idx, path); 1627 1628 _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]); 1629 _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path); 1630 _iqk_macbb_setting(rtwdev, phy_idx, path); 1631 _iqk_preset(rtwdev, path); 1632 _iqk_start_iqk(rtwdev, phy_idx, path); 1633 _iqk_restore(rtwdev, path); 1634 _iqk_afebb_restore(rtwdev, phy_idx, path); 1635 _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]); 1636 _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path); 1637 1638 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); 1639 } 1640 1641 static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force) 1642 { 1643 u8 kpath = _kpath(rtwdev, phy_idx); 1644 1645 switch (kpath) { 1646 case RF_A: 1647 _doiqk(rtwdev, force, phy_idx, RF_PATH_A); 1648 break; 1649 case RF_B: 1650 _doiqk(rtwdev, force, phy_idx, RF_PATH_B); 1651 break; 1652 case RF_AB: 1653 _doiqk(rtwdev, force, phy_idx, RF_PATH_A); 1654 _doiqk(rtwdev, force, phy_idx, RF_PATH_B); 1655 break; 1656 default: 1657 break; 1658 } 1659 } 1660 1661 static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[], 1662 u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path) 1663 { 1664 u8 i; 1665 1666 for (i = 0; i < RTW8852B_DPK_KIP_REG_NUM; i++) { 1667 reg_bkup[path][i] = 1668 rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD); 1669 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n", 1670 reg[i] + (path << 8), reg_bkup[path][i]); 1671 } 1672 } 1673 1674 static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[], 1675 const u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path) 1676 { 1677 u8 i; 1678 1679 for (i = 0; i < RTW8852B_DPK_KIP_REG_NUM; i++) { 1680 rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD, 1681 reg_bkup[path][i]); 1682 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n", 1683 reg[i] + (path << 8), reg_bkup[path][i]); 1684 } 1685 } 1686 1687 static u8 _dpk_order_convert(struct rtw89_dev *rtwdev) 1688 { 1689 u8 order; 1690 u8 val; 1691 1692 order = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP); 1693 val = 0x3 >> order; 1694 1695 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val); 1696 1697 return val; 1698 } 1699 1700 static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off) 1701 { 1702 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1703 u8 val, kidx = dpk->cur_idx[path]; 1704 1705 val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok; 1706 1707 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 1708 MASKBYTE3, _dpk_order_convert(rtwdev) << 1 | val); 1709 1710 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path, 1711 kidx, dpk->is_dpk_enable && !off ? "enable" : "disable"); 1712 } 1713 1714 static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1715 enum rtw89_rf_path path, enum rtw8852b_dpk_id id) 1716 { 1717 u16 dpk_cmd; 1718 u32 val; 1719 int ret; 1720 1721 dpk_cmd = (id << 8) | (0x19 + (path << 4)); 1722 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd); 1723 1724 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 1725 1, 20000, false, 1726 rtwdev, 0xbff8, MASKBYTE0); 1727 if (ret) 1728 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 20ms!!!!\n"); 1729 1730 udelay(1); 1731 1732 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00030000); 1733 1734 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000, 1735 1, 2000, false, 1736 rtwdev, 0x80fc, MASKLWORD); 1737 if (ret) 1738 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 20ms!!!!\n"); 1739 1740 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0); 1741 1742 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1743 "[DPK] one-shot for %s = 0x%x\n", 1744 id == 0x06 ? "LBK_RXIQK" : 1745 id == 0x10 ? "SYNC" : 1746 id == 0x11 ? "MDPK_IDL" : 1747 id == 0x12 ? "MDPK_MPA" : 1748 id == 0x13 ? "GAIN_LOSS" : 1749 id == 0x14 ? "PWR_CAL" : 1750 id == 0x15 ? "DPK_RXAGC" : 1751 id == 0x16 ? "KIP_PRESET" : 1752 id == 0x17 ? "KIP_RESTORE" : "DPK_TXAGC", 1753 dpk_cmd); 1754 } 1755 1756 static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1757 enum rtw89_rf_path path) 1758 { 1759 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3); 1760 _set_rx_dck(rtwdev, phy, path); 1761 } 1762 1763 static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1764 enum rtw89_rf_path path) 1765 { 1766 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1767 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1768 1769 u8 kidx = dpk->cur_idx[path]; 1770 1771 dpk->bp[path][kidx].band = chan->band_type; 1772 dpk->bp[path][kidx].ch = chan->channel; 1773 dpk->bp[path][kidx].bw = chan->band_width; 1774 1775 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1776 "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n", 1777 path, dpk->cur_idx[path], phy, 1778 rtwdev->is_tssi_mode[path] ? "on" : "off", 1779 rtwdev->dbcc_en ? "on" : "off", 1780 dpk->bp[path][kidx].band == 0 ? "2G" : 1781 dpk->bp[path][kidx].band == 1 ? "5G" : "6G", 1782 dpk->bp[path][kidx].ch, 1783 dpk->bp[path][kidx].bw == 0 ? "20M" : 1784 dpk->bp[path][kidx].bw == 1 ? "40M" : "80M"); 1785 } 1786 1787 static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev, 1788 enum rtw89_phy_idx phy, 1789 enum rtw89_rf_path path, u8 kpath) 1790 { 1791 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1792 1793 rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_defs_tbl); 1794 1795 if (chan->band_width == RTW89_CHANNEL_WIDTH_80) { 1796 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x1); 1797 rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, B_PATH1_BW_SEL_EX, 0x1); 1798 } 1799 1800 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1801 "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath); 1802 } 1803 1804 static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, 1805 enum rtw89_phy_idx phy, 1806 enum rtw89_rf_path path, u8 kpath) 1807 { 1808 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1809 1810 rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_restore_defs_tbl); 1811 1812 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1813 "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath); 1814 1815 if (chan->band_width == RTW89_CHANNEL_WIDTH_80) { 1816 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x0); 1817 rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, B_PATH1_BW_SEL_EX, 0x0); 1818 } 1819 } 1820 1821 static void _dpk_tssi_pause(struct rtw89_dev *rtwdev, 1822 enum rtw89_rf_path path, bool is_pause) 1823 { 1824 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), 1825 B_P0_TSSI_TRK_EN, is_pause); 1826 1827 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path, 1828 is_pause ? "pause" : "resume"); 1829 } 1830 1831 static void _dpk_kip_restore(struct rtw89_dev *rtwdev, 1832 enum rtw89_rf_path path) 1833 { 1834 rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_kip_defs_tbl); 1835 1836 if (rtwdev->hal.cv > CHIP_CAV) 1837 rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), B_DPD_COM_OF, 0x1); 1838 1839 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path); 1840 } 1841 1842 static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1843 enum rtw89_rf_path path) 1844 { 1845 u8 cur_rxbb; 1846 u32 tmp; 1847 1848 cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB); 1849 1850 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1); 1851 rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0); 1852 1853 tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); 1854 rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp); 1855 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd); 1856 rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1); 1857 1858 if (cur_rxbb >= 0x11) 1859 rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13); 1860 else if (cur_rxbb <= 0xa) 1861 rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00); 1862 else 1863 rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05); 1864 1865 rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0); 1866 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); 1867 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014); 1868 udelay(70); 1869 1870 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); 1871 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025); 1872 1873 _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK); 1874 1875 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path, 1876 rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD)); 1877 1878 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); 1879 rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0); 1880 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0); 1881 rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0); 1882 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1); 1883 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5); 1884 } 1885 1886 static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path) 1887 { 1888 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1889 1890 rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1); 1891 rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x0); 1892 rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1); 1893 1894 udelay(200); 1895 1896 dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, path, RR_TM, RR_TM_VAL); 1897 1898 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n", 1899 dpk->bp[path][kidx].ther_dpk); 1900 } 1901 1902 static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain, 1903 enum rtw89_rf_path path, u8 kidx) 1904 { 1905 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1906 1907 if (dpk->bp[path][kidx].band == RTW89_BAND_2G) { 1908 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220); 1909 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2); 1910 rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1); 1911 rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1); 1912 } else { 1913 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220); 1914 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5); 1915 rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1); 1916 rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1); 1917 rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC); 1918 rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0); 1919 rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800); 1920 } 1921 1922 rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1); 1923 rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1); 1924 rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0); 1925 1926 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1927 "[DPK] ARF 0x0/0x11/0x1a = 0x%x/ 0x%x/ 0x%x\n", 1928 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK), 1929 rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK), 1930 rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK)); 1931 } 1932 1933 static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev, 1934 enum rtw89_rf_path path, bool is_bypass) 1935 { 1936 if (is_bypass) { 1937 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), 1938 B_RXIQC_BYPASS2, 0x1); 1939 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), 1940 B_RXIQC_BYPASS, 0x1); 1941 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1942 "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path, 1943 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), 1944 MASKDWORD)); 1945 } else { 1946 rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS2); 1947 rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS); 1948 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1949 "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path, 1950 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), 1951 MASKDWORD)); 1952 } 1953 } 1954 1955 static 1956 void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 1957 { 1958 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1959 1960 if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) 1961 rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F); 1962 else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) 1963 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2); 1964 else 1965 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1); 1966 1967 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n", 1968 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" : 1969 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M"); 1970 } 1971 1972 static void _dpk_table_select(struct rtw89_dev *rtwdev, 1973 enum rtw89_rf_path path, u8 kidx, u8 gain) 1974 { 1975 u8 val; 1976 1977 val = 0x80 + kidx * 0x20 + gain * 0x10; 1978 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val); 1979 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1980 "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx, 1981 gain, val); 1982 } 1983 1984 static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 1985 { 1986 #define DPK_SYNC_TH_DC_I 200 1987 #define DPK_SYNC_TH_DC_Q 200 1988 #define DPK_SYNC_TH_CORR 170 1989 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1990 u16 dc_i, dc_q; 1991 u8 corr_val, corr_idx; 1992 1993 rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL); 1994 1995 corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI); 1996 corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV); 1997 1998 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1999 "[DPK] S%d Corr_idx / Corr_val = %d / %d\n", 2000 path, corr_idx, corr_val); 2001 2002 dpk->corr_idx[path][kidx] = corr_idx; 2003 dpk->corr_val[path][kidx] = corr_val; 2004 2005 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9); 2006 2007 dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); 2008 dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ); 2009 2010 dc_i = abs(sign_extend32(dc_i, 11)); 2011 dc_q = abs(sign_extend32(dc_q, 11)); 2012 2013 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n", 2014 path, dc_i, dc_q); 2015 2016 dpk->dc_i[path][kidx] = dc_i; 2017 dpk->dc_q[path][kidx] = dc_q; 2018 2019 if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q || 2020 corr_val < DPK_SYNC_TH_CORR) 2021 return true; 2022 else 2023 return false; 2024 } 2025 2026 static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2027 enum rtw89_rf_path path, u8 kidx) 2028 { 2029 _dpk_one_shot(rtwdev, phy, path, SYNC); 2030 2031 return _dpk_sync_check(rtwdev, path, kidx); 2032 } 2033 2034 static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev) 2035 { 2036 u16 dgain; 2037 2038 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); 2039 2040 dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); 2041 2042 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain); 2043 2044 return dgain; 2045 } 2046 2047 static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain) 2048 { 2049 static const u16 bnd[15] = { 2050 0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556, 2051 0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220 2052 }; 2053 s8 offset; 2054 2055 if (dgain >= bnd[0]) 2056 offset = 0x6; 2057 else if (bnd[0] > dgain && dgain >= bnd[1]) 2058 offset = 0x6; 2059 else if (bnd[1] > dgain && dgain >= bnd[2]) 2060 offset = 0x5; 2061 else if (bnd[2] > dgain && dgain >= bnd[3]) 2062 offset = 0x4; 2063 else if (bnd[3] > dgain && dgain >= bnd[4]) 2064 offset = 0x3; 2065 else if (bnd[4] > dgain && dgain >= bnd[5]) 2066 offset = 0x2; 2067 else if (bnd[5] > dgain && dgain >= bnd[6]) 2068 offset = 0x1; 2069 else if (bnd[6] > dgain && dgain >= bnd[7]) 2070 offset = 0x0; 2071 else if (bnd[7] > dgain && dgain >= bnd[8]) 2072 offset = 0xff; 2073 else if (bnd[8] > dgain && dgain >= bnd[9]) 2074 offset = 0xfe; 2075 else if (bnd[9] > dgain && dgain >= bnd[10]) 2076 offset = 0xfd; 2077 else if (bnd[10] > dgain && dgain >= bnd[11]) 2078 offset = 0xfc; 2079 else if (bnd[11] > dgain && dgain >= bnd[12]) 2080 offset = 0xfb; 2081 else if (bnd[12] > dgain && dgain >= bnd[13]) 2082 offset = 0xfa; 2083 else if (bnd[13] > dgain && dgain >= bnd[14]) 2084 offset = 0xf9; 2085 else if (bnd[14] > dgain) 2086 offset = 0xf8; 2087 else 2088 offset = 0x0; 2089 2090 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain offset = %d\n", offset); 2091 2092 return offset; 2093 } 2094 2095 static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev) 2096 { 2097 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6); 2098 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1); 2099 2100 return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL); 2101 } 2102 2103 static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2104 enum rtw89_rf_path path, u8 kidx) 2105 { 2106 _dpk_table_select(rtwdev, path, kidx, 1); 2107 _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS); 2108 } 2109 2110 static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2111 enum rtw89_rf_path path, u8 kidx) 2112 { 2113 _dpk_tpg_sel(rtwdev, path, kidx); 2114 _dpk_one_shot(rtwdev, phy, path, KIP_PRESET); 2115 } 2116 2117 static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev, 2118 enum rtw89_rf_path path) 2119 { 2120 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); 2121 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a); 2122 rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08); 2123 2124 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n"); 2125 } 2126 2127 static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2128 enum rtw89_rf_path path, u8 txagc) 2129 { 2130 rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, txagc); 2131 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); 2132 _dpk_one_shot(rtwdev, phy, path, DPK_TXAGC); 2133 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); 2134 2135 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc); 2136 } 2137 2138 static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2139 enum rtw89_rf_path path) 2140 { 2141 u32 tmp; 2142 2143 tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); 2144 rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, tmp); 2145 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); 2146 _dpk_one_shot(rtwdev, phy, path, DPK_RXAGC); 2147 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); 2148 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL_V1, 0x8); 2149 2150 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2151 "[DPK] set RXBB = 0x%x (RF0x0[9:5] = 0x%x)\n", 2152 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB_V1), 2153 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB)); 2154 } 2155 2156 static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2157 enum rtw89_rf_path path, s8 gain_offset) 2158 { 2159 u8 txagc; 2160 2161 txagc = rtw89_read_rf(rtwdev, path, RR_TXAGC, RFREG_MASK); 2162 2163 if (txagc - gain_offset < DPK_TXAGC_LOWER) 2164 txagc = DPK_TXAGC_LOWER; 2165 else if (txagc - gain_offset > DPK_TXAGC_UPPER) 2166 txagc = DPK_TXAGC_UPPER; 2167 else 2168 txagc = txagc - gain_offset; 2169 2170 _dpk_kip_set_txagc(rtwdev, phy, path, txagc); 2171 2172 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n", 2173 gain_offset, txagc); 2174 return txagc; 2175 } 2176 2177 static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check) 2178 { 2179 u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0; 2180 u8 i; 2181 2182 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06); 2183 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0); 2184 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08); 2185 2186 if (is_check) { 2187 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00); 2188 val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); 2189 val1_i = abs(sign_extend32(val1_i, 11)); 2190 val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); 2191 val1_q = abs(sign_extend32(val1_q, 11)); 2192 2193 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f); 2194 val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); 2195 val2_i = abs(sign_extend32(val2_i, 11)); 2196 val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); 2197 val2_q = abs(sign_extend32(val2_q, 11)); 2198 2199 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n", 2200 phy_div(val1_i * val1_i + val1_q * val1_q, 2201 val2_i * val2_i + val2_q * val2_q)); 2202 } else { 2203 for (i = 0; i < 32; i++) { 2204 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i); 2205 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2206 "[DPK] PAS_Read[%02d]= 0x%08x\n", i, 2207 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD)); 2208 } 2209 } 2210 2211 if (val1_i * val1_i + val1_q * val1_q >= 2212 (val2_i * val2_i + val2_q * val2_q) * 8 / 5) 2213 return true; 2214 2215 return false; 2216 } 2217 2218 static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2219 enum rtw89_rf_path path, u8 kidx, u8 init_txagc, 2220 bool loss_only) 2221 { 2222 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2223 u8 step = DPK_AGC_STEP_SYNC_DGAIN; 2224 u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0; 2225 u8 goout = 0, agc_cnt = 0, limited_rxbb = 0; 2226 u16 dgain = 0; 2227 s8 offset; 2228 int limit = 200; 2229 2230 tmp_txagc = init_txagc; 2231 2232 do { 2233 switch (step) { 2234 case DPK_AGC_STEP_SYNC_DGAIN: 2235 if (_dpk_sync(rtwdev, phy, path, kidx)) { 2236 tmp_txagc = 0xff; 2237 goout = 1; 2238 break; 2239 } 2240 2241 dgain = _dpk_dgain_read(rtwdev); 2242 2243 if (loss_only == 1 || limited_rxbb == 1) 2244 step = DPK_AGC_STEP_GAIN_LOSS_IDX; 2245 else 2246 step = DPK_AGC_STEP_GAIN_ADJ; 2247 break; 2248 2249 case DPK_AGC_STEP_GAIN_ADJ: 2250 tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, 2251 RFREG_MASKRXBB); 2252 offset = _dpk_dgain_mapping(rtwdev, dgain); 2253 2254 if (tmp_rxbb + offset > 0x1f) { 2255 tmp_rxbb = 0x1f; 2256 limited_rxbb = 1; 2257 } else if (tmp_rxbb + offset < 0) { 2258 tmp_rxbb = 0; 2259 limited_rxbb = 1; 2260 } else { 2261 tmp_rxbb = tmp_rxbb + offset; 2262 } 2263 2264 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB, 2265 tmp_rxbb); 2266 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2267 "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb); 2268 if (offset || agc_cnt == 0) { 2269 if (chan->band_width < RTW89_CHANNEL_WIDTH_80) 2270 _dpk_bypass_rxcfir(rtwdev, path, true); 2271 else 2272 _dpk_lbk_rxiqk(rtwdev, phy, path); 2273 } 2274 if (dgain > 1922 || dgain < 342) 2275 step = DPK_AGC_STEP_SYNC_DGAIN; 2276 else 2277 step = DPK_AGC_STEP_GAIN_LOSS_IDX; 2278 2279 agc_cnt++; 2280 break; 2281 2282 case DPK_AGC_STEP_GAIN_LOSS_IDX: 2283 _dpk_gainloss(rtwdev, phy, path, kidx); 2284 tmp_gl_idx = _dpk_gainloss_read(rtwdev); 2285 2286 if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) || 2287 tmp_gl_idx >= 7) 2288 step = DPK_AGC_STEP_GL_GT_CRITERION; 2289 else if (tmp_gl_idx == 0) 2290 step = DPK_AGC_STEP_GL_LT_CRITERION; 2291 else 2292 step = DPK_AGC_STEP_SET_TX_GAIN; 2293 break; 2294 2295 case DPK_AGC_STEP_GL_GT_CRITERION: 2296 if (tmp_txagc == 0x2e) { 2297 goout = 1; 2298 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2299 "[DPK] Txagc@lower bound!!\n"); 2300 } else { 2301 tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0x3); 2302 } 2303 step = DPK_AGC_STEP_GAIN_LOSS_IDX; 2304 agc_cnt++; 2305 break; 2306 2307 case DPK_AGC_STEP_GL_LT_CRITERION: 2308 if (tmp_txagc == 0x3f) { 2309 goout = 1; 2310 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2311 "[DPK] Txagc@upper bound!!\n"); 2312 } else { 2313 tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0xfe); 2314 } 2315 step = DPK_AGC_STEP_GAIN_LOSS_IDX; 2316 agc_cnt++; 2317 break; 2318 case DPK_AGC_STEP_SET_TX_GAIN: 2319 tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_gl_idx); 2320 goout = 1; 2321 agc_cnt++; 2322 break; 2323 2324 default: 2325 goout = 1; 2326 break; 2327 } 2328 } while (!goout && agc_cnt < 6 && limit-- > 0); 2329 2330 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2331 "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc, 2332 tmp_rxbb); 2333 2334 return tmp_txagc; 2335 } 2336 2337 static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order) 2338 { 2339 switch (order) { 2340 case 0: 2341 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order); 2342 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3); 2343 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1); 2344 break; 2345 case 1: 2346 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order); 2347 rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN); 2348 rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN); 2349 break; 2350 case 2: 2351 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order); 2352 rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN); 2353 rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN); 2354 break; 2355 default: 2356 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2357 "[DPK] Wrong MDPD order!!(0x%x)\n", order); 2358 break; 2359 } 2360 2361 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2362 "[DPK] Set MDPD order to 0x%x for IDL\n", order); 2363 } 2364 2365 static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2366 enum rtw89_rf_path path, u8 kidx, u8 gain) 2367 { 2368 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2369 2370 if (dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80 && 2371 dpk->bp[path][kidx].band == RTW89_BAND_5G) 2372 _dpk_set_mdpd_para(rtwdev, 0x2); 2373 else 2374 _dpk_set_mdpd_para(rtwdev, 0x0); 2375 2376 _dpk_one_shot(rtwdev, phy, path, MDPK_IDL); 2377 } 2378 2379 static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2380 enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc) 2381 { 2382 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2383 const u16 pwsf = 0x78; 2384 u8 gs = dpk->dpk_gs[phy]; 2385 2386 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), 2387 B_COEF_SEL_MDPD, kidx); 2388 2389 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2390 "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc, 2391 pwsf, gs); 2392 2393 dpk->bp[path][kidx].txagc_dpk = txagc; 2394 rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8), 2395 0x3F << ((gain << 3) + (kidx << 4)), txagc); 2396 2397 dpk->bp[path][kidx].pwsf = pwsf; 2398 rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2), 2399 0x1FF << (gain << 4), pwsf); 2400 2401 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1); 2402 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0); 2403 2404 dpk->bp[path][kidx].gs = gs; 2405 if (dpk->dpk_gs[phy] == 0x7f) 2406 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 2407 MASKDWORD, 0x007f7f7f); 2408 else 2409 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 2410 MASKDWORD, 0x005b5b5b); 2411 2412 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 2413 B_DPD_ORDER_V1, _dpk_order_convert(rtwdev)); 2414 rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD, 0x0); 2415 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, 0x0); 2416 } 2417 2418 static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2419 enum rtw89_rf_path path) 2420 { 2421 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2422 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2423 bool is_reload = false; 2424 u8 idx, cur_band, cur_ch; 2425 2426 cur_band = chan->band_type; 2427 cur_ch = chan->channel; 2428 2429 for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) { 2430 if (cur_band != dpk->bp[path][idx].band || 2431 cur_ch != dpk->bp[path][idx].ch) 2432 continue; 2433 2434 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), 2435 B_COEF_SEL_MDPD, idx); 2436 dpk->cur_idx[path] = idx; 2437 is_reload = true; 2438 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2439 "[DPK] reload S%d[%d] success\n", path, idx); 2440 } 2441 2442 return is_reload; 2443 } 2444 2445 static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2446 enum rtw89_rf_path path, u8 gain) 2447 { 2448 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2449 u8 txagc = 0x38, kidx = dpk->cur_idx[path]; 2450 bool is_fail = false; 2451 2452 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2453 "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx); 2454 2455 _rfk_rf_direct_cntrl(rtwdev, path, false); 2456 _rfk_drf_direct_cntrl(rtwdev, path, false); 2457 2458 _dpk_kip_pwr_clk_on(rtwdev, path); 2459 _dpk_kip_set_txagc(rtwdev, phy, path, txagc); 2460 _dpk_rf_setting(rtwdev, gain, path, kidx); 2461 _dpk_rx_dck(rtwdev, phy, path); 2462 2463 _dpk_kip_preset(rtwdev, phy, path, kidx); 2464 _dpk_kip_set_rxagc(rtwdev, phy, path); 2465 _dpk_table_select(rtwdev, path, kidx, gain); 2466 2467 txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false); 2468 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust txagc = 0x%x\n", txagc); 2469 2470 if (txagc == 0xff) { 2471 is_fail = true; 2472 } else { 2473 _dpk_get_thermal(rtwdev, kidx, path); 2474 2475 _dpk_idl_mpa(rtwdev, phy, path, kidx, gain); 2476 2477 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); 2478 2479 _dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc); 2480 } 2481 2482 if (!is_fail) 2483 dpk->bp[path][kidx].path_ok = true; 2484 else 2485 dpk->bp[path][kidx].path_ok = false; 2486 2487 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx, 2488 is_fail ? "Check" : "Success"); 2489 2490 return is_fail; 2491 } 2492 2493 static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, 2494 enum rtw89_phy_idx phy, u8 kpath) 2495 { 2496 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2497 static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120}; 2498 u32 kip_bkup[RTW8852B_DPK_RF_PATH][RTW8852B_DPK_KIP_REG_NUM] = {}; 2499 u32 backup_rf_val[RTW8852B_DPK_RF_PATH][BACKUP_RF_REGS_NR]; 2500 u32 backup_bb_val[BACKUP_BB_REGS_NR]; 2501 bool is_fail = true, reloaded[RTW8852B_DPK_RF_PATH] = {}; 2502 u8 path; 2503 2504 if (dpk->is_dpk_reload_en) { 2505 for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) { 2506 reloaded[path] = _dpk_reload_check(rtwdev, phy, path); 2507 if (!reloaded[path] && dpk->bp[path][0].ch) 2508 dpk->cur_idx[path] = !dpk->cur_idx[path]; 2509 else 2510 _dpk_onoff(rtwdev, path, false); 2511 } 2512 } else { 2513 for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) 2514 dpk->cur_idx[path] = 0; 2515 } 2516 2517 _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]); 2518 2519 for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) { 2520 _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path); 2521 _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path); 2522 _dpk_information(rtwdev, phy, path); 2523 if (rtwdev->is_tssi_mode[path]) 2524 _dpk_tssi_pause(rtwdev, path, true); 2525 } 2526 2527 _dpk_bb_afe_setting(rtwdev, phy, path, kpath); 2528 2529 for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) { 2530 is_fail = _dpk_main(rtwdev, phy, path, 1); 2531 _dpk_onoff(rtwdev, path, is_fail); 2532 } 2533 2534 _dpk_bb_afe_restore(rtwdev, phy, path, kpath); 2535 _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]); 2536 2537 for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) { 2538 _dpk_kip_restore(rtwdev, path); 2539 _dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path); 2540 _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path); 2541 if (rtwdev->is_tssi_mode[path]) 2542 _dpk_tssi_pause(rtwdev, path, false); 2543 } 2544 } 2545 2546 static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 2547 { 2548 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2549 struct rtw89_fem_info *fem = &rtwdev->fem; 2550 2551 if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) { 2552 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2553 "[DPK] Skip DPK due to 2G_ext_PA exist!!\n"); 2554 return true; 2555 } else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) { 2556 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2557 "[DPK] Skip DPK due to 5G_ext_PA exist!!\n"); 2558 return true; 2559 } else if (fem->epa_6g && chan->band_type == RTW89_BAND_6G) { 2560 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2561 "[DPK] Skip DPK due to 6G_ext_PA exist!!\n"); 2562 return true; 2563 } 2564 2565 return false; 2566 } 2567 2568 static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 2569 { 2570 u8 path, kpath; 2571 2572 kpath = _kpath(rtwdev, phy); 2573 2574 for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) { 2575 if (kpath & BIT(path)) 2576 _dpk_onoff(rtwdev, path, true); 2577 } 2578 } 2579 2580 static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force) 2581 { 2582 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2583 "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n", 2584 RTW8852B_DPK_VER, rtwdev->hal.cv, 2585 RTW8852B_RF_REL_VERSION); 2586 2587 if (_dpk_bypass_check(rtwdev, phy)) 2588 _dpk_force_bypass(rtwdev, phy); 2589 else 2590 _dpk_cal_select(rtwdev, force, phy, RF_AB); 2591 } 2592 2593 static void _dpk_track(struct rtw89_dev *rtwdev) 2594 { 2595 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2596 s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst; 2597 s8 delta_ther[2] = {}; 2598 u8 trk_idx, txagc_rf; 2599 u8 path, kidx; 2600 u16 pwsf[2]; 2601 u8 cur_ther; 2602 u32 tmp; 2603 2604 for (path = 0; path < RF_PATH_NUM_8852B; path++) { 2605 kidx = dpk->cur_idx[path]; 2606 2607 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2608 "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n", 2609 path, kidx, dpk->bp[path][kidx].ch); 2610 2611 cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); 2612 2613 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2614 "[DPK_TRK] thermal now = %d\n", cur_ther); 2615 2616 if (dpk->bp[path][kidx].ch && cur_ther) 2617 delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther; 2618 2619 if (dpk->bp[path][kidx].band == RTW89_BAND_2G) 2620 delta_ther[path] = delta_ther[path] * 3 / 2; 2621 else 2622 delta_ther[path] = delta_ther[path] * 5 / 2; 2623 2624 txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 2625 0x0000003f); 2626 2627 if (rtwdev->is_tssi_mode[path]) { 2628 trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK); 2629 2630 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2631 "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n", 2632 txagc_rf, trk_idx); 2633 2634 txagc_bb = 2635 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 2636 MASKBYTE2); 2637 txagc_bb_tp = 2638 rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13), 2639 B_TXAGC_TP); 2640 2641 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2642 "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n", 2643 txagc_bb_tp, txagc_bb); 2644 2645 txagc_ofst = 2646 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 2647 MASKBYTE3); 2648 2649 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2650 "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n", 2651 txagc_ofst, delta_ther[path]); 2652 tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8), 2653 B_DPD_COM_OF); 2654 if (tmp == 0x1) { 2655 txagc_ofst = 0; 2656 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2657 "[DPK_TRK] HW txagc offset mode\n"); 2658 } 2659 2660 if (txagc_rf && cur_ther) 2661 ini_diff = txagc_ofst + (delta_ther[path]); 2662 2663 tmp = rtw89_phy_read32_mask(rtwdev, 2664 R_P0_TXDPD + (path << 13), 2665 B_P0_TXDPD); 2666 if (tmp == 0x0) { 2667 pwsf[0] = dpk->bp[path][kidx].pwsf + 2668 txagc_bb_tp - txagc_bb + ini_diff; 2669 pwsf[1] = dpk->bp[path][kidx].pwsf + 2670 txagc_bb_tp - txagc_bb + ini_diff; 2671 } else { 2672 pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff; 2673 pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff; 2674 } 2675 2676 } else { 2677 pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff; 2678 pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff; 2679 } 2680 2681 tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS); 2682 if (!tmp && txagc_rf) { 2683 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2684 "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n", 2685 pwsf[0], pwsf[1]); 2686 2687 rtw89_phy_write32_mask(rtwdev, 2688 R_DPD_BND + (path << 8) + (kidx << 2), 2689 B_DPD_BND_0, pwsf[0]); 2690 rtw89_phy_write32_mask(rtwdev, 2691 R_DPD_BND + (path << 8) + (kidx << 2), 2692 B_DPD_BND_1, pwsf[1]); 2693 } 2694 } 2695 } 2696 2697 static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 2698 { 2699 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2700 u8 tx_scale, ofdm_bkof, path, kpath; 2701 2702 kpath = _kpath(rtwdev, phy); 2703 2704 ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM); 2705 tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA); 2706 2707 if (ofdm_bkof + tx_scale >= 44) { 2708 /* move dpd backoff to bb, and set dpd backoff to 0 */ 2709 dpk->dpk_gs[phy] = 0x7f; 2710 for (path = 0; path < RF_PATH_NUM_8852B; path++) { 2711 if (!(kpath & BIT(path))) 2712 continue; 2713 2714 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8), 2715 B_DPD_CFG, 0x7f7f7f); 2716 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2717 "[RFK] Set S%d DPD backoff to 0dB\n", path); 2718 } 2719 } else { 2720 dpk->dpk_gs[phy] = 0x5b; 2721 } 2722 } 2723 2724 static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2725 enum rtw89_rf_path path) 2726 { 2727 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2728 enum rtw89_band band = chan->band_type; 2729 2730 if (band == RTW89_BAND_2G) 2731 rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1); 2732 else 2733 rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1); 2734 } 2735 2736 static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2737 enum rtw89_rf_path path) 2738 { 2739 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2740 enum rtw89_band band = chan->band_type; 2741 2742 rtw89_rfk_parser(rtwdev, &rtw8852b_tssi_sys_defs_tbl); 2743 2744 if (path == RF_PATH_A) 2745 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2746 &rtw8852b_tssi_sys_a_defs_2g_tbl, 2747 &rtw8852b_tssi_sys_a_defs_5g_tbl); 2748 else 2749 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2750 &rtw8852b_tssi_sys_b_defs_2g_tbl, 2751 &rtw8852b_tssi_sys_b_defs_5g_tbl); 2752 } 2753 2754 static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, 2755 enum rtw89_phy_idx phy, 2756 enum rtw89_rf_path path) 2757 { 2758 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 2759 &rtw8852b_tssi_init_txpwr_defs_a_tbl, 2760 &rtw8852b_tssi_init_txpwr_defs_b_tbl); 2761 } 2762 2763 static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev, 2764 enum rtw89_phy_idx phy, 2765 enum rtw89_rf_path path) 2766 { 2767 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 2768 &rtw8852b_tssi_init_txpwr_he_tb_defs_a_tbl, 2769 &rtw8852b_tssi_init_txpwr_he_tb_defs_b_tbl); 2770 } 2771 2772 static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2773 enum rtw89_rf_path path) 2774 { 2775 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 2776 &rtw8852b_tssi_dck_defs_a_tbl, 2777 &rtw8852b_tssi_dck_defs_b_tbl); 2778 } 2779 2780 static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2781 enum rtw89_rf_path path) 2782 { 2783 #define RTW8852B_TSSI_GET_VAL(ptr, idx) \ 2784 ({ \ 2785 s8 *__ptr = (ptr); \ 2786 u8 __idx = (idx), __i, __v; \ 2787 u32 __val = 0; \ 2788 for (__i = 0; __i < 4; __i++) { \ 2789 __v = (__ptr[__idx + __i]); \ 2790 __val |= (__v << (8 * __i)); \ 2791 } \ 2792 __val; \ 2793 }) 2794 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 2795 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2796 u8 ch = chan->channel; 2797 u8 subband = chan->subband_type; 2798 const s8 *thm_up_a = NULL; 2799 const s8 *thm_down_a = NULL; 2800 const s8 *thm_up_b = NULL; 2801 const s8 *thm_down_b = NULL; 2802 u8 thermal = 0xff; 2803 s8 thm_ofst[64] = {0}; 2804 u32 tmp = 0; 2805 u8 i, j; 2806 2807 switch (subband) { 2808 default: 2809 case RTW89_CH_2G: 2810 thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_2ga_p; 2811 thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_2ga_n; 2812 thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_2gb_p; 2813 thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_2gb_n; 2814 break; 2815 case RTW89_CH_5G_BAND_1: 2816 thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[0]; 2817 thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[0]; 2818 thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[0]; 2819 thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[0]; 2820 break; 2821 case RTW89_CH_5G_BAND_3: 2822 thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[1]; 2823 thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[1]; 2824 thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[1]; 2825 thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[1]; 2826 break; 2827 case RTW89_CH_5G_BAND_4: 2828 thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[2]; 2829 thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[2]; 2830 thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[2]; 2831 thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[2]; 2832 break; 2833 } 2834 2835 if (path == RF_PATH_A) { 2836 thermal = tssi_info->thermal[RF_PATH_A]; 2837 2838 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2839 "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal); 2840 2841 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0); 2842 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1); 2843 2844 if (thermal == 0xff) { 2845 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32); 2846 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32); 2847 2848 for (i = 0; i < 64; i += 4) { 2849 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0); 2850 2851 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2852 "[TSSI] write 0x%x val=0x%08x\n", 2853 R_P0_TSSI_BASE + i, 0x0); 2854 } 2855 2856 } else { 2857 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal); 2858 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 2859 thermal); 2860 2861 i = 0; 2862 for (j = 0; j < 32; j++) 2863 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 2864 -thm_down_a[i++] : 2865 -thm_down_a[DELTA_SWINGIDX_SIZE - 1]; 2866 2867 i = 1; 2868 for (j = 63; j >= 32; j--) 2869 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 2870 thm_up_a[i++] : 2871 thm_up_a[DELTA_SWINGIDX_SIZE - 1]; 2872 2873 for (i = 0; i < 64; i += 4) { 2874 tmp = RTW8852B_TSSI_GET_VAL(thm_ofst, i); 2875 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp); 2876 2877 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2878 "[TSSI] write 0x%x val=0x%08x\n", 2879 0x5c00 + i, tmp); 2880 } 2881 } 2882 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1); 2883 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0); 2884 2885 } else { 2886 thermal = tssi_info->thermal[RF_PATH_B]; 2887 2888 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2889 "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal); 2890 2891 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0); 2892 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1); 2893 2894 if (thermal == 0xff) { 2895 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32); 2896 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32); 2897 2898 for (i = 0; i < 64; i += 4) { 2899 rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0); 2900 2901 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2902 "[TSSI] write 0x%x val=0x%08x\n", 2903 0x7c00 + i, 0x0); 2904 } 2905 2906 } else { 2907 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal); 2908 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 2909 thermal); 2910 2911 i = 0; 2912 for (j = 0; j < 32; j++) 2913 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 2914 -thm_down_b[i++] : 2915 -thm_down_b[DELTA_SWINGIDX_SIZE - 1]; 2916 2917 i = 1; 2918 for (j = 63; j >= 32; j--) 2919 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 2920 thm_up_b[i++] : 2921 thm_up_b[DELTA_SWINGIDX_SIZE - 1]; 2922 2923 for (i = 0; i < 64; i += 4) { 2924 tmp = RTW8852B_TSSI_GET_VAL(thm_ofst, i); 2925 rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp); 2926 2927 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2928 "[TSSI] write 0x%x val=0x%08x\n", 2929 0x7c00 + i, tmp); 2930 } 2931 } 2932 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1); 2933 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0); 2934 } 2935 #undef RTW8852B_TSSI_GET_VAL 2936 } 2937 2938 static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2939 enum rtw89_rf_path path) 2940 { 2941 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 2942 &rtw8852b_tssi_dac_gain_defs_a_tbl, 2943 &rtw8852b_tssi_dac_gain_defs_b_tbl); 2944 } 2945 2946 static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2947 enum rtw89_rf_path path) 2948 { 2949 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2950 enum rtw89_band band = chan->band_type; 2951 2952 if (path == RF_PATH_A) 2953 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2954 &rtw8852b_tssi_slope_a_defs_2g_tbl, 2955 &rtw8852b_tssi_slope_a_defs_5g_tbl); 2956 else 2957 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2958 &rtw8852b_tssi_slope_b_defs_2g_tbl, 2959 &rtw8852b_tssi_slope_b_defs_5g_tbl); 2960 } 2961 2962 static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2963 enum rtw89_rf_path path, bool all) 2964 { 2965 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2966 enum rtw89_band band = chan->band_type; 2967 const struct rtw89_rfk_tbl *tbl = NULL; 2968 u8 ch = chan->channel; 2969 2970 if (path == RF_PATH_A) { 2971 if (band == RTW89_BAND_2G) { 2972 if (all) 2973 tbl = &rtw8852b_tssi_align_a_2g_all_defs_tbl; 2974 else 2975 tbl = &rtw8852b_tssi_align_a_2g_part_defs_tbl; 2976 } else if (ch >= 36 && ch <= 64) { 2977 if (all) 2978 tbl = &rtw8852b_tssi_align_a_5g1_all_defs_tbl; 2979 else 2980 tbl = &rtw8852b_tssi_align_a_5g1_part_defs_tbl; 2981 } else if (ch >= 100 && ch <= 144) { 2982 if (all) 2983 tbl = &rtw8852b_tssi_align_a_5g2_all_defs_tbl; 2984 else 2985 tbl = &rtw8852b_tssi_align_a_5g2_part_defs_tbl; 2986 } else if (ch >= 149 && ch <= 177) { 2987 if (all) 2988 tbl = &rtw8852b_tssi_align_a_5g3_all_defs_tbl; 2989 else 2990 tbl = &rtw8852b_tssi_align_a_5g3_part_defs_tbl; 2991 } 2992 } else { 2993 if (ch >= 1 && ch <= 14) { 2994 if (all) 2995 tbl = &rtw8852b_tssi_align_b_2g_all_defs_tbl; 2996 else 2997 tbl = &rtw8852b_tssi_align_b_2g_part_defs_tbl; 2998 } else if (ch >= 36 && ch <= 64) { 2999 if (all) 3000 tbl = &rtw8852b_tssi_align_b_5g1_all_defs_tbl; 3001 else 3002 tbl = &rtw8852b_tssi_align_b_5g1_part_defs_tbl; 3003 } else if (ch >= 100 && ch <= 144) { 3004 if (all) 3005 tbl = &rtw8852b_tssi_align_b_5g2_all_defs_tbl; 3006 else 3007 tbl = &rtw8852b_tssi_align_b_5g2_part_defs_tbl; 3008 } else if (ch >= 149 && ch <= 177) { 3009 if (all) 3010 tbl = &rtw8852b_tssi_align_b_5g3_all_defs_tbl; 3011 else 3012 tbl = &rtw8852b_tssi_align_b_5g3_part_defs_tbl; 3013 } 3014 } 3015 3016 if (tbl) 3017 rtw89_rfk_parser(rtwdev, tbl); 3018 } 3019 3020 static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3021 enum rtw89_rf_path path) 3022 { 3023 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 3024 &rtw8852b_tssi_slope_defs_a_tbl, 3025 &rtw8852b_tssi_slope_defs_b_tbl); 3026 } 3027 3028 static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3029 enum rtw89_rf_path path) 3030 { 3031 if (path == RF_PATH_A) 3032 rtw89_phy_write32_mask(rtwdev, R_P0_TSSIC, B_P0_TSSIC_BYPASS, 0x0); 3033 else 3034 rtw89_phy_write32_mask(rtwdev, R_P1_TSSIC, B_P1_TSSIC_BYPASS, 0x0); 3035 } 3036 3037 static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev, 3038 enum rtw89_phy_idx phy, 3039 enum rtw89_rf_path path) 3040 { 3041 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "======>%s path=%d\n", __func__, 3042 path); 3043 3044 if (path == RF_PATH_A) 3045 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_MIX, 0x010); 3046 else 3047 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_RFCTM_DEL, 0x010); 3048 } 3049 3050 static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 3051 { 3052 u8 i; 3053 3054 for (i = 0; i < RF_PATH_NUM_8852B; i++) { 3055 _tssi_set_tssi_track(rtwdev, phy, i); 3056 _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i); 3057 3058 if (i == RF_PATH_A) { 3059 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, 3060 B_P0_TSSI_MV_CLR, 0x0); 3061 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, 3062 B_P0_TSSI_EN, 0x0); 3063 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, 3064 B_P0_TSSI_EN, 0x1); 3065 rtw89_write_rf(rtwdev, i, RR_TXGA_V1, 3066 RR_TXGA_V1_TRK_EN, 0x1); 3067 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, 3068 B_P0_TSSI_RFC, 0x3); 3069 3070 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, 3071 B_P0_TSSI_OFT, 0xc0); 3072 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, 3073 B_P0_TSSI_OFT_EN, 0x0); 3074 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, 3075 B_P0_TSSI_OFT_EN, 0x1); 3076 3077 rtwdev->is_tssi_mode[RF_PATH_A] = true; 3078 } else { 3079 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, 3080 B_P1_TSSI_MV_CLR, 0x0); 3081 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, 3082 B_P1_TSSI_EN, 0x0); 3083 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, 3084 B_P1_TSSI_EN, 0x1); 3085 rtw89_write_rf(rtwdev, i, RR_TXGA_V1, 3086 RR_TXGA_V1_TRK_EN, 0x1); 3087 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, 3088 B_P1_TSSI_RFC, 0x3); 3089 3090 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, 3091 B_P1_TSSI_OFT, 0xc0); 3092 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, 3093 B_P1_TSSI_OFT_EN, 0x0); 3094 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, 3095 B_P1_TSSI_OFT_EN, 0x1); 3096 3097 rtwdev->is_tssi_mode[RF_PATH_B] = true; 3098 } 3099 } 3100 } 3101 3102 static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 3103 { 3104 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0); 3105 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x1); 3106 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1); 3107 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_EN, 0x0); 3108 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_RFC, 0x1); 3109 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_CLR, 0x1); 3110 3111 rtwdev->is_tssi_mode[RF_PATH_A] = false; 3112 rtwdev->is_tssi_mode[RF_PATH_B] = false; 3113 } 3114 3115 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch) 3116 { 3117 switch (ch) { 3118 case 1 ... 2: 3119 return 0; 3120 case 3 ... 5: 3121 return 1; 3122 case 6 ... 8: 3123 return 2; 3124 case 9 ... 11: 3125 return 3; 3126 case 12 ... 13: 3127 return 4; 3128 case 14: 3129 return 5; 3130 } 3131 3132 return 0; 3133 } 3134 3135 #define TSSI_EXTRA_GROUP_BIT (BIT(31)) 3136 #define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx)) 3137 #define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT) 3138 #define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT) 3139 #define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1) 3140 3141 static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch) 3142 { 3143 switch (ch) { 3144 case 1 ... 2: 3145 return 0; 3146 case 3 ... 5: 3147 return 1; 3148 case 6 ... 8: 3149 return 2; 3150 case 9 ... 11: 3151 return 3; 3152 case 12 ... 14: 3153 return 4; 3154 case 36 ... 40: 3155 return 5; 3156 case 41 ... 43: 3157 return TSSI_EXTRA_GROUP(5); 3158 case 44 ... 48: 3159 return 6; 3160 case 49 ... 51: 3161 return TSSI_EXTRA_GROUP(6); 3162 case 52 ... 56: 3163 return 7; 3164 case 57 ... 59: 3165 return TSSI_EXTRA_GROUP(7); 3166 case 60 ... 64: 3167 return 8; 3168 case 100 ... 104: 3169 return 9; 3170 case 105 ... 107: 3171 return TSSI_EXTRA_GROUP(9); 3172 case 108 ... 112: 3173 return 10; 3174 case 113 ... 115: 3175 return TSSI_EXTRA_GROUP(10); 3176 case 116 ... 120: 3177 return 11; 3178 case 121 ... 123: 3179 return TSSI_EXTRA_GROUP(11); 3180 case 124 ... 128: 3181 return 12; 3182 case 129 ... 131: 3183 return TSSI_EXTRA_GROUP(12); 3184 case 132 ... 136: 3185 return 13; 3186 case 137 ... 139: 3187 return TSSI_EXTRA_GROUP(13); 3188 case 140 ... 144: 3189 return 14; 3190 case 149 ... 153: 3191 return 15; 3192 case 154 ... 156: 3193 return TSSI_EXTRA_GROUP(15); 3194 case 157 ... 161: 3195 return 16; 3196 case 162 ... 164: 3197 return TSSI_EXTRA_GROUP(16); 3198 case 165 ... 169: 3199 return 17; 3200 case 170 ... 172: 3201 return TSSI_EXTRA_GROUP(17); 3202 case 173 ... 177: 3203 return 18; 3204 } 3205 3206 return 0; 3207 } 3208 3209 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch) 3210 { 3211 switch (ch) { 3212 case 1 ... 8: 3213 return 0; 3214 case 9 ... 14: 3215 return 1; 3216 case 36 ... 48: 3217 return 2; 3218 case 52 ... 64: 3219 return 3; 3220 case 100 ... 112: 3221 return 4; 3222 case 116 ... 128: 3223 return 5; 3224 case 132 ... 144: 3225 return 6; 3226 case 149 ... 177: 3227 return 7; 3228 } 3229 3230 return 0; 3231 } 3232 3233 static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3234 enum rtw89_rf_path path) 3235 { 3236 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3237 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 3238 u8 ch = chan->channel; 3239 u32 gidx, gidx_1st, gidx_2nd; 3240 s8 de_1st; 3241 s8 de_2nd; 3242 s8 val; 3243 3244 gidx = _tssi_get_ofdm_group(rtwdev, ch); 3245 3246 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3247 "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx); 3248 3249 if (IS_TSSI_EXTRA_GROUP(gidx)) { 3250 gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx); 3251 gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx); 3252 de_1st = tssi_info->tssi_mcs[path][gidx_1st]; 3253 de_2nd = tssi_info->tssi_mcs[path][gidx_2nd]; 3254 val = (de_1st + de_2nd) / 2; 3255 3256 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3257 "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", 3258 path, val, de_1st, de_2nd); 3259 } else { 3260 val = tssi_info->tssi_mcs[path][gidx]; 3261 3262 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3263 "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); 3264 } 3265 3266 return val; 3267 } 3268 3269 static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3270 enum rtw89_rf_path path) 3271 { 3272 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3273 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 3274 u8 ch = chan->channel; 3275 u32 tgidx, tgidx_1st, tgidx_2nd; 3276 s8 tde_1st; 3277 s8 tde_2nd; 3278 s8 val; 3279 3280 tgidx = _tssi_get_trim_group(rtwdev, ch); 3281 3282 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3283 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", 3284 path, tgidx); 3285 3286 if (IS_TSSI_EXTRA_GROUP(tgidx)) { 3287 tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx); 3288 tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx); 3289 tde_1st = tssi_info->tssi_trim[path][tgidx_1st]; 3290 tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd]; 3291 val = (tde_1st + tde_2nd) / 2; 3292 3293 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3294 "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", 3295 path, val, tde_1st, tde_2nd); 3296 } else { 3297 val = tssi_info->tssi_trim[path][tgidx]; 3298 3299 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3300 "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", 3301 path, val); 3302 } 3303 3304 return val; 3305 } 3306 3307 static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 3308 { 3309 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3310 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 3311 u8 ch = chan->channel; 3312 u8 gidx; 3313 s8 ofdm_de; 3314 s8 trim_de; 3315 s32 val; 3316 u32 i; 3317 3318 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n", 3319 phy, ch); 3320 3321 for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) { 3322 gidx = _tssi_get_cck_group(rtwdev, ch); 3323 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); 3324 val = tssi_info->tssi_cck[i][gidx] + trim_de; 3325 3326 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3327 "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n", 3328 i, gidx, tssi_info->tssi_cck[i][gidx], trim_de); 3329 3330 rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val); 3331 rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val); 3332 3333 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3334 "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n", 3335 _tssi_de_cck_long[i], 3336 rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i], 3337 _TSSI_DE_MASK)); 3338 3339 ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i); 3340 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); 3341 val = ofdm_de + trim_de; 3342 3343 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3344 "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n", 3345 i, ofdm_de, trim_de); 3346 3347 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val); 3348 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val); 3349 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val); 3350 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val); 3351 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val); 3352 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val); 3353 3354 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3355 "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n", 3356 _tssi_de_mcs_20m[i], 3357 rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i], 3358 _TSSI_DE_MASK)); 3359 } 3360 } 3361 3362 static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 3363 { 3364 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3365 "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n" 3366 "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n", 3367 R_TSSI_PA_K1 + (path << 13), 3368 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K1 + (path << 13), MASKDWORD), 3369 R_TSSI_PA_K2 + (path << 13), 3370 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K2 + (path << 13), MASKDWORD), 3371 R_P0_TSSI_ALIM1 + (path << 13), 3372 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD), 3373 R_P0_TSSI_ALIM3 + (path << 13), 3374 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD), 3375 R_TSSI_PA_K5 + (path << 13), 3376 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K5 + (path << 13), MASKDWORD), 3377 R_P0_TSSI_ALIM2 + (path << 13), 3378 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD), 3379 R_P0_TSSI_ALIM4 + (path << 13), 3380 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD), 3381 R_TSSI_PA_K8 + (path << 13), 3382 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K8 + (path << 13), MASKDWORD)); 3383 } 3384 3385 static void _tssi_alimentk_done(struct rtw89_dev *rtwdev, 3386 enum rtw89_phy_idx phy, enum rtw89_rf_path path) 3387 { 3388 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3389 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 3390 u8 channel = chan->channel; 3391 u8 band; 3392 3393 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3394 "======>%s phy=%d path=%d\n", __func__, phy, path); 3395 3396 if (channel >= 1 && channel <= 14) 3397 band = TSSI_ALIMK_2G; 3398 else if (channel >= 36 && channel <= 64) 3399 band = TSSI_ALIMK_5GL; 3400 else if (channel >= 100 && channel <= 144) 3401 band = TSSI_ALIMK_5GM; 3402 else if (channel >= 149 && channel <= 177) 3403 band = TSSI_ALIMK_5GH; 3404 else 3405 band = TSSI_ALIMK_2G; 3406 3407 if (tssi_info->alignment_done[path][band]) { 3408 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD, 3409 tssi_info->alignment_value[path][band][0]); 3410 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD, 3411 tssi_info->alignment_value[path][band][1]); 3412 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD, 3413 tssi_info->alignment_value[path][band][2]); 3414 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD, 3415 tssi_info->alignment_value[path][band][3]); 3416 } 3417 3418 _tssi_alimentk_dump_result(rtwdev, path); 3419 } 3420 3421 static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3422 enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm, 3423 u8 enable) 3424 { 3425 enum rtw89_rf_path_bit rx_path; 3426 3427 if (path == RF_PATH_A) 3428 rx_path = RF_A; 3429 else if (path == RF_PATH_B) 3430 rx_path = RF_B; 3431 else if (path == RF_PATH_AB) 3432 rx_path = RF_AB; 3433 else 3434 rx_path = RF_ABCD; /* don't change path, but still set others */ 3435 3436 if (enable) { 3437 rtw8852bx_bb_set_plcp_tx(rtwdev); 3438 rtw8852bx_bb_cfg_tx_path(rtwdev, path); 3439 rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path); 3440 rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy); 3441 } 3442 3443 rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy); 3444 } 3445 3446 static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev, 3447 enum rtw89_phy_idx phy, const u32 reg[], 3448 u32 reg_backup[], u32 reg_num) 3449 { 3450 u32 i; 3451 3452 for (i = 0; i < reg_num; i++) { 3453 reg_backup[i] = rtw89_phy_read32_mask(rtwdev, reg[i], MASKDWORD); 3454 3455 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3456 "[TSSI] Backup BB 0x%x = 0x%x\n", reg[i], 3457 reg_backup[i]); 3458 } 3459 } 3460 3461 static void _tssi_reload_bb_registers(struct rtw89_dev *rtwdev, 3462 enum rtw89_phy_idx phy, const u32 reg[], 3463 u32 reg_backup[], u32 reg_num) 3464 3465 { 3466 u32 i; 3467 3468 for (i = 0; i < reg_num; i++) { 3469 rtw89_phy_write32_mask(rtwdev, reg[i], MASKDWORD, reg_backup[i]); 3470 3471 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3472 "[TSSI] Reload BB 0x%x = 0x%x\n", reg[i], 3473 reg_backup[i]); 3474 } 3475 } 3476 3477 static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel) 3478 { 3479 u8 channel_index; 3480 3481 if (channel >= 1 && channel <= 14) 3482 channel_index = channel - 1; 3483 else if (channel >= 36 && channel <= 64) 3484 channel_index = (channel - 36) / 2 + 14; 3485 else if (channel >= 100 && channel <= 144) 3486 channel_index = ((channel - 100) / 2) + 15 + 14; 3487 else if (channel >= 149 && channel <= 177) 3488 channel_index = ((channel - 149) / 2) + 38 + 14; 3489 else 3490 channel_index = 0; 3491 3492 return channel_index; 3493 } 3494 3495 static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3496 enum rtw89_rf_path path, const s16 *power, 3497 u32 *tssi_cw_rpt) 3498 { 3499 u32 tx_counter, tx_counter_tmp; 3500 const int retry = 100; 3501 u32 tmp; 3502 int j, k; 3503 3504 for (j = 0; j < RTW8852B_TSSI_PATH_NR; j++) { 3505 rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x0); 3506 rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x1); 3507 3508 tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); 3509 3510 tmp = rtw89_phy_read32_mask(rtwdev, _tssi_trigger[path], MASKDWORD); 3511 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3512 "[TSSI PA K] 0x%x = 0x%08x path=%d\n", 3513 _tssi_trigger[path], tmp, path); 3514 3515 if (j == 0) 3516 _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true); 3517 else 3518 _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true); 3519 3520 tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); 3521 tx_counter_tmp -= tx_counter; 3522 3523 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3524 "[TSSI PA K] First HWTXcounter=%d path=%d\n", 3525 tx_counter_tmp, path); 3526 3527 for (k = 0; k < retry; k++) { 3528 tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], 3529 B_TSSI_CWRPT_RDY); 3530 if (tmp) 3531 break; 3532 3533 udelay(30); 3534 3535 tx_counter_tmp = 3536 rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); 3537 tx_counter_tmp -= tx_counter; 3538 3539 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3540 "[TSSI PA K] Flow k = %d HWTXcounter=%d path=%d\n", 3541 k, tx_counter_tmp, path); 3542 } 3543 3544 if (k >= retry) { 3545 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3546 "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n", 3547 k, path); 3548 3549 _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false); 3550 return false; 3551 } 3552 3553 tssi_cw_rpt[j] = 3554 rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], B_TSSI_CWRPT); 3555 3556 _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false); 3557 3558 tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); 3559 tx_counter_tmp -= tx_counter; 3560 3561 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3562 "[TSSI PA K] Final HWTXcounter=%d path=%d\n", 3563 tx_counter_tmp, path); 3564 } 3565 3566 return true; 3567 } 3568 3569 static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3570 enum rtw89_rf_path path) 3571 { 3572 static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4, 3573 0x78e4, 0x49c0, 0x0d18, 0x0d80}; 3574 static const s16 power_2g[4] = {48, 20, 4, 4}; 3575 static const s16 power_5g[4] = {48, 20, 4, 4}; 3576 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3577 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 3578 s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3; 3579 u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0}; 3580 u8 channel = chan->channel; 3581 u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel); 3582 struct rtw8852bx_bb_tssi_bak tssi_bak; 3583 s32 aliment_diff, tssi_cw_default; 3584 u32 start_time, finish_time; 3585 u32 bb_reg_backup[8] = {0}; 3586 const s16 *power; 3587 u8 band; 3588 bool ok; 3589 u32 tmp; 3590 u8 j; 3591 3592 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3593 "======> %s channel=%d path=%d\n", __func__, channel, 3594 path); 3595 3596 if (tssi_info->check_backup_aligmk[path][ch_idx]) { 3597 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD, 3598 tssi_info->alignment_backup_by_ch[path][ch_idx][0]); 3599 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD, 3600 tssi_info->alignment_backup_by_ch[path][ch_idx][1]); 3601 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD, 3602 tssi_info->alignment_backup_by_ch[path][ch_idx][2]); 3603 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD, 3604 tssi_info->alignment_backup_by_ch[path][ch_idx][3]); 3605 3606 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3607 "======> %s Reload TSSI Alignment !!!\n", __func__); 3608 _tssi_alimentk_dump_result(rtwdev, path); 3609 return; 3610 } 3611 3612 start_time = ktime_get_ns(); 3613 3614 if (chan->band_type == RTW89_BAND_2G) 3615 power = power_2g; 3616 else 3617 power = power_5g; 3618 3619 if (channel >= 1 && channel <= 14) 3620 band = TSSI_ALIMK_2G; 3621 else if (channel >= 36 && channel <= 64) 3622 band = TSSI_ALIMK_5GL; 3623 else if (channel >= 100 && channel <= 144) 3624 band = TSSI_ALIMK_5GM; 3625 else if (channel >= 149 && channel <= 177) 3626 band = TSSI_ALIMK_5GH; 3627 else 3628 band = TSSI_ALIMK_2G; 3629 3630 rtw8852bx_bb_backup_tssi(rtwdev, phy, &tssi_bak); 3631 _tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup)); 3632 3633 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8); 3634 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x8); 3635 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2); 3636 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2); 3637 3638 ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt); 3639 if (!ok) 3640 goto out; 3641 3642 for (j = 0; j < RTW8852B_TSSI_PATH_NR; j++) { 3643 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3644 "[TSSI PA K] power[%d]=%d tssi_cw_rpt[%d]=%d\n", j, 3645 power[j], j, tssi_cw_rpt[j]); 3646 } 3647 3648 tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][1], 3649 _tssi_cw_default_mask[1]); 3650 tssi_cw_default = sign_extend32(tmp, 8); 3651 tssi_alim_offset_1 = tssi_cw_rpt[0] - ((power[0] - power[1]) * 2) - 3652 tssi_cw_rpt[1] + tssi_cw_default; 3653 aliment_diff = tssi_alim_offset_1 - tssi_cw_default; 3654 3655 tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][2], 3656 _tssi_cw_default_mask[2]); 3657 tssi_cw_default = sign_extend32(tmp, 8); 3658 tssi_alim_offset_2 = tssi_cw_default + aliment_diff; 3659 3660 tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][3], 3661 _tssi_cw_default_mask[3]); 3662 tssi_cw_default = sign_extend32(tmp, 8); 3663 tssi_alim_offset_3 = tssi_cw_default + aliment_diff; 3664 3665 if (path == RF_PATH_A) { 3666 tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) | 3667 FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) | 3668 FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3); 3669 3670 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM1, tmp); 3671 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2, B_P0_TSSI_ALIM2, tmp); 3672 3673 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3674 "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n", 3675 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3, B_P0_TSSI_ALIM31), 3676 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM11), 3677 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM12), 3678 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM13)); 3679 } else { 3680 tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) | 3681 FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) | 3682 FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3); 3683 3684 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM1, tmp); 3685 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM2, B_P1_TSSI_ALIM2, tmp); 3686 3687 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3688 "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n", 3689 rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM3, B_P1_TSSI_ALIM31), 3690 rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM11), 3691 rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM12), 3692 rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM13)); 3693 } 3694 3695 tssi_info->alignment_done[path][band] = true; 3696 tssi_info->alignment_value[path][band][0] = 3697 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD); 3698 tssi_info->alignment_value[path][band][1] = 3699 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD); 3700 tssi_info->alignment_value[path][band][2] = 3701 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD); 3702 tssi_info->alignment_value[path][band][3] = 3703 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD); 3704 3705 tssi_info->check_backup_aligmk[path][ch_idx] = true; 3706 tssi_info->alignment_backup_by_ch[path][ch_idx][0] = 3707 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD); 3708 tssi_info->alignment_backup_by_ch[path][ch_idx][1] = 3709 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD); 3710 tssi_info->alignment_backup_by_ch[path][ch_idx][2] = 3711 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD); 3712 tssi_info->alignment_backup_by_ch[path][ch_idx][3] = 3713 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD); 3714 3715 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3716 "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][0], 0x%x = 0x%08x\n", 3717 path, band, R_P0_TSSI_ALIM1 + (path << 13), 3718 tssi_info->alignment_value[path][band][0]); 3719 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3720 "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][1], 0x%x = 0x%08x\n", 3721 path, band, R_P0_TSSI_ALIM3 + (path << 13), 3722 tssi_info->alignment_value[path][band][1]); 3723 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3724 "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][2], 0x%x = 0x%08x\n", 3725 path, band, R_P0_TSSI_ALIM2 + (path << 13), 3726 tssi_info->alignment_value[path][band][2]); 3727 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3728 "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][3], 0x%x = 0x%08x\n", 3729 path, band, R_P0_TSSI_ALIM4 + (path << 13), 3730 tssi_info->alignment_value[path][band][3]); 3731 3732 out: 3733 _tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup)); 3734 rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak); 3735 rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0); 3736 3737 finish_time = ktime_get_ns(); 3738 tssi_info->tssi_alimk_time += finish_time - start_time; 3739 3740 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3741 "[TSSI PA K] %s processing time = %d ms\n", __func__, 3742 tssi_info->tssi_alimk_time); 3743 } 3744 3745 void rtw8852b_dpk_init(struct rtw89_dev *rtwdev) 3746 { 3747 _set_dpd_backoff(rtwdev, RTW89_PHY_0); 3748 } 3749 3750 void rtw8852b_rck(struct rtw89_dev *rtwdev) 3751 { 3752 u8 path; 3753 3754 for (path = 0; path < RF_PATH_NUM_8852B; path++) 3755 _rck(rtwdev, path); 3756 } 3757 3758 void rtw8852b_dack(struct rtw89_dev *rtwdev) 3759 { 3760 u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0); 3761 3762 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START); 3763 _dac_cal(rtwdev, false); 3764 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP); 3765 } 3766 3767 void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 3768 { 3769 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); 3770 u32 tx_en; 3771 3772 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START); 3773 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); 3774 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); 3775 3776 _iqk_init(rtwdev); 3777 _iqk(rtwdev, phy_idx, false); 3778 3779 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); 3780 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP); 3781 } 3782 3783 void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 3784 { 3785 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); 3786 u32 tx_en; 3787 3788 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START); 3789 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); 3790 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); 3791 3792 _rx_dck(rtwdev, phy_idx); 3793 3794 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); 3795 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP); 3796 } 3797 3798 void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 3799 { 3800 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); 3801 u32 tx_en; 3802 3803 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START); 3804 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); 3805 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); 3806 3807 rtwdev->dpk.is_dpk_enable = true; 3808 rtwdev->dpk.is_dpk_reload_en = false; 3809 _dpk(rtwdev, phy_idx, false); 3810 3811 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); 3812 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP); 3813 } 3814 3815 void rtw8852b_dpk_track(struct rtw89_dev *rtwdev) 3816 { 3817 _dpk_track(rtwdev); 3818 } 3819 3820 void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en) 3821 { 3822 u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB); 3823 u32 tx_en; 3824 u8 i; 3825 3826 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy); 3827 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); 3828 3829 _tssi_disable(rtwdev, phy); 3830 3831 for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) { 3832 _tssi_rf_setting(rtwdev, phy, i); 3833 _tssi_set_sys(rtwdev, phy, i); 3834 _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i); 3835 _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i); 3836 _tssi_set_dck(rtwdev, phy, i); 3837 _tssi_set_tmeter_tbl(rtwdev, phy, i); 3838 _tssi_set_dac_gain_tbl(rtwdev, phy, i); 3839 _tssi_slope_cal_org(rtwdev, phy, i); 3840 _tssi_alignment_default(rtwdev, phy, i, true); 3841 _tssi_set_tssi_slope(rtwdev, phy, i); 3842 3843 rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL); 3844 _tmac_tx_pause(rtwdev, phy, true); 3845 if (hwtx_en) 3846 _tssi_alimentk(rtwdev, phy, i); 3847 _tmac_tx_pause(rtwdev, phy, false); 3848 rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en); 3849 } 3850 3851 _tssi_enable(rtwdev, phy); 3852 _tssi_set_efuse_to_de(rtwdev, phy); 3853 3854 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); 3855 } 3856 3857 void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 3858 { 3859 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 3860 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3861 u8 channel = chan->channel; 3862 u8 band; 3863 u32 i; 3864 3865 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3866 "======>%s phy=%d channel=%d\n", __func__, phy, channel); 3867 3868 if (channel >= 1 && channel <= 14) 3869 band = TSSI_ALIMK_2G; 3870 else if (channel >= 36 && channel <= 64) 3871 band = TSSI_ALIMK_5GL; 3872 else if (channel >= 100 && channel <= 144) 3873 band = TSSI_ALIMK_5GM; 3874 else if (channel >= 149 && channel <= 177) 3875 band = TSSI_ALIMK_5GH; 3876 else 3877 band = TSSI_ALIMK_2G; 3878 3879 _tssi_disable(rtwdev, phy); 3880 3881 for (i = RF_PATH_A; i < RTW8852B_TSSI_PATH_NR; i++) { 3882 _tssi_rf_setting(rtwdev, phy, i); 3883 _tssi_set_sys(rtwdev, phy, i); 3884 _tssi_set_tmeter_tbl(rtwdev, phy, i); 3885 3886 if (tssi_info->alignment_done[i][band]) 3887 _tssi_alimentk_done(rtwdev, phy, i); 3888 else 3889 _tssi_alignment_default(rtwdev, phy, i, true); 3890 } 3891 3892 _tssi_enable(rtwdev, phy); 3893 _tssi_set_efuse_to_de(rtwdev, phy); 3894 } 3895 3896 static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev, 3897 enum rtw89_phy_idx phy, bool enable) 3898 { 3899 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 3900 u8 channel = chan->channel; 3901 3902 rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n", 3903 __func__, channel); 3904 3905 if (enable) { 3906 if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B]) 3907 rtw8852b_tssi(rtwdev, phy, true); 3908 return; 3909 } 3910 3911 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3912 "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n", 3913 __func__, 3914 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT), 3915 rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT)); 3916 3917 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0); 3918 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT, 0xc0); 3919 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 3920 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); 3921 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0); 3922 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1); 3923 3924 _tssi_alimentk_done(rtwdev, phy, RF_PATH_A); 3925 _tssi_alimentk_done(rtwdev, phy, RF_PATH_B); 3926 3927 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3928 "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n", 3929 __func__, 3930 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT), 3931 rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT)); 3932 3933 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3934 "======> %s SCAN_END\n", __func__); 3935 } 3936 3937 void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, 3938 enum rtw89_phy_idx phy_idx) 3939 { 3940 if (scan_start) 3941 rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true); 3942 else 3943 rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false); 3944 } 3945 3946 static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 3947 enum rtw89_bandwidth bw, bool dav) 3948 { 3949 u32 rf_reg18; 3950 u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1; 3951 3952 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__); 3953 3954 rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK); 3955 if (rf_reg18 == INV_RF_DATA) { 3956 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3957 "[RFK]Invalid RF_0x18 for Path-%d\n", path); 3958 return; 3959 } 3960 rf_reg18 &= ~RR_CFGCH_BW; 3961 3962 switch (bw) { 3963 case RTW89_CHANNEL_WIDTH_5: 3964 case RTW89_CHANNEL_WIDTH_10: 3965 case RTW89_CHANNEL_WIDTH_20: 3966 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M); 3967 break; 3968 case RTW89_CHANNEL_WIDTH_40: 3969 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M); 3970 break; 3971 case RTW89_CHANNEL_WIDTH_80: 3972 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M); 3973 break; 3974 default: 3975 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set CH\n"); 3976 } 3977 3978 rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN | 3979 RR_CFGCH_BW2) & RFREG_MASK; 3980 rf_reg18 |= RR_CFGCH_BW2; 3981 rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18); 3982 3983 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n", 3984 bw, path, reg18_addr, 3985 rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK)); 3986 } 3987 3988 static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3989 enum rtw89_bandwidth bw) 3990 { 3991 _bw_setting(rtwdev, RF_PATH_A, bw, true); 3992 _bw_setting(rtwdev, RF_PATH_B, bw, true); 3993 _bw_setting(rtwdev, RF_PATH_A, bw, false); 3994 _bw_setting(rtwdev, RF_PATH_B, bw, false); 3995 } 3996 3997 static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val) 3998 { 3999 u32 bak; 4000 u32 tmp; 4001 int ret; 4002 4003 bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK); 4004 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1); 4005 rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val); 4006 4007 ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000, 4008 false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY); 4009 if (ret) 4010 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n"); 4011 4012 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak); 4013 4014 return !!ret; 4015 } 4016 4017 static void _lck_check(struct rtw89_dev *rtwdev) 4018 { 4019 u32 tmp; 4020 4021 if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) { 4022 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n"); 4023 4024 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1); 4025 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0); 4026 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1); 4027 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0); 4028 } 4029 4030 udelay(10); 4031 4032 if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) { 4033 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n"); 4034 4035 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1); 4036 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 4037 _set_s0_arfc18(rtwdev, tmp); 4038 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0); 4039 } 4040 4041 if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) { 4042 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n"); 4043 4044 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK); 4045 rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp); 4046 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK); 4047 rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp); 4048 4049 rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1); 4050 rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0); 4051 rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3); 4052 rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0); 4053 4054 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1); 4055 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 4056 _set_s0_arfc18(rtwdev, tmp); 4057 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0); 4058 4059 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n", 4060 rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK), 4061 rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK)); 4062 } 4063 } 4064 4065 static void _set_ch(struct rtw89_dev *rtwdev, u32 val) 4066 { 4067 bool timeout; 4068 4069 timeout = _set_s0_arfc18(rtwdev, val); 4070 if (!timeout) 4071 _lck_check(rtwdev); 4072 } 4073 4074 static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 4075 u8 central_ch, bool dav) 4076 { 4077 u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1; 4078 bool is_2g_ch = central_ch <= 14; 4079 u32 rf_reg18; 4080 4081 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__); 4082 4083 rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK); 4084 rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | 4085 RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH); 4086 rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch); 4087 4088 if (!is_2g_ch) 4089 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) | 4090 FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G); 4091 4092 rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN | 4093 RR_CFGCH_BW2) & RFREG_MASK; 4094 rf_reg18 |= RR_CFGCH_BW2; 4095 4096 if (path == RF_PATH_A && dav) 4097 _set_ch(rtwdev, rf_reg18); 4098 else 4099 rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18); 4100 4101 rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0); 4102 rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1); 4103 4104 rtw89_debug(rtwdev, RTW89_DBG_RFK, 4105 "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n", 4106 central_ch, path, reg18_addr, 4107 rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK)); 4108 } 4109 4110 static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch) 4111 { 4112 _ch_setting(rtwdev, RF_PATH_A, central_ch, true); 4113 _ch_setting(rtwdev, RF_PATH_B, central_ch, true); 4114 _ch_setting(rtwdev, RF_PATH_A, central_ch, false); 4115 _ch_setting(rtwdev, RF_PATH_B, central_ch, false); 4116 } 4117 4118 static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw, 4119 enum rtw89_rf_path path) 4120 { 4121 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1); 4122 rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12); 4123 4124 if (bw == RTW89_CHANNEL_WIDTH_20) 4125 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b); 4126 else if (bw == RTW89_CHANNEL_WIDTH_40) 4127 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13); 4128 else if (bw == RTW89_CHANNEL_WIDTH_80) 4129 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb); 4130 else 4131 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3); 4132 4133 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n", path, 4134 rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB)); 4135 4136 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0); 4137 } 4138 4139 static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 4140 enum rtw89_bandwidth bw) 4141 { 4142 u8 kpath, path; 4143 4144 kpath = _kpath(rtwdev, phy); 4145 4146 for (path = 0; path < RF_PATH_NUM_8852B; path++) { 4147 if (!(kpath & BIT(path))) 4148 continue; 4149 4150 _set_rxbb_bw(rtwdev, bw, path); 4151 } 4152 } 4153 4154 static void rtw8852b_ctrl_bw_ch(struct rtw89_dev *rtwdev, 4155 enum rtw89_phy_idx phy, u8 central_ch, 4156 enum rtw89_band band, enum rtw89_bandwidth bw) 4157 { 4158 _ctrl_ch(rtwdev, central_ch); 4159 _ctrl_bw(rtwdev, phy, bw); 4160 _rxbb_bw(rtwdev, phy, bw); 4161 } 4162 4163 void rtw8852b_set_channel_rf(struct rtw89_dev *rtwdev, 4164 const struct rtw89_chan *chan, 4165 enum rtw89_phy_idx phy_idx) 4166 { 4167 rtw8852b_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type, 4168 chan->band_width); 4169 } 4170