1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2022-2023 Realtek Corporation 3 */ 4 5 #include "coex.h" 6 #include "debug.h" 7 #include "mac.h" 8 #include "phy.h" 9 #include "reg.h" 10 #include "rtw8851b.h" 11 #include "rtw8851b_rfk.h" 12 #include "rtw8851b_rfk_table.h" 13 #include "rtw8851b_table.h" 14 15 #define DPK_VER_8851B 0x5 16 #define DPK_KIP_REG_NUM_8851B 7 17 #define DPK_RF_REG_NUM_8851B 4 18 #define DPK_KSET_NUM 4 19 #define RTW8851B_RXK_GROUP_NR 4 20 #define RTW8851B_RXK_GROUP_IDX_NR 2 21 #define RTW8851B_TXK_GROUP_NR 1 22 #define RTW8851B_IQK_VER 0x2a 23 #define RTW8851B_IQK_SS 1 24 #define RTW8851B_LOK_GRAM 10 25 #define RTW8851B_TSSI_PATH_NR 1 26 27 #define _TSSI_DE_MASK GENMASK(21, 12) 28 29 enum dpk_id { 30 LBK_RXIQK = 0x06, 31 SYNC = 0x10, 32 MDPK_IDL = 0x11, 33 MDPK_MPA = 0x12, 34 GAIN_LOSS = 0x13, 35 GAIN_CAL = 0x14, 36 DPK_RXAGC = 0x15, 37 KIP_PRESET = 0x16, 38 KIP_RESTORE = 0x17, 39 DPK_TXAGC = 0x19, 40 D_KIP_PRESET = 0x28, 41 D_TXAGC = 0x29, 42 D_RXAGC = 0x2a, 43 D_SYNC = 0x2b, 44 D_GAIN_LOSS = 0x2c, 45 D_MDPK_IDL = 0x2d, 46 D_MDPK_LDL = 0x2e, 47 D_GAIN_NORM = 0x2f, 48 D_KIP_THERMAL = 0x30, 49 D_KIP_RESTORE = 0x31 50 }; 51 52 enum dpk_agc_step { 53 DPK_AGC_STEP_SYNC_DGAIN, 54 DPK_AGC_STEP_GAIN_LOSS_IDX, 55 DPK_AGC_STEP_GL_GT_CRITERION, 56 DPK_AGC_STEP_GL_LT_CRITERION, 57 DPK_AGC_STEP_SET_TX_GAIN, 58 }; 59 60 enum rtw8851b_iqk_type { 61 ID_TXAGC = 0x0, 62 ID_FLOK_COARSE = 0x1, 63 ID_FLOK_FINE = 0x2, 64 ID_TXK = 0x3, 65 ID_RXAGC = 0x4, 66 ID_RXK = 0x5, 67 ID_NBTXK = 0x6, 68 ID_NBRXK = 0x7, 69 ID_FLOK_VBUFFER = 0x8, 70 ID_A_FLOK_COARSE = 0x9, 71 ID_G_FLOK_COARSE = 0xa, 72 ID_A_FLOK_FINE = 0xb, 73 ID_G_FLOK_FINE = 0xc, 74 ID_IQK_RESTORE = 0x10, 75 }; 76 77 enum rf_mode { 78 RF_SHUT_DOWN = 0x0, 79 RF_STANDBY = 0x1, 80 RF_TX = 0x2, 81 RF_RX = 0x3, 82 RF_TXIQK = 0x4, 83 RF_DPK = 0x5, 84 RF_RXK1 = 0x6, 85 RF_RXK2 = 0x7, 86 }; 87 88 static const u32 _tssi_de_cck_long[RF_PATH_NUM_8851B] = {0x5858}; 89 static const u32 _tssi_de_cck_short[RF_PATH_NUM_8851B] = {0x5860}; 90 static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8851B] = {0x5838}; 91 static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8851B] = {0x5840}; 92 static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8851B] = {0x5848}; 93 static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8851B] = {0x5850}; 94 static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8851B] = {0x5828}; 95 static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8851B] = {0x5830}; 96 static const u32 g_idxrxgain[RTW8851B_RXK_GROUP_NR] = {0x10e, 0x116, 0x28e, 0x296}; 97 static const u32 g_idxattc2[RTW8851B_RXK_GROUP_NR] = {0x0, 0xf, 0x0, 0xf}; 98 static const u32 g_idxrxagc[RTW8851B_RXK_GROUP_NR] = {0x0, 0x1, 0x2, 0x3}; 99 static const u32 a_idxrxgain[RTW8851B_RXK_GROUP_IDX_NR] = {0x10C, 0x28c}; 100 static const u32 a_idxattc2[RTW8851B_RXK_GROUP_IDX_NR] = {0xf, 0xf}; 101 static const u32 a_idxrxagc[RTW8851B_RXK_GROUP_IDX_NR] = {0x4, 0x6}; 102 static const u32 a_power_range[RTW8851B_TXK_GROUP_NR] = {0x0}; 103 static const u32 a_track_range[RTW8851B_TXK_GROUP_NR] = {0x6}; 104 static const u32 a_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x0a}; 105 static const u32 a_itqt[RTW8851B_TXK_GROUP_NR] = {0x12}; 106 static const u32 g_power_range[RTW8851B_TXK_GROUP_NR] = {0x0}; 107 static const u32 g_track_range[RTW8851B_TXK_GROUP_NR] = {0x6}; 108 static const u32 g_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x10}; 109 static const u32 g_itqt[RTW8851B_TXK_GROUP_NR] = {0x12}; 110 111 static const u32 rtw8851b_backup_bb_regs[] = {0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec, 0xc0e8}; 112 static const u32 rtw8851b_backup_rf_regs[] = { 113 0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5}; 114 115 #define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8851b_backup_bb_regs) 116 #define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8851b_backup_rf_regs) 117 118 static const u32 dpk_kip_reg[DPK_KIP_REG_NUM_8851B] = { 119 0x813c, 0x8124, 0xc0ec, 0xc0e8, 0xc0c4, 0xc0d4, 0xc0d8}; 120 static const u32 dpk_rf_reg[DPK_RF_REG_NUM_8851B] = {0xde, 0x8f, 0x5, 0x10005}; 121 122 static void _set_ch(struct rtw89_dev *rtwdev, u32 val); 123 124 static u8 _rxk_5ghz_group_from_idx(u8 idx) 125 { 126 /* There are four RXK groups (RTW8851B_RXK_GROUP_NR), but only group 0 127 * and 2 are used in 5 GHz band, so reduce elements to 2. 128 */ 129 if (idx < RTW8851B_RXK_GROUP_IDX_NR) 130 return idx * 2; 131 132 return 0; 133 } 134 135 static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 136 { 137 return RF_A; 138 } 139 140 static void _adc_fifo_rst(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 141 u8 path) 142 { 143 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101); 144 fsleep(10); 145 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x1111); 146 } 147 148 static void _rfk_rf_direct_cntrl(struct rtw89_dev *rtwdev, 149 enum rtw89_rf_path path, bool is_bybb) 150 { 151 if (is_bybb) 152 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); 153 else 154 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 155 } 156 157 static void _rfk_drf_direct_cntrl(struct rtw89_dev *rtwdev, 158 enum rtw89_rf_path path, bool is_bybb) 159 { 160 if (is_bybb) 161 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1); 162 else 163 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0); 164 } 165 166 static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath) 167 { 168 u32 rf_mode; 169 u8 path; 170 int ret; 171 172 for (path = 0; path < RF_PATH_MAX; path++) { 173 if (!(kpath & BIT(path))) 174 continue; 175 176 ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, 177 rf_mode != 2, 2, 5000, false, 178 rtwdev, path, 0x00, RR_MOD_MASK); 179 rtw89_debug(rtwdev, RTW89_DBG_RFK, 180 "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", 181 path, ret); 182 } 183 } 184 185 static void _dack_reset(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 186 { 187 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x0); 188 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x1); 189 } 190 191 static void _drck(struct rtw89_dev *rtwdev) 192 { 193 u32 rck_d; 194 u32 val; 195 int ret; 196 197 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n"); 198 199 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x1); 200 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1); 201 202 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 203 1, 10000, false, 204 rtwdev, R_DRCK_RES, B_DRCK_POL); 205 if (ret) 206 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n"); 207 208 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x0); 209 rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1); 210 udelay(1); 211 rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0); 212 213 rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, 0x7c00); 214 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0); 215 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, rck_d); 216 217 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n", 218 rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD)); 219 } 220 221 static void _addck_backup(struct rtw89_dev *rtwdev) 222 { 223 struct rtw89_dack_info *dack = &rtwdev->dack; 224 225 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0); 226 227 dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0); 228 dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1); 229 } 230 231 static void _addck_reload(struct rtw89_dev *rtwdev) 232 { 233 struct rtw89_dack_info *dack = &rtwdev->dack; 234 235 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, dack->addck_d[0][0]); 236 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, dack->addck_d[0][1]); 237 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3); 238 } 239 240 static void _dack_backup_s0(struct rtw89_dev *rtwdev) 241 { 242 struct rtw89_dack_info *dack = &rtwdev->dack; 243 u8 i; 244 245 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); 246 247 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 248 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i); 249 dack->msbk_d[0][0][i] = 250 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0); 251 252 rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i); 253 dack->msbk_d[0][1][i] = 254 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1); 255 } 256 257 dack->biask_d[0][0] = 258 rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00); 259 dack->biask_d[0][1] = 260 rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01); 261 dack->dadck_d[0][0] = 262 rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00) + 24; 263 dack->dadck_d[0][1] = 264 rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01) + 24; 265 } 266 267 static void _dack_reload_by_path(struct rtw89_dev *rtwdev, 268 enum rtw89_rf_path path, u8 index) 269 { 270 struct rtw89_dack_info *dack = &rtwdev->dack; 271 u32 idx_offset, path_offset; 272 u32 offset, reg; 273 u32 tmp; 274 u8 i; 275 276 if (index == 0) 277 idx_offset = 0; 278 else 279 idx_offset = 0x14; 280 281 if (path == RF_PATH_A) 282 path_offset = 0; 283 else 284 path_offset = 0x28; 285 286 offset = idx_offset + path_offset; 287 288 rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_RST, 0x1); 289 rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_RST, 0x1); 290 291 /* msbk_d: 15/14/13/12 */ 292 tmp = 0x0; 293 for (i = 0; i < 4; i++) 294 tmp |= dack->msbk_d[path][index][i + 12] << (i * 8); 295 reg = 0xc200 + offset; 296 rtw89_phy_write32(rtwdev, reg, tmp); 297 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg, 298 rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD)); 299 300 /* msbk_d: 11/10/9/8 */ 301 tmp = 0x0; 302 for (i = 0; i < 4; i++) 303 tmp |= dack->msbk_d[path][index][i + 8] << (i * 8); 304 reg = 0xc204 + offset; 305 rtw89_phy_write32(rtwdev, reg, tmp); 306 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg, 307 rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD)); 308 309 /* msbk_d: 7/6/5/4 */ 310 tmp = 0x0; 311 for (i = 0; i < 4; i++) 312 tmp |= dack->msbk_d[path][index][i + 4] << (i * 8); 313 reg = 0xc208 + offset; 314 rtw89_phy_write32(rtwdev, reg, tmp); 315 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg, 316 rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD)); 317 318 /* msbk_d: 3/2/1/0 */ 319 tmp = 0x0; 320 for (i = 0; i < 4; i++) 321 tmp |= dack->msbk_d[path][index][i] << (i * 8); 322 reg = 0xc20c + offset; 323 rtw89_phy_write32(rtwdev, reg, tmp); 324 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg, 325 rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD)); 326 327 /* dadak_d/biask_d */ 328 tmp = 0x0; 329 tmp = (dack->biask_d[path][index] << 22) | 330 (dack->dadck_d[path][index] << 14); 331 reg = 0xc210 + offset; 332 rtw89_phy_write32(rtwdev, reg, tmp); 333 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg, 334 rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD)); 335 336 rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL + offset, B_DACKN0_EN, 0x1); 337 } 338 339 static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 340 { 341 u8 index; 342 343 for (index = 0; index < 2; index++) 344 _dack_reload_by_path(rtwdev, path, index); 345 } 346 347 static void _addck(struct rtw89_dev *rtwdev) 348 { 349 struct rtw89_dack_info *dack = &rtwdev->dack; 350 u32 val; 351 int ret; 352 353 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x1); 354 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x1); 355 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x0); 356 udelay(1); 357 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1); 358 359 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 360 1, 10000, false, 361 rtwdev, R_ADDCKR0, BIT(0)); 362 if (ret) { 363 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n"); 364 dack->addck_timeout[0] = true; 365 } 366 367 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret); 368 369 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x0); 370 } 371 372 static void _new_dadck(struct rtw89_dev *rtwdev) 373 { 374 struct rtw89_dack_info *dack = &rtwdev->dack; 375 u32 i_dc, q_dc, ic, qc; 376 u32 val; 377 int ret; 378 379 rtw89_rfk_parser(rtwdev, &rtw8851b_dadck_setup_defs_tbl); 380 381 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 382 1, 10000, false, 383 rtwdev, R_ADDCKR0, BIT(0)); 384 if (ret) { 385 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADCK timeout\n"); 386 dack->addck_timeout[0] = true; 387 } 388 389 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DADCK ret = %d\n", ret); 390 391 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_IQ, 0x0); 392 i_dc = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_DC); 393 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_IQ, 0x1); 394 q_dc = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_DC); 395 396 ic = 0x80 - sign_extend32(i_dc, 11) * 6; 397 qc = 0x80 - sign_extend32(q_dc, 11) * 6; 398 399 rtw89_debug(rtwdev, RTW89_DBG_RFK, 400 "[DACK]before DADCK, i_dc=0x%x, q_dc=0x%x\n", i_dc, q_dc); 401 402 dack->dadck_d[0][0] = ic; 403 dack->dadck_d[0][1] = qc; 404 405 rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL, B_DACKN0_V, dack->dadck_d[0][0]); 406 rtw89_phy_write32_mask(rtwdev, R_DACKN1_CTL, B_DACKN1_V, dack->dadck_d[0][1]); 407 rtw89_debug(rtwdev, RTW89_DBG_RFK, 408 "[DACK]after DADCK, 0xc210=0x%x, 0xc224=0x%x\n", 409 rtw89_phy_read32_mask(rtwdev, R_DACKN0_CTL, MASKDWORD), 410 rtw89_phy_read32_mask(rtwdev, R_DACKN1_CTL, MASKDWORD)); 411 412 rtw89_rfk_parser(rtwdev, &rtw8851b_dadck_post_defs_tbl); 413 } 414 415 static bool _dack_s0_poll(struct rtw89_dev *rtwdev) 416 { 417 if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 || 418 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 || 419 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 || 420 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0) 421 return false; 422 423 return true; 424 } 425 426 static void _dack_s0(struct rtw89_dev *rtwdev) 427 { 428 struct rtw89_dack_info *dack = &rtwdev->dack; 429 bool done; 430 int ret; 431 432 rtw89_rfk_parser(rtwdev, &rtw8851b_dack_s0_1_defs_tbl); 433 _dack_reset(rtwdev, RF_PATH_A); 434 rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1); 435 436 ret = read_poll_timeout_atomic(_dack_s0_poll, done, done, 437 1, 10000, false, rtwdev); 438 if (ret) { 439 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n"); 440 dack->msbk_timeout[0] = true; 441 } 442 443 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret); 444 445 rtw89_rfk_parser(rtwdev, &rtw8851b_dack_s0_2_defs_tbl); 446 447 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n"); 448 449 _dack_backup_s0(rtwdev); 450 _dack_reload(rtwdev, RF_PATH_A); 451 452 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0); 453 } 454 455 static void _dack(struct rtw89_dev *rtwdev) 456 { 457 _dack_s0(rtwdev); 458 } 459 460 static void _dack_dump(struct rtw89_dev *rtwdev) 461 { 462 struct rtw89_dack_info *dack = &rtwdev->dack; 463 u8 i; 464 u8 t; 465 466 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n", 467 dack->addck_d[0][0], dack->addck_d[0][1]); 468 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n", 469 dack->dadck_d[0][0], dack->dadck_d[0][1]); 470 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n", 471 dack->biask_d[0][0], dack->biask_d[0][1]); 472 473 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n"); 474 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 475 t = dack->msbk_d[0][0][i]; 476 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); 477 } 478 479 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n"); 480 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 481 t = dack->msbk_d[0][1][i]; 482 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); 483 } 484 } 485 486 static void _dack_manual_off(struct rtw89_dev *rtwdev) 487 { 488 rtw89_rfk_parser(rtwdev, &rtw8851b_dack_manual_off_defs_tbl); 489 } 490 491 static void _dac_cal(struct rtw89_dev *rtwdev, bool force) 492 { 493 struct rtw89_dack_info *dack = &rtwdev->dack; 494 u32 rf0_0; 495 496 dack->dack_done = false; 497 498 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK 0x2\n"); 499 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n"); 500 rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK); 501 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]RF0=0x%x\n", rf0_0); 502 503 _drck(rtwdev); 504 _dack_manual_off(rtwdev); 505 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1); 506 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0); 507 508 _addck(rtwdev); 509 _addck_backup(rtwdev); 510 _addck_reload(rtwdev); 511 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x40001); 512 513 _dack(rtwdev); 514 _new_dadck(rtwdev); 515 _dack_dump(rtwdev); 516 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1); 517 518 dack->dack_done = true; 519 dack->dack_cnt++; 520 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n"); 521 } 522 523 static void _rx_dck_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 524 enum rtw89_rf_path path, bool is_afe, 525 enum rtw89_chanctx_idx chanctx_idx) 526 { 527 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); 528 529 rtw89_debug(rtwdev, RTW89_DBG_RFK, 530 "[RX_DCK] ==== S%d RX DCK (%s / CH%d / %s / by %s)====\n", path, 531 chan->band_type == RTW89_BAND_2G ? "2G" : 532 chan->band_type == RTW89_BAND_5G ? "5G" : "6G", 533 chan->channel, 534 chan->band_width == RTW89_CHANNEL_WIDTH_20 ? "20M" : 535 chan->band_width == RTW89_CHANNEL_WIDTH_40 ? "40M" : "80M", 536 is_afe ? "AFE" : "RFC"); 537 } 538 539 static void _rxbb_ofst_swap(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 rf_mode) 540 { 541 u32 val, val_i, val_q; 542 543 val_i = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_S1); 544 val_q = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_S1); 545 546 val = val_q << 4 | val_i; 547 548 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_DIS, 0x1); 549 rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, rf_mode); 550 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val); 551 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_DIS, 0x0); 552 553 rtw89_debug(rtwdev, RTW89_DBG_RFK, 554 "[RX_DCK] val_i = 0x%x, val_q = 0x%x, 0x3F = 0x%x\n", 555 val_i, val_q, val); 556 } 557 558 static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 rf_mode) 559 { 560 u32 val; 561 int ret; 562 563 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); 564 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1); 565 566 ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 567 2, 2000, false, 568 rtwdev, path, RR_DCK, BIT(8)); 569 570 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); 571 572 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] S%d RXDCK finish (ret = %d)\n", 573 path, ret); 574 575 _rxbb_ofst_swap(rtwdev, path, rf_mode); 576 } 577 578 static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe, 579 enum rtw89_chanctx_idx chanctx_idx) 580 { 581 u32 rf_reg5; 582 u8 path; 583 584 rtw89_debug(rtwdev, RTW89_DBG_RFK, 585 "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n", 586 0x2, rtwdev->hal.cv); 587 588 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 589 _rx_dck_info(rtwdev, phy, path, is_afe, chanctx_idx); 590 591 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); 592 593 if (rtwdev->is_tssi_mode[path]) 594 rtw89_phy_write32_mask(rtwdev, 595 R_P0_TSSI_TRK + (path << 13), 596 B_P0_TSSI_TRK_EN, 0x1); 597 598 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 599 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX); 600 _set_rx_dck(rtwdev, path, RF_RX); 601 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); 602 603 if (rtwdev->is_tssi_mode[path]) 604 rtw89_phy_write32_mask(rtwdev, 605 R_P0_TSSI_TRK + (path << 13), 606 B_P0_TSSI_TRK_EN, 0x0); 607 } 608 } 609 610 static void _iqk_sram(struct rtw89_dev *rtwdev, u8 path) 611 { 612 u32 i; 613 614 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 615 616 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00020000); 617 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, MASKDWORD, 0x80000000); 618 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000080); 619 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000); 620 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009); 621 622 for (i = 0; i <= 0x9f; i++) { 623 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 624 0x00010000 + i); 625 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", 626 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI)); 627 } 628 629 for (i = 0; i <= 0x9f; i++) { 630 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 631 0x00010000 + i); 632 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", 633 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ)); 634 } 635 636 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000000); 637 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00000000); 638 } 639 640 static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path) 641 { 642 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); 643 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); 644 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1); 645 } 646 647 static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path) 648 { 649 bool fail1 = false, fail2 = false; 650 u32 val; 651 int ret; 652 653 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 654 10, 8200, false, 655 rtwdev, 0xbff8, MASKBYTE0); 656 if (ret) { 657 fail1 = true; 658 rtw89_debug(rtwdev, RTW89_DBG_RFK, 659 "[IQK]NCTL1 IQK timeout!!!\n"); 660 } 661 662 fsleep(10); 663 664 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000, 665 10, 200, false, 666 rtwdev, R_RPT_COM, B_RPT_COM_RDY); 667 if (ret) { 668 fail2 = true; 669 rtw89_debug(rtwdev, RTW89_DBG_RFK, 670 "[IQK]NCTL2 IQK timeout!!!\n"); 671 } 672 673 fsleep(10); 674 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0); 675 676 rtw89_debug(rtwdev, RTW89_DBG_RFK, 677 "[IQK]S%x, ret = %d, notready = %x fail=%d,%d\n", 678 path, ret, fail1 || fail2, fail1, fail2); 679 680 return fail1 || fail2; 681 } 682 683 static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 684 u8 path, u8 ktype) 685 { 686 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 687 bool notready; 688 u32 iqk_cmd; 689 690 switch (ktype) { 691 case ID_A_FLOK_COARSE: 692 rtw89_debug(rtwdev, RTW89_DBG_RFK, 693 "[IQK]============ S%d ID_A_FLOK_COARSE ============\n", path); 694 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 695 iqk_cmd = 0x108 | (1 << (4 + path)); 696 break; 697 case ID_G_FLOK_COARSE: 698 rtw89_debug(rtwdev, RTW89_DBG_RFK, 699 "[IQK]============ S%d ID_G_FLOK_COARSE ============\n", path); 700 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 701 iqk_cmd = 0x108 | (1 << (4 + path)); 702 break; 703 case ID_A_FLOK_FINE: 704 rtw89_debug(rtwdev, RTW89_DBG_RFK, 705 "[IQK]============ S%d ID_A_FLOK_FINE ============\n", path); 706 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 707 iqk_cmd = 0x308 | (1 << (4 + path)); 708 break; 709 case ID_G_FLOK_FINE: 710 rtw89_debug(rtwdev, RTW89_DBG_RFK, 711 "[IQK]============ S%d ID_G_FLOK_FINE ============\n", path); 712 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 713 iqk_cmd = 0x308 | (1 << (4 + path)); 714 break; 715 case ID_TXK: 716 rtw89_debug(rtwdev, RTW89_DBG_RFK, 717 "[IQK]============ S%d ID_TXK ============\n", path); 718 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); 719 iqk_cmd = 0x008 | (1 << (path + 4)) | 720 (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8); 721 break; 722 case ID_RXAGC: 723 rtw89_debug(rtwdev, RTW89_DBG_RFK, 724 "[IQK]============ S%d ID_RXAGC ============\n", path); 725 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 726 iqk_cmd = 0x708 | (1 << (4 + path)) | (path << 1); 727 break; 728 case ID_RXK: 729 rtw89_debug(rtwdev, RTW89_DBG_RFK, 730 "[IQK]============ S%d ID_RXK ============\n", path); 731 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 732 iqk_cmd = 0x008 | (1 << (path + 4)) | 733 (((0xc + iqk_info->iqk_bw[path]) & 0xf) << 8); 734 break; 735 case ID_NBTXK: 736 rtw89_debug(rtwdev, RTW89_DBG_RFK, 737 "[IQK]============ S%d ID_NBTXK ============\n", path); 738 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); 739 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 740 0x00b); 741 iqk_cmd = 0x408 | (1 << (4 + path)); 742 break; 743 case ID_NBRXK: 744 rtw89_debug(rtwdev, RTW89_DBG_RFK, 745 "[IQK]============ S%d ID_NBRXK ============\n", path); 746 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 747 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 748 0x011); 749 iqk_cmd = 0x608 | (1 << (4 + path)); 750 break; 751 default: 752 return false; 753 } 754 755 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1); 756 notready = _iqk_check_cal(rtwdev, path); 757 if (iqk_info->iqk_sram_en && 758 (ktype == ID_NBRXK || ktype == ID_RXK)) 759 _iqk_sram(rtwdev, path); 760 761 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); 762 rtw89_debug(rtwdev, RTW89_DBG_RFK, 763 "[IQK]S%x, ktype= %x, id = %x, notready = %x\n", 764 path, ktype, iqk_cmd + 1, notready); 765 766 return notready; 767 } 768 769 static bool _rxk_2g_group_sel(struct rtw89_dev *rtwdev, 770 enum rtw89_phy_idx phy_idx, u8 path) 771 { 772 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 773 bool kfail = false; 774 bool notready; 775 u32 rf_0; 776 u8 gp; 777 778 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 779 780 for (gp = 0; gp < RTW8851B_RXK_GROUP_NR; gp++) { 781 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp); 782 783 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]); 784 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2, g_idxattc2[gp]); 785 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 786 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0); 787 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp); 788 789 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); 790 fsleep(10); 791 rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); 792 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0); 793 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]); 794 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); 795 796 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); 797 798 rtw89_debug(rtwdev, RTW89_DBG_RFK, 799 "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", path, 800 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD), 801 rtw89_read_rf(rtwdev, path, RR_MOD, 0x003e0)); 802 803 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); 804 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); 805 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); 806 iqk_info->nb_rxcfir[path] = 807 rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2; 808 809 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); 810 811 rtw89_debug(rtwdev, RTW89_DBG_RFK, 812 "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", path, 813 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 814 } 815 816 if (!notready) 817 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 818 819 if (kfail) 820 _iqk_sram(rtwdev, path); 821 822 if (kfail) { 823 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), 824 MASKDWORD, iqk_info->nb_rxcfir[path] | 0x2); 825 iqk_info->is_wb_txiqk[path] = false; 826 } else { 827 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), 828 MASKDWORD, 0x40000000); 829 iqk_info->is_wb_txiqk[path] = true; 830 } 831 832 rtw89_debug(rtwdev, RTW89_DBG_RFK, 833 "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail, 834 1 << path, iqk_info->nb_rxcfir[path]); 835 return kfail; 836 } 837 838 static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev, 839 enum rtw89_phy_idx phy_idx, u8 path) 840 { 841 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 842 bool kfail = false; 843 bool notready; 844 u32 rf_0; 845 u8 idx; 846 u8 gp; 847 848 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 849 850 for (idx = 0; idx < RTW8851B_RXK_GROUP_IDX_NR; idx++) { 851 gp = _rxk_5ghz_group_from_idx(idx); 852 853 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp); 854 855 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[idx]); 856 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[idx]); 857 858 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 859 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0); 860 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp); 861 862 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); 863 fsleep(100); 864 rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); 865 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0); 866 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[idx]); 867 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); 868 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); 869 870 rtw89_debug(rtwdev, RTW89_DBG_RFK, 871 "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", path, 872 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD), 873 rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_RXB)); 874 875 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); 876 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); 877 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); 878 iqk_info->nb_rxcfir[path] = 879 rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2; 880 881 rtw89_debug(rtwdev, RTW89_DBG_RFK, 882 "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path, 883 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 884 885 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); 886 887 rtw89_debug(rtwdev, RTW89_DBG_RFK, 888 "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", path, 889 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 890 } 891 892 if (!notready) 893 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 894 895 if (kfail) 896 _iqk_sram(rtwdev, path); 897 898 if (kfail) { 899 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 900 iqk_info->nb_rxcfir[path] | 0x2); 901 iqk_info->is_wb_txiqk[path] = false; 902 } else { 903 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 904 0x40000000); 905 iqk_info->is_wb_txiqk[path] = true; 906 } 907 908 rtw89_debug(rtwdev, RTW89_DBG_RFK, 909 "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail, 910 1 << path, iqk_info->nb_rxcfir[path]); 911 return kfail; 912 } 913 914 static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 915 u8 path) 916 { 917 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 918 bool kfail = false; 919 bool notready; 920 u8 idx = 0x1; 921 u32 rf_0; 922 u8 gp; 923 924 gp = _rxk_5ghz_group_from_idx(idx); 925 926 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 927 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp); 928 929 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[idx]); 930 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[idx]); 931 932 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 933 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0); 934 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp); 935 936 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); 937 fsleep(100); 938 rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); 939 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0); 940 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[idx]); 941 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); 942 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); 943 944 rtw89_debug(rtwdev, RTW89_DBG_RFK, 945 "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", path, 946 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD), 947 rtw89_read_rf(rtwdev, path, RR_MOD, 0x003e0)); 948 949 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); 950 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); 951 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); 952 iqk_info->nb_rxcfir[path] = 953 rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2; 954 955 rtw89_debug(rtwdev, RTW89_DBG_RFK, 956 "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path, 957 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 958 959 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", 960 path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 961 962 if (!notready) 963 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 964 965 if (kfail) { 966 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), 967 MASKDWORD, 0x40000002); 968 iqk_info->is_wb_rxiqk[path] = false; 969 } else { 970 iqk_info->is_wb_rxiqk[path] = false; 971 } 972 973 rtw89_debug(rtwdev, RTW89_DBG_RFK, 974 "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail, 975 1 << path, iqk_info->nb_rxcfir[path]); 976 977 return kfail; 978 } 979 980 static bool _iqk_2g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 981 u8 path) 982 { 983 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 984 bool kfail = false; 985 bool notready; 986 u8 gp = 0x3; 987 u32 rf_0; 988 989 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 990 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp); 991 992 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]); 993 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2, g_idxattc2[gp]); 994 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 995 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0); 996 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp); 997 998 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); 999 fsleep(10); 1000 rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); 1001 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0); 1002 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]); 1003 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); 1004 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); 1005 1006 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1007 "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", 1008 path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD), 1009 rtw89_read_rf(rtwdev, path, RR_MOD, 0x003e0)); 1010 1011 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); 1012 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); 1013 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); 1014 iqk_info->nb_rxcfir[path] = 1015 rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2; 1016 1017 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1018 "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path, 1019 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 1020 1021 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", 1022 path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 1023 1024 if (!notready) 1025 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1026 1027 if (kfail) { 1028 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), 1029 MASKDWORD, 0x40000002); 1030 iqk_info->is_wb_rxiqk[path] = false; 1031 } else { 1032 iqk_info->is_wb_rxiqk[path] = false; 1033 } 1034 1035 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1036 "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail, 1037 1 << path, iqk_info->nb_rxcfir[path]); 1038 return kfail; 1039 } 1040 1041 static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path) 1042 { 1043 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1044 1045 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1); 1046 1047 if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) 1048 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_rxclk_80_defs_tbl); 1049 else 1050 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_rxclk_others_defs_tbl); 1051 } 1052 1053 static bool _txk_5g_group_sel(struct rtw89_dev *rtwdev, 1054 enum rtw89_phy_idx phy_idx, u8 path) 1055 { 1056 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1057 bool kfail = false; 1058 bool notready; 1059 u8 gp; 1060 1061 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1062 1063 for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) { 1064 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]); 1065 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]); 1066 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]); 1067 1068 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1069 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1); 1070 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0); 1071 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp); 1072 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1073 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]); 1074 1075 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1076 iqk_info->nb_txcfir[path] = 1077 rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2; 1078 1079 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1080 MASKDWORD, a_itqt[gp]); 1081 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); 1082 } 1083 1084 if (!notready) 1085 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1086 1087 if (kfail) { 1088 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1089 MASKDWORD, iqk_info->nb_txcfir[path] | 0x2); 1090 iqk_info->is_wb_txiqk[path] = false; 1091 } else { 1092 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1093 MASKDWORD, 0x40000000); 1094 iqk_info->is_wb_txiqk[path] = true; 1095 } 1096 1097 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1098 "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail, 1099 1 << path, iqk_info->nb_txcfir[path]); 1100 return kfail; 1101 } 1102 1103 static bool _txk_2g_group_sel(struct rtw89_dev *rtwdev, 1104 enum rtw89_phy_idx phy_idx, u8 path) 1105 { 1106 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1107 bool kfail = false; 1108 bool notready; 1109 u8 gp; 1110 1111 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1112 1113 for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) { 1114 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]); 1115 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]); 1116 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]); 1117 1118 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, g_itqt[gp]); 1119 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1120 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1); 1121 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0); 1122 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp); 1123 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1124 1125 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1126 iqk_info->nb_txcfir[path] = 1127 rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2; 1128 1129 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1130 MASKDWORD, g_itqt[gp]); 1131 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); 1132 } 1133 1134 if (!notready) 1135 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1136 1137 if (kfail) { 1138 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1139 MASKDWORD, iqk_info->nb_txcfir[path] | 0x2); 1140 iqk_info->is_wb_txiqk[path] = false; 1141 } else { 1142 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1143 MASKDWORD, 0x40000000); 1144 iqk_info->is_wb_txiqk[path] = true; 1145 } 1146 1147 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1148 "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail, 1149 1 << path, iqk_info->nb_txcfir[path]); 1150 return kfail; 1151 } 1152 1153 static bool _iqk_5g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1154 u8 path) 1155 { 1156 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1157 bool kfail = false; 1158 bool notready; 1159 u8 gp; 1160 1161 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1162 1163 for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) { 1164 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]); 1165 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]); 1166 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]); 1167 1168 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1169 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1); 1170 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0); 1171 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp); 1172 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1173 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]); 1174 1175 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1176 iqk_info->nb_txcfir[path] = 1177 rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2; 1178 } 1179 1180 if (!notready) 1181 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1182 1183 if (kfail) { 1184 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1185 MASKDWORD, 0x40000002); 1186 iqk_info->is_wb_rxiqk[path] = false; 1187 } else { 1188 iqk_info->is_wb_rxiqk[path] = false; 1189 } 1190 1191 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1192 "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail, 1193 1 << path, iqk_info->nb_txcfir[path]); 1194 return kfail; 1195 } 1196 1197 static bool _iqk_2g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1198 u8 path) 1199 { 1200 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1201 bool kfail = false; 1202 bool notready; 1203 u8 gp; 1204 1205 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1206 1207 for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) { 1208 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]); 1209 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]); 1210 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]); 1211 1212 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, g_itqt[gp]); 1213 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1214 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1); 1215 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0); 1216 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp); 1217 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1218 1219 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1220 iqk_info->nb_txcfir[path] = 1221 rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), 1222 MASKDWORD) | 0x2; 1223 } 1224 1225 if (!notready) 1226 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1227 1228 if (kfail) { 1229 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1230 MASKDWORD, 0x40000002); 1231 iqk_info->is_wb_rxiqk[path] = false; 1232 } else { 1233 iqk_info->is_wb_rxiqk[path] = false; 1234 } 1235 1236 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1237 "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail, 1238 1 << path, iqk_info->nb_txcfir[path]); 1239 return kfail; 1240 } 1241 1242 static bool _iqk_2g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1243 u8 path) 1244 { 1245 static const u32 g_txbb[RTW8851B_LOK_GRAM] = { 1246 0x02, 0x06, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17}; 1247 static const u32 g_itqt[RTW8851B_LOK_GRAM] = { 1248 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x12, 0x12, 0x12, 0x1b}; 1249 static const u32 g_wa[RTW8851B_LOK_GRAM] = { 1250 0x00, 0x04, 0x08, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17}; 1251 bool fail = false; 1252 u8 i; 1253 1254 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1255 1256 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTDBG, RR_LUTDBG_LOK, 0x0); 1257 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR0, 0x0); 1258 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR1, 0x6); 1259 1260 for (i = 0; i < RTW8851B_LOK_GRAM; i++) { 1261 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_TG, g_txbb[i]); 1262 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RR_LUTWA_M1, g_wa[i]); 1263 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 1264 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, g_itqt[i]); 1265 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021); 1266 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 1267 0x00000109 | (1 << (4 + path))); 1268 fail |= _iqk_check_cal(rtwdev, path); 1269 1270 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1271 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, g_itqt[i]); 1272 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 1273 0x00000309 | (1 << (4 + path))); 1274 fail |= _iqk_check_cal(rtwdev, path); 1275 1276 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1277 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); 1278 1279 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1280 "[IQK]S0, i = %x, 0x8[19:15] = 0x%x,0x8[09:05] = 0x%x\n", i, 1281 rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0xf8000), 1282 rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0x003e0)); 1283 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1284 "[IQK]S0, i = %x, 0x9[19:16] = 0x%x,0x9[09:06] = 0x%x\n", i, 1285 rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0xf0000), 1286 rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0x003c0)); 1287 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1288 "[IQK]S0, i = %x, 0x58 = %x\n", i, 1289 rtw89_read_rf(rtwdev, RF_PATH_A, RR_TXMO, RFREG_MASK)); 1290 } 1291 1292 return fail; 1293 } 1294 1295 static bool _iqk_5g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1296 u8 path) 1297 { 1298 static const u32 a_txbb[RTW8851B_LOK_GRAM] = { 1299 0x02, 0x06, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17}; 1300 static const u32 a_itqt[RTW8851B_LOK_GRAM] = { 1301 0x09, 0x09, 0x09, 0x12, 0x12, 0x12, 0x1b, 0x1b, 0x1b, 0x1b}; 1302 static const u32 a_wa[RTW8851B_LOK_GRAM] = { 1303 0x80, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x97}; 1304 bool fail = false; 1305 u8 i; 1306 1307 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1308 1309 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTDBG, RR_LUTDBG_LOK, 0x0); 1310 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR0, 0x0); 1311 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR1, 0x7); 1312 1313 for (i = 0; i < RTW8851B_LOK_GRAM; i++) { 1314 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_TG, a_txbb[i]); 1315 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RR_LUTWA_M1, a_wa[i]); 1316 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 1317 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, a_itqt[i]); 1318 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021); 1319 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 1320 0x00000109 | (1 << (4 + path))); 1321 fail |= _iqk_check_cal(rtwdev, path); 1322 1323 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1324 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, a_itqt[i]); 1325 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021); 1326 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 1327 0x00000309 | (1 << (4 + path))); 1328 fail |= _iqk_check_cal(rtwdev, path); 1329 1330 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1331 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); 1332 1333 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1334 "[IQK]S0, i = %x, 0x8[19:15] = 0x%x,0x8[09:05] = 0x%x\n", i, 1335 rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0xf8000), 1336 rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0x003e0)); 1337 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1338 "[IQK]S0, i = %x, 0x9[19:16] = 0x%x,0x9[09:06] = 0x%x\n", i, 1339 rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0xf0000), 1340 rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0x003c0)); 1341 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1342 "[IQK]S0, i = %x, 0x58 = %x\n", i, 1343 rtw89_read_rf(rtwdev, RF_PATH_A, RR_TXMO, RFREG_MASK)); 1344 } 1345 1346 return fail; 1347 } 1348 1349 static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path) 1350 { 1351 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1352 1353 switch (iqk_info->iqk_band[path]) { 1354 case RTW89_BAND_2G: 1355 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RTW89_BAND_2G\n"); 1356 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_txk_2ghz_defs_tbl); 1357 break; 1358 case RTW89_BAND_5G: 1359 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RTW89_BAND_5G\n"); 1360 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_txk_5ghz_defs_tbl); 1361 break; 1362 default: 1363 break; 1364 } 1365 } 1366 1367 #define IQK_LOK_RETRY 1 1368 1369 static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1370 u8 path) 1371 { 1372 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1373 bool lok_is_fail; 1374 u8 i; 1375 1376 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1377 1378 for (i = 0; i < IQK_LOK_RETRY; i++) { 1379 _iqk_txk_setting(rtwdev, path); 1380 if (iqk_info->iqk_band[path] == RTW89_BAND_2G) 1381 lok_is_fail = _iqk_2g_lok(rtwdev, phy_idx, path); 1382 else 1383 lok_is_fail = _iqk_5g_lok(rtwdev, phy_idx, path); 1384 1385 if (!lok_is_fail) 1386 break; 1387 } 1388 1389 if (iqk_info->is_nbiqk) { 1390 if (iqk_info->iqk_band[path] == RTW89_BAND_2G) 1391 iqk_info->iqk_tx_fail[0][path] = 1392 _iqk_2g_nbtxk(rtwdev, phy_idx, path); 1393 else 1394 iqk_info->iqk_tx_fail[0][path] = 1395 _iqk_5g_nbtxk(rtwdev, phy_idx, path); 1396 } else { 1397 if (iqk_info->iqk_band[path] == RTW89_BAND_2G) 1398 iqk_info->iqk_tx_fail[0][path] = 1399 _txk_2g_group_sel(rtwdev, phy_idx, path); 1400 else 1401 iqk_info->iqk_tx_fail[0][path] = 1402 _txk_5g_group_sel(rtwdev, phy_idx, path); 1403 } 1404 1405 _iqk_rxclk_setting(rtwdev, path); 1406 _iqk_rxk_setting(rtwdev, path); 1407 _adc_fifo_rst(rtwdev, phy_idx, path); 1408 1409 if (iqk_info->is_nbiqk) { 1410 if (iqk_info->iqk_band[path] == RTW89_BAND_2G) 1411 iqk_info->iqk_rx_fail[0][path] = 1412 _iqk_2g_nbrxk(rtwdev, phy_idx, path); 1413 else 1414 iqk_info->iqk_rx_fail[0][path] = 1415 _iqk_5g_nbrxk(rtwdev, phy_idx, path); 1416 } else { 1417 if (iqk_info->iqk_band[path] == RTW89_BAND_2G) 1418 iqk_info->iqk_rx_fail[0][path] = 1419 _rxk_2g_group_sel(rtwdev, phy_idx, path); 1420 else 1421 iqk_info->iqk_rx_fail[0][path] = 1422 _rxk_5g_group_sel(rtwdev, phy_idx, path); 1423 } 1424 } 1425 1426 static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, 1427 u32 backup_bb_reg_val[]) 1428 { 1429 u32 i; 1430 1431 for (i = 0; i < BACKUP_BB_REGS_NR; i++) { 1432 backup_bb_reg_val[i] = 1433 rtw89_phy_read32_mask(rtwdev, rtw8851b_backup_bb_regs[i], 1434 MASKDWORD); 1435 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1436 "[RFK]backup bb reg : %x, value =%x\n", 1437 rtw8851b_backup_bb_regs[i], backup_bb_reg_val[i]); 1438 } 1439 } 1440 1441 static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, 1442 u32 backup_rf_reg_val[], u8 rf_path) 1443 { 1444 u32 i; 1445 1446 for (i = 0; i < BACKUP_RF_REGS_NR; i++) { 1447 backup_rf_reg_val[i] = 1448 rtw89_read_rf(rtwdev, rf_path, 1449 rtw8851b_backup_rf_regs[i], RFREG_MASK); 1450 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1451 "[RFK]backup rf S%d reg : %x, value =%x\n", rf_path, 1452 rtw8851b_backup_rf_regs[i], backup_rf_reg_val[i]); 1453 } 1454 } 1455 1456 static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev, 1457 const u32 backup_bb_reg_val[]) 1458 { 1459 u32 i; 1460 1461 for (i = 0; i < BACKUP_BB_REGS_NR; i++) { 1462 rtw89_phy_write32_mask(rtwdev, rtw8851b_backup_bb_regs[i], 1463 MASKDWORD, backup_bb_reg_val[i]); 1464 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1465 "[RFK]restore bb reg : %x, value =%x\n", 1466 rtw8851b_backup_bb_regs[i], backup_bb_reg_val[i]); 1467 } 1468 } 1469 1470 static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, 1471 const u32 backup_rf_reg_val[], u8 rf_path) 1472 { 1473 u32 i; 1474 1475 for (i = 0; i < BACKUP_RF_REGS_NR; i++) { 1476 rtw89_write_rf(rtwdev, rf_path, rtw8851b_backup_rf_regs[i], 1477 RFREG_MASK, backup_rf_reg_val[i]); 1478 1479 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1480 "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path, 1481 rtw8851b_backup_rf_regs[i], backup_rf_reg_val[i]); 1482 } 1483 } 1484 1485 static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1486 u8 path, enum rtw89_chanctx_idx chanctx_idx) 1487 { 1488 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); 1489 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1490 u8 idx = 0; 1491 1492 iqk_info->iqk_band[path] = chan->band_type; 1493 iqk_info->iqk_bw[path] = chan->band_width; 1494 iqk_info->iqk_ch[path] = chan->channel; 1495 iqk_info->iqk_table_idx[path] = idx; 1496 1497 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", 1498 path, phy, rtwdev->dbcc_en ? "on" : "off", 1499 iqk_info->iqk_band[path] == 0 ? "2G" : 1500 iqk_info->iqk_band[path] == 1 ? "5G" : "6G", 1501 iqk_info->iqk_ch[path], 1502 iqk_info->iqk_bw[path] == 0 ? "20M" : 1503 iqk_info->iqk_bw[path] == 1 ? "40M" : "80M"); 1504 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n", 1505 iqk_info->iqk_times, idx); 1506 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, iqk_info->syn1to2= 0x%x\n", 1507 path, iqk_info->syn1to2); 1508 } 1509 1510 static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1511 u8 path) 1512 { 1513 _iqk_by_path(rtwdev, phy_idx, path); 1514 } 1515 1516 static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path) 1517 { 1518 bool fail; 1519 1520 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1521 1522 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00001219); 1523 fsleep(10); 1524 fail = _iqk_check_cal(rtwdev, path); 1525 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] restore fail=%d\n", fail); 1526 1527 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RR_LUTWE_LOK, 0x0); 1528 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTDBG, RR_LUTDBG_TIA, 0x0); 1529 1530 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1531 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000); 1532 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); 1533 } 1534 1535 static void _iqk_afebb_restore(struct rtw89_dev *rtwdev, 1536 enum rtw89_phy_idx phy_idx, u8 path) 1537 { 1538 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_afebb_restore_defs_tbl); 1539 } 1540 1541 static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path) 1542 { 1543 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1544 1545 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 1546 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); 1547 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a); 1548 } 1549 1550 static void _iqk_macbb_setting(struct rtw89_dev *rtwdev, 1551 enum rtw89_phy_idx phy_idx, u8 path) 1552 { 1553 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1554 1555 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_macbb_defs_tbl); 1556 } 1557 1558 static void _iqk_init(struct rtw89_dev *rtwdev) 1559 { 1560 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1561 u8 idx, path; 1562 1563 rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0); 1564 1565 if (iqk_info->is_iqk_init) 1566 return; 1567 1568 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1569 1570 iqk_info->is_iqk_init = true; 1571 iqk_info->is_nbiqk = false; 1572 iqk_info->iqk_fft_en = false; 1573 iqk_info->iqk_sram_en = false; 1574 iqk_info->iqk_cfir_en = false; 1575 iqk_info->iqk_xym_en = false; 1576 iqk_info->iqk_times = 0x0; 1577 1578 for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) { 1579 iqk_info->iqk_channel[idx] = 0x0; 1580 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 1581 iqk_info->lok_cor_fail[idx][path] = false; 1582 iqk_info->lok_fin_fail[idx][path] = false; 1583 iqk_info->iqk_tx_fail[idx][path] = false; 1584 iqk_info->iqk_rx_fail[idx][path] = false; 1585 iqk_info->iqk_table_idx[path] = 0x0; 1586 } 1587 } 1588 } 1589 1590 static void _doiqk(struct rtw89_dev *rtwdev, bool force, 1591 enum rtw89_phy_idx phy_idx, u8 path, 1592 enum rtw89_chanctx_idx chanctx_idx) 1593 { 1594 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1595 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx); 1596 u32 backup_rf_val[RTW8851B_IQK_SS][BACKUP_RF_REGS_NR]; 1597 u32 backup_bb_val[BACKUP_BB_REGS_NR]; 1598 1599 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, 1600 BTC_WRFK_ONESHOT_START); 1601 1602 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1603 "[IQK]==========IQK start!!!!!==========\n"); 1604 iqk_info->iqk_times++; 1605 iqk_info->version = RTW8851B_IQK_VER; 1606 1607 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version); 1608 _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx); 1609 1610 _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]); 1611 _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path); 1612 _iqk_macbb_setting(rtwdev, phy_idx, path); 1613 _iqk_preset(rtwdev, path); 1614 _iqk_start_iqk(rtwdev, phy_idx, path); 1615 _iqk_restore(rtwdev, path); 1616 _iqk_afebb_restore(rtwdev, phy_idx, path); 1617 _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]); 1618 _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path); 1619 1620 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, 1621 BTC_WRFK_ONESHOT_STOP); 1622 } 1623 1624 static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1625 bool force, enum rtw89_chanctx_idx chanctx_idx) 1626 { 1627 _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx); 1628 } 1629 1630 static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 *reg, 1631 u32 reg_bkup[][DPK_KIP_REG_NUM_8851B], u8 path) 1632 { 1633 u8 i; 1634 1635 for (i = 0; i < DPK_KIP_REG_NUM_8851B; i++) { 1636 reg_bkup[path][i] = 1637 rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD); 1638 1639 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n", 1640 reg[i] + (path << 8), reg_bkup[path][i]); 1641 } 1642 } 1643 1644 static void _dpk_bkup_rf(struct rtw89_dev *rtwdev, const u32 *rf_reg, 1645 u32 rf_bkup[][DPK_RF_REG_NUM_8851B], u8 path) 1646 { 1647 u8 i; 1648 1649 for (i = 0; i < DPK_RF_REG_NUM_8851B; i++) { 1650 rf_bkup[path][i] = rtw89_read_rf(rtwdev, path, rf_reg[i], RFREG_MASK); 1651 1652 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup RF S%d 0x%x = %x\n", 1653 path, rf_reg[i], rf_bkup[path][i]); 1654 } 1655 } 1656 1657 static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 *reg, 1658 u32 reg_bkup[][DPK_KIP_REG_NUM_8851B], u8 path) 1659 { 1660 u8 i; 1661 1662 for (i = 0; i < DPK_KIP_REG_NUM_8851B; i++) { 1663 rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD, 1664 reg_bkup[path][i]); 1665 1666 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1667 "[DPK] Reload 0x%x = %x\n", 1668 reg[i] + (path << 8), reg_bkup[path][i]); 1669 } 1670 } 1671 1672 static void _dpk_reload_rf(struct rtw89_dev *rtwdev, const u32 *rf_reg, 1673 u32 rf_bkup[][DPK_RF_REG_NUM_8851B], u8 path) 1674 { 1675 u8 i; 1676 1677 for (i = 0; i < DPK_RF_REG_NUM_8851B; i++) { 1678 rtw89_write_rf(rtwdev, path, rf_reg[i], RFREG_MASK, rf_bkup[path][i]); 1679 1680 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1681 "[DPK] Reload RF S%d 0x%x = %x\n", path, 1682 rf_reg[i], rf_bkup[path][i]); 1683 } 1684 } 1685 1686 static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1687 enum rtw89_rf_path path, enum dpk_id id) 1688 { 1689 u16 dpk_cmd; 1690 u32 val; 1691 int ret; 1692 1693 dpk_cmd = ((id << 8) | (0x19 + path * 0x12)); 1694 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd); 1695 1696 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 1697 10, 20000, false, 1698 rtwdev, 0xbff8, MASKBYTE0); 1699 if (ret) 1700 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 1 timeout\n"); 1701 1702 udelay(1); 1703 1704 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000, 1705 1, 2000, false, 1706 rtwdev, R_RPT_COM, MASKLWORD); 1707 if (ret) 1708 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 2 timeout\n"); 1709 1710 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0); 1711 1712 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1713 "[DPK] one-shot for %s = 0x%04x\n", 1714 id == 0x28 ? "KIP_PRESET" : 1715 id == 0x29 ? "DPK_TXAGC" : 1716 id == 0x2a ? "DPK_RXAGC" : 1717 id == 0x2b ? "SYNC" : 1718 id == 0x2c ? "GAIN_LOSS" : 1719 id == 0x2d ? "MDPK_IDL" : 1720 id == 0x2f ? "DPK_GAIN_NORM" : 1721 id == 0x31 ? "KIP_RESTORE" : 1722 id == 0x6 ? "LBK_RXIQK" : "Unknown id", 1723 dpk_cmd); 1724 } 1725 1726 static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 1727 bool off) 1728 { 1729 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1730 u8 kidx = dpk->cur_idx[path]; 1731 u8 off_reverse = off ? 0 : 1; 1732 u8 val; 1733 1734 val = dpk->is_dpk_enable * off_reverse * dpk->bp[path][kidx].path_ok; 1735 1736 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 1737 0xf0000000, val); 1738 1739 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path, 1740 kidx, val == 0 ? "disable" : "enable"); 1741 } 1742 1743 static void _dpk_init(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 1744 { 1745 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1746 1747 u8 kidx = dpk->cur_idx[path]; 1748 1749 dpk->bp[path][kidx].path_ok = 0; 1750 } 1751 1752 static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1753 enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx) 1754 { 1755 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); 1756 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1757 1758 u8 kidx = dpk->cur_idx[path]; 1759 1760 dpk->bp[path][kidx].band = chan->band_type; 1761 dpk->bp[path][kidx].ch = chan->band_width; 1762 dpk->bp[path][kidx].bw = chan->channel; 1763 1764 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1765 "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n", 1766 path, dpk->cur_idx[path], phy, 1767 rtwdev->is_tssi_mode[path] ? "on" : "off", 1768 rtwdev->dbcc_en ? "on" : "off", 1769 dpk->bp[path][kidx].band == 0 ? "2G" : 1770 dpk->bp[path][kidx].band == 1 ? "5G" : "6G", 1771 dpk->bp[path][kidx].ch, 1772 dpk->bp[path][kidx].bw == 0 ? "20M" : 1773 dpk->bp[path][kidx].bw == 1 ? "40M" : 1774 dpk->bp[path][kidx].bw == 2 ? "80M" : "160M"); 1775 } 1776 1777 static void _dpk_rxagc_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 1778 bool turn_on) 1779 { 1780 if (path == RF_PATH_A) 1781 rtw89_phy_write32_mask(rtwdev, R_P0_AGC_CTL, B_P0_AGC_EN, turn_on); 1782 else 1783 rtw89_phy_write32_mask(rtwdev, R_P1_AGC_CTL, B_P1_AGC_EN, turn_on); 1784 1785 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d RXAGC is %s\n", path, 1786 turn_on ? "turn_on" : "turn_off"); 1787 } 1788 1789 static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 1790 { 1791 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(16 + path), 0x1); 1792 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(20 + path), 0x0); 1793 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(24 + path), 0x1); 1794 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(28 + path), 0x0); 1795 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0xd801dffd); 1796 1797 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_bb_afe_defs_tbl); 1798 1799 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(20 + path), 0x1); 1800 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(28 + path), 0x1); 1801 1802 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE setting\n", path); 1803 } 1804 1805 static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 1806 { 1807 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x0); 1808 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(16 + path), 0x1); 1809 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(20 + path), 0x0); 1810 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(24 + path), 0x1); 1811 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(28 + path), 0x0); 1812 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0x00000000); 1813 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), B_P0_TXCK_ALL, 0x00); 1814 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(16 + path), 0x0); 1815 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(24 + path), 0x0); 1816 1817 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE restore\n", path); 1818 } 1819 1820 static void _dpk_tssi_pause(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 1821 bool is_pause) 1822 { 1823 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), 1824 B_P0_TSSI_TRK_EN, is_pause); 1825 1826 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path, 1827 is_pause ? "pause" : "resume"); 1828 } 1829 1830 static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 1831 { 1832 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1833 1834 if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) { 1835 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0); 1836 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xffe0fa00); 1837 } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) { 1838 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2); 1839 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xff4009e0); 1840 } else { 1841 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1); 1842 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xf9f007d0); 1843 } 1844 1845 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG Select for %s\n", 1846 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" : 1847 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M"); 1848 } 1849 1850 static void _dpk_txpwr_bb_force(struct rtw89_dev *rtwdev, 1851 enum rtw89_rf_path path, bool force) 1852 { 1853 rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_ON, force); 1854 rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H + (path << 13), B_TXPWRB_RDY, force); 1855 1856 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d txpwr_bb_force %s\n", 1857 path, force ? "on" : "off"); 1858 } 1859 1860 static void _dpk_kip_pwr_clk_onoff(struct rtw89_dev *rtwdev, bool turn_on) 1861 { 1862 if (turn_on) { 1863 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); 1864 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a); 1865 } else { 1866 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000); 1867 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); 1868 rtw89_phy_write32_mask(rtwdev, R_DPK_WR, BIT(18), 0x1); 1869 } 1870 } 1871 1872 static void _dpk_kip_control_rfc(struct rtw89_dev *rtwdev, 1873 enum rtw89_rf_path path, bool ctrl_by_kip) 1874 { 1875 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), 1876 B_IQK_RFC_ON, ctrl_by_kip); 1877 1878 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RFC is controlled by %s\n", 1879 ctrl_by_kip ? "KIP" : "BB"); 1880 } 1881 1882 static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1883 enum rtw89_rf_path path, u8 kidx) 1884 { 1885 rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, 1886 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); 1887 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 1888 B_DPD_SEL, 0x01); 1889 1890 _dpk_kip_control_rfc(rtwdev, path, true); 1891 _dpk_one_shot(rtwdev, phy, path, D_KIP_PRESET); 1892 } 1893 1894 static void _dpk_kip_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1895 enum rtw89_rf_path path) 1896 { 1897 _dpk_one_shot(rtwdev, phy, path, D_KIP_RESTORE); 1898 _dpk_kip_control_rfc(rtwdev, path, false); 1899 _dpk_txpwr_bb_force(rtwdev, path, false); 1900 1901 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path); 1902 } 1903 1904 static void _dpk_kset_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 1905 { 1906 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1907 1908 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0x10); 1909 1910 dpk->cur_k_set = 1911 rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_KSET) - 1; 1912 } 1913 1914 static void _dpk_para_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 1915 { 1916 static const u32 reg[RTW89_DPK_BKUP_NUM][DPK_KSET_NUM] = { 1917 {0x8190, 0x8194, 0x8198, 0x81a4}, 1918 {0x81a8, 0x81c4, 0x81c8, 0x81e8} 1919 }; 1920 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1921 u8 cur_k_set = dpk->cur_k_set; 1922 u32 para; 1923 1924 if (cur_k_set >= DPK_KSET_NUM) { 1925 rtw89_warn(rtwdev, "DPK cur_k_set = %d\n", cur_k_set); 1926 cur_k_set = 2; 1927 } 1928 1929 para = rtw89_phy_read32_mask(rtwdev, reg[kidx][cur_k_set] + (path << 8), 1930 MASKDWORD); 1931 1932 dpk->bp[path][kidx].txagc_dpk = (para >> 10) & 0x3f; 1933 dpk->bp[path][kidx].ther_dpk = (para >> 26) & 0x3f; 1934 1935 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1936 "[DPK] thermal/ txagc_RF (K%d) = 0x%x/ 0x%x\n", 1937 dpk->cur_k_set, dpk->bp[path][kidx].ther_dpk, 1938 dpk->bp[path][kidx].txagc_dpk); 1939 } 1940 1941 static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 1942 { 1943 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1944 u8 corr_val, corr_idx, rxbb; 1945 u16 dc_i, dc_q; 1946 u8 rxbb_ov; 1947 1948 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); 1949 1950 corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI); 1951 corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV); 1952 dpk->corr_idx[path][kidx] = corr_idx; 1953 dpk->corr_val[path][kidx] = corr_val; 1954 1955 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9); 1956 1957 dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); 1958 dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ); 1959 1960 dc_i = abs(sign_extend32(dc_i, 11)); 1961 dc_q = abs(sign_extend32(dc_q, 11)); 1962 1963 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1964 "[DPK] S%d Corr_idx/ Corr_val /DC I/Q, = %d / %d / %d / %d\n", 1965 path, corr_idx, corr_val, dc_i, dc_q); 1966 1967 dpk->dc_i[path][kidx] = dc_i; 1968 dpk->dc_q[path][kidx] = dc_q; 1969 1970 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x8); 1971 rxbb = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB); 1972 1973 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x31); 1974 rxbb_ov = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXOV); 1975 1976 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1977 "[DPK] S%d RXBB/ RXAGC_done /RXBB_ovlmt = %d / %d / %d\n", 1978 path, rxbb, 1979 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DONE), 1980 rxbb_ov); 1981 1982 if (dc_i > 200 || dc_q > 200 || corr_val < 170) 1983 return true; 1984 else 1985 return false; 1986 } 1987 1988 static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1989 enum rtw89_rf_path path, u8 dbm, 1990 bool set_from_bb) 1991 { 1992 if (set_from_bb) { 1993 dbm = clamp_t(u8, dbm, 7, 24); 1994 1995 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1996 "[DPK] set S%d txagc to %ddBm\n", path, dbm); 1997 rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), 1998 B_TXPWRB_VAL, dbm << 2); 1999 } 2000 2001 _dpk_one_shot(rtwdev, phy, path, D_TXAGC); 2002 _dpk_kset_query(rtwdev, path); 2003 } 2004 2005 static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2006 enum rtw89_rf_path path, u8 kidx) 2007 { 2008 _dpk_kip_control_rfc(rtwdev, path, false); 2009 rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, 2010 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); 2011 _dpk_kip_control_rfc(rtwdev, path, true); 2012 2013 _dpk_one_shot(rtwdev, phy, path, D_RXAGC); 2014 return _dpk_sync_check(rtwdev, path, kidx); 2015 } 2016 2017 static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2018 enum rtw89_rf_path path) 2019 { 2020 u32 rf_11, reg_81cc; 2021 u8 cur_rxbb; 2022 2023 rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1); 2024 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1); 2025 2026 _dpk_kip_control_rfc(rtwdev, path, false); 2027 2028 cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_RXB); 2029 rf_11 = rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK); 2030 reg_81cc = rtw89_phy_read32_mask(rtwdev, R_KIP_IQP + (path << 8), 2031 B_KIP_IQP_SW); 2032 2033 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); 2034 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x3); 2035 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0xd); 2036 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RXB, 0x1f); 2037 2038 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x12); 2039 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, 0x3); 2040 2041 _dpk_kip_control_rfc(rtwdev, path, true); 2042 2043 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, MASKDWORD, 0x00250025); 2044 2045 _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK); 2046 2047 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path, 2048 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD)); 2049 2050 _dpk_kip_control_rfc(rtwdev, path, false); 2051 2052 rtw89_write_rf(rtwdev, path, RR_TXIG, RFREG_MASK, rf_11); 2053 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RXB, cur_rxbb); 2054 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, reg_81cc); 2055 2056 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0); 2057 rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0); 2058 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1); 2059 2060 _dpk_kip_control_rfc(rtwdev, path, true); 2061 } 2062 2063 static void _dpk_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 2064 { 2065 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2066 2067 if (dpk->bp[path][kidx].band == RTW89_BAND_2G) { 2068 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50521); 2069 rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK); 2070 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x0); 2071 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x7); 2072 } else { 2073 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 2074 0x50521 | BIT(rtwdev->dbcc_en)); 2075 rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK); 2076 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SATT, 0x3); 2077 } 2078 2079 rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1); 2080 rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1); 2081 rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0); 2082 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_EBW, 0x0); 2083 } 2084 2085 static void _dpk_bypass_rxiqc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 2086 { 2087 rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1); 2088 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000002); 2089 2090 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Bypass RXIQC\n"); 2091 } 2092 2093 static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev) 2094 { 2095 u16 dgain; 2096 2097 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); 2098 dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); 2099 2100 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain); 2101 2102 return dgain; 2103 } 2104 2105 static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev) 2106 { 2107 u8 result; 2108 2109 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6); 2110 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1); 2111 result = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL); 2112 2113 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp GL = %d\n", result); 2114 2115 return result; 2116 } 2117 2118 static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2119 enum rtw89_rf_path path, u8 kidx) 2120 { 2121 _dpk_one_shot(rtwdev, phy, path, D_GAIN_LOSS); 2122 _dpk_kip_set_txagc(rtwdev, phy, path, 0xff, false); 2123 2124 rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A1, 0xf078); 2125 rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A0, 0x0); 2126 2127 return _dpk_gainloss_read(rtwdev); 2128 } 2129 2130 static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, u8 is_check) 2131 { 2132 u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0; 2133 u32 val1_sqrt_sum, val2_sqrt_sum; 2134 u8 i; 2135 2136 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06); 2137 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0); 2138 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08); 2139 2140 if (is_check) { 2141 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00); 2142 val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); 2143 val1_i = abs(sign_extend32(val1_i, 11)); 2144 val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); 2145 val1_q = abs(sign_extend32(val1_q, 11)); 2146 2147 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f); 2148 val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); 2149 val2_i = abs(sign_extend32(val2_i, 11)); 2150 val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); 2151 val2_q = abs(sign_extend32(val2_q, 11)); 2152 2153 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n", 2154 phy_div(val1_i * val1_i + val1_q * val1_q, 2155 val2_i * val2_i + val2_q * val2_q)); 2156 } else { 2157 for (i = 0; i < 32; i++) { 2158 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i); 2159 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2160 "[DPK] PAS_Read[%02d]= 0x%08x\n", i, 2161 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD)); 2162 } 2163 } 2164 2165 val1_sqrt_sum = val1_i * val1_i + val1_q * val1_q; 2166 val2_sqrt_sum = val2_i * val2_i + val2_q * val2_q; 2167 2168 if (val1_sqrt_sum < val2_sqrt_sum) 2169 return 2; 2170 else if (val1_sqrt_sum >= val2_sqrt_sum * 8 / 5) 2171 return 1; 2172 else 2173 return 0; 2174 } 2175 2176 static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2177 enum rtw89_rf_path path, u8 kidx, u8 init_xdbm, u8 loss_only) 2178 { 2179 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2180 u8 tmp_dbm = init_xdbm, tmp_gl_idx = 0; 2181 u8 step = DPK_AGC_STEP_SYNC_DGAIN; 2182 u8 goout = 0, agc_cnt = 0; 2183 bool is_fail = false; 2184 int limit = 200; 2185 u8 tmp_rxbb; 2186 u16 dgain; 2187 2188 do { 2189 switch (step) { 2190 case DPK_AGC_STEP_SYNC_DGAIN: 2191 is_fail = _dpk_kip_set_rxagc(rtwdev, phy, path, kidx); 2192 2193 if (is_fail) { 2194 goout = 1; 2195 break; 2196 } 2197 2198 dgain = _dpk_dgain_read(rtwdev); 2199 2200 if (dgain > 0x5fc || dgain < 0x556) { 2201 _dpk_one_shot(rtwdev, phy, path, D_SYNC); 2202 dgain = _dpk_dgain_read(rtwdev); 2203 } 2204 2205 if (agc_cnt == 0) { 2206 if (dpk->bp[path][kidx].band == RTW89_BAND_2G) 2207 _dpk_bypass_rxiqc(rtwdev, path); 2208 else 2209 _dpk_lbk_rxiqk(rtwdev, phy, path); 2210 } 2211 step = DPK_AGC_STEP_GAIN_LOSS_IDX; 2212 break; 2213 2214 case DPK_AGC_STEP_GAIN_LOSS_IDX: 2215 tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx); 2216 2217 if (_dpk_pas_read(rtwdev, true) == 2 && tmp_gl_idx > 0) 2218 step = DPK_AGC_STEP_GL_LT_CRITERION; 2219 else if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true) == 1) || 2220 tmp_gl_idx >= 7) 2221 step = DPK_AGC_STEP_GL_GT_CRITERION; 2222 else if (tmp_gl_idx == 0) 2223 step = DPK_AGC_STEP_GL_LT_CRITERION; 2224 else 2225 step = DPK_AGC_STEP_SET_TX_GAIN; 2226 break; 2227 2228 case DPK_AGC_STEP_GL_GT_CRITERION: 2229 if (tmp_dbm <= 7) { 2230 goout = 1; 2231 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2232 "[DPK] Txagc@lower bound!!\n"); 2233 } else { 2234 tmp_dbm = max_t(u8, tmp_dbm - 3, 7); 2235 _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true); 2236 } 2237 step = DPK_AGC_STEP_SYNC_DGAIN; 2238 agc_cnt++; 2239 break; 2240 2241 case DPK_AGC_STEP_GL_LT_CRITERION: 2242 if (tmp_dbm >= 24) { 2243 goout = 1; 2244 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2245 "[DPK] Txagc@upper bound!!\n"); 2246 } else { 2247 tmp_dbm = min_t(u8, tmp_dbm + 2, 24); 2248 _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true); 2249 } 2250 step = DPK_AGC_STEP_SYNC_DGAIN; 2251 agc_cnt++; 2252 break; 2253 2254 case DPK_AGC_STEP_SET_TX_GAIN: 2255 _dpk_kip_control_rfc(rtwdev, path, false); 2256 tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_RXB); 2257 tmp_rxbb = min_t(u8, tmp_rxbb + tmp_gl_idx, 0x1f); 2258 2259 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RXB, tmp_rxbb); 2260 2261 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2262 "[DPK] Adjust RXBB (%+d) = 0x%x\n", 2263 tmp_gl_idx, tmp_rxbb); 2264 _dpk_kip_control_rfc(rtwdev, path, true); 2265 goout = 1; 2266 break; 2267 default: 2268 goout = 1; 2269 break; 2270 } 2271 } while (!goout && agc_cnt < 6 && limit-- > 0); 2272 2273 return is_fail; 2274 } 2275 2276 static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order) 2277 { 2278 switch (order) { 2279 case 0: /* (5,3,1) */ 2280 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, 0x0); 2281 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, 0x2); 2282 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x4); 2283 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, 0x1); 2284 break; 2285 case 1: /* (5,3,0) */ 2286 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, 0x1); 2287 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, 0x1); 2288 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0); 2289 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, 0x0); 2290 break; 2291 case 2: /* (5,0,0) */ 2292 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, 0x2); 2293 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, 0x0); 2294 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0); 2295 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, 0x0); 2296 break; 2297 case 3: /* (7,3,1) */ 2298 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, 0x3); 2299 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, 0x3); 2300 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x4); 2301 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, 0x1); 2302 break; 2303 default: 2304 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2305 "[DPK] Wrong MDPD order!!(0x%x)\n", order); 2306 break; 2307 } 2308 2309 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n", 2310 order == 0x0 ? "(5,3,1)" : 2311 order == 0x1 ? "(5,3,0)" : 2312 order == 0x2 ? "(5,0,0)" : "(7,3,1)"); 2313 } 2314 2315 static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2316 enum rtw89_rf_path path, u8 kidx) 2317 { 2318 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_MA, 0x1); 2319 2320 if (rtw89_phy_read32_mask(rtwdev, R_IDL_MPA, B_IDL_MD500) == 0x1) 2321 _dpk_set_mdpd_para(rtwdev, 0x2); 2322 else if (rtw89_phy_read32_mask(rtwdev, R_IDL_MPA, B_IDL_MD530) == 0x1) 2323 _dpk_set_mdpd_para(rtwdev, 0x1); 2324 else 2325 _dpk_set_mdpd_para(rtwdev, 0x0); 2326 2327 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL, 0x0); 2328 fsleep(1000); 2329 2330 _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL); 2331 } 2332 2333 static u8 _dpk_order_convert(struct rtw89_dev *rtwdev) 2334 { 2335 u32 order; 2336 u8 val; 2337 2338 order = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP); 2339 2340 switch (order) { 2341 case 0: /* (5,3,1) */ 2342 val = 0x6; 2343 break; 2344 case 1: /* (5,3,0) */ 2345 val = 0x2; 2346 break; 2347 case 2: /* (5,0,0) */ 2348 val = 0x0; 2349 break; 2350 default: 2351 val = 0xff; 2352 break; 2353 } 2354 2355 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val); 2356 2357 return val; 2358 } 2359 2360 static void _dpk_gain_normalize(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2361 enum rtw89_rf_path path, u8 kidx, bool is_execute) 2362 { 2363 static const u32 reg[RTW89_DPK_BKUP_NUM][DPK_KSET_NUM] = { 2364 {0x8190, 0x8194, 0x8198, 0x81a4}, 2365 {0x81a8, 0x81c4, 0x81c8, 0x81e8} 2366 }; 2367 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2368 u8 cur_k_set = dpk->cur_k_set; 2369 2370 if (cur_k_set >= DPK_KSET_NUM) { 2371 rtw89_warn(rtwdev, "DPK cur_k_set = %d\n", cur_k_set); 2372 cur_k_set = 2; 2373 } 2374 2375 if (is_execute) { 2376 rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), 2377 B_DPK_GN_AG, 0x200); 2378 rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), 2379 B_DPK_GN_EN, 0x3); 2380 2381 _dpk_one_shot(rtwdev, phy, path, D_GAIN_NORM); 2382 } else { 2383 rtw89_phy_write32_mask(rtwdev, reg[kidx][cur_k_set] + (path << 8), 2384 0x0000007F, 0x5b); 2385 } 2386 2387 dpk->bp[path][kidx].gs = 2388 rtw89_phy_read32_mask(rtwdev, reg[kidx][cur_k_set] + (path << 8), 2389 0x0000007F); 2390 } 2391 2392 static void _dpk_on(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2393 enum rtw89_rf_path path, u8 kidx) 2394 { 2395 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2396 2397 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1); 2398 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0); 2399 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 2400 B_DPD_ORDER, _dpk_order_convert(rtwdev)); 2401 2402 dpk->bp[path][kidx].path_ok = 2403 dpk->bp[path][kidx].path_ok | BIT(dpk->cur_k_set); 2404 2405 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] path_ok = 0x%x\n", 2406 path, kidx, dpk->bp[path][kidx].path_ok); 2407 2408 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 2409 B_DPD_MEN, dpk->bp[path][kidx].path_ok); 2410 2411 _dpk_gain_normalize(rtwdev, phy, path, kidx, false); 2412 } 2413 2414 static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2415 enum rtw89_rf_path path) 2416 { 2417 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2418 u8 kidx = dpk->cur_idx[path]; 2419 u8 init_xdbm = 17; 2420 bool is_fail; 2421 2422 if (dpk->bp[path][kidx].band != RTW89_BAND_2G) 2423 init_xdbm = 15; 2424 2425 _dpk_kip_control_rfc(rtwdev, path, false); 2426 _rfk_rf_direct_cntrl(rtwdev, path, false); 2427 rtw89_write_rf(rtwdev, path, RR_BBDC, RFREG_MASK, 0x03ffd); 2428 2429 _dpk_rf_setting(rtwdev, path, kidx); 2430 _set_rx_dck(rtwdev, path, RF_DPK); 2431 2432 _dpk_kip_pwr_clk_onoff(rtwdev, true); 2433 _dpk_kip_preset(rtwdev, phy, path, kidx); 2434 _dpk_txpwr_bb_force(rtwdev, path, true); 2435 _dpk_kip_set_txagc(rtwdev, phy, path, init_xdbm, true); 2436 _dpk_tpg_sel(rtwdev, path, kidx); 2437 is_fail = _dpk_agc(rtwdev, phy, path, kidx, init_xdbm, false); 2438 if (is_fail) 2439 goto _error; 2440 2441 _dpk_idl_mpa(rtwdev, phy, path, kidx); 2442 _dpk_para_query(rtwdev, path, kidx); 2443 2444 _dpk_on(rtwdev, phy, path, kidx); 2445 _error: 2446 _dpk_kip_control_rfc(rtwdev, path, false); 2447 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX); 2448 2449 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d]_K%d %s\n", path, kidx, 2450 dpk->cur_k_set, is_fail ? "need Check" : "is Success"); 2451 2452 return is_fail; 2453 } 2454 2455 static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, 2456 enum rtw89_phy_idx phy, u8 kpath, 2457 enum rtw89_chanctx_idx chanctx_idx) 2458 { 2459 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2460 u32 kip_bkup[RF_PATH_NUM_8851B][DPK_KIP_REG_NUM_8851B] = {}; 2461 u32 rf_bkup[RF_PATH_NUM_8851B][DPK_RF_REG_NUM_8851B] = {}; 2462 bool is_fail; 2463 u8 path; 2464 2465 for (path = 0; path < RF_PATH_NUM_8851B; path++) 2466 dpk->cur_idx[path] = 0; 2467 2468 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 2469 if (!(kpath & BIT(path))) 2470 continue; 2471 _dpk_bkup_kip(rtwdev, dpk_kip_reg, kip_bkup, path); 2472 _dpk_bkup_rf(rtwdev, dpk_rf_reg, rf_bkup, path); 2473 _dpk_information(rtwdev, phy, path, chanctx_idx); 2474 _dpk_init(rtwdev, path); 2475 2476 if (rtwdev->is_tssi_mode[path]) 2477 _dpk_tssi_pause(rtwdev, path, true); 2478 } 2479 2480 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 2481 if (!(kpath & BIT(path))) 2482 continue; 2483 2484 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2485 "[DPK] ========= S%d[%d] DPK Start =========\n", 2486 path, dpk->cur_idx[path]); 2487 2488 _dpk_rxagc_onoff(rtwdev, path, false); 2489 _rfk_drf_direct_cntrl(rtwdev, path, false); 2490 _dpk_bb_afe_setting(rtwdev, path); 2491 2492 is_fail = _dpk_main(rtwdev, phy, path); 2493 _dpk_onoff(rtwdev, path, is_fail); 2494 } 2495 2496 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 2497 if (!(kpath & BIT(path))) 2498 continue; 2499 2500 _dpk_kip_restore(rtwdev, phy, path); 2501 _dpk_reload_kip(rtwdev, dpk_kip_reg, kip_bkup, path); 2502 _dpk_reload_rf(rtwdev, dpk_rf_reg, rf_bkup, path); 2503 _dpk_bb_afe_restore(rtwdev, path); 2504 _dpk_rxagc_onoff(rtwdev, path, true); 2505 2506 if (rtwdev->is_tssi_mode[path]) 2507 _dpk_tssi_pause(rtwdev, path, false); 2508 } 2509 2510 _dpk_kip_pwr_clk_onoff(rtwdev, false); 2511 } 2512 2513 static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force, 2514 enum rtw89_chanctx_idx chanctx_idx) 2515 { 2516 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2517 "[DPK] ****** 8851B DPK Start (Ver: 0x%x, Cv: %d) ******\n", 2518 DPK_VER_8851B, rtwdev->hal.cv); 2519 2520 _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy), chanctx_idx); 2521 } 2522 2523 static void _dpk_track(struct rtw89_dev *rtwdev) 2524 { 2525 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2526 s8 txagc_bb, txagc_bb_tp, txagc_ofst; 2527 s16 pwsf_tssi_ofst; 2528 s8 delta_ther = 0; 2529 u8 path, kidx; 2530 u8 txagc_rf; 2531 u8 cur_ther; 2532 2533 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 2534 kidx = dpk->cur_idx[path]; 2535 2536 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2537 "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n", 2538 path, kidx, dpk->bp[path][kidx].ch); 2539 2540 txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 2541 B_TXAGC_RF); 2542 txagc_bb = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 2543 MASKBYTE2); 2544 txagc_bb_tp = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BTP + (path << 13), 2545 B_TXAGC_BTP); 2546 2547 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), 2548 B_KIP_RPT_SEL, 0xf); 2549 cur_ther = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 2550 B_RPT_PER_TH); 2551 txagc_ofst = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 2552 B_RPT_PER_OF); 2553 pwsf_tssi_ofst = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 2554 B_RPT_PER_TSSI); 2555 pwsf_tssi_ofst = sign_extend32(pwsf_tssi_ofst, 12); 2556 2557 delta_ther = cur_ther - dpk->bp[path][kidx].ther_dpk; 2558 2559 delta_ther = delta_ther * 2 / 3; 2560 2561 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2562 "[DPK_TRK] extra delta_ther = %d (0x%x / 0x%x@k)\n", 2563 delta_ther, cur_ther, dpk->bp[path][kidx].ther_dpk); 2564 2565 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2566 "[DPK_TRK] delta_txagc = %d (0x%x / 0x%x@k)\n", 2567 txagc_rf - dpk->bp[path][kidx].txagc_dpk, 2568 txagc_rf, dpk->bp[path][kidx].txagc_dpk); 2569 2570 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2571 "[DPK_TRK] txagc_offset / pwsf_tssi_ofst = 0x%x / %+d\n", 2572 txagc_ofst, pwsf_tssi_ofst); 2573 2574 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2575 "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n", 2576 txagc_bb_tp, txagc_bb); 2577 2578 if (rtw89_phy_read32_mask(rtwdev, R_IDL_MPA, B_IDL_DN) == 0x0 && 2579 txagc_rf != 0) { 2580 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2581 "[DPK_TRK] New pwsf = 0x%x\n", 0x78 - delta_ther); 2582 2583 rtw89_phy_write32_mask(rtwdev, 2584 R_DPD_BND + (path << 8) + (kidx << 2), 2585 0x07FC0000, 0x78 - delta_ther); 2586 } 2587 } 2588 } 2589 2590 static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 2591 { 2592 u32 rf_reg5; 2593 u32 rck_val; 2594 u32 val; 2595 int ret; 2596 2597 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path); 2598 2599 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); 2600 2601 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 2602 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); 2603 2604 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n", 2605 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); 2606 2607 /* RCK trigger */ 2608 rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240); 2609 2610 ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30, 2611 false, rtwdev, path, RR_RCKS, BIT(3)); 2612 2613 rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA); 2614 2615 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n", 2616 rck_val, ret); 2617 2618 rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val); 2619 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); 2620 2621 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n", 2622 rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK)); 2623 } 2624 2625 static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2626 enum rtw89_rf_path path, const struct rtw89_chan *chan) 2627 { 2628 enum rtw89_band band = chan->band_type; 2629 2630 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_sys_defs_tbl); 2631 2632 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2633 &rtw8851b_tssi_sys_a_defs_2g_tbl, 2634 &rtw8851b_tssi_sys_a_defs_5g_tbl); 2635 } 2636 2637 static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, 2638 enum rtw89_phy_idx phy, 2639 enum rtw89_rf_path path) 2640 { 2641 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_init_txpwr_defs_a_tbl); 2642 } 2643 2644 static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev, 2645 enum rtw89_phy_idx phy, 2646 enum rtw89_rf_path path) 2647 { 2648 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_init_txpwr_he_tb_defs_a_tbl); 2649 } 2650 2651 static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2652 enum rtw89_rf_path path) 2653 { 2654 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_dck_defs_a_tbl); 2655 } 2656 2657 static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2658 enum rtw89_rf_path path, const struct rtw89_chan *chan) 2659 { 2660 #define RTW8851B_TSSI_GET_VAL(ptr, idx) \ 2661 ({ \ 2662 s8 *__ptr = (ptr); \ 2663 u8 __idx = (idx), __i, __v; \ 2664 u32 __val = 0; \ 2665 for (__i = 0; __i < 4; __i++) { \ 2666 __v = (__ptr[__idx + __i]); \ 2667 __val |= (__v << (8 * __i)); \ 2668 } \ 2669 __val; \ 2670 }) 2671 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 2672 u8 ch = chan->channel; 2673 u8 subband = chan->subband_type; 2674 const s8 *thm_up_a = NULL; 2675 const s8 *thm_down_a = NULL; 2676 u8 thermal = 0xff; 2677 s8 thm_ofst[64] = {0}; 2678 u32 tmp = 0; 2679 u8 i, j; 2680 2681 switch (subband) { 2682 default: 2683 case RTW89_CH_2G: 2684 thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_2ga_p; 2685 thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_2ga_n; 2686 break; 2687 case RTW89_CH_5G_BAND_1: 2688 thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_p[0]; 2689 thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_n[0]; 2690 break; 2691 case RTW89_CH_5G_BAND_3: 2692 thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_p[1]; 2693 thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_n[1]; 2694 break; 2695 case RTW89_CH_5G_BAND_4: 2696 thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_p[2]; 2697 thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_n[2]; 2698 break; 2699 } 2700 2701 if (path == RF_PATH_A) { 2702 thermal = tssi_info->thermal[RF_PATH_A]; 2703 2704 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2705 "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal); 2706 2707 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0); 2708 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1); 2709 2710 if (thermal == 0xff) { 2711 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32); 2712 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32); 2713 2714 for (i = 0; i < 64; i += 4) { 2715 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0); 2716 2717 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2718 "[TSSI] write 0x%x val=0x%08x\n", 2719 R_P0_TSSI_BASE + i, 0x0); 2720 } 2721 2722 } else { 2723 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 2724 thermal); 2725 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 2726 thermal); 2727 2728 i = 0; 2729 for (j = 0; j < 32; j++) 2730 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 2731 -thm_down_a[i++] : 2732 -thm_down_a[DELTA_SWINGIDX_SIZE - 1]; 2733 2734 i = 1; 2735 for (j = 63; j >= 32; j--) 2736 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 2737 thm_up_a[i++] : 2738 thm_up_a[DELTA_SWINGIDX_SIZE - 1]; 2739 2740 for (i = 0; i < 64; i += 4) { 2741 tmp = RTW8851B_TSSI_GET_VAL(thm_ofst, i); 2742 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp); 2743 2744 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2745 "[TSSI] write 0x%x val=0x%08x\n", 2746 0x5c00 + i, tmp); 2747 } 2748 } 2749 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1); 2750 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0); 2751 } 2752 #undef RTW8851B_TSSI_GET_VAL 2753 } 2754 2755 static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2756 enum rtw89_rf_path path) 2757 { 2758 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_dac_gain_defs_a_tbl); 2759 } 2760 2761 static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2762 enum rtw89_rf_path path, const struct rtw89_chan *chan) 2763 { 2764 enum rtw89_band band = chan->band_type; 2765 2766 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2767 &rtw8851b_tssi_slope_a_defs_2g_tbl, 2768 &rtw8851b_tssi_slope_a_defs_5g_tbl); 2769 } 2770 2771 static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2772 enum rtw89_rf_path path, bool all, 2773 const struct rtw89_chan *chan) 2774 { 2775 enum rtw89_band band = chan->band_type; 2776 2777 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2778 &rtw8851b_tssi_align_a_2g_defs_tbl, 2779 &rtw8851b_tssi_align_a_5g_defs_tbl); 2780 } 2781 2782 static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2783 enum rtw89_rf_path path) 2784 { 2785 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_slope_defs_a_tbl); 2786 } 2787 2788 static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2789 enum rtw89_rf_path path) 2790 { 2791 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_track_defs_a_tbl); 2792 } 2793 2794 static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev, 2795 enum rtw89_phy_idx phy, 2796 enum rtw89_rf_path path) 2797 { 2798 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_mv_avg_defs_a_tbl); 2799 } 2800 2801 static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 2802 { 2803 _tssi_set_tssi_track(rtwdev, phy, RF_PATH_A); 2804 _tssi_set_txagc_offset_mv_avg(rtwdev, phy, RF_PATH_A); 2805 2806 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x0); 2807 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0); 2808 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x1); 2809 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXGA_V1, RR_TXGA_V1_TRK_EN, 0x1); 2810 2811 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 2812 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x3); 2813 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0); 2814 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 2815 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); 2816 2817 rtwdev->is_tssi_mode[RF_PATH_A] = true; 2818 } 2819 2820 static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 2821 { 2822 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0); 2823 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 2824 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); 2825 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 2826 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1); 2827 2828 rtwdev->is_tssi_mode[RF_PATH_A] = false; 2829 } 2830 2831 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch) 2832 { 2833 switch (ch) { 2834 case 1 ... 2: 2835 return 0; 2836 case 3 ... 5: 2837 return 1; 2838 case 6 ... 8: 2839 return 2; 2840 case 9 ... 11: 2841 return 3; 2842 case 12 ... 13: 2843 return 4; 2844 case 14: 2845 return 5; 2846 } 2847 2848 return 0; 2849 } 2850 2851 #define TSSI_EXTRA_GROUP_BIT (BIT(31)) 2852 #define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx)) 2853 #define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT) 2854 #define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT) 2855 #define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1) 2856 2857 static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch) 2858 { 2859 switch (ch) { 2860 case 1 ... 2: 2861 return 0; 2862 case 3 ... 5: 2863 return 1; 2864 case 6 ... 8: 2865 return 2; 2866 case 9 ... 11: 2867 return 3; 2868 case 12 ... 14: 2869 return 4; 2870 case 36 ... 40: 2871 return 5; 2872 case 41 ... 43: 2873 return TSSI_EXTRA_GROUP(5); 2874 case 44 ... 48: 2875 return 6; 2876 case 49 ... 51: 2877 return TSSI_EXTRA_GROUP(6); 2878 case 52 ... 56: 2879 return 7; 2880 case 57 ... 59: 2881 return TSSI_EXTRA_GROUP(7); 2882 case 60 ... 64: 2883 return 8; 2884 case 100 ... 104: 2885 return 9; 2886 case 105 ... 107: 2887 return TSSI_EXTRA_GROUP(9); 2888 case 108 ... 112: 2889 return 10; 2890 case 113 ... 115: 2891 return TSSI_EXTRA_GROUP(10); 2892 case 116 ... 120: 2893 return 11; 2894 case 121 ... 123: 2895 return TSSI_EXTRA_GROUP(11); 2896 case 124 ... 128: 2897 return 12; 2898 case 129 ... 131: 2899 return TSSI_EXTRA_GROUP(12); 2900 case 132 ... 136: 2901 return 13; 2902 case 137 ... 139: 2903 return TSSI_EXTRA_GROUP(13); 2904 case 140 ... 144: 2905 return 14; 2906 case 149 ... 153: 2907 return 15; 2908 case 154 ... 156: 2909 return TSSI_EXTRA_GROUP(15); 2910 case 157 ... 161: 2911 return 16; 2912 case 162 ... 164: 2913 return TSSI_EXTRA_GROUP(16); 2914 case 165 ... 169: 2915 return 17; 2916 case 170 ... 172: 2917 return TSSI_EXTRA_GROUP(17); 2918 case 173 ... 177: 2919 return 18; 2920 } 2921 2922 return 0; 2923 } 2924 2925 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch) 2926 { 2927 switch (ch) { 2928 case 1 ... 8: 2929 return 0; 2930 case 9 ... 14: 2931 return 1; 2932 case 36 ... 48: 2933 return 2; 2934 case 52 ... 64: 2935 return 3; 2936 case 100 ... 112: 2937 return 4; 2938 case 116 ... 128: 2939 return 5; 2940 case 132 ... 144: 2941 return 6; 2942 case 149 ... 177: 2943 return 7; 2944 } 2945 2946 return 0; 2947 } 2948 2949 static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2950 enum rtw89_rf_path path, const struct rtw89_chan *chan) 2951 { 2952 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 2953 u32 gidx, gidx_1st, gidx_2nd; 2954 u8 ch = chan->channel; 2955 s8 de_1st; 2956 s8 de_2nd; 2957 s8 val; 2958 2959 gidx = _tssi_get_ofdm_group(rtwdev, ch); 2960 2961 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2962 "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx); 2963 2964 if (IS_TSSI_EXTRA_GROUP(gidx)) { 2965 gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx); 2966 gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx); 2967 de_1st = tssi_info->tssi_mcs[path][gidx_1st]; 2968 de_2nd = tssi_info->tssi_mcs[path][gidx_2nd]; 2969 val = (de_1st + de_2nd) / 2; 2970 2971 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2972 "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", 2973 path, val, de_1st, de_2nd); 2974 } else { 2975 val = tssi_info->tssi_mcs[path][gidx]; 2976 2977 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2978 "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); 2979 } 2980 2981 return val; 2982 } 2983 2984 static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2985 enum rtw89_rf_path path, const struct rtw89_chan *chan) 2986 { 2987 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 2988 u32 tgidx, tgidx_1st, tgidx_2nd; 2989 u8 ch = chan->channel; 2990 s8 tde_1st; 2991 s8 tde_2nd; 2992 s8 val; 2993 2994 tgidx = _tssi_get_trim_group(rtwdev, ch); 2995 2996 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2997 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", 2998 path, tgidx); 2999 3000 if (IS_TSSI_EXTRA_GROUP(tgidx)) { 3001 tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx); 3002 tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx); 3003 tde_1st = tssi_info->tssi_trim[path][tgidx_1st]; 3004 tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd]; 3005 val = (tde_1st + tde_2nd) / 2; 3006 3007 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3008 "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", 3009 path, val, tde_1st, tde_2nd); 3010 } else { 3011 val = tssi_info->tssi_trim[path][tgidx]; 3012 3013 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3014 "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", 3015 path, val); 3016 } 3017 3018 return val; 3019 } 3020 3021 static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3022 const struct rtw89_chan *chan) 3023 { 3024 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3025 u8 ch = chan->channel; 3026 u8 gidx; 3027 s8 ofdm_de; 3028 s8 trim_de; 3029 s32 val; 3030 u32 i; 3031 3032 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n", 3033 phy, ch); 3034 3035 for (i = RF_PATH_A; i < RTW8851B_TSSI_PATH_NR; i++) { 3036 gidx = _tssi_get_cck_group(rtwdev, ch); 3037 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan); 3038 val = tssi_info->tssi_cck[i][gidx] + trim_de; 3039 3040 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3041 "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n", 3042 i, gidx, tssi_info->tssi_cck[i][gidx], trim_de); 3043 3044 rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val); 3045 rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val); 3046 3047 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3048 "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n", 3049 _tssi_de_cck_long[i], 3050 rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i], 3051 _TSSI_DE_MASK)); 3052 3053 ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan); 3054 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan); 3055 val = ofdm_de + trim_de; 3056 3057 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3058 "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n", 3059 i, ofdm_de, trim_de); 3060 3061 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val); 3062 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val); 3063 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val); 3064 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val); 3065 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val); 3066 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val); 3067 3068 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3069 "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n", 3070 _tssi_de_mcs_20m[i], 3071 rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i], 3072 _TSSI_DE_MASK)); 3073 } 3074 } 3075 3076 static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 3077 { 3078 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3079 "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n" 3080 "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n", 3081 R_TSSI_PA_K1 + (path << 13), 3082 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K1 + (path << 13), MASKDWORD), 3083 R_TSSI_PA_K2 + (path << 13), 3084 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K2 + (path << 13), MASKDWORD), 3085 R_P0_TSSI_ALIM1 + (path << 13), 3086 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD), 3087 R_P0_TSSI_ALIM3 + (path << 13), 3088 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD), 3089 R_TSSI_PA_K5 + (path << 13), 3090 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K5 + (path << 13), MASKDWORD), 3091 R_P0_TSSI_ALIM2 + (path << 13), 3092 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD), 3093 R_P0_TSSI_ALIM4 + (path << 13), 3094 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD), 3095 R_TSSI_PA_K8 + (path << 13), 3096 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K8 + (path << 13), MASKDWORD)); 3097 } 3098 3099 static void _tssi_alimentk_done(struct rtw89_dev *rtwdev, 3100 enum rtw89_phy_idx phy, enum rtw89_rf_path path, 3101 const struct rtw89_chan *chan) 3102 { 3103 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3104 u8 channel = chan->channel; 3105 u8 band; 3106 3107 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3108 "======>%s phy=%d path=%d\n", __func__, phy, path); 3109 3110 if (channel >= 1 && channel <= 14) 3111 band = TSSI_ALIMK_2G; 3112 else if (channel >= 36 && channel <= 64) 3113 band = TSSI_ALIMK_5GL; 3114 else if (channel >= 100 && channel <= 144) 3115 band = TSSI_ALIMK_5GM; 3116 else if (channel >= 149 && channel <= 177) 3117 band = TSSI_ALIMK_5GH; 3118 else 3119 band = TSSI_ALIMK_2G; 3120 3121 if (tssi_info->alignment_done[path][band]) { 3122 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD, 3123 tssi_info->alignment_value[path][band][0]); 3124 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD, 3125 tssi_info->alignment_value[path][band][1]); 3126 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD, 3127 tssi_info->alignment_value[path][band][2]); 3128 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD, 3129 tssi_info->alignment_value[path][band][3]); 3130 } 3131 3132 _tssi_alimentk_dump_result(rtwdev, path); 3133 } 3134 3135 static void rtw8851b_by_rate_dpd(struct rtw89_dev *rtwdev) 3136 { 3137 rtw89_write32_mask(rtwdev, R_AX_PWR_SWING_OTHER_CTRL0, 3138 B_AX_CFIR_BY_RATE_OFF_MASK, 0x21861); 3139 } 3140 3141 void rtw8851b_dpk_init(struct rtw89_dev *rtwdev) 3142 { 3143 rtw8851b_by_rate_dpd(rtwdev); 3144 } 3145 3146 void rtw8851b_aack(struct rtw89_dev *rtwdev) 3147 { 3148 u32 tmp05, tmpd3, ib[4]; 3149 u32 tmp; 3150 int ret; 3151 int rek; 3152 int i; 3153 3154 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]DO AACK\n"); 3155 3156 tmp05 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK); 3157 tmpd3 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RFREG_MASK); 3158 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0x3); 3159 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x0); 3160 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_ST, 0x0); 3161 3162 for (rek = 0; rek < 4; rek++) { 3163 rtw89_write_rf(rtwdev, RF_PATH_A, RR_AACK, RFREG_MASK, 0x8201e); 3164 rtw89_write_rf(rtwdev, RF_PATH_A, RR_AACK, RFREG_MASK, 0x8201f); 3165 fsleep(100); 3166 3167 ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp, 3168 1, 1000, false, 3169 rtwdev, RF_PATH_A, 0xd0, BIT(16)); 3170 if (ret) 3171 rtw89_warn(rtwdev, "[LCK]AACK timeout\n"); 3172 3173 rtw89_write_rf(rtwdev, RF_PATH_A, RR_VCI, RR_VCI_ON, 0x1); 3174 for (i = 0; i < 4; i++) { 3175 rtw89_write_rf(rtwdev, RF_PATH_A, RR_VCO, RR_VCO_SEL, i); 3176 ib[i] = rtw89_read_rf(rtwdev, RF_PATH_A, RR_IBD, RR_IBD_VAL); 3177 } 3178 rtw89_write_rf(rtwdev, RF_PATH_A, RR_VCI, RR_VCI_ON, 0x0); 3179 3180 if (ib[0] != 0 && ib[1] != 0 && ib[2] != 0 && ib[3] != 0) 3181 break; 3182 } 3183 3184 if (rek != 0) 3185 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]AACK rek = %d\n", rek); 3186 3187 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, tmp05); 3188 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RFREG_MASK, tmpd3); 3189 } 3190 3191 static void _lck_keep_thermal(struct rtw89_dev *rtwdev) 3192 { 3193 struct rtw89_lck_info *lck = &rtwdev->lck; 3194 3195 lck->thermal[RF_PATH_A] = 3196 ewma_thermal_read(&rtwdev->phystat.avg_thermal[RF_PATH_A]); 3197 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 3198 "[LCK] path=%d thermal=0x%x", RF_PATH_A, lck->thermal[RF_PATH_A]); 3199 } 3200 3201 static void rtw8851b_lck(struct rtw89_dev *rtwdev) 3202 { 3203 u32 tmp05, tmp18, tmpd3; 3204 3205 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]DO LCK\n"); 3206 3207 tmp05 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK); 3208 tmp18 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 3209 tmpd3 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RFREG_MASK); 3210 3211 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0x3); 3212 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x0); 3213 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1); 3214 3215 _set_ch(rtwdev, tmp18); 3216 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RFREG_MASK, tmpd3); 3217 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, tmp05); 3218 3219 _lck_keep_thermal(rtwdev); 3220 } 3221 3222 #define RTW8851B_LCK_TH 8 3223 3224 void rtw8851b_lck_track(struct rtw89_dev *rtwdev) 3225 { 3226 struct rtw89_lck_info *lck = &rtwdev->lck; 3227 u8 cur_thermal; 3228 int delta; 3229 3230 cur_thermal = 3231 ewma_thermal_read(&rtwdev->phystat.avg_thermal[RF_PATH_A]); 3232 delta = abs((int)cur_thermal - lck->thermal[RF_PATH_A]); 3233 3234 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 3235 "[LCK] path=%d current thermal=0x%x delta=0x%x\n", 3236 RF_PATH_A, cur_thermal, delta); 3237 3238 if (delta >= RTW8851B_LCK_TH) { 3239 rtw8851b_aack(rtwdev); 3240 rtw8851b_lck(rtwdev); 3241 } 3242 } 3243 3244 void rtw8851b_lck_init(struct rtw89_dev *rtwdev) 3245 { 3246 _lck_keep_thermal(rtwdev); 3247 } 3248 3249 void rtw8851b_rck(struct rtw89_dev *rtwdev) 3250 { 3251 _rck(rtwdev, RF_PATH_A); 3252 } 3253 3254 void rtw8851b_dack(struct rtw89_dev *rtwdev) 3255 { 3256 _dac_cal(rtwdev, false); 3257 } 3258 3259 void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 3260 enum rtw89_chanctx_idx chanctx_idx) 3261 { 3262 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); 3263 u32 tx_en; 3264 3265 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START); 3266 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); 3267 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); 3268 3269 _iqk_init(rtwdev); 3270 _iqk(rtwdev, phy_idx, false, chanctx_idx); 3271 3272 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); 3273 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP); 3274 } 3275 3276 void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 3277 enum rtw89_chanctx_idx chanctx_idx) 3278 { 3279 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); 3280 u32 tx_en; 3281 3282 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START); 3283 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); 3284 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); 3285 3286 _rx_dck(rtwdev, phy_idx, false, chanctx_idx); 3287 3288 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); 3289 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP); 3290 } 3291 3292 void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 3293 enum rtw89_chanctx_idx chanctx_idx) 3294 { 3295 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); 3296 u32 tx_en; 3297 3298 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START); 3299 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); 3300 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); 3301 3302 rtwdev->dpk.is_dpk_enable = true; 3303 rtwdev->dpk.is_dpk_reload_en = false; 3304 _dpk(rtwdev, phy_idx, false, chanctx_idx); 3305 3306 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); 3307 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP); 3308 } 3309 3310 void rtw8851b_dpk_track(struct rtw89_dev *rtwdev) 3311 { 3312 _dpk_track(rtwdev); 3313 } 3314 3315 void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3316 bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx) 3317 { 3318 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); 3319 u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_A, chanctx_idx); 3320 u8 i; 3321 3322 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy); 3323 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); 3324 3325 _tssi_disable(rtwdev, phy); 3326 3327 for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) { 3328 _tssi_set_sys(rtwdev, phy, i, chan); 3329 _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i); 3330 _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i); 3331 _tssi_set_dck(rtwdev, phy, i); 3332 _tssi_set_tmeter_tbl(rtwdev, phy, i, chan); 3333 _tssi_set_dac_gain_tbl(rtwdev, phy, i); 3334 _tssi_slope_cal_org(rtwdev, phy, i, chan); 3335 _tssi_alignment_default(rtwdev, phy, i, true, chan); 3336 _tssi_set_tssi_slope(rtwdev, phy, i); 3337 } 3338 3339 _tssi_enable(rtwdev, phy); 3340 _tssi_set_efuse_to_de(rtwdev, phy, chan); 3341 3342 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); 3343 } 3344 3345 void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3346 const struct rtw89_chan *chan) 3347 { 3348 u8 channel = chan->channel; 3349 u32 i; 3350 3351 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3352 "======>%s phy=%d channel=%d\n", __func__, phy, channel); 3353 3354 _tssi_disable(rtwdev, phy); 3355 3356 for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) { 3357 _tssi_set_sys(rtwdev, phy, i, chan); 3358 _tssi_set_tmeter_tbl(rtwdev, phy, i, chan); 3359 _tssi_slope_cal_org(rtwdev, phy, i, chan); 3360 _tssi_alignment_default(rtwdev, phy, i, true, chan); 3361 } 3362 3363 _tssi_enable(rtwdev, phy); 3364 _tssi_set_efuse_to_de(rtwdev, phy, chan); 3365 } 3366 3367 static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev, 3368 enum rtw89_phy_idx phy, bool enable, 3369 enum rtw89_chanctx_idx chanctx_idx) 3370 { 3371 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); 3372 u8 channel = chan->channel; 3373 3374 rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n", 3375 __func__, channel); 3376 3377 if (enable) 3378 return; 3379 3380 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3381 "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x\n", 3382 __func__, 3383 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT)); 3384 3385 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0); 3386 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 3387 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); 3388 3389 _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan); 3390 3391 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3392 "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x\n", 3393 __func__, 3394 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT)); 3395 3396 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3397 "======> %s SCAN_END\n", __func__); 3398 } 3399 3400 void rtw8851b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, 3401 enum rtw89_phy_idx phy_idx, 3402 enum rtw89_chanctx_idx chanctx_idx) 3403 { 3404 if (scan_start) 3405 rtw8851b_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx); 3406 else 3407 rtw8851b_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx); 3408 } 3409 3410 static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 3411 enum rtw89_bandwidth bw, bool dav) 3412 { 3413 u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1; 3414 u32 rf_reg18; 3415 3416 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__); 3417 3418 rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK); 3419 if (rf_reg18 == INV_RF_DATA) { 3420 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3421 "[RFK]Invalid RF_0x18 for Path-%d\n", path); 3422 return; 3423 } 3424 rf_reg18 &= ~RR_CFGCH_BW; 3425 3426 switch (bw) { 3427 case RTW89_CHANNEL_WIDTH_5: 3428 case RTW89_CHANNEL_WIDTH_10: 3429 case RTW89_CHANNEL_WIDTH_20: 3430 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M); 3431 break; 3432 case RTW89_CHANNEL_WIDTH_40: 3433 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M); 3434 break; 3435 case RTW89_CHANNEL_WIDTH_80: 3436 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M); 3437 break; 3438 default: 3439 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set CH\n"); 3440 } 3441 3442 rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN | 3443 RR_CFGCH_BW2) & RFREG_MASK; 3444 rf_reg18 |= RR_CFGCH_BW2; 3445 rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18); 3446 3447 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n", 3448 bw, path, reg18_addr, 3449 rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK)); 3450 } 3451 3452 static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3453 enum rtw89_bandwidth bw) 3454 { 3455 _bw_setting(rtwdev, RF_PATH_A, bw, true); 3456 _bw_setting(rtwdev, RF_PATH_A, bw, false); 3457 } 3458 3459 static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val) 3460 { 3461 u32 bak; 3462 u32 tmp; 3463 int ret; 3464 3465 bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK); 3466 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1); 3467 rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val); 3468 3469 ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000, 3470 false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY); 3471 if (ret) 3472 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n"); 3473 3474 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak); 3475 3476 return !!ret; 3477 } 3478 3479 static void _lck_check(struct rtw89_dev *rtwdev) 3480 { 3481 u32 tmp; 3482 3483 if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) { 3484 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n"); 3485 3486 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1); 3487 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0); 3488 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1); 3489 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0); 3490 } 3491 3492 udelay(10); 3493 3494 if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) { 3495 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n"); 3496 3497 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1); 3498 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 3499 _set_s0_arfc18(rtwdev, tmp); 3500 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0); 3501 } 3502 3503 if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) { 3504 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n"); 3505 3506 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK); 3507 rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp); 3508 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK); 3509 rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp); 3510 3511 rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1); 3512 rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0); 3513 rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3); 3514 rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0); 3515 3516 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1); 3517 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 3518 _set_s0_arfc18(rtwdev, tmp); 3519 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0); 3520 3521 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n", 3522 rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK), 3523 rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK)); 3524 } 3525 } 3526 3527 static void _set_ch(struct rtw89_dev *rtwdev, u32 val) 3528 { 3529 bool timeout; 3530 3531 timeout = _set_s0_arfc18(rtwdev, val); 3532 if (!timeout) 3533 _lck_check(rtwdev); 3534 } 3535 3536 static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 3537 u8 central_ch, bool dav) 3538 { 3539 u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1; 3540 bool is_2g_ch = central_ch <= 14; 3541 u32 rf_reg18; 3542 3543 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__); 3544 3545 rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK); 3546 rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | 3547 RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH); 3548 rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch); 3549 3550 if (!is_2g_ch) 3551 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) | 3552 FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G); 3553 3554 rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN | 3555 RR_CFGCH_BW2) & RFREG_MASK; 3556 rf_reg18 |= RR_CFGCH_BW2; 3557 3558 if (path == RF_PATH_A && dav) 3559 _set_ch(rtwdev, rf_reg18); 3560 else 3561 rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18); 3562 3563 rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0); 3564 rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1); 3565 3566 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3567 "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n", 3568 central_ch, path, reg18_addr, 3569 rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK)); 3570 } 3571 3572 static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch) 3573 { 3574 _ch_setting(rtwdev, RF_PATH_A, central_ch, true); 3575 _ch_setting(rtwdev, RF_PATH_A, central_ch, false); 3576 } 3577 3578 static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw, 3579 enum rtw89_rf_path path) 3580 { 3581 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1); 3582 rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12); 3583 3584 if (bw == RTW89_CHANNEL_WIDTH_20) 3585 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b); 3586 else if (bw == RTW89_CHANNEL_WIDTH_40) 3587 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13); 3588 else if (bw == RTW89_CHANNEL_WIDTH_80) 3589 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb); 3590 else 3591 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3); 3592 3593 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n", path, 3594 rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB)); 3595 3596 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0); 3597 } 3598 3599 static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3600 enum rtw89_bandwidth bw) 3601 { 3602 u8 kpath, path; 3603 3604 kpath = _kpath(rtwdev, phy); 3605 3606 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 3607 if (!(kpath & BIT(path))) 3608 continue; 3609 3610 _set_rxbb_bw(rtwdev, bw, path); 3611 } 3612 } 3613 3614 static void rtw8851b_ctrl_bw_ch(struct rtw89_dev *rtwdev, 3615 enum rtw89_phy_idx phy, u8 central_ch, 3616 enum rtw89_band band, enum rtw89_bandwidth bw) 3617 { 3618 _ctrl_ch(rtwdev, central_ch); 3619 _ctrl_bw(rtwdev, phy, bw); 3620 _rxbb_bw(rtwdev, phy, bw); 3621 } 3622 3623 void rtw8851b_set_channel_rf(struct rtw89_dev *rtwdev, 3624 const struct rtw89_chan *chan, 3625 enum rtw89_phy_idx phy_idx) 3626 { 3627 rtw8851b_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type, 3628 chan->band_width); 3629 } 3630