1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2022-2023 Realtek Corporation 3 */ 4 5 #include "coex.h" 6 #include "debug.h" 7 #include "mac.h" 8 #include "phy.h" 9 #include "reg.h" 10 #include "rtw8851b.h" 11 #include "rtw8851b_rfk.h" 12 #include "rtw8851b_rfk_table.h" 13 #include "rtw8851b_table.h" 14 15 #define DPK_VER_8851B 0x11 16 #define DPK_KIP_REG_NUM_8851B 8 17 #define DPK_RF_REG_NUM_8851B 4 18 #define DPK_KSET_NUM 4 19 #define RTW8851B_RXK_GROUP_NR 4 20 #define RTW8851B_RXK_GROUP_IDX_NR 4 21 #define RTW8851B_A_TXK_GROUP_NR 2 22 #define RTW8851B_G_TXK_GROUP_NR 1 23 #define RTW8851B_IQK_VER 0x14 24 #define RTW8851B_IQK_SS 1 25 #define RTW8851B_LOK_GRAM 10 26 #define RTW8851B_TSSI_PATH_NR 1 27 28 #define _TSSI_DE_MASK GENMASK(21, 12) 29 30 enum dpk_id { 31 LBK_RXIQK = 0x06, 32 SYNC = 0x10, 33 MDPK_IDL = 0x11, 34 MDPK_MPA = 0x12, 35 GAIN_LOSS = 0x13, 36 GAIN_CAL = 0x14, 37 DPK_RXAGC = 0x15, 38 KIP_PRESET = 0x16, 39 KIP_RESTORE = 0x17, 40 DPK_TXAGC = 0x19, 41 D_KIP_PRESET = 0x28, 42 D_TXAGC = 0x29, 43 D_RXAGC = 0x2a, 44 D_SYNC = 0x2b, 45 D_GAIN_LOSS = 0x2c, 46 D_MDPK_IDL = 0x2d, 47 D_MDPK_LDL = 0x2e, 48 D_GAIN_NORM = 0x2f, 49 D_KIP_THERMAL = 0x30, 50 D_KIP_RESTORE = 0x31 51 }; 52 53 enum dpk_agc_step { 54 DPK_AGC_STEP_SYNC_DGAIN, 55 DPK_AGC_STEP_GAIN_LOSS_IDX, 56 DPK_AGC_STEP_GL_GT_CRITERION, 57 DPK_AGC_STEP_GL_LT_CRITERION, 58 DPK_AGC_STEP_SET_TX_GAIN, 59 }; 60 61 enum rtw8851b_iqk_type { 62 ID_TXAGC = 0x0, 63 ID_FLOK_COARSE = 0x1, 64 ID_FLOK_FINE = 0x2, 65 ID_TXK = 0x3, 66 ID_RXAGC = 0x4, 67 ID_RXK = 0x5, 68 ID_NBTXK = 0x6, 69 ID_NBRXK = 0x7, 70 ID_FLOK_VBUFFER = 0x8, 71 ID_A_FLOK_COARSE = 0x9, 72 ID_G_FLOK_COARSE = 0xa, 73 ID_A_FLOK_FINE = 0xb, 74 ID_G_FLOK_FINE = 0xc, 75 ID_IQK_RESTORE = 0x10, 76 }; 77 78 enum rf_mode { 79 RF_SHUT_DOWN = 0x0, 80 RF_STANDBY = 0x1, 81 RF_TX = 0x2, 82 RF_RX = 0x3, 83 RF_TXIQK = 0x4, 84 RF_DPK = 0x5, 85 RF_RXK1 = 0x6, 86 RF_RXK2 = 0x7, 87 }; 88 89 enum adc_ck { 90 ADC_NA = 0, 91 ADC_480M = 1, 92 ADC_960M = 2, 93 ADC_1920M = 3, 94 }; 95 96 enum dac_ck { 97 DAC_40M = 0, 98 DAC_80M = 1, 99 DAC_120M = 2, 100 DAC_160M = 3, 101 DAC_240M = 4, 102 DAC_320M = 5, 103 DAC_480M = 6, 104 DAC_960M = 7, 105 }; 106 107 static const u32 _tssi_de_cck_long[RF_PATH_NUM_8851B] = {0x5858}; 108 static const u32 _tssi_de_cck_short[RF_PATH_NUM_8851B] = {0x5860}; 109 static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8851B] = {0x5838}; 110 static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8851B] = {0x5840}; 111 static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8851B] = {0x5848}; 112 static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8851B] = {0x5850}; 113 static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8851B] = {0x5828}; 114 static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8851B] = {0x5830}; 115 static const u32 g_idxrxgain[RTW8851B_RXK_GROUP_NR] = {0x10e, 0x116, 0x28e, 0x296}; 116 static const u32 g_idxattc2[RTW8851B_RXK_GROUP_NR] = {0x0, 0xf, 0x0, 0xf}; 117 static const u32 g_idxrxagc[RTW8851B_RXK_GROUP_NR] = {0x0, 0x1, 0x2, 0x3}; 118 static const u32 a_idxrxgain[RTW8851B_RXK_GROUP_IDX_NR] = {0x10C, 0x112, 0x28c, 0x292}; 119 static const u32 a_idxattc2[RTW8851B_RXK_GROUP_IDX_NR] = {0xf, 0xf, 0xf, 0xf}; 120 static const u32 a_idxrxagc[RTW8851B_RXK_GROUP_IDX_NR] = {0x4, 0x5, 0x6, 0x7}; 121 static const u32 a_power_range[RTW8851B_A_TXK_GROUP_NR] = {0x0, 0x0}; 122 static const u32 a_track_range[RTW8851B_A_TXK_GROUP_NR] = {0x7, 0x7}; 123 static const u32 a_gain_bb[RTW8851B_A_TXK_GROUP_NR] = {0x08, 0x0d}; 124 static const u32 a_itqt[RTW8851B_A_TXK_GROUP_NR] = {0x12, 0x12}; 125 static const u32 a_att_smxr[RTW8851B_A_TXK_GROUP_NR] = {0x0, 0x2}; 126 static const u32 g_power_range[RTW8851B_G_TXK_GROUP_NR] = {0x0}; 127 static const u32 g_track_range[RTW8851B_G_TXK_GROUP_NR] = {0x6}; 128 static const u32 g_gain_bb[RTW8851B_G_TXK_GROUP_NR] = {0x10}; 129 static const u32 g_itqt[RTW8851B_G_TXK_GROUP_NR] = {0x12}; 130 131 static const u32 rtw8851b_backup_bb_regs[] = { 132 0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec, 0xc0e8, 0x12a0, 0xc0f0}; 133 static const u32 rtw8851b_backup_rf_regs[] = { 134 0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5}; 135 136 #define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8851b_backup_bb_regs) 137 #define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8851b_backup_rf_regs) 138 139 static const u32 dpk_kip_reg[DPK_KIP_REG_NUM_8851B] = { 140 0x813c, 0x8124, 0xc0ec, 0xc0e8, 0xc0c4, 0xc0d4, 0xc0d8, 0x12a0}; 141 static const u32 dpk_rf_reg[DPK_RF_REG_NUM_8851B] = {0xde, 0x8f, 0x5, 0x10005}; 142 143 static void _set_ch(struct rtw89_dev *rtwdev, u32 val); 144 145 static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 146 { 147 return RF_A; 148 } 149 150 static void _adc_fifo_rst(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 151 u8 path) 152 { 153 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101); 154 fsleep(10); 155 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x1111); 156 } 157 158 static void _rfk_rf_direct_cntrl(struct rtw89_dev *rtwdev, 159 enum rtw89_rf_path path, bool is_bybb) 160 { 161 if (is_bybb) 162 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); 163 else 164 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 165 } 166 167 static void _rfk_drf_direct_cntrl(struct rtw89_dev *rtwdev, 168 enum rtw89_rf_path path, bool is_bybb) 169 { 170 if (is_bybb) 171 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1); 172 else 173 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0); 174 } 175 176 static void _txck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 177 bool force, enum dac_ck ck) 178 { 179 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0); 180 181 if (!force) 182 return; 183 184 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck); 185 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1); 186 } 187 188 static void _rxck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 189 bool force, enum adc_ck ck) 190 { 191 static const u32 ck960_8851b[] = {0x8, 0x2, 0x2, 0x4, 0xf, 0xa, 0x92}; 192 static const u32 ck1920_8851b[] = {0x9, 0x0, 0x0, 0x3, 0xf, 0xa, 0x49}; 193 const u32 *data; 194 195 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0); 196 if (!force) 197 return; 198 199 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck); 200 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1); 201 202 switch (ck) { 203 case ADC_960M: 204 data = ck960_8851b; 205 break; 206 case ADC_1920M: 207 default: 208 data = ck1920_8851b; 209 break; 210 } 211 212 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_CTL, data[0]); 213 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_EN, data[1]); 214 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, data[2]); 215 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, data[3]); 216 rtw89_phy_write32_mask(rtwdev, R_DRCK | (path << 8), B_DRCK_MUL, data[4]); 217 rtw89_phy_write32_mask(rtwdev, R_ADCMOD | (path << 8), B_ADCMOD_LP, data[5]); 218 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 8), B_P0_RXCK_ADJ, data[6]); 219 } 220 221 static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath) 222 { 223 u32 rf_mode; 224 u8 path; 225 int ret; 226 227 for (path = 0; path < RF_PATH_MAX; path++) { 228 if (!(kpath & BIT(path))) 229 continue; 230 231 ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, 232 rf_mode != 2, 2, 5000, false, 233 rtwdev, path, 0x00, RR_MOD_MASK); 234 rtw89_debug(rtwdev, RTW89_DBG_RFK, 235 "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", 236 path, ret); 237 } 238 } 239 240 static void _dack_reset(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 241 { 242 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x0); 243 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x1); 244 } 245 246 static void _drck(struct rtw89_dev *rtwdev) 247 { 248 u32 rck_d; 249 u32 val; 250 int ret; 251 252 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n"); 253 254 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x1); 255 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1); 256 257 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 258 1, 10000, false, 259 rtwdev, R_DRCK_RES, B_DRCK_POL); 260 if (ret) 261 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n"); 262 263 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x0); 264 rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1); 265 udelay(1); 266 rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0); 267 268 rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, 0x7c00); 269 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0); 270 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, rck_d); 271 272 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n", 273 rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD)); 274 } 275 276 static void _addck_backup(struct rtw89_dev *rtwdev) 277 { 278 struct rtw89_dack_info *dack = &rtwdev->dack; 279 280 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0); 281 282 dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0); 283 dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1); 284 } 285 286 static void _addck_reload(struct rtw89_dev *rtwdev) 287 { 288 struct rtw89_dack_info *dack = &rtwdev->dack; 289 290 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, dack->addck_d[0][0]); 291 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, dack->addck_d[0][1]); 292 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3); 293 } 294 295 static void _dack_backup_s0(struct rtw89_dev *rtwdev) 296 { 297 struct rtw89_dack_info *dack = &rtwdev->dack; 298 u8 i; 299 300 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); 301 302 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 303 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i); 304 dack->msbk_d[0][0][i] = 305 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0); 306 307 rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i); 308 dack->msbk_d[0][1][i] = 309 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1); 310 } 311 312 dack->biask_d[0][0] = 313 rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00); 314 dack->biask_d[0][1] = 315 rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01); 316 dack->dadck_d[0][0] = 317 rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00) + 24; 318 dack->dadck_d[0][1] = 319 rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01) + 24; 320 } 321 322 static void _dack_reload_by_path(struct rtw89_dev *rtwdev, 323 enum rtw89_rf_path path, u8 index) 324 { 325 struct rtw89_dack_info *dack = &rtwdev->dack; 326 u32 idx_offset, path_offset; 327 u32 offset, reg; 328 u32 tmp; 329 u8 i; 330 331 if (index == 0) 332 idx_offset = 0; 333 else 334 idx_offset = 0x14; 335 336 if (path == RF_PATH_A) 337 path_offset = 0; 338 else 339 path_offset = 0x28; 340 341 offset = idx_offset + path_offset; 342 343 rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_RST, 0x1); 344 rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_RST, 0x1); 345 346 /* msbk_d: 15/14/13/12 */ 347 tmp = 0x0; 348 for (i = 0; i < 4; i++) 349 tmp |= dack->msbk_d[path][index][i + 12] << (i * 8); 350 reg = 0xc200 + offset; 351 rtw89_phy_write32(rtwdev, reg, tmp); 352 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg, 353 rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD)); 354 355 /* msbk_d: 11/10/9/8 */ 356 tmp = 0x0; 357 for (i = 0; i < 4; i++) 358 tmp |= dack->msbk_d[path][index][i + 8] << (i * 8); 359 reg = 0xc204 + offset; 360 rtw89_phy_write32(rtwdev, reg, tmp); 361 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg, 362 rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD)); 363 364 /* msbk_d: 7/6/5/4 */ 365 tmp = 0x0; 366 for (i = 0; i < 4; i++) 367 tmp |= dack->msbk_d[path][index][i + 4] << (i * 8); 368 reg = 0xc208 + offset; 369 rtw89_phy_write32(rtwdev, reg, tmp); 370 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg, 371 rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD)); 372 373 /* msbk_d: 3/2/1/0 */ 374 tmp = 0x0; 375 for (i = 0; i < 4; i++) 376 tmp |= dack->msbk_d[path][index][i] << (i * 8); 377 reg = 0xc20c + offset; 378 rtw89_phy_write32(rtwdev, reg, tmp); 379 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg, 380 rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD)); 381 382 /* dadak_d/biask_d */ 383 tmp = 0x0; 384 tmp = (dack->biask_d[path][index] << 22) | 385 (dack->dadck_d[path][index] << 14); 386 reg = 0xc210 + offset; 387 rtw89_phy_write32(rtwdev, reg, tmp); 388 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg, 389 rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD)); 390 391 rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL + offset, B_DACKN0_EN, 0x1); 392 } 393 394 static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 395 { 396 u8 index; 397 398 for (index = 0; index < 2; index++) 399 _dack_reload_by_path(rtwdev, path, index); 400 } 401 402 static void _addck(struct rtw89_dev *rtwdev) 403 { 404 struct rtw89_dack_info *dack = &rtwdev->dack; 405 u32 val; 406 int ret; 407 408 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x1); 409 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x1); 410 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x0); 411 udelay(1); 412 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1); 413 414 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 415 1, 10000, false, 416 rtwdev, R_ADDCKR0, BIT(0)); 417 if (ret) { 418 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n"); 419 dack->addck_timeout[0] = true; 420 } 421 422 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret); 423 424 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x0); 425 } 426 427 static void _new_dadck(struct rtw89_dev *rtwdev) 428 { 429 struct rtw89_dack_info *dack = &rtwdev->dack; 430 u32 i_dc, q_dc, ic, qc; 431 u32 val; 432 int ret; 433 434 rtw89_rfk_parser(rtwdev, &rtw8851b_dadck_setup_defs_tbl); 435 436 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 437 1, 10000, false, 438 rtwdev, R_ADDCKR0, BIT(0)); 439 if (ret) { 440 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADCK timeout\n"); 441 dack->addck_timeout[0] = true; 442 } 443 444 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DADCK ret = %d\n", ret); 445 446 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_IQ, 0x0); 447 i_dc = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_DC); 448 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_IQ, 0x1); 449 q_dc = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_DC); 450 451 ic = 0x80 - sign_extend32(i_dc, 11) * 6; 452 qc = 0x80 - sign_extend32(q_dc, 11) * 6; 453 454 rtw89_debug(rtwdev, RTW89_DBG_RFK, 455 "[DACK]before DADCK, i_dc=0x%x, q_dc=0x%x\n", i_dc, q_dc); 456 457 dack->dadck_d[0][0] = ic; 458 dack->dadck_d[0][1] = qc; 459 460 rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL, B_DACKN0_V, dack->dadck_d[0][0]); 461 rtw89_phy_write32_mask(rtwdev, R_DACKN1_CTL, B_DACKN1_V, dack->dadck_d[0][1]); 462 rtw89_debug(rtwdev, RTW89_DBG_RFK, 463 "[DACK]after DADCK, 0xc210=0x%x, 0xc224=0x%x\n", 464 rtw89_phy_read32_mask(rtwdev, R_DACKN0_CTL, MASKDWORD), 465 rtw89_phy_read32_mask(rtwdev, R_DACKN1_CTL, MASKDWORD)); 466 467 rtw89_rfk_parser(rtwdev, &rtw8851b_dadck_post_defs_tbl); 468 } 469 470 static bool _dack_s0_poll(struct rtw89_dev *rtwdev) 471 { 472 if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 || 473 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 || 474 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 || 475 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0) 476 return false; 477 478 return true; 479 } 480 481 static void _dack_s0(struct rtw89_dev *rtwdev) 482 { 483 struct rtw89_dack_info *dack = &rtwdev->dack; 484 bool done; 485 int ret; 486 487 rtw89_rfk_parser(rtwdev, &rtw8851b_dack_s0_1_defs_tbl); 488 _dack_reset(rtwdev, RF_PATH_A); 489 rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1); 490 491 ret = read_poll_timeout_atomic(_dack_s0_poll, done, done, 492 1, 10000, false, rtwdev); 493 if (ret) { 494 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n"); 495 dack->msbk_timeout[0] = true; 496 } 497 498 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret); 499 500 rtw89_rfk_parser(rtwdev, &rtw8851b_dack_s0_2_defs_tbl); 501 502 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n"); 503 504 _dack_backup_s0(rtwdev); 505 _dack_reload(rtwdev, RF_PATH_A); 506 507 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0); 508 } 509 510 static void _dack(struct rtw89_dev *rtwdev) 511 { 512 _dack_s0(rtwdev); 513 } 514 515 static void _dack_dump(struct rtw89_dev *rtwdev) 516 { 517 struct rtw89_dack_info *dack = &rtwdev->dack; 518 u8 i; 519 u8 t; 520 521 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n", 522 dack->addck_d[0][0], dack->addck_d[0][1]); 523 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n", 524 dack->dadck_d[0][0], dack->dadck_d[0][1]); 525 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n", 526 dack->biask_d[0][0], dack->biask_d[0][1]); 527 528 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n"); 529 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 530 t = dack->msbk_d[0][0][i]; 531 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); 532 } 533 534 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n"); 535 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 536 t = dack->msbk_d[0][1][i]; 537 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); 538 } 539 } 540 541 static void _dack_manual_off(struct rtw89_dev *rtwdev) 542 { 543 rtw89_rfk_parser(rtwdev, &rtw8851b_dack_manual_off_defs_tbl); 544 } 545 546 static void _dac_cal(struct rtw89_dev *rtwdev, bool force) 547 { 548 struct rtw89_dack_info *dack = &rtwdev->dack; 549 u32 rf0_0; 550 551 dack->dack_done = false; 552 553 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK 0x2\n"); 554 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n"); 555 rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK); 556 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]RF0=0x%x\n", rf0_0); 557 558 _drck(rtwdev); 559 _dack_manual_off(rtwdev); 560 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1); 561 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0); 562 563 _addck(rtwdev); 564 _addck_backup(rtwdev); 565 _addck_reload(rtwdev); 566 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x40001); 567 568 _dack(rtwdev); 569 _new_dadck(rtwdev); 570 _dack_dump(rtwdev); 571 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1); 572 573 dack->dack_done = true; 574 dack->dack_cnt++; 575 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n"); 576 } 577 578 static void _rx_dck_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 579 enum rtw89_rf_path path, bool is_afe, 580 enum rtw89_chanctx_idx chanctx_idx) 581 { 582 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); 583 584 rtw89_debug(rtwdev, RTW89_DBG_RFK, 585 "[RX_DCK] ==== S%d RX DCK (%s / CH%d / %s / by %s)====\n", path, 586 chan->band_type == RTW89_BAND_2G ? "2G" : 587 chan->band_type == RTW89_BAND_5G ? "5G" : "6G", 588 chan->channel, 589 chan->band_width == RTW89_CHANNEL_WIDTH_20 ? "20M" : 590 chan->band_width == RTW89_CHANNEL_WIDTH_40 ? "40M" : "80M", 591 is_afe ? "AFE" : "RFC"); 592 } 593 594 static void _rxbb_ofst_swap(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 rf_mode) 595 { 596 u32 val, val_i, val_q; 597 598 val_i = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_S1); 599 val_q = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_S1); 600 601 val = val_q << 4 | val_i; 602 603 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_DIS, 0x1); 604 rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, rf_mode); 605 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val); 606 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_DIS, 0x0); 607 608 rtw89_debug(rtwdev, RTW89_DBG_RFK, 609 "[RX_DCK] val_i = 0x%x, val_q = 0x%x, 0x3F = 0x%x\n", 610 val_i, val_q, val); 611 } 612 613 static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 rf_mode) 614 { 615 u32 val; 616 int ret; 617 618 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); 619 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1); 620 621 ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 622 2, 2000, false, 623 rtwdev, path, RR_DCK, BIT(8)); 624 625 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); 626 627 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] S%d RXDCK finish (ret = %d)\n", 628 path, ret); 629 630 _rxbb_ofst_swap(rtwdev, path, rf_mode); 631 } 632 633 static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe, 634 enum rtw89_chanctx_idx chanctx_idx) 635 { 636 u32 rf_reg5; 637 u8 path; 638 639 rtw89_debug(rtwdev, RTW89_DBG_RFK, 640 "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n", 641 0x2, rtwdev->hal.cv); 642 643 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 644 _rx_dck_info(rtwdev, phy, path, is_afe, chanctx_idx); 645 646 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); 647 648 if (rtwdev->is_tssi_mode[path]) 649 rtw89_phy_write32_mask(rtwdev, 650 R_P0_TSSI_TRK + (path << 13), 651 B_P0_TSSI_TRK_EN, 0x1); 652 653 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 654 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX); 655 _set_rx_dck(rtwdev, path, RF_RX); 656 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); 657 658 if (rtwdev->is_tssi_mode[path]) 659 rtw89_phy_write32_mask(rtwdev, 660 R_P0_TSSI_TRK + (path << 13), 661 B_P0_TSSI_TRK_EN, 0x0); 662 } 663 } 664 665 static void _iqk_sram(struct rtw89_dev *rtwdev, u8 path) 666 { 667 u32 i; 668 669 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 670 671 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00020000); 672 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, MASKDWORD, 0x80000000); 673 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000080); 674 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000); 675 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009); 676 677 for (i = 0; i <= 0x9f; i++) { 678 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 679 0x00010000 + i); 680 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", 681 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI)); 682 } 683 684 for (i = 0; i <= 0x9f; i++) { 685 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 686 0x00010000 + i); 687 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", 688 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ)); 689 } 690 691 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000000); 692 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00000000); 693 } 694 695 static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path) 696 { 697 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); 698 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); 699 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1); 700 } 701 702 static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path) 703 { 704 bool fail1 = false, fail2 = false; 705 u32 val; 706 int ret; 707 708 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 709 10, 8200, false, 710 rtwdev, 0xbff8, MASKBYTE0); 711 if (ret) { 712 fail1 = true; 713 rtw89_debug(rtwdev, RTW89_DBG_RFK, 714 "[IQK]NCTL1 IQK timeout!!!\n"); 715 } 716 717 fsleep(10); 718 719 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000, 720 10, 200, false, 721 rtwdev, R_RPT_COM, B_RPT_COM_RDY); 722 if (ret) { 723 fail2 = true; 724 rtw89_debug(rtwdev, RTW89_DBG_RFK, 725 "[IQK]NCTL2 IQK timeout!!!\n"); 726 } 727 728 fsleep(10); 729 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0); 730 731 rtw89_debug(rtwdev, RTW89_DBG_RFK, 732 "[IQK]S%x, ret = %d, notready = %x fail=%d,%d\n", 733 path, ret, fail1 || fail2, fail1, fail2); 734 735 return fail1 || fail2; 736 } 737 738 static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 739 u8 path, u8 ktype) 740 { 741 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 742 bool notready; 743 u32 iqk_cmd; 744 745 switch (ktype) { 746 case ID_A_FLOK_COARSE: 747 rtw89_debug(rtwdev, RTW89_DBG_RFK, 748 "[IQK]============ S%d ID_A_FLOK_COARSE ============\n", path); 749 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 750 iqk_cmd = 0x108 | (1 << (4 + path)); 751 break; 752 case ID_G_FLOK_COARSE: 753 rtw89_debug(rtwdev, RTW89_DBG_RFK, 754 "[IQK]============ S%d ID_G_FLOK_COARSE ============\n", path); 755 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 756 iqk_cmd = 0x108 | (1 << (4 + path)); 757 break; 758 case ID_A_FLOK_FINE: 759 rtw89_debug(rtwdev, RTW89_DBG_RFK, 760 "[IQK]============ S%d ID_A_FLOK_FINE ============\n", path); 761 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 762 iqk_cmd = 0x308 | (1 << (4 + path)); 763 break; 764 case ID_G_FLOK_FINE: 765 rtw89_debug(rtwdev, RTW89_DBG_RFK, 766 "[IQK]============ S%d ID_G_FLOK_FINE ============\n", path); 767 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 768 iqk_cmd = 0x308 | (1 << (4 + path)); 769 break; 770 case ID_TXK: 771 rtw89_debug(rtwdev, RTW89_DBG_RFK, 772 "[IQK]============ S%d ID_TXK ============\n", path); 773 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); 774 iqk_cmd = 0x008 | (1 << (path + 4)) | 775 (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8); 776 break; 777 case ID_RXAGC: 778 rtw89_debug(rtwdev, RTW89_DBG_RFK, 779 "[IQK]============ S%d ID_RXAGC ============\n", path); 780 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 781 iqk_cmd = 0x708 | (1 << (4 + path)) | (path << 1); 782 break; 783 case ID_RXK: 784 rtw89_debug(rtwdev, RTW89_DBG_RFK, 785 "[IQK]============ S%d ID_RXK ============\n", path); 786 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 787 iqk_cmd = 0x008 | (1 << (path + 4)) | 788 (((0xc + iqk_info->iqk_bw[path]) & 0xf) << 8); 789 break; 790 case ID_NBTXK: 791 rtw89_debug(rtwdev, RTW89_DBG_RFK, 792 "[IQK]============ S%d ID_NBTXK ============\n", path); 793 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); 794 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 795 0x11); 796 iqk_cmd = 0x408 | (1 << (4 + path)); 797 break; 798 case ID_NBRXK: 799 rtw89_debug(rtwdev, RTW89_DBG_RFK, 800 "[IQK]============ S%d ID_NBRXK ============\n", path); 801 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 802 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 803 0x011); 804 iqk_cmd = 0x608 | (1 << (4 + path)); 805 break; 806 default: 807 return false; 808 } 809 810 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1); 811 notready = _iqk_check_cal(rtwdev, path); 812 if (iqk_info->iqk_sram_en && 813 (ktype == ID_NBRXK || ktype == ID_RXK || ktype == ID_NBTXK)) 814 _iqk_sram(rtwdev, path); 815 816 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); 817 rtw89_debug(rtwdev, RTW89_DBG_RFK, 818 "[IQK]S%x, ktype= %x, id = %x, notready = %x\n", 819 path, ktype, iqk_cmd + 1, notready); 820 821 return notready; 822 } 823 824 static bool _rxk_2g_group_sel(struct rtw89_dev *rtwdev, 825 enum rtw89_phy_idx phy_idx, u8 path) 826 { 827 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 828 bool kfail = false; 829 bool notready; 830 u32 rf_0; 831 u8 gp; 832 833 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 834 835 for (gp = 0; gp < RTW8851B_RXK_GROUP_NR; gp++) { 836 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp); 837 838 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]); 839 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2, g_idxattc2[gp]); 840 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 841 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0); 842 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp); 843 844 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); 845 fsleep(10); 846 rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); 847 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0); 848 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]); 849 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); 850 851 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); 852 853 rtw89_debug(rtwdev, RTW89_DBG_RFK, 854 "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", path, 855 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD), 856 rtw89_read_rf(rtwdev, path, RR_MOD, 0x003e0)); 857 858 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); 859 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); 860 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); 861 iqk_info->nb_rxcfir[path] = 862 rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2; 863 864 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); 865 866 rtw89_debug(rtwdev, RTW89_DBG_RFK, 867 "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", path, 868 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 869 } 870 871 if (!notready) 872 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 873 874 if (kfail) 875 _iqk_sram(rtwdev, path); 876 877 if (kfail) { 878 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), 879 MASKDWORD, iqk_info->nb_rxcfir[path] | 0x2); 880 iqk_info->is_wb_txiqk[path] = false; 881 } else { 882 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), 883 MASKDWORD, 0x40000000); 884 iqk_info->is_wb_txiqk[path] = true; 885 } 886 887 rtw89_debug(rtwdev, RTW89_DBG_RFK, 888 "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail, 889 1 << path, iqk_info->nb_rxcfir[path]); 890 return kfail; 891 } 892 893 static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev, 894 enum rtw89_phy_idx phy_idx, u8 path) 895 { 896 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 897 bool kfail = false; 898 bool notready; 899 u32 rf_0; 900 u32 val; 901 u8 gp; 902 903 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 904 905 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000); 906 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4); 907 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x17); 908 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5); 909 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27); 910 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0); 911 912 val = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20); 913 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0xc); 914 915 for (gp = 0; gp < RTW8851B_RXK_GROUP_IDX_NR; gp++) { 916 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp); 917 918 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]); 919 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]); 920 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, 0x1); 921 922 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 923 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0); 924 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp); 925 926 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); 927 fsleep(100); 928 rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); 929 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0); 930 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]); 931 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); 932 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); 933 934 rtw89_debug(rtwdev, RTW89_DBG_RFK, 935 "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", path, 936 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD), 937 rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_RXB)); 938 939 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); 940 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); 941 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); 942 iqk_info->nb_rxcfir[path] = 943 rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2; 944 945 rtw89_debug(rtwdev, RTW89_DBG_RFK, 946 "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path, 947 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 948 949 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); 950 951 rtw89_debug(rtwdev, RTW89_DBG_RFK, 952 "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", path, 953 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 954 } 955 956 if (!notready) 957 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 958 959 if (kfail) 960 _iqk_sram(rtwdev, path); 961 962 if (kfail) { 963 rtw89_phy_write32_mask(rtwdev, R_IQK_RES, B_IQK_RES_RXCFIR, 0x0); 964 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 965 iqk_info->nb_rxcfir[path] | 0x2); 966 iqk_info->is_wb_txiqk[path] = false; 967 } else { 968 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 969 0x40000000); 970 iqk_info->is_wb_txiqk[path] = true; 971 } 972 973 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, val); 974 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000); 975 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4); 976 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x37); 977 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5); 978 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27); 979 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0); 980 981 rtw89_debug(rtwdev, RTW89_DBG_RFK, 982 "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail, 983 1 << path, iqk_info->nb_rxcfir[path]); 984 return kfail; 985 } 986 987 static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 988 u8 path) 989 { 990 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 991 bool kfail = false; 992 bool notready; 993 u8 gp = 2; 994 u32 rf_0; 995 u32 val; 996 997 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 998 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp); 999 1000 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000); 1001 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4); 1002 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x17); 1003 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5); 1004 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27); 1005 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0); 1006 1007 val = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20); 1008 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0xc); 1009 1010 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]); 1011 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]); 1012 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, 0x1); 1013 1014 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1015 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0); 1016 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp); 1017 1018 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); 1019 fsleep(100); 1020 rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); 1021 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0); 1022 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]); 1023 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); 1024 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); 1025 1026 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1027 "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", path, 1028 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD), 1029 rtw89_read_rf(rtwdev, path, RR_MOD, 0x003e0)); 1030 1031 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); 1032 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); 1033 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); 1034 iqk_info->nb_rxcfir[path] = 1035 rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2; 1036 1037 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1038 "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path, 1039 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 1040 1041 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", 1042 path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 1043 1044 if (!notready) 1045 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1046 1047 if (kfail) { 1048 rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), 0xf, 0x0); 1049 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), 1050 MASKDWORD, 0x40000002); 1051 iqk_info->is_wb_rxiqk[path] = false; 1052 } else { 1053 iqk_info->is_wb_rxiqk[path] = false; 1054 } 1055 1056 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, val); 1057 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000); 1058 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4); 1059 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x37); 1060 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5); 1061 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27); 1062 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0); 1063 1064 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1065 "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail, 1066 1 << path, iqk_info->nb_rxcfir[path]); 1067 1068 return kfail; 1069 } 1070 1071 static bool _iqk_2g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1072 u8 path) 1073 { 1074 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1075 bool kfail = false; 1076 bool notready; 1077 u8 gp = 0x3; 1078 u32 rf_0; 1079 1080 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1081 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp); 1082 1083 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]); 1084 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2, g_idxattc2[gp]); 1085 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1086 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0); 1087 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp); 1088 1089 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); 1090 fsleep(10); 1091 rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); 1092 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0); 1093 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]); 1094 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); 1095 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); 1096 1097 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1098 "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", 1099 path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD), 1100 rtw89_read_rf(rtwdev, path, RR_MOD, 0x003e0)); 1101 1102 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); 1103 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); 1104 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); 1105 iqk_info->nb_rxcfir[path] = 1106 rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2; 1107 1108 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1109 "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path, 1110 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 1111 1112 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", 1113 path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 1114 1115 if (!notready) 1116 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1117 1118 if (kfail) { 1119 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), 1120 MASKDWORD, 0x40000002); 1121 iqk_info->is_wb_rxiqk[path] = false; 1122 } else { 1123 iqk_info->is_wb_rxiqk[path] = false; 1124 } 1125 1126 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1127 "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail, 1128 1 << path, iqk_info->nb_rxcfir[path]); 1129 return kfail; 1130 } 1131 1132 static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path) 1133 { 1134 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1135 1136 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1); 1137 1138 if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) { 1139 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101); 1140 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_DPD_GDIS, 0x1); 1141 1142 _rxck_force(rtwdev, path, true, ADC_960M); 1143 1144 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_rxclk_80_defs_tbl); 1145 } else { 1146 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101); 1147 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_DPD_GDIS, 0x1); 1148 1149 _rxck_force(rtwdev, path, true, ADC_960M); 1150 1151 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_rxclk_others_defs_tbl); 1152 } 1153 1154 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, (2)before RXK IQK\n", path); 1155 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[07:10] = 0x%x\n", path, 1156 0xc0d4, rtw89_phy_read32_mask(rtwdev, 0xc0d4, GENMASK(10, 7))); 1157 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[11:14] = 0x%x\n", path, 1158 0xc0d4, rtw89_phy_read32_mask(rtwdev, 0xc0d4, GENMASK(14, 11))); 1159 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[26:27] = 0x%x\n", path, 1160 0xc0d4, rtw89_phy_read32_mask(rtwdev, 0xc0d4, GENMASK(27, 26))); 1161 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[05:08] = 0x%x\n", path, 1162 0xc0d8, rtw89_phy_read32_mask(rtwdev, 0xc0d8, GENMASK(8, 5))); 1163 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[17:21] = 0x%x\n", path, 1164 0xc0c4, rtw89_phy_read32_mask(rtwdev, 0xc0c4, GENMASK(21, 17))); 1165 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[16:31] = 0x%x\n", path, 1166 0xc0e8, rtw89_phy_read32_mask(rtwdev, 0xc0e8, GENMASK(31, 16))); 1167 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[04:05] = 0x%x\n", path, 1168 0xc0e4, rtw89_phy_read32_mask(rtwdev, 0xc0e4, GENMASK(5, 4))); 1169 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[23:31] = 0x%x\n", path, 1170 0x12a0, rtw89_phy_read32_mask(rtwdev, 0x12a0, GENMASK(31, 23))); 1171 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[13:14] = 0x%x\n", path, 1172 0xc0ec, rtw89_phy_read32_mask(rtwdev, 0xc0ec, GENMASK(14, 13))); 1173 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[16:23] = 0x%x\n", path, 1174 0xc0ec, rtw89_phy_read32_mask(rtwdev, 0xc0ec, GENMASK(23, 16))); 1175 } 1176 1177 static bool _txk_5g_group_sel(struct rtw89_dev *rtwdev, 1178 enum rtw89_phy_idx phy_idx, u8 path) 1179 { 1180 static const u8 a_idx[RTW8851B_A_TXK_GROUP_NR] = {2, 3}; 1181 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1182 bool kfail = false; 1183 bool notready; 1184 u8 gp; 1185 1186 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1187 1188 rtw89_phy_write32_mask(rtwdev, R_CFIR_COEF, MASKDWORD, 0x33332222); 1189 1190 for (gp = 0x0; gp < RTW8851B_A_TXK_GROUP_NR; gp++) { 1191 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]); 1192 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]); 1193 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]); 1194 rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, a_att_smxr[gp]); 1195 1196 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1197 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1); 1198 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0); 1199 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, a_idx[gp]); 1200 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1201 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x11); 1202 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]); 1203 1204 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1205 iqk_info->nb_txcfir[path] = 1206 rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2; 1207 1208 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1209 MASKDWORD, a_itqt[gp]); 1210 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); 1211 } 1212 1213 if (!notready) 1214 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1215 1216 if (kfail) { 1217 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1218 MASKDWORD, iqk_info->nb_txcfir[path] | 0x2); 1219 iqk_info->is_wb_txiqk[path] = false; 1220 } else { 1221 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1222 MASKDWORD, 0x40000000); 1223 iqk_info->is_wb_txiqk[path] = true; 1224 } 1225 1226 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1227 "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail, 1228 1 << path, iqk_info->nb_txcfir[path]); 1229 return kfail; 1230 } 1231 1232 static bool _txk_2g_group_sel(struct rtw89_dev *rtwdev, 1233 enum rtw89_phy_idx phy_idx, u8 path) 1234 { 1235 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1236 bool kfail = false; 1237 bool notready; 1238 u8 gp; 1239 1240 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1241 1242 rtw89_phy_write32_mask(rtwdev, R_CFIR_COEF, MASKDWORD, 0x0); 1243 1244 for (gp = 0x0; gp < RTW8851B_G_TXK_GROUP_NR; gp++) { 1245 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]); 1246 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]); 1247 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]); 1248 1249 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, g_itqt[gp]); 1250 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1251 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1); 1252 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0); 1253 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp); 1254 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1255 1256 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1257 iqk_info->nb_txcfir[path] = 1258 rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2; 1259 1260 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1261 MASKDWORD, g_itqt[gp]); 1262 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); 1263 } 1264 1265 if (!notready) 1266 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1267 1268 if (kfail) { 1269 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1270 MASKDWORD, iqk_info->nb_txcfir[path] | 0x2); 1271 iqk_info->is_wb_txiqk[path] = false; 1272 } else { 1273 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1274 MASKDWORD, 0x40000000); 1275 iqk_info->is_wb_txiqk[path] = true; 1276 } 1277 1278 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1279 "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail, 1280 1 << path, iqk_info->nb_txcfir[path]); 1281 return kfail; 1282 } 1283 1284 static bool _iqk_5g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1285 u8 path) 1286 { 1287 static const u8 a_idx[RTW8851B_A_TXK_GROUP_NR] = {2, 3}; 1288 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1289 bool kfail = false; 1290 bool notready; 1291 u8 gp = 0; 1292 1293 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1294 1295 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]); 1296 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]); 1297 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]); 1298 rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, a_att_smxr[gp]); 1299 1300 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1301 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1); 1302 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0); 1303 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, a_idx[gp]); 1304 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1305 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]); 1306 1307 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1308 iqk_info->nb_txcfir[path] = 1309 rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2; 1310 1311 if (!notready) 1312 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1313 1314 if (kfail) { 1315 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1316 MASKDWORD, 0x40000002); 1317 iqk_info->is_wb_rxiqk[path] = false; 1318 } else { 1319 iqk_info->is_wb_rxiqk[path] = false; 1320 } 1321 1322 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1323 "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail, 1324 1 << path, iqk_info->nb_txcfir[path]); 1325 return kfail; 1326 } 1327 1328 static bool _iqk_2g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1329 u8 path) 1330 { 1331 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1332 bool kfail = false; 1333 bool notready; 1334 u8 gp; 1335 1336 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1337 1338 for (gp = 0x0; gp < RTW8851B_G_TXK_GROUP_NR; gp++) { 1339 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]); 1340 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]); 1341 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]); 1342 1343 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, g_itqt[gp]); 1344 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1345 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1); 1346 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0); 1347 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp); 1348 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1349 1350 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1351 iqk_info->nb_txcfir[path] = 1352 rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), 1353 MASKDWORD) | 0x2; 1354 } 1355 1356 if (!notready) 1357 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1358 1359 if (kfail) { 1360 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1361 MASKDWORD, 0x40000002); 1362 iqk_info->is_wb_rxiqk[path] = false; 1363 } else { 1364 iqk_info->is_wb_rxiqk[path] = false; 1365 } 1366 1367 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1368 "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail, 1369 1 << path, iqk_info->nb_txcfir[path]); 1370 return kfail; 1371 } 1372 1373 static bool _iqk_2g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1374 u8 path) 1375 { 1376 static const u32 g_txbb[RTW8851B_LOK_GRAM] = { 1377 0x02, 0x06, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17}; 1378 static const u32 g_itqt[RTW8851B_LOK_GRAM] = { 1379 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x12, 0x12, 0x12, 0x1b}; 1380 static const u32 g_wa[RTW8851B_LOK_GRAM] = { 1381 0x00, 0x04, 0x08, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17}; 1382 bool fail = false; 1383 u8 i; 1384 1385 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1386 1387 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTDBG, RR_LUTDBG_LOK, 0x0); 1388 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR0, 0x0); 1389 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR1, 0x6); 1390 1391 for (i = 0; i < RTW8851B_LOK_GRAM; i++) { 1392 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_TG, g_txbb[i]); 1393 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RR_LUTWA_M1, g_wa[i]); 1394 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 1395 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, g_itqt[i]); 1396 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021); 1397 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 1398 0x00000109 | (1 << (4 + path))); 1399 fail |= _iqk_check_cal(rtwdev, path); 1400 1401 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1402 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, g_itqt[i]); 1403 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 1404 0x00000309 | (1 << (4 + path))); 1405 fail |= _iqk_check_cal(rtwdev, path); 1406 1407 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1408 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); 1409 1410 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1411 "[IQK]S0, i = %x, 0x8[19:15] = 0x%x,0x8[09:05] = 0x%x\n", i, 1412 rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0xf8000), 1413 rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0x003e0)); 1414 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1415 "[IQK]S0, i = %x, 0x9[19:16] = 0x%x,0x9[09:06] = 0x%x\n", i, 1416 rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0xf0000), 1417 rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0x003c0)); 1418 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1419 "[IQK]S0, i = %x, 0x58 = %x\n", i, 1420 rtw89_read_rf(rtwdev, RF_PATH_A, RR_TXMO, RFREG_MASK)); 1421 } 1422 1423 return fail; 1424 } 1425 1426 static bool _iqk_5g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1427 u8 path) 1428 { 1429 static const u32 a_txbb[RTW8851B_LOK_GRAM] = { 1430 0x02, 0x06, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17}; 1431 static const u32 a_itqt[RTW8851B_LOK_GRAM] = { 1432 0x09, 0x09, 0x09, 0x12, 0x12, 0x12, 0x1b, 0x1b, 0x1b, 0x1b}; 1433 static const u32 a_wa[RTW8851B_LOK_GRAM] = { 1434 0x80, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x97}; 1435 bool fail = false; 1436 u8 i; 1437 1438 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1439 1440 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTDBG, RR_LUTDBG_LOK, 0x0); 1441 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR0, 0x0); 1442 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR1, 0x7); 1443 1444 for (i = 0; i < RTW8851B_LOK_GRAM; i++) { 1445 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_TG, a_txbb[i]); 1446 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RR_LUTWA_M1, a_wa[i]); 1447 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 1448 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, a_itqt[i]); 1449 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021); 1450 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 1451 0x00000109 | (1 << (4 + path))); 1452 fail |= _iqk_check_cal(rtwdev, path); 1453 1454 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1455 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, a_itqt[i]); 1456 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021); 1457 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 1458 0x00000309 | (1 << (4 + path))); 1459 fail |= _iqk_check_cal(rtwdev, path); 1460 1461 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1462 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); 1463 1464 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1465 "[IQK]S0, i = %x, 0x8[19:15] = 0x%x,0x8[09:05] = 0x%x\n", i, 1466 rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0xf8000), 1467 rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0x003e0)); 1468 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1469 "[IQK]S0, i = %x, 0x9[19:16] = 0x%x,0x9[09:06] = 0x%x\n", i, 1470 rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0xf0000), 1471 rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0x003c0)); 1472 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1473 "[IQK]S0, i = %x, 0x58 = %x\n", i, 1474 rtw89_read_rf(rtwdev, RF_PATH_A, RR_TXMO, RFREG_MASK)); 1475 } 1476 1477 return fail; 1478 } 1479 1480 static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path) 1481 { 1482 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1483 1484 switch (iqk_info->iqk_band[path]) { 1485 case RTW89_BAND_2G: 1486 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RTW89_BAND_2G\n"); 1487 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_txk_2ghz_defs_tbl); 1488 break; 1489 case RTW89_BAND_5G: 1490 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RTW89_BAND_5G\n"); 1491 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_txk_5ghz_defs_tbl); 1492 break; 1493 default: 1494 break; 1495 } 1496 } 1497 1498 #define IQK_LOK_RETRY 1 1499 1500 static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1501 u8 path) 1502 { 1503 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1504 bool lok_is_fail; 1505 u8 i; 1506 1507 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1508 1509 for (i = 0; i < IQK_LOK_RETRY; i++) { 1510 _iqk_txk_setting(rtwdev, path); 1511 if (iqk_info->iqk_band[path] == RTW89_BAND_2G) 1512 lok_is_fail = _iqk_2g_lok(rtwdev, phy_idx, path); 1513 else 1514 lok_is_fail = _iqk_5g_lok(rtwdev, phy_idx, path); 1515 1516 if (!lok_is_fail) 1517 break; 1518 } 1519 1520 if (iqk_info->is_nbiqk) { 1521 if (iqk_info->iqk_band[path] == RTW89_BAND_2G) 1522 iqk_info->iqk_tx_fail[0][path] = 1523 _iqk_2g_nbtxk(rtwdev, phy_idx, path); 1524 else 1525 iqk_info->iqk_tx_fail[0][path] = 1526 _iqk_5g_nbtxk(rtwdev, phy_idx, path); 1527 } else { 1528 if (iqk_info->iqk_band[path] == RTW89_BAND_2G) 1529 iqk_info->iqk_tx_fail[0][path] = 1530 _txk_2g_group_sel(rtwdev, phy_idx, path); 1531 else 1532 iqk_info->iqk_tx_fail[0][path] = 1533 _txk_5g_group_sel(rtwdev, phy_idx, path); 1534 } 1535 1536 _iqk_rxclk_setting(rtwdev, path); 1537 _iqk_rxk_setting(rtwdev, path); 1538 _adc_fifo_rst(rtwdev, phy_idx, path); 1539 1540 if (iqk_info->is_nbiqk) { 1541 if (iqk_info->iqk_band[path] == RTW89_BAND_2G) 1542 iqk_info->iqk_rx_fail[0][path] = 1543 _iqk_2g_nbrxk(rtwdev, phy_idx, path); 1544 else 1545 iqk_info->iqk_rx_fail[0][path] = 1546 _iqk_5g_nbrxk(rtwdev, phy_idx, path); 1547 } else { 1548 if (iqk_info->iqk_band[path] == RTW89_BAND_2G) 1549 iqk_info->iqk_rx_fail[0][path] = 1550 _rxk_2g_group_sel(rtwdev, phy_idx, path); 1551 else 1552 iqk_info->iqk_rx_fail[0][path] = 1553 _rxk_5g_group_sel(rtwdev, phy_idx, path); 1554 } 1555 } 1556 1557 static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, 1558 u32 backup_bb_reg_val[]) 1559 { 1560 u32 i; 1561 1562 for (i = 0; i < BACKUP_BB_REGS_NR; i++) { 1563 backup_bb_reg_val[i] = 1564 rtw89_phy_read32_mask(rtwdev, rtw8851b_backup_bb_regs[i], 1565 MASKDWORD); 1566 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1567 "[RFK]backup bb reg : %x, value =%x\n", 1568 rtw8851b_backup_bb_regs[i], backup_bb_reg_val[i]); 1569 } 1570 } 1571 1572 static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, 1573 u32 backup_rf_reg_val[], u8 rf_path) 1574 { 1575 u32 i; 1576 1577 for (i = 0; i < BACKUP_RF_REGS_NR; i++) { 1578 backup_rf_reg_val[i] = 1579 rtw89_read_rf(rtwdev, rf_path, 1580 rtw8851b_backup_rf_regs[i], RFREG_MASK); 1581 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1582 "[RFK]backup rf S%d reg : %x, value =%x\n", rf_path, 1583 rtw8851b_backup_rf_regs[i], backup_rf_reg_val[i]); 1584 } 1585 } 1586 1587 static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev, 1588 const u32 backup_bb_reg_val[]) 1589 { 1590 u32 i; 1591 1592 for (i = 0; i < BACKUP_BB_REGS_NR; i++) { 1593 rtw89_phy_write32_mask(rtwdev, rtw8851b_backup_bb_regs[i], 1594 MASKDWORD, backup_bb_reg_val[i]); 1595 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1596 "[RFK]restore bb reg : %x, value =%x\n", 1597 rtw8851b_backup_bb_regs[i], backup_bb_reg_val[i]); 1598 } 1599 } 1600 1601 static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, 1602 const u32 backup_rf_reg_val[], u8 rf_path) 1603 { 1604 u32 i; 1605 1606 for (i = 0; i < BACKUP_RF_REGS_NR; i++) { 1607 rtw89_write_rf(rtwdev, rf_path, rtw8851b_backup_rf_regs[i], 1608 RFREG_MASK, backup_rf_reg_val[i]); 1609 1610 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1611 "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path, 1612 rtw8851b_backup_rf_regs[i], backup_rf_reg_val[i]); 1613 } 1614 } 1615 1616 static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1617 u8 path, enum rtw89_chanctx_idx chanctx_idx) 1618 { 1619 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); 1620 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1621 u8 idx = 0; 1622 1623 iqk_info->iqk_band[path] = chan->band_type; 1624 iqk_info->iqk_bw[path] = chan->band_width; 1625 iqk_info->iqk_ch[path] = chan->channel; 1626 iqk_info->iqk_table_idx[path] = idx; 1627 1628 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", 1629 path, phy, rtwdev->dbcc_en ? "on" : "off", 1630 iqk_info->iqk_band[path] == 0 ? "2G" : 1631 iqk_info->iqk_band[path] == 1 ? "5G" : "6G", 1632 iqk_info->iqk_ch[path], 1633 iqk_info->iqk_bw[path] == 0 ? "20M" : 1634 iqk_info->iqk_bw[path] == 1 ? "40M" : "80M"); 1635 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n", 1636 iqk_info->iqk_times, idx); 1637 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, iqk_info->syn1to2= 0x%x\n", 1638 path, iqk_info->syn1to2); 1639 } 1640 1641 static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1642 u8 path) 1643 { 1644 _iqk_by_path(rtwdev, phy_idx, path); 1645 } 1646 1647 static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path) 1648 { 1649 bool fail; 1650 1651 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1652 1653 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00001219); 1654 fsleep(10); 1655 fail = _iqk_check_cal(rtwdev, path); 1656 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] restore fail=%d\n", fail); 1657 1658 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RR_LUTWE_LOK, 0x0); 1659 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTDBG, RR_LUTDBG_TIA, 0x0); 1660 1661 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1662 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000); 1663 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); 1664 } 1665 1666 static void _iqk_afebb_restore(struct rtw89_dev *rtwdev, 1667 enum rtw89_phy_idx phy_idx, u8 path) 1668 { 1669 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_afebb_restore_defs_tbl); 1670 } 1671 1672 static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path) 1673 { 1674 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1675 1676 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 1677 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); 1678 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a); 1679 } 1680 1681 static void _iqk_macbb_setting(struct rtw89_dev *rtwdev, 1682 enum rtw89_phy_idx phy_idx, u8 path) 1683 { 1684 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1685 1686 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_macbb_defs_tbl); 1687 1688 _txck_force(rtwdev, path, true, DAC_960M); 1689 1690 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_DPD_GDIS, 0x1); 1691 1692 _rxck_force(rtwdev, path, true, ADC_1920M); 1693 1694 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_macbb_bh_defs_tbl); 1695 } 1696 1697 static void _iqk_init(struct rtw89_dev *rtwdev) 1698 { 1699 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1700 u8 idx, path; 1701 1702 if (iqk_info->is_iqk_init) 1703 return; 1704 1705 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1706 1707 iqk_info->is_iqk_init = true; 1708 iqk_info->is_nbiqk = false; 1709 iqk_info->iqk_fft_en = false; 1710 iqk_info->iqk_sram_en = false; 1711 iqk_info->iqk_cfir_en = false; 1712 iqk_info->iqk_xym_en = false; 1713 iqk_info->iqk_times = 0x0; 1714 1715 for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) { 1716 iqk_info->iqk_channel[idx] = 0x0; 1717 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 1718 iqk_info->lok_cor_fail[idx][path] = false; 1719 iqk_info->lok_fin_fail[idx][path] = false; 1720 iqk_info->iqk_tx_fail[idx][path] = false; 1721 iqk_info->iqk_rx_fail[idx][path] = false; 1722 iqk_info->iqk_table_idx[path] = 0x0; 1723 } 1724 } 1725 } 1726 1727 static void _doiqk(struct rtw89_dev *rtwdev, bool force, 1728 enum rtw89_phy_idx phy_idx, u8 path, 1729 enum rtw89_chanctx_idx chanctx_idx) 1730 { 1731 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1732 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx); 1733 u32 backup_rf_val[RTW8851B_IQK_SS][BACKUP_RF_REGS_NR]; 1734 u32 backup_bb_val[BACKUP_BB_REGS_NR]; 1735 1736 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, 1737 BTC_WRFK_ONESHOT_START); 1738 1739 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1740 "[IQK]==========IQK start!!!!!==========\n"); 1741 iqk_info->iqk_times++; 1742 iqk_info->version = RTW8851B_IQK_VER; 1743 1744 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version); 1745 _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx); 1746 1747 _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]); 1748 _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path); 1749 _iqk_macbb_setting(rtwdev, phy_idx, path); 1750 _iqk_preset(rtwdev, path); 1751 _iqk_start_iqk(rtwdev, phy_idx, path); 1752 _iqk_restore(rtwdev, path); 1753 _iqk_afebb_restore(rtwdev, phy_idx, path); 1754 _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]); 1755 _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path); 1756 1757 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, 1758 BTC_WRFK_ONESHOT_STOP); 1759 } 1760 1761 static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1762 bool force, enum rtw89_chanctx_idx chanctx_idx) 1763 { 1764 _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx); 1765 } 1766 1767 static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 *reg, 1768 u32 reg_bkup[][DPK_KIP_REG_NUM_8851B], u8 path) 1769 { 1770 u8 i; 1771 1772 for (i = 0; i < DPK_KIP_REG_NUM_8851B; i++) { 1773 reg_bkup[path][i] = 1774 rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD); 1775 1776 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n", 1777 reg[i] + (path << 8), reg_bkup[path][i]); 1778 } 1779 } 1780 1781 static void _dpk_bkup_rf(struct rtw89_dev *rtwdev, const u32 *rf_reg, 1782 u32 rf_bkup[][DPK_RF_REG_NUM_8851B], u8 path) 1783 { 1784 u8 i; 1785 1786 for (i = 0; i < DPK_RF_REG_NUM_8851B; i++) { 1787 rf_bkup[path][i] = rtw89_read_rf(rtwdev, path, rf_reg[i], RFREG_MASK); 1788 1789 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup RF S%d 0x%x = %x\n", 1790 path, rf_reg[i], rf_bkup[path][i]); 1791 } 1792 } 1793 1794 static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 *reg, 1795 u32 reg_bkup[][DPK_KIP_REG_NUM_8851B], u8 path) 1796 { 1797 u8 i; 1798 1799 for (i = 0; i < DPK_KIP_REG_NUM_8851B; i++) { 1800 rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD, 1801 reg_bkup[path][i]); 1802 1803 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1804 "[DPK] Reload 0x%x = %x\n", 1805 reg[i] + (path << 8), reg_bkup[path][i]); 1806 } 1807 } 1808 1809 static void _dpk_reload_rf(struct rtw89_dev *rtwdev, const u32 *rf_reg, 1810 u32 rf_bkup[][DPK_RF_REG_NUM_8851B], u8 path) 1811 { 1812 u8 i; 1813 1814 for (i = 0; i < DPK_RF_REG_NUM_8851B; i++) { 1815 rtw89_write_rf(rtwdev, path, rf_reg[i], RFREG_MASK, rf_bkup[path][i]); 1816 1817 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1818 "[DPK] Reload RF S%d 0x%x = %x\n", path, 1819 rf_reg[i], rf_bkup[path][i]); 1820 } 1821 } 1822 1823 static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1824 enum rtw89_rf_path path, enum dpk_id id) 1825 { 1826 u16 dpk_cmd; 1827 u32 val; 1828 int ret; 1829 1830 dpk_cmd = ((id << 8) | (0x19 + path * 0x12)); 1831 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd); 1832 1833 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 1834 10, 20000, false, 1835 rtwdev, 0xbff8, MASKBYTE0); 1836 if (ret) 1837 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 1 timeout\n"); 1838 1839 udelay(1); 1840 1841 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000, 1842 1, 2000, false, 1843 rtwdev, R_RPT_COM, MASKLWORD); 1844 if (ret) 1845 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 2 timeout\n"); 1846 1847 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0); 1848 1849 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1850 "[DPK] one-shot for %s = 0x%04x\n", 1851 id == 0x28 ? "KIP_PRESET" : 1852 id == 0x29 ? "DPK_TXAGC" : 1853 id == 0x2a ? "DPK_RXAGC" : 1854 id == 0x2b ? "SYNC" : 1855 id == 0x2c ? "GAIN_LOSS" : 1856 id == 0x2d ? "MDPK_IDL" : 1857 id == 0x2f ? "DPK_GAIN_NORM" : 1858 id == 0x31 ? "KIP_RESTORE" : 1859 id == 0x6 ? "LBK_RXIQK" : "Unknown id", 1860 dpk_cmd); 1861 } 1862 1863 static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 1864 bool off) 1865 { 1866 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1867 u8 kidx = dpk->cur_idx[path]; 1868 u8 off_reverse = off ? 0 : 1; 1869 u8 val; 1870 1871 val = dpk->is_dpk_enable * off_reverse * dpk->bp[path][kidx].path_ok; 1872 1873 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 1874 0xf0000000, val); 1875 1876 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path, 1877 kidx, val == 0 ? "disable" : "enable"); 1878 } 1879 1880 static void _dpk_init(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 1881 { 1882 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1883 1884 u8 kidx = dpk->cur_idx[path]; 1885 1886 dpk->bp[path][kidx].path_ok = 0; 1887 } 1888 1889 static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1890 enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx) 1891 { 1892 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); 1893 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1894 1895 u8 kidx = dpk->cur_idx[path]; 1896 1897 dpk->bp[path][kidx].band = chan->band_type; 1898 dpk->bp[path][kidx].ch = chan->band_width; 1899 dpk->bp[path][kidx].bw = chan->channel; 1900 1901 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1902 "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n", 1903 path, dpk->cur_idx[path], phy, 1904 rtwdev->is_tssi_mode[path] ? "on" : "off", 1905 rtwdev->dbcc_en ? "on" : "off", 1906 dpk->bp[path][kidx].band == 0 ? "2G" : 1907 dpk->bp[path][kidx].band == 1 ? "5G" : "6G", 1908 dpk->bp[path][kidx].ch, 1909 dpk->bp[path][kidx].bw == 0 ? "20M" : 1910 dpk->bp[path][kidx].bw == 1 ? "40M" : 1911 dpk->bp[path][kidx].bw == 2 ? "80M" : "160M"); 1912 } 1913 1914 static void _dpk_rxagc_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 1915 bool turn_on) 1916 { 1917 if (path == RF_PATH_A) 1918 rtw89_phy_write32_mask(rtwdev, R_P0_AGC_CTL, B_P0_AGC_EN, turn_on); 1919 else 1920 rtw89_phy_write32_mask(rtwdev, R_P1_AGC_CTL, B_P1_AGC_EN, turn_on); 1921 1922 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d RXAGC is %s\n", path, 1923 turn_on ? "turn_on" : "turn_off"); 1924 } 1925 1926 static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 1927 { 1928 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(16 + path), 0x1); 1929 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(20 + path), 0x0); 1930 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(24 + path), 0x1); 1931 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(28 + path), 0x0); 1932 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0xd801dffd); 1933 1934 _txck_force(rtwdev, path, true, DAC_960M); 1935 _rxck_force(rtwdev, path, true, ADC_1920M); 1936 1937 rtw89_phy_write32_mask(rtwdev, R_DCIM, B_DCIM_FR, 0x0); 1938 rtw89_phy_write32_mask(rtwdev, R_ADCMOD, B_ADCMOD_AUTO_RST, 0x1); 1939 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); 1940 udelay(1); 1941 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f); 1942 udelay(10); 1943 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13); 1944 udelay(2); 1945 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001); 1946 udelay(2); 1947 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041); 1948 udelay(10); 1949 1950 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(20 + path), 0x1); 1951 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(28 + path), 0x1); 1952 1953 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE setting\n", path); 1954 } 1955 1956 static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 1957 { 1958 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x0); 1959 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(16 + path), 0x1); 1960 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(20 + path), 0x0); 1961 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(24 + path), 0x1); 1962 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(28 + path), 0x0); 1963 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0x00000000); 1964 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), B_P0_TXCK_ALL, 0x00); 1965 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(16 + path), 0x0); 1966 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(24 + path), 0x0); 1967 1968 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE restore\n", path); 1969 } 1970 1971 static void _dpk_tssi_pause(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 1972 bool is_pause) 1973 { 1974 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), 1975 B_P0_TSSI_TRK_EN, is_pause); 1976 1977 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path, 1978 is_pause ? "pause" : "resume"); 1979 } 1980 1981 static 1982 void _dpk_tssi_slope_k_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 1983 bool is_on) 1984 { 1985 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_SLOPE_CAL + (path << 13), 1986 B_P0_TSSI_SLOPE_CAL_EN, is_on); 1987 1988 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI slpoe_k %s\n", path, 1989 str_on_off(is_on)); 1990 } 1991 1992 static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 1993 { 1994 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1995 1996 if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) { 1997 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0); 1998 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xffe0fa00); 1999 } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) { 2000 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2); 2001 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xff4009e0); 2002 } else { 2003 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1); 2004 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xf9f007d0); 2005 } 2006 2007 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG Select for %s\n", 2008 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" : 2009 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M"); 2010 } 2011 2012 static void _dpk_txpwr_bb_force(struct rtw89_dev *rtwdev, 2013 enum rtw89_rf_path path, bool force) 2014 { 2015 rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_ON, force); 2016 rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H + (path << 13), B_TXPWRB_RDY, force); 2017 2018 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d txpwr_bb_force %s\n", 2019 path, force ? "on" : "off"); 2020 } 2021 2022 static void _dpk_kip_pwr_clk_onoff(struct rtw89_dev *rtwdev, bool turn_on) 2023 { 2024 if (turn_on) { 2025 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); 2026 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a); 2027 } else { 2028 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000); 2029 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); 2030 rtw89_phy_write32_mask(rtwdev, R_DPK_WR, BIT(18), 0x1); 2031 } 2032 } 2033 2034 static void _dpk_kip_control_rfc(struct rtw89_dev *rtwdev, 2035 enum rtw89_rf_path path, bool ctrl_by_kip) 2036 { 2037 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), 2038 B_IQK_RFC_ON, ctrl_by_kip); 2039 } 2040 2041 static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2042 enum rtw89_rf_path path, u8 kidx) 2043 { 2044 rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, 2045 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); 2046 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 2047 B_DPD_SEL, 0x01); 2048 2049 _dpk_kip_control_rfc(rtwdev, path, true); 2050 _dpk_one_shot(rtwdev, phy, path, D_KIP_PRESET); 2051 } 2052 2053 static void _dpk_kip_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2054 enum rtw89_rf_path path) 2055 { 2056 _dpk_one_shot(rtwdev, phy, path, D_KIP_RESTORE); 2057 _dpk_kip_control_rfc(rtwdev, path, false); 2058 _dpk_txpwr_bb_force(rtwdev, path, false); 2059 2060 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path); 2061 } 2062 2063 static void _dpk_kset_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 2064 { 2065 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2066 2067 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0x10); 2068 2069 dpk->cur_k_set = 2070 rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_KSET) - 1; 2071 } 2072 2073 static void _dpk_para_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 2074 { 2075 static const u32 reg[RTW89_DPK_BKUP_NUM][DPK_KSET_NUM] = { 2076 {0x8190, 0x8194, 0x8198, 0x81a4}, 2077 {0x81a8, 0x81c4, 0x81c8, 0x81e8} 2078 }; 2079 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2080 u8 cur_k_set = dpk->cur_k_set; 2081 u32 para; 2082 2083 if (cur_k_set >= DPK_KSET_NUM) { 2084 rtw89_warn(rtwdev, "DPK cur_k_set = %d\n", cur_k_set); 2085 cur_k_set = 2; 2086 } 2087 2088 para = rtw89_phy_read32_mask(rtwdev, reg[kidx][cur_k_set] + (path << 8), 2089 MASKDWORD); 2090 2091 dpk->bp[path][kidx].txagc_dpk = (para >> 10) & 0x3f; 2092 dpk->bp[path][kidx].ther_dpk = (para >> 26) & 0x3f; 2093 2094 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2095 "[DPK] thermal/ txagc_RF (K%d) = 0x%x/ 0x%x\n", 2096 dpk->cur_k_set, dpk->bp[path][kidx].ther_dpk, 2097 dpk->bp[path][kidx].txagc_dpk); 2098 } 2099 2100 static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 2101 { 2102 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2103 u8 corr_val, corr_idx, rxbb; 2104 u16 dc_i, dc_q; 2105 u8 rxbb_ov; 2106 2107 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); 2108 2109 corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI); 2110 corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV); 2111 dpk->corr_idx[path][kidx] = corr_idx; 2112 dpk->corr_val[path][kidx] = corr_val; 2113 2114 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9); 2115 2116 dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); 2117 dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ); 2118 2119 dc_i = abs(sign_extend32(dc_i, 11)); 2120 dc_q = abs(sign_extend32(dc_q, 11)); 2121 2122 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2123 "[DPK] S%d Corr_idx/ Corr_val /DC I/Q, = %d / %d / %d / %d\n", 2124 path, corr_idx, corr_val, dc_i, dc_q); 2125 2126 dpk->dc_i[path][kidx] = dc_i; 2127 dpk->dc_q[path][kidx] = dc_q; 2128 2129 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x8); 2130 rxbb = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB); 2131 2132 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x31); 2133 rxbb_ov = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXOV); 2134 2135 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2136 "[DPK] S%d RXBB/ RXAGC_done /RXBB_ovlmt = %d / %d / %d\n", 2137 path, rxbb, 2138 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DONE), 2139 rxbb_ov); 2140 2141 if (dc_i > 200 || dc_q > 200 || corr_val < 170) 2142 return true; 2143 else 2144 return false; 2145 } 2146 2147 static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2148 enum rtw89_rf_path path, u8 dbm, 2149 bool set_from_bb) 2150 { 2151 if (set_from_bb) { 2152 dbm = clamp_t(u8, dbm, 7, 24); 2153 2154 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2155 "[DPK] set S%d txagc to %ddBm\n", path, dbm); 2156 rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), 2157 B_TXPWRB_VAL, dbm << 2); 2158 } 2159 2160 _dpk_one_shot(rtwdev, phy, path, D_TXAGC); 2161 _dpk_kset_query(rtwdev, path); 2162 } 2163 2164 static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2165 enum rtw89_rf_path path, u8 kidx) 2166 { 2167 _dpk_kip_control_rfc(rtwdev, path, false); 2168 rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, 2169 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); 2170 _dpk_kip_control_rfc(rtwdev, path, true); 2171 2172 _dpk_one_shot(rtwdev, phy, path, D_RXAGC); 2173 return _dpk_sync_check(rtwdev, path, kidx); 2174 } 2175 2176 static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2177 enum rtw89_rf_path path) 2178 { 2179 u32 rf_11, reg_81cc; 2180 u8 cur_rxbb; 2181 2182 rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1); 2183 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1); 2184 2185 _dpk_kip_control_rfc(rtwdev, path, false); 2186 2187 cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_RXB); 2188 rf_11 = rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK); 2189 reg_81cc = rtw89_phy_read32_mask(rtwdev, R_KIP_IQP + (path << 8), 2190 B_KIP_IQP_SW); 2191 2192 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); 2193 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x3); 2194 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0xd); 2195 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RXB, 0x1f); 2196 2197 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x12); 2198 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, 0x3); 2199 2200 _dpk_kip_control_rfc(rtwdev, path, true); 2201 2202 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, MASKDWORD, 0x00250025); 2203 2204 _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK); 2205 2206 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path, 2207 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD)); 2208 2209 _dpk_kip_control_rfc(rtwdev, path, false); 2210 2211 rtw89_write_rf(rtwdev, path, RR_TXIG, RFREG_MASK, rf_11); 2212 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RXB, cur_rxbb); 2213 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, reg_81cc); 2214 2215 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0); 2216 rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0); 2217 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1); 2218 2219 _dpk_kip_control_rfc(rtwdev, path, true); 2220 } 2221 2222 static void _dpk_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 2223 { 2224 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2225 2226 if (dpk->bp[path][kidx].band == RTW89_BAND_2G) { 2227 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50521); 2228 rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK); 2229 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x0); 2230 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x7); 2231 } else { 2232 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 2233 0x50521 | BIT(rtwdev->dbcc_en)); 2234 rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK); 2235 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SATT, 0x3); 2236 } 2237 2238 rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1); 2239 rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1); 2240 rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0); 2241 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_EBW, 0x0); 2242 } 2243 2244 static void _dpk_bypass_rxiqc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 2245 { 2246 rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1); 2247 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000002); 2248 2249 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Bypass RXIQC\n"); 2250 } 2251 2252 static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev) 2253 { 2254 u16 dgain; 2255 2256 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); 2257 dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); 2258 2259 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain); 2260 2261 return dgain; 2262 } 2263 2264 static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev) 2265 { 2266 u8 result; 2267 2268 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6); 2269 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1); 2270 result = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL); 2271 2272 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp GL = %d\n", result); 2273 2274 return result; 2275 } 2276 2277 static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2278 enum rtw89_rf_path path, u8 kidx) 2279 { 2280 _dpk_one_shot(rtwdev, phy, path, D_GAIN_LOSS); 2281 _dpk_kip_set_txagc(rtwdev, phy, path, 0xff, false); 2282 2283 rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A1, 0xf078); 2284 rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A0, 0x0); 2285 2286 return _dpk_gainloss_read(rtwdev); 2287 } 2288 2289 static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, u8 is_check) 2290 { 2291 u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0; 2292 u32 val1_sqrt_sum, val2_sqrt_sum; 2293 u8 i; 2294 2295 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06); 2296 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0); 2297 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08); 2298 2299 if (is_check) { 2300 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00); 2301 val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); 2302 val1_i = abs(sign_extend32(val1_i, 11)); 2303 val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); 2304 val1_q = abs(sign_extend32(val1_q, 11)); 2305 2306 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f); 2307 val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); 2308 val2_i = abs(sign_extend32(val2_i, 11)); 2309 val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); 2310 val2_q = abs(sign_extend32(val2_q, 11)); 2311 2312 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n", 2313 phy_div(val1_i * val1_i + val1_q * val1_q, 2314 val2_i * val2_i + val2_q * val2_q)); 2315 } else { 2316 for (i = 0; i < 32; i++) { 2317 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i); 2318 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2319 "[DPK] PAS_Read[%02d]= 0x%08x\n", i, 2320 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD)); 2321 } 2322 } 2323 2324 val1_sqrt_sum = val1_i * val1_i + val1_q * val1_q; 2325 val2_sqrt_sum = val2_i * val2_i + val2_q * val2_q; 2326 2327 if (val1_sqrt_sum < val2_sqrt_sum) 2328 return 2; 2329 else if (val1_sqrt_sum >= val2_sqrt_sum * 8 / 5) 2330 return 1; 2331 else 2332 return 0; 2333 } 2334 2335 static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2336 enum rtw89_rf_path path, u8 kidx, u8 init_xdbm, u8 loss_only) 2337 { 2338 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2339 u8 tmp_dbm = init_xdbm, tmp_gl_idx = 0; 2340 u8 step = DPK_AGC_STEP_SYNC_DGAIN; 2341 u8 goout = 0, agc_cnt = 0; 2342 bool is_fail = false; 2343 int limit = 200; 2344 u8 tmp_rxbb; 2345 u16 dgain; 2346 2347 do { 2348 switch (step) { 2349 case DPK_AGC_STEP_SYNC_DGAIN: 2350 is_fail = _dpk_kip_set_rxagc(rtwdev, phy, path, kidx); 2351 2352 if (is_fail) { 2353 goout = 1; 2354 break; 2355 } 2356 2357 dgain = _dpk_dgain_read(rtwdev); 2358 2359 if (dgain > 0x5fc || dgain < 0x556) { 2360 _dpk_one_shot(rtwdev, phy, path, D_SYNC); 2361 _dpk_dgain_read(rtwdev); 2362 } 2363 2364 if (agc_cnt == 0) { 2365 if (dpk->bp[path][kidx].band == RTW89_BAND_2G) 2366 _dpk_bypass_rxiqc(rtwdev, path); 2367 else 2368 _dpk_lbk_rxiqk(rtwdev, phy, path); 2369 } 2370 step = DPK_AGC_STEP_GAIN_LOSS_IDX; 2371 break; 2372 2373 case DPK_AGC_STEP_GAIN_LOSS_IDX: 2374 tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx); 2375 2376 if (_dpk_pas_read(rtwdev, true) == 2 && tmp_gl_idx > 0) 2377 step = DPK_AGC_STEP_GL_LT_CRITERION; 2378 else if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true) == 1) || 2379 tmp_gl_idx >= 7) 2380 step = DPK_AGC_STEP_GL_GT_CRITERION; 2381 else if (tmp_gl_idx == 0) 2382 step = DPK_AGC_STEP_GL_LT_CRITERION; 2383 else 2384 step = DPK_AGC_STEP_SET_TX_GAIN; 2385 break; 2386 2387 case DPK_AGC_STEP_GL_GT_CRITERION: 2388 if (tmp_dbm <= 7) { 2389 goout = 1; 2390 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2391 "[DPK] Txagc@lower bound!!\n"); 2392 } else { 2393 tmp_dbm = max_t(u8, tmp_dbm - 3, 7); 2394 _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true); 2395 } 2396 step = DPK_AGC_STEP_SYNC_DGAIN; 2397 agc_cnt++; 2398 break; 2399 2400 case DPK_AGC_STEP_GL_LT_CRITERION: 2401 if (tmp_dbm >= 24) { 2402 goout = 1; 2403 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2404 "[DPK] Txagc@upper bound!!\n"); 2405 } else { 2406 tmp_dbm = min_t(u8, tmp_dbm + 2, 24); 2407 _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true); 2408 } 2409 step = DPK_AGC_STEP_SYNC_DGAIN; 2410 agc_cnt++; 2411 break; 2412 2413 case DPK_AGC_STEP_SET_TX_GAIN: 2414 _dpk_kip_control_rfc(rtwdev, path, false); 2415 tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_RXB); 2416 tmp_rxbb = min_t(u8, tmp_rxbb + tmp_gl_idx, 0x1f); 2417 2418 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RXB, tmp_rxbb); 2419 2420 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2421 "[DPK] Adjust RXBB (%+d) = 0x%x\n", 2422 tmp_gl_idx, tmp_rxbb); 2423 _dpk_kip_control_rfc(rtwdev, path, true); 2424 goout = 1; 2425 break; 2426 default: 2427 goout = 1; 2428 break; 2429 } 2430 } while (!goout && agc_cnt < 6 && limit-- > 0); 2431 2432 return is_fail; 2433 } 2434 2435 static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order) 2436 { 2437 switch (order) { 2438 case 0: /* (5,3,1) */ 2439 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, 0x0); 2440 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, 0x2); 2441 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3); 2442 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, 0x1); 2443 break; 2444 case 1: /* (5,3,0) */ 2445 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, 0x1); 2446 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, 0x1); 2447 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0); 2448 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, 0x0); 2449 break; 2450 case 2: /* (5,0,0) */ 2451 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, 0x2); 2452 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, 0x0); 2453 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0); 2454 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, 0x0); 2455 break; 2456 case 3: /* (7,3,1) */ 2457 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, 0x3); 2458 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, 0x3); 2459 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x4); 2460 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, 0x1); 2461 break; 2462 default: 2463 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2464 "[DPK] Wrong MDPD order!!(0x%x)\n", order); 2465 break; 2466 } 2467 2468 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n", 2469 order == 0x0 ? "(5,3,1)" : 2470 order == 0x1 ? "(5,3,0)" : 2471 order == 0x2 ? "(5,0,0)" : "(7,3,1)"); 2472 } 2473 2474 static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2475 enum rtw89_rf_path path, u8 kidx) 2476 { 2477 if (rtw89_phy_read32_mask(rtwdev, R_IDL_MPA, B_IDL_MD500) == 0x1) 2478 _dpk_set_mdpd_para(rtwdev, 0x2); 2479 else if (rtw89_phy_read32_mask(rtwdev, R_IDL_MPA, B_IDL_MD530) == 0x1) 2480 _dpk_set_mdpd_para(rtwdev, 0x1); 2481 else 2482 _dpk_set_mdpd_para(rtwdev, 0x0); 2483 2484 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL, 0x0); 2485 fsleep(1000); 2486 2487 _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL); 2488 } 2489 2490 static u8 _dpk_order_convert(struct rtw89_dev *rtwdev) 2491 { 2492 u32 order; 2493 u8 val; 2494 2495 order = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP); 2496 2497 switch (order) { 2498 case 0: /* (5,3,1) */ 2499 val = 0x6; 2500 break; 2501 case 1: /* (5,3,0) */ 2502 val = 0x2; 2503 break; 2504 case 2: /* (5,0,0) */ 2505 val = 0x0; 2506 break; 2507 default: 2508 val = 0xff; 2509 break; 2510 } 2511 2512 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val); 2513 2514 return val; 2515 } 2516 2517 static void _dpk_gain_normalize(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2518 enum rtw89_rf_path path, u8 kidx, bool is_execute) 2519 { 2520 static const u32 reg[RTW89_DPK_BKUP_NUM][DPK_KSET_NUM] = { 2521 {0x8190, 0x8194, 0x8198, 0x81a4}, 2522 {0x81a8, 0x81c4, 0x81c8, 0x81e8} 2523 }; 2524 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2525 u8 cur_k_set = dpk->cur_k_set; 2526 2527 if (cur_k_set >= DPK_KSET_NUM) { 2528 rtw89_warn(rtwdev, "DPK cur_k_set = %d\n", cur_k_set); 2529 cur_k_set = 2; 2530 } 2531 2532 if (is_execute) { 2533 rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), 2534 B_DPK_GN_AG, 0x200); 2535 rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), 2536 B_DPK_GN_EN, 0x3); 2537 2538 _dpk_one_shot(rtwdev, phy, path, D_GAIN_NORM); 2539 } else { 2540 rtw89_phy_write32_mask(rtwdev, reg[kidx][cur_k_set] + (path << 8), 2541 0x0000007F, 0x5b); 2542 } 2543 2544 dpk->bp[path][kidx].gs = 2545 rtw89_phy_read32_mask(rtwdev, reg[kidx][cur_k_set] + (path << 8), 2546 0x0000007F); 2547 } 2548 2549 static void _dpk_on(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2550 enum rtw89_rf_path path, u8 kidx) 2551 { 2552 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2553 2554 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1); 2555 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0); 2556 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 2557 B_DPD_ORDER, _dpk_order_convert(rtwdev)); 2558 2559 dpk->bp[path][kidx].path_ok = 2560 dpk->bp[path][kidx].path_ok | BIT(dpk->cur_k_set); 2561 2562 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] path_ok = 0x%x\n", 2563 path, kidx, dpk->bp[path][kidx].path_ok); 2564 2565 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 2566 B_DPD_MEN, dpk->bp[path][kidx].path_ok); 2567 2568 _dpk_gain_normalize(rtwdev, phy, path, kidx, false); 2569 } 2570 2571 static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2572 enum rtw89_rf_path path) 2573 { 2574 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2575 u8 kidx = dpk->cur_idx[path]; 2576 u8 init_xdbm = 17; 2577 bool is_fail; 2578 2579 _dpk_kip_control_rfc(rtwdev, path, false); 2580 _rfk_rf_direct_cntrl(rtwdev, path, false); 2581 rtw89_write_rf(rtwdev, path, RR_BBDC, RFREG_MASK, 0x03ffd); 2582 2583 _dpk_rf_setting(rtwdev, path, kidx); 2584 _set_rx_dck(rtwdev, path, RF_DPK); 2585 2586 _dpk_kip_pwr_clk_onoff(rtwdev, true); 2587 _dpk_kip_preset(rtwdev, phy, path, kidx); 2588 _dpk_txpwr_bb_force(rtwdev, path, true); 2589 _dpk_kip_set_txagc(rtwdev, phy, path, init_xdbm, true); 2590 _dpk_tpg_sel(rtwdev, path, kidx); 2591 is_fail = _dpk_agc(rtwdev, phy, path, kidx, init_xdbm, false); 2592 if (is_fail) 2593 goto _error; 2594 2595 _dpk_idl_mpa(rtwdev, phy, path, kidx); 2596 _dpk_para_query(rtwdev, path, kidx); 2597 2598 _dpk_on(rtwdev, phy, path, kidx); 2599 _error: 2600 _dpk_kip_control_rfc(rtwdev, path, false); 2601 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX); 2602 2603 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d]_K%d %s\n", path, kidx, 2604 dpk->cur_k_set, is_fail ? "need Check" : "is Success"); 2605 2606 return is_fail; 2607 } 2608 2609 static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, 2610 enum rtw89_phy_idx phy, u8 kpath, 2611 enum rtw89_chanctx_idx chanctx_idx) 2612 { 2613 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2614 u32 kip_bkup[RF_PATH_NUM_8851B][DPK_KIP_REG_NUM_8851B] = {}; 2615 u32 rf_bkup[RF_PATH_NUM_8851B][DPK_RF_REG_NUM_8851B] = {}; 2616 bool is_fail; 2617 u8 path; 2618 2619 for (path = 0; path < RF_PATH_NUM_8851B; path++) 2620 dpk->cur_idx[path] = 0; 2621 2622 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 2623 if (!(kpath & BIT(path))) 2624 continue; 2625 _dpk_bkup_kip(rtwdev, dpk_kip_reg, kip_bkup, path); 2626 _dpk_bkup_rf(rtwdev, dpk_rf_reg, rf_bkup, path); 2627 _dpk_information(rtwdev, phy, path, chanctx_idx); 2628 _dpk_init(rtwdev, path); 2629 2630 if (rtwdev->is_tssi_mode[path]) 2631 _dpk_tssi_pause(rtwdev, path, true); 2632 } 2633 2634 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 2635 if (!(kpath & BIT(path))) 2636 continue; 2637 2638 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2639 "[DPK] ========= S%d[%d] DPK Start =========\n", 2640 path, dpk->cur_idx[path]); 2641 2642 _dpk_tssi_slope_k_onoff(rtwdev, path, false); 2643 _dpk_rxagc_onoff(rtwdev, path, false); 2644 _rfk_drf_direct_cntrl(rtwdev, path, false); 2645 _dpk_bb_afe_setting(rtwdev, path); 2646 2647 is_fail = _dpk_main(rtwdev, phy, path); 2648 _dpk_onoff(rtwdev, path, is_fail); 2649 } 2650 2651 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 2652 if (!(kpath & BIT(path))) 2653 continue; 2654 2655 _dpk_kip_restore(rtwdev, phy, path); 2656 _dpk_reload_kip(rtwdev, dpk_kip_reg, kip_bkup, path); 2657 _dpk_reload_rf(rtwdev, dpk_rf_reg, rf_bkup, path); 2658 _dpk_bb_afe_restore(rtwdev, path); 2659 _dpk_rxagc_onoff(rtwdev, path, true); 2660 _dpk_tssi_slope_k_onoff(rtwdev, path, true); 2661 if (rtwdev->is_tssi_mode[path]) 2662 _dpk_tssi_pause(rtwdev, path, false); 2663 } 2664 2665 _dpk_kip_pwr_clk_onoff(rtwdev, false); 2666 } 2667 2668 static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force, 2669 enum rtw89_chanctx_idx chanctx_idx) 2670 { 2671 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2672 "[DPK] ****** 8851B DPK Start (Ver: 0x%x, Cv: %d) ******\n", 2673 DPK_VER_8851B, rtwdev->hal.cv); 2674 2675 _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy), chanctx_idx); 2676 } 2677 2678 static void _dpk_track(struct rtw89_dev *rtwdev) 2679 { 2680 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2681 s8 txagc_bb, txagc_bb_tp, txagc_ofst; 2682 s16 pwsf_tssi_ofst; 2683 s8 delta_ther = 0; 2684 u8 path, kidx; 2685 u8 txagc_rf; 2686 u8 cur_ther; 2687 2688 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 2689 kidx = dpk->cur_idx[path]; 2690 2691 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2692 "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n", 2693 path, kidx, dpk->bp[path][kidx].ch); 2694 2695 txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 2696 B_TXAGC_RF); 2697 txagc_bb = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 2698 MASKBYTE2); 2699 txagc_bb_tp = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BTP + (path << 13), 2700 B_TXAGC_BTP); 2701 2702 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), 2703 B_KIP_RPT_SEL, 0xf); 2704 cur_ther = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 2705 B_RPT_PER_TH); 2706 txagc_ofst = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 2707 B_RPT_PER_OF); 2708 pwsf_tssi_ofst = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 2709 B_RPT_PER_TSSI); 2710 pwsf_tssi_ofst = sign_extend32(pwsf_tssi_ofst, 12); 2711 2712 delta_ther = cur_ther - dpk->bp[path][kidx].ther_dpk; 2713 2714 delta_ther = delta_ther * 2 / 3; 2715 2716 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2717 "[DPK_TRK] extra delta_ther = %d (0x%x / 0x%x@k)\n", 2718 delta_ther, cur_ther, dpk->bp[path][kidx].ther_dpk); 2719 2720 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2721 "[DPK_TRK] delta_txagc = %d (0x%x / 0x%x@k)\n", 2722 txagc_rf - dpk->bp[path][kidx].txagc_dpk, 2723 txagc_rf, dpk->bp[path][kidx].txagc_dpk); 2724 2725 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2726 "[DPK_TRK] txagc_offset / pwsf_tssi_ofst = 0x%x / %+d\n", 2727 txagc_ofst, pwsf_tssi_ofst); 2728 2729 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2730 "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n", 2731 txagc_bb_tp, txagc_bb); 2732 2733 if (rtw89_phy_read32_mask(rtwdev, R_IDL_MPA, B_IDL_DN) == 0x0 && 2734 txagc_rf != 0) { 2735 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2736 "[DPK_TRK] New pwsf = 0x%x\n", 0x78 - delta_ther); 2737 2738 rtw89_phy_write32_mask(rtwdev, 2739 R_DPD_BND + (path << 8) + (kidx << 2), 2740 0x07FC0000, 0x78 - delta_ther); 2741 } 2742 } 2743 } 2744 2745 static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 2746 { 2747 u32 rf_reg5; 2748 u32 rck_val; 2749 u32 val; 2750 int ret; 2751 2752 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path); 2753 2754 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); 2755 2756 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 2757 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); 2758 2759 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n", 2760 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); 2761 2762 /* RCK trigger */ 2763 rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240); 2764 2765 ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30, 2766 false, rtwdev, path, RR_RCKS, BIT(3)); 2767 2768 rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA); 2769 2770 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n", 2771 rck_val, ret); 2772 2773 rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val); 2774 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); 2775 2776 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n", 2777 rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK)); 2778 } 2779 2780 static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2781 enum rtw89_rf_path path, const struct rtw89_chan *chan) 2782 { 2783 enum rtw89_band band = chan->band_type; 2784 2785 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_sys_defs_tbl); 2786 2787 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2788 &rtw8851b_tssi_sys_a_defs_2g_tbl, 2789 &rtw8851b_tssi_sys_a_defs_5g_tbl); 2790 } 2791 2792 static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, 2793 enum rtw89_phy_idx phy, 2794 enum rtw89_rf_path path) 2795 { 2796 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_init_txpwr_defs_a_tbl); 2797 } 2798 2799 static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev, 2800 enum rtw89_phy_idx phy, 2801 enum rtw89_rf_path path) 2802 { 2803 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_init_txpwr_he_tb_defs_a_tbl); 2804 } 2805 2806 static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2807 enum rtw89_rf_path path) 2808 { 2809 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_dck_defs_a_tbl); 2810 } 2811 2812 static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2813 enum rtw89_rf_path path, const struct rtw89_chan *chan) 2814 { 2815 #define RTW8851B_TSSI_GET_VAL(ptr, idx) \ 2816 ({ \ 2817 s8 *__ptr = (ptr); \ 2818 u8 __idx = (idx), __i, __v; \ 2819 u32 __val = 0; \ 2820 for (__i = 0; __i < 4; __i++) { \ 2821 __v = (__ptr[__idx + __i]); \ 2822 __val |= (__v << (8 * __i)); \ 2823 } \ 2824 __val; \ 2825 }) 2826 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 2827 u8 ch = chan->channel; 2828 u8 subband = chan->subband_type; 2829 const s8 *thm_up_a = NULL; 2830 const s8 *thm_down_a = NULL; 2831 u8 thermal = 0xff; 2832 s8 thm_ofst[64] = {0}; 2833 u32 tmp = 0; 2834 u8 i, j; 2835 2836 switch (subband) { 2837 default: 2838 case RTW89_CH_2G: 2839 thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_2ga_p; 2840 thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_2ga_n; 2841 break; 2842 case RTW89_CH_5G_BAND_1: 2843 thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_p[0]; 2844 thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_n[0]; 2845 break; 2846 case RTW89_CH_5G_BAND_3: 2847 thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_p[1]; 2848 thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_n[1]; 2849 break; 2850 case RTW89_CH_5G_BAND_4: 2851 thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_p[2]; 2852 thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_n[2]; 2853 break; 2854 } 2855 2856 if (path == RF_PATH_A) { 2857 thermal = tssi_info->thermal[RF_PATH_A]; 2858 2859 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2860 "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal); 2861 2862 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0); 2863 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1); 2864 2865 if (thermal == 0xff) { 2866 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32); 2867 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32); 2868 2869 for (i = 0; i < 64; i += 4) { 2870 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0); 2871 2872 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2873 "[TSSI] write 0x%x val=0x%08x\n", 2874 R_P0_TSSI_BASE + i, 0x0); 2875 } 2876 2877 } else { 2878 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 2879 thermal); 2880 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 2881 thermal); 2882 2883 i = 0; 2884 for (j = 0; j < 32; j++) 2885 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 2886 -thm_down_a[i++] : 2887 -thm_down_a[DELTA_SWINGIDX_SIZE - 1]; 2888 2889 i = 1; 2890 for (j = 63; j >= 32; j--) 2891 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 2892 thm_up_a[i++] : 2893 thm_up_a[DELTA_SWINGIDX_SIZE - 1]; 2894 2895 for (i = 0; i < 64; i += 4) { 2896 tmp = RTW8851B_TSSI_GET_VAL(thm_ofst, i); 2897 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp); 2898 2899 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2900 "[TSSI] write 0x%x val=0x%08x\n", 2901 0x5c00 + i, tmp); 2902 } 2903 } 2904 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1); 2905 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0); 2906 } 2907 #undef RTW8851B_TSSI_GET_VAL 2908 } 2909 2910 static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2911 enum rtw89_rf_path path) 2912 { 2913 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_dac_gain_defs_a_tbl); 2914 } 2915 2916 static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2917 enum rtw89_rf_path path, const struct rtw89_chan *chan) 2918 { 2919 enum rtw89_band band = chan->band_type; 2920 2921 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2922 &rtw8851b_tssi_slope_a_defs_2g_tbl, 2923 &rtw8851b_tssi_slope_a_defs_5g_tbl); 2924 } 2925 2926 static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2927 enum rtw89_rf_path path, bool all, 2928 const struct rtw89_chan *chan) 2929 { 2930 enum rtw89_band band = chan->band_type; 2931 2932 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2933 &rtw8851b_tssi_align_a_2g_defs_tbl, 2934 &rtw8851b_tssi_align_a_5g_defs_tbl); 2935 } 2936 2937 static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2938 enum rtw89_rf_path path) 2939 { 2940 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_slope_defs_a_tbl); 2941 } 2942 2943 static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2944 enum rtw89_rf_path path) 2945 { 2946 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_track_defs_a_tbl); 2947 } 2948 2949 static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev, 2950 enum rtw89_phy_idx phy, 2951 enum rtw89_rf_path path) 2952 { 2953 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_mv_avg_defs_a_tbl); 2954 } 2955 2956 static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 2957 { 2958 _tssi_set_tssi_track(rtwdev, phy, RF_PATH_A); 2959 _tssi_set_txagc_offset_mv_avg(rtwdev, phy, RF_PATH_A); 2960 2961 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x0); 2962 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0); 2963 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x1); 2964 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXGA_V1, RR_TXGA_V1_TRK_EN, 0x1); 2965 2966 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 2967 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x3); 2968 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0); 2969 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 2970 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); 2971 2972 rtwdev->is_tssi_mode[RF_PATH_A] = true; 2973 } 2974 2975 static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 2976 { 2977 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0); 2978 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 2979 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); 2980 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 2981 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1); 2982 2983 rtwdev->is_tssi_mode[RF_PATH_A] = false; 2984 } 2985 2986 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch) 2987 { 2988 switch (ch) { 2989 case 1 ... 2: 2990 return 0; 2991 case 3 ... 5: 2992 return 1; 2993 case 6 ... 8: 2994 return 2; 2995 case 9 ... 11: 2996 return 3; 2997 case 12 ... 13: 2998 return 4; 2999 case 14: 3000 return 5; 3001 } 3002 3003 return 0; 3004 } 3005 3006 #define TSSI_EXTRA_GROUP_BIT (BIT(31)) 3007 #define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx)) 3008 #define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT) 3009 #define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT) 3010 #define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1) 3011 3012 static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch) 3013 { 3014 switch (ch) { 3015 case 1 ... 2: 3016 return 0; 3017 case 3 ... 5: 3018 return 1; 3019 case 6 ... 8: 3020 return 2; 3021 case 9 ... 11: 3022 return 3; 3023 case 12 ... 14: 3024 return 4; 3025 case 36 ... 40: 3026 return 5; 3027 case 41 ... 43: 3028 return TSSI_EXTRA_GROUP(5); 3029 case 44 ... 48: 3030 return 6; 3031 case 49 ... 51: 3032 return TSSI_EXTRA_GROUP(6); 3033 case 52 ... 56: 3034 return 7; 3035 case 57 ... 59: 3036 return TSSI_EXTRA_GROUP(7); 3037 case 60 ... 64: 3038 return 8; 3039 case 100 ... 104: 3040 return 9; 3041 case 105 ... 107: 3042 return TSSI_EXTRA_GROUP(9); 3043 case 108 ... 112: 3044 return 10; 3045 case 113 ... 115: 3046 return TSSI_EXTRA_GROUP(10); 3047 case 116 ... 120: 3048 return 11; 3049 case 121 ... 123: 3050 return TSSI_EXTRA_GROUP(11); 3051 case 124 ... 128: 3052 return 12; 3053 case 129 ... 131: 3054 return TSSI_EXTRA_GROUP(12); 3055 case 132 ... 136: 3056 return 13; 3057 case 137 ... 139: 3058 return TSSI_EXTRA_GROUP(13); 3059 case 140 ... 144: 3060 return 14; 3061 case 149 ... 153: 3062 return 15; 3063 case 154 ... 156: 3064 return TSSI_EXTRA_GROUP(15); 3065 case 157 ... 161: 3066 return 16; 3067 case 162 ... 164: 3068 return TSSI_EXTRA_GROUP(16); 3069 case 165 ... 169: 3070 return 17; 3071 case 170 ... 172: 3072 return TSSI_EXTRA_GROUP(17); 3073 case 173 ... 177: 3074 return 18; 3075 } 3076 3077 return 0; 3078 } 3079 3080 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch) 3081 { 3082 switch (ch) { 3083 case 1 ... 8: 3084 return 0; 3085 case 9 ... 14: 3086 return 1; 3087 case 36 ... 48: 3088 return 2; 3089 case 52 ... 64: 3090 return 3; 3091 case 100 ... 112: 3092 return 4; 3093 case 116 ... 128: 3094 return 5; 3095 case 132 ... 144: 3096 return 6; 3097 case 149 ... 177: 3098 return 7; 3099 } 3100 3101 return 0; 3102 } 3103 3104 static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3105 enum rtw89_rf_path path, const struct rtw89_chan *chan) 3106 { 3107 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3108 u32 gidx, gidx_1st, gidx_2nd; 3109 u8 ch = chan->channel; 3110 s8 de_1st; 3111 s8 de_2nd; 3112 s8 val; 3113 3114 gidx = _tssi_get_ofdm_group(rtwdev, ch); 3115 3116 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3117 "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx); 3118 3119 if (IS_TSSI_EXTRA_GROUP(gidx)) { 3120 gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx); 3121 gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx); 3122 de_1st = tssi_info->tssi_mcs[path][gidx_1st]; 3123 de_2nd = tssi_info->tssi_mcs[path][gidx_2nd]; 3124 val = (de_1st + de_2nd) / 2; 3125 3126 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3127 "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", 3128 path, val, de_1st, de_2nd); 3129 } else { 3130 val = tssi_info->tssi_mcs[path][gidx]; 3131 3132 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3133 "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); 3134 } 3135 3136 return val; 3137 } 3138 3139 static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3140 enum rtw89_rf_path path, const struct rtw89_chan *chan) 3141 { 3142 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3143 u32 tgidx, tgidx_1st, tgidx_2nd; 3144 u8 ch = chan->channel; 3145 s8 tde_1st; 3146 s8 tde_2nd; 3147 s8 val; 3148 3149 tgidx = _tssi_get_trim_group(rtwdev, ch); 3150 3151 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3152 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", 3153 path, tgidx); 3154 3155 if (IS_TSSI_EXTRA_GROUP(tgidx)) { 3156 tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx); 3157 tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx); 3158 tde_1st = tssi_info->tssi_trim[path][tgidx_1st]; 3159 tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd]; 3160 val = (tde_1st + tde_2nd) / 2; 3161 3162 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3163 "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", 3164 path, val, tde_1st, tde_2nd); 3165 } else { 3166 val = tssi_info->tssi_trim[path][tgidx]; 3167 3168 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3169 "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", 3170 path, val); 3171 } 3172 3173 return val; 3174 } 3175 3176 static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3177 const struct rtw89_chan *chan) 3178 { 3179 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3180 u8 ch = chan->channel; 3181 u8 gidx; 3182 s8 ofdm_de; 3183 s8 trim_de; 3184 s32 val; 3185 u32 i; 3186 3187 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n", 3188 phy, ch); 3189 3190 for (i = RF_PATH_A; i < RTW8851B_TSSI_PATH_NR; i++) { 3191 gidx = _tssi_get_cck_group(rtwdev, ch); 3192 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan); 3193 val = tssi_info->tssi_cck[i][gidx] + trim_de; 3194 3195 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3196 "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n", 3197 i, gidx, tssi_info->tssi_cck[i][gidx], trim_de); 3198 3199 rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val); 3200 rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val); 3201 3202 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3203 "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n", 3204 _tssi_de_cck_long[i], 3205 rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i], 3206 _TSSI_DE_MASK)); 3207 3208 ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan); 3209 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan); 3210 val = ofdm_de + trim_de; 3211 3212 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3213 "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n", 3214 i, ofdm_de, trim_de); 3215 3216 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val); 3217 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val); 3218 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val); 3219 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val); 3220 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val); 3221 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val); 3222 3223 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3224 "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n", 3225 _tssi_de_mcs_20m[i], 3226 rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i], 3227 _TSSI_DE_MASK)); 3228 } 3229 } 3230 3231 static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 3232 { 3233 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3234 "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n" 3235 "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n", 3236 R_TSSI_PA_K1 + (path << 13), 3237 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K1 + (path << 13), MASKDWORD), 3238 R_TSSI_PA_K2 + (path << 13), 3239 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K2 + (path << 13), MASKDWORD), 3240 R_P0_TSSI_ALIM1 + (path << 13), 3241 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD), 3242 R_P0_TSSI_ALIM3 + (path << 13), 3243 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD), 3244 R_TSSI_PA_K5 + (path << 13), 3245 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K5 + (path << 13), MASKDWORD), 3246 R_P0_TSSI_ALIM2 + (path << 13), 3247 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD), 3248 R_P0_TSSI_ALIM4 + (path << 13), 3249 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD), 3250 R_TSSI_PA_K8 + (path << 13), 3251 rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K8 + (path << 13), MASKDWORD)); 3252 } 3253 3254 static void _tssi_alimentk_done(struct rtw89_dev *rtwdev, 3255 enum rtw89_phy_idx phy, enum rtw89_rf_path path, 3256 const struct rtw89_chan *chan) 3257 { 3258 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3259 u8 channel = chan->channel; 3260 u8 band; 3261 3262 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3263 "======>%s phy=%d path=%d\n", __func__, phy, path); 3264 3265 if (channel >= 1 && channel <= 14) 3266 band = TSSI_ALIMK_2G; 3267 else if (channel >= 36 && channel <= 64) 3268 band = TSSI_ALIMK_5GL; 3269 else if (channel >= 100 && channel <= 144) 3270 band = TSSI_ALIMK_5GM; 3271 else if (channel >= 149 && channel <= 177) 3272 band = TSSI_ALIMK_5GH; 3273 else 3274 band = TSSI_ALIMK_2G; 3275 3276 if (tssi_info->alignment_done[path][band]) { 3277 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD, 3278 tssi_info->alignment_value[path][band][0]); 3279 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD, 3280 tssi_info->alignment_value[path][band][1]); 3281 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD, 3282 tssi_info->alignment_value[path][band][2]); 3283 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD, 3284 tssi_info->alignment_value[path][band][3]); 3285 } 3286 3287 _tssi_alimentk_dump_result(rtwdev, path); 3288 } 3289 3290 static void rtw8851b_by_rate_dpd(struct rtw89_dev *rtwdev) 3291 { 3292 rtw89_write32_mask(rtwdev, R_AX_PWR_SWING_OTHER_CTRL0, 3293 B_AX_CFIR_BY_RATE_OFF_MASK, 0x21861); 3294 } 3295 3296 void rtw8851b_dpk_init(struct rtw89_dev *rtwdev) 3297 { 3298 rtw8851b_by_rate_dpd(rtwdev); 3299 } 3300 3301 void rtw8851b_aack(struct rtw89_dev *rtwdev) 3302 { 3303 u32 tmp05, tmpd3, ib[4]; 3304 u32 tmp; 3305 int ret; 3306 int rek; 3307 int i; 3308 3309 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]DO AACK\n"); 3310 3311 tmp05 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK); 3312 tmpd3 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RFREG_MASK); 3313 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0x3); 3314 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x0); 3315 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_ST, 0x0); 3316 3317 for (rek = 0; rek < 4; rek++) { 3318 rtw89_write_rf(rtwdev, RF_PATH_A, RR_AACK, RFREG_MASK, 0x8201e); 3319 rtw89_write_rf(rtwdev, RF_PATH_A, RR_AACK, RFREG_MASK, 0x8201f); 3320 fsleep(100); 3321 3322 ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp, 3323 1, 1000, false, 3324 rtwdev, RF_PATH_A, 0xd0, BIT(16)); 3325 if (ret) 3326 rtw89_warn(rtwdev, "[LCK]AACK timeout\n"); 3327 3328 rtw89_write_rf(rtwdev, RF_PATH_A, RR_VCI, RR_VCI_ON, 0x1); 3329 for (i = 0; i < 4; i++) { 3330 rtw89_write_rf(rtwdev, RF_PATH_A, RR_VCO, RR_VCO_SEL, i); 3331 ib[i] = rtw89_read_rf(rtwdev, RF_PATH_A, RR_IBD, RR_IBD_VAL); 3332 } 3333 rtw89_write_rf(rtwdev, RF_PATH_A, RR_VCI, RR_VCI_ON, 0x0); 3334 3335 if (ib[0] != 0 && ib[1] != 0 && ib[2] != 0 && ib[3] != 0) 3336 break; 3337 } 3338 3339 if (rek != 0) 3340 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]AACK rek = %d\n", rek); 3341 3342 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, tmp05); 3343 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RFREG_MASK, tmpd3); 3344 } 3345 3346 static void _lck_keep_thermal(struct rtw89_dev *rtwdev) 3347 { 3348 struct rtw89_lck_info *lck = &rtwdev->lck; 3349 3350 lck->thermal[RF_PATH_A] = 3351 ewma_thermal_read(&rtwdev->phystat.avg_thermal[RF_PATH_A]); 3352 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 3353 "[LCK] path=%d thermal=0x%x", RF_PATH_A, lck->thermal[RF_PATH_A]); 3354 } 3355 3356 static void rtw8851b_lck(struct rtw89_dev *rtwdev) 3357 { 3358 u32 tmp05, tmp18, tmpd3; 3359 3360 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]DO LCK\n"); 3361 3362 tmp05 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK); 3363 tmp18 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 3364 tmpd3 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RFREG_MASK); 3365 3366 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0x3); 3367 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x0); 3368 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1); 3369 3370 _set_ch(rtwdev, tmp18); 3371 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RFREG_MASK, tmpd3); 3372 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, tmp05); 3373 3374 _lck_keep_thermal(rtwdev); 3375 } 3376 3377 #define RTW8851B_LCK_TH 8 3378 3379 void rtw8851b_lck_track(struct rtw89_dev *rtwdev) 3380 { 3381 struct rtw89_lck_info *lck = &rtwdev->lck; 3382 u8 cur_thermal; 3383 int delta; 3384 3385 cur_thermal = 3386 ewma_thermal_read(&rtwdev->phystat.avg_thermal[RF_PATH_A]); 3387 delta = abs((int)cur_thermal - lck->thermal[RF_PATH_A]); 3388 3389 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 3390 "[LCK] path=%d current thermal=0x%x delta=0x%x\n", 3391 RF_PATH_A, cur_thermal, delta); 3392 3393 if (delta >= RTW8851B_LCK_TH) { 3394 rtw8851b_aack(rtwdev); 3395 rtw8851b_lck(rtwdev); 3396 } 3397 } 3398 3399 void rtw8851b_lck_init(struct rtw89_dev *rtwdev) 3400 { 3401 _lck_keep_thermal(rtwdev); 3402 } 3403 3404 void rtw8851b_rck(struct rtw89_dev *rtwdev) 3405 { 3406 _rck(rtwdev, RF_PATH_A); 3407 } 3408 3409 void rtw8851b_dack(struct rtw89_dev *rtwdev) 3410 { 3411 _dac_cal(rtwdev, false); 3412 } 3413 3414 void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 3415 enum rtw89_chanctx_idx chanctx_idx) 3416 { 3417 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); 3418 u32 tx_en; 3419 3420 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START); 3421 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); 3422 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); 3423 3424 _iqk_init(rtwdev); 3425 _iqk(rtwdev, phy_idx, false, chanctx_idx); 3426 3427 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); 3428 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP); 3429 } 3430 3431 void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 3432 enum rtw89_chanctx_idx chanctx_idx) 3433 { 3434 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); 3435 u32 tx_en; 3436 3437 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START); 3438 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); 3439 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); 3440 3441 _rx_dck(rtwdev, phy_idx, false, chanctx_idx); 3442 3443 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); 3444 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP); 3445 } 3446 3447 void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 3448 enum rtw89_chanctx_idx chanctx_idx) 3449 { 3450 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); 3451 u32 tx_en; 3452 3453 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START); 3454 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); 3455 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); 3456 3457 rtwdev->dpk.is_dpk_enable = true; 3458 rtwdev->dpk.is_dpk_reload_en = false; 3459 _dpk(rtwdev, phy_idx, false, chanctx_idx); 3460 3461 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); 3462 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP); 3463 } 3464 3465 void rtw8851b_dpk_track(struct rtw89_dev *rtwdev) 3466 { 3467 _dpk_track(rtwdev); 3468 } 3469 3470 void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3471 bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx) 3472 { 3473 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); 3474 u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_A, chanctx_idx); 3475 u8 i; 3476 3477 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy); 3478 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); 3479 3480 _tssi_disable(rtwdev, phy); 3481 3482 for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) { 3483 _tssi_set_sys(rtwdev, phy, i, chan); 3484 _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i); 3485 _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i); 3486 _tssi_set_dck(rtwdev, phy, i); 3487 _tssi_set_tmeter_tbl(rtwdev, phy, i, chan); 3488 _tssi_set_dac_gain_tbl(rtwdev, phy, i); 3489 _tssi_slope_cal_org(rtwdev, phy, i, chan); 3490 _tssi_alignment_default(rtwdev, phy, i, true, chan); 3491 _tssi_set_tssi_slope(rtwdev, phy, i); 3492 } 3493 3494 _tssi_enable(rtwdev, phy); 3495 _tssi_set_efuse_to_de(rtwdev, phy, chan); 3496 3497 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); 3498 } 3499 3500 void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3501 const struct rtw89_chan *chan) 3502 { 3503 u8 channel = chan->channel; 3504 u32 i; 3505 3506 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3507 "======>%s phy=%d channel=%d\n", __func__, phy, channel); 3508 3509 _tssi_disable(rtwdev, phy); 3510 3511 for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) { 3512 _tssi_set_sys(rtwdev, phy, i, chan); 3513 _tssi_set_tmeter_tbl(rtwdev, phy, i, chan); 3514 _tssi_slope_cal_org(rtwdev, phy, i, chan); 3515 _tssi_alignment_default(rtwdev, phy, i, true, chan); 3516 } 3517 3518 _tssi_enable(rtwdev, phy); 3519 _tssi_set_efuse_to_de(rtwdev, phy, chan); 3520 } 3521 3522 static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev, 3523 enum rtw89_phy_idx phy, bool enable, 3524 enum rtw89_chanctx_idx chanctx_idx) 3525 { 3526 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); 3527 u8 channel = chan->channel; 3528 3529 rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n", 3530 __func__, channel); 3531 3532 if (enable) 3533 return; 3534 3535 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3536 "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x\n", 3537 __func__, 3538 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT)); 3539 3540 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0); 3541 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 3542 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); 3543 3544 _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan); 3545 3546 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3547 "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x\n", 3548 __func__, 3549 rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT)); 3550 3551 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3552 "======> %s SCAN_END\n", __func__); 3553 } 3554 3555 void rtw8851b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, 3556 enum rtw89_phy_idx phy_idx, 3557 enum rtw89_chanctx_idx chanctx_idx) 3558 { 3559 if (scan_start) 3560 rtw8851b_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx); 3561 else 3562 rtw8851b_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx); 3563 } 3564 3565 static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 3566 enum rtw89_bandwidth bw, bool dav) 3567 { 3568 u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1; 3569 u32 rf_reg18; 3570 3571 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__); 3572 3573 rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK); 3574 if (rf_reg18 == INV_RF_DATA) { 3575 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3576 "[RFK]Invalid RF_0x18 for Path-%d\n", path); 3577 return; 3578 } 3579 rf_reg18 &= ~RR_CFGCH_BW; 3580 3581 switch (bw) { 3582 case RTW89_CHANNEL_WIDTH_5: 3583 case RTW89_CHANNEL_WIDTH_10: 3584 case RTW89_CHANNEL_WIDTH_20: 3585 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M); 3586 break; 3587 case RTW89_CHANNEL_WIDTH_40: 3588 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M); 3589 break; 3590 case RTW89_CHANNEL_WIDTH_80: 3591 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M); 3592 break; 3593 default: 3594 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set CH\n"); 3595 } 3596 3597 rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN | 3598 RR_CFGCH_BW2) & RFREG_MASK; 3599 rf_reg18 |= RR_CFGCH_BW2; 3600 rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18); 3601 3602 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n", 3603 bw, path, reg18_addr, 3604 rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK)); 3605 } 3606 3607 static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3608 enum rtw89_bandwidth bw) 3609 { 3610 _bw_setting(rtwdev, RF_PATH_A, bw, true); 3611 _bw_setting(rtwdev, RF_PATH_A, bw, false); 3612 } 3613 3614 static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val) 3615 { 3616 u32 bak; 3617 u32 tmp; 3618 int ret; 3619 3620 bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK); 3621 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1); 3622 rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val); 3623 3624 ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000, 3625 false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY); 3626 if (ret) 3627 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n"); 3628 3629 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak); 3630 3631 return !!ret; 3632 } 3633 3634 static void _lck_check(struct rtw89_dev *rtwdev) 3635 { 3636 u32 tmp; 3637 3638 if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) { 3639 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n"); 3640 3641 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1); 3642 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0); 3643 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1); 3644 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0); 3645 } 3646 3647 udelay(10); 3648 3649 if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) { 3650 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n"); 3651 3652 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1); 3653 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 3654 _set_s0_arfc18(rtwdev, tmp); 3655 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0); 3656 } 3657 3658 if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) { 3659 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n"); 3660 3661 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK); 3662 rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp); 3663 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK); 3664 rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp); 3665 3666 rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1); 3667 rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0); 3668 rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3); 3669 rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0); 3670 3671 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1); 3672 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 3673 _set_s0_arfc18(rtwdev, tmp); 3674 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0); 3675 3676 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n", 3677 rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK), 3678 rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK)); 3679 } 3680 } 3681 3682 static void _set_ch(struct rtw89_dev *rtwdev, u32 val) 3683 { 3684 bool timeout; 3685 3686 timeout = _set_s0_arfc18(rtwdev, val); 3687 if (!timeout) 3688 _lck_check(rtwdev); 3689 } 3690 3691 static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 3692 u8 central_ch, bool dav) 3693 { 3694 u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1; 3695 bool is_2g_ch = central_ch <= 14; 3696 u32 rf_reg18; 3697 3698 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__); 3699 3700 rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK); 3701 rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | 3702 RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH); 3703 rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch); 3704 3705 if (!is_2g_ch) 3706 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) | 3707 FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G); 3708 3709 rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN | 3710 RR_CFGCH_BW2) & RFREG_MASK; 3711 rf_reg18 |= RR_CFGCH_BW2; 3712 3713 if (path == RF_PATH_A && dav) 3714 _set_ch(rtwdev, rf_reg18); 3715 else 3716 rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18); 3717 3718 rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0); 3719 rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1); 3720 3721 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3722 "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n", 3723 central_ch, path, reg18_addr, 3724 rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK)); 3725 } 3726 3727 static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch) 3728 { 3729 _ch_setting(rtwdev, RF_PATH_A, central_ch, true); 3730 _ch_setting(rtwdev, RF_PATH_A, central_ch, false); 3731 } 3732 3733 static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw, 3734 enum rtw89_rf_path path) 3735 { 3736 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1); 3737 rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12); 3738 3739 if (bw == RTW89_CHANNEL_WIDTH_20) 3740 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b); 3741 else if (bw == RTW89_CHANNEL_WIDTH_40) 3742 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13); 3743 else if (bw == RTW89_CHANNEL_WIDTH_80) 3744 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb); 3745 else 3746 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3); 3747 3748 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n", path, 3749 rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB)); 3750 3751 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0); 3752 } 3753 3754 static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3755 enum rtw89_bandwidth bw) 3756 { 3757 u8 kpath, path; 3758 3759 kpath = _kpath(rtwdev, phy); 3760 3761 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 3762 if (!(kpath & BIT(path))) 3763 continue; 3764 3765 _set_rxbb_bw(rtwdev, bw, path); 3766 } 3767 } 3768 3769 static void rtw8851b_ctrl_bw_ch(struct rtw89_dev *rtwdev, 3770 enum rtw89_phy_idx phy, u8 central_ch, 3771 enum rtw89_band band, enum rtw89_bandwidth bw) 3772 { 3773 _ctrl_ch(rtwdev, central_ch); 3774 _ctrl_bw(rtwdev, phy, bw); 3775 _rxbb_bw(rtwdev, phy, bw); 3776 } 3777 3778 void rtw8851b_set_channel_rf(struct rtw89_dev *rtwdev, 3779 const struct rtw89_chan *chan, 3780 enum rtw89_phy_idx phy_idx) 3781 { 3782 rtw8851b_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type, 3783 chan->band_width); 3784 } 3785