1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2024 Realtek Corporation
3 */
4
5 #include "coex.h"
6 #include "debug.h"
7 #include "fw.h"
8 #include "mac.h"
9 #include "phy.h"
10 #include "reg.h"
11 #include "rtw8852bt.h"
12 #include "rtw8852bt_rfk.h"
13 #include "rtw8852bt_rfk_table.h"
14 #include "rtw8852b_common.h"
15
/* Algorithm/version tags reported in RFK debug logs. */
#define RTW8852BT_RXDCK_VER 0x1
#define RTW8852BT_IQK_VER 0x2a
#define RTW8852BT_SS 2			/* spatial streams / RF paths */
#define RTW8852BT_TSSI_PATH_NR 2	/* TSSI-capable RF paths */
#define RTW8852BT_DPK_VER 0x06
#define DPK_RF_PATH_MAX_8852BT 2	/* RF paths covered by DPK */

#define _TSSI_DE_MASK GENMASK(21, 12)	/* TSSI DE field inside its register */
#define DPK_TXAGC_LOWER 0x2e		/* lower TXAGC bound used by DPK */
#define DPK_TXAGC_UPPER 0x3f		/* upper TXAGC bound used by DPK */
#define DPK_TXAGC_INVAL 0xff		/* sentinel for "no valid TXAGC" */
#define RFREG_MASKRXBB 0x003e0		/* RF reg field mask (RX BB, per name) */
#define RFREG_MASKMODE 0xf0000		/* RF reg field mask (mode, per name) */
29
/*
 * RF front-end operating modes; presumably written to the RF mode field
 * (RR_MOD is used with RR_MOD_V_RX elsewhere in this file).
 */
enum rf_mode {
	RF_SHUT_DOWN = 0x0,
	RF_STANDBY = 0x1,
	RF_TX = 0x2,
	RF_RX = 0x3,
	RF_TXIQK = 0x4,		/* TX IQ calibration */
	RF_DPK = 0x5,		/* digital pre-distortion calibration */
	RF_RXK1 = 0x6,		/* RX calibration step 1 */
	RF_RXK2 = 0x7,		/* RX calibration step 2 */
};
40
/*
 * One-shot DPK command IDs.  NOTE(review): values appear to be opcodes sent
 * to the KIP/DPK engine; D_* variants look like the "direct" command set —
 * confirm against vendor documentation.
 */
enum rtw8852bt_dpk_id {
	LBK_RXIQK = 0x06,
	SYNC = 0x10,
	MDPK_IDL = 0x11,
	MDPK_MPA = 0x12,
	GAIN_LOSS = 0x13,
	GAIN_CAL = 0x14,
	DPK_RXAGC = 0x15,
	KIP_PRESET = 0x16,
	KIP_RESTORE = 0x17,
	DPK_TXAGC = 0x19,
	D_KIP_PRESET = 0x28,
	D_TXAGC = 0x29,
	D_RXAGC = 0x2a,
	D_SYNC = 0x2b,
	D_GAIN_LOSS = 0x2c,
	D_MDPK_IDL = 0x2d,
	D_GAIN_NORM = 0x2f,
	D_KIP_THERMAL = 0x30,
	D_KIP_RESTORE = 0x31
};
62
/* State-machine steps for the DPK AGC loop. */
enum dpk_agc_step {
	DPK_AGC_STEP_SYNC_DGAIN,
	DPK_AGC_STEP_GAIN_ADJ,
	DPK_AGC_STEP_GAIN_LOSS_IDX,
	DPK_AGC_STEP_GL_GT_CRITERION,	/* gain loss greater than criterion */
	DPK_AGC_STEP_GL_LT_CRITERION,	/* gain loss less than criterion */
	DPK_AGC_STEP_SET_TX_GAIN,
};
71
/*
 * IQK sub-calibration command IDs.  Naming: FLOK = TX LO-leakage cal,
 * NB = narrow-band variant, A_/G_ = per-band (presumably 5G/2G — confirm).
 */
enum rtw8852bt_iqk_type {
	ID_TXAGC = 0x0,
	ID_FLOK_COARSE = 0x1,
	ID_FLOK_FINE = 0x2,
	ID_TXK = 0x3,
	ID_RXAGC = 0x4,
	ID_RXK = 0x5,
	ID_NBTXK = 0x6,
	ID_NBRXK = 0x7,
	ID_FLOK_VBUFFER = 0x8,
	ID_A_FLOK_COARSE = 0x9,
	ID_G_FLOK_COARSE = 0xa,
	ID_A_FLOK_FINE = 0xb,
	ID_G_FLOK_FINE = 0xc,
	ID_IQK_RESTORE = 0x10,
};
88
/* ADC clock selections (names encode the sample rate in MHz). */
enum adc_ck {
	ADC_NA = 0,
	ADC_480M = 1,
	ADC_960M = 2,
	ADC_1920M = 3,
};
95
/* DAC clock selections (names encode the sample rate in MHz). */
enum dac_ck {
	DAC_40M = 0,
	DAC_80M = 1,
	DAC_120M = 2,
	DAC_160M = 3,
	DAC_240M = 4,
	DAC_320M = 5,
	DAC_480M = 6,
	DAC_960M = 7,
};
106
/*
 * Per-path register address tables.  Index 0 is RF path A (0x5xxx block),
 * index 1 is RF path B (0x7xxx block).
 */
static const u32 _tssi_trigger[RTW8852BT_TSSI_PATH_NR] = {0x5820, 0x7820};
static const u32 _tssi_cw_rpt_addr[RTW8852BT_TSSI_PATH_NR] = {0x1c18, 0x3c18};
static const u32 _tssi_cw_default_addr[RTW8852BT_TSSI_PATH_NR][4] = {
	{0x5634, 0x5630, 0x5630, 0x5630},
	{0x7634, 0x7630, 0x7630, 0x7630} };
/* Field masks matching each column of _tssi_cw_default_addr. */
static const u32 _tssi_cw_default_mask[4] = {
	0x000003ff, 0x3ff00000, 0x000ffc00, 0x000003ff};
/* TSSI DE (offset) registers per rate class / bandwidth, per path. */
static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852BT] = {0x5858, 0x7858};
static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852BT] = {0x5860, 0x7860};
static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852BT] = {0x5838, 0x7838};
static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852BT] = {0x5840, 0x7840};
static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852BT] = {0x5848, 0x7848};
static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852BT] = {0x5850, 0x7850};
static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852BT] = {0x5828, 0x7828};
static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852BT] = {0x5830, 0x7830};

/* Registers saved/restored around RF calibration (see _rfk_backup_*()). */
static const u32 rtw8852bt_backup_bb_regs[] = {0x2344, 0x5800, 0x7800, 0x0704};
static const u32 rtw8852bt_backup_rf_regs[] = {
	0xde, 0xdf, 0x8b, 0x90, 0x97, 0x85, 0x5, 0x10005};
static const u32 rtw8852bt_backup_kip_regs[] = {
	0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec,
	0x823c, 0x8224, 0x8220, 0xc1d4, 0xc1d8, 0xc1c4, 0xc1ec};

#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852bt_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852bt_backup_rf_regs)
#define BACKUP_KIP_REGS_NR ARRAY_SIZE(rtw8852bt_backup_kip_regs)
133
/* Trigger the RF thermal sensor and record the reading for DPK slot @kidx. */
static void _rfk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	/* Pulse the thermal-meter trigger (1 -> 0 -> 1) to start a measurement. */
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x0);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);

	/* Allow the sensor to settle before reading the result. */
	udelay(200);

	dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, path, RR_TM, RR_TM_VAL);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
		    dpk->bp[path][kidx].ther_dpk);
}
149
/* Snapshot the BB registers listed in rtw8852bt_backup_bb_regs[]. */
static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
	u32 addr;
	u32 i;

	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
		addr = rtw8852bt_backup_bb_regs[i];
		backup_bb_reg_val[i] = rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]backup bb reg : %x, value =%x\n",
			    addr, backup_bb_reg_val[i]);
	}
}
162
/* Snapshot the KIP registers listed in rtw8852bt_backup_kip_regs[]. */
static void _rfk_backup_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[])
{
	u32 addr;
	u32 i;

	for (i = 0; i < BACKUP_KIP_REGS_NR; i++) {
		addr = rtw8852bt_backup_kip_regs[i];
		backup_kip_reg_val[i] = rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
			    addr, backup_kip_reg_val[i]);
	}
}
175
176 static
_rfk_backup_rf_reg(struct rtw89_dev * rtwdev,u32 backup_rf_reg_val[],u8 rf_path)177 void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], u8 rf_path)
178 {
179 u32 i;
180
181 for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
182 backup_rf_reg_val[i] =
183 rtw89_read_rf(rtwdev, rf_path, rtw8852bt_backup_rf_regs[i],
184 RFREG_MASK);
185
186 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup RF S%d 0x%x = %x\n",
187 rf_path, rtw8852bt_backup_rf_regs[i], backup_rf_reg_val[i]);
188 }
189 }
190
_rfk_reload_bb_reg(struct rtw89_dev * rtwdev,const u32 backup_bb_reg_val[])191 static void _rfk_reload_bb_reg(struct rtw89_dev *rtwdev, const u32 backup_bb_reg_val[])
192 {
193 u32 i;
194
195 for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
196 rtw89_phy_write32_mask(rtwdev, rtw8852bt_backup_bb_regs[i],
197 MASKDWORD, backup_bb_reg_val[i]);
198 rtw89_debug(rtwdev, RTW89_DBG_RFK,
199 "[RFK]restore bb reg : %x, value =%x\n",
200 rtw8852bt_backup_bb_regs[i], backup_bb_reg_val[i]);
201 }
202 }
203
/* Write the previously saved KIP register values back. */
static void _rfk_reload_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[])
{
	u32 addr;
	u32 i;

	for (i = 0; i < BACKUP_KIP_REGS_NR; i++) {
		addr = rtw8852bt_backup_kip_regs[i];
		rtw89_phy_write32_mask(rtwdev, addr, MASKDWORD,
				       backup_kip_reg_val[i]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]restore kip reg : %x, value =%x\n",
			    addr, backup_kip_reg_val[i]);
	}
}
217
/* Write the previously saved RF register values of @rf_path back. */
static void _rfk_reload_rf_reg(struct rtw89_dev *rtwdev,
			       const u32 backup_rf_reg_val[], u8 rf_path)
{
	u32 addr;
	u32 i;

	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
		addr = rtw8852bt_backup_rf_regs[i];
		rtw89_write_rf(rtwdev, rf_path, addr, RFREG_MASK,
			       backup_rf_reg_val[i]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path,
			    addr, backup_rf_reg_val[i]);
	}
}
232
_kpath(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx)233 static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
234 {
235 u8 val;
236
237 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,PHY%d\n",
238 rtwdev->dbcc_en, phy_idx);
239
240 if (!rtwdev->dbcc_en) {
241 val = RF_AB;
242 } else {
243 if (phy_idx == RTW89_PHY_0)
244 val = RF_A;
245 else
246 val = RF_B;
247 }
248 return val;
249 }
250
/*
 * Force (or release) the TX DAC clock of @path.  The clock is always gated
 * first; when @force is set it is re-enabled at rate @ck.
 * path << 13 (0x2000) selects the path-B copy of the register.
 */
static
void _txck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force,
		 enum dac_ck ck)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0);

	if (!force)
		return;

	/* Program the rate while gated, then re-enable. */
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1);
}
263
264 static
_rxck_force(struct rtw89_dev * rtwdev,enum rtw89_rf_path path,bool force,enum adc_ck ck)265 void _rxck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force,
266 enum adc_ck ck)
267 {
268 u32 bw = 0;
269
270 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);
271
272 if (!force)
273 return;
274
275 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
276 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);
277
278 switch (ck) {
279 case ADC_480M:
280 bw = RTW89_CHANNEL_WIDTH_40;
281 break;
282 case ADC_960M:
283 bw = RTW89_CHANNEL_WIDTH_80;
284 break;
285 case ADC_1920M:
286 bw = RTW89_CHANNEL_WIDTH_160;
287 break;
288 default:
289 rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s==>Invalid ck", __func__);
290 break;
291 }
292
293 rtw8852bx_adc_cfg(rtwdev, bw, path);
294 }
295
/*
 * Put the BB/AFE into the state required for RF calibration: clocks forced
 * on for both paths, RX/TX data-path blocks reset, CCA disabled.  Undone by
 * _rfk_bb_afe_restore().  The write order follows the vendor sequence; do
 * not reorder.
 */
static void _rfk_bb_afe_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	/* ADC FIFO and clock forcing for both paths */
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x1ffffff);
	rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x3ff);
	rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, 0x1);
	/* Disable PD hit / CCA so calibration signals are not treated as RX */
	rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, B_DAC_CLK_IDX, 0x1);

	/* Force full-rate converter clocks on both paths */
	_txck_force(rtwdev, RF_PATH_A, true, DAC_960M);
	_txck_force(rtwdev, RF_PATH_B, true, DAC_960M);
	_rxck_force(rtwdev, RF_PATH_A, true, ADC_1920M);
	_rxck_force(rtwdev, RF_PATH_B, true, ADC_1920M);

	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
			       B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x5);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	/* Analog parameter toggles with settle delays (vendor sequence) */
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x3333);

	/* Force TX power ready so TX can run during calibration */
	rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN, MASKLWORD, 0x0000);
	rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TXAGC_TH, MASKLWORD, 0x0000);
}
339
/*
 * Undo _rfk_bb_afe_setting(): release the forced clocks, re-enable normal
 * data-path operation and restore TX power control for both paths.
 */
static void _rfk_bb_afe_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x0);
	/* 0x63 is the normal-operation value for the T2R force field */
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x63);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
			       B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0000);
	rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);

	/* Release TX power force; pulse the TX power reset (0x1 -> 0x2) */
	rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, 0x2);
}
368
/* Trigger one RX DC-offset calibration on @path (RR_DCK_LV pulsed 0 -> 1). */
static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
	/* Give the hardware time to finish the DCK before the caller proceeds */
	mdelay(1);
}
377
/*
 * Run RX DC-offset calibration on every RF path: save RF state, force RX
 * mode, trigger the DCK, then restore the fine-tune value and RF state.
 */
static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 path, dck_tune;
	u32 rf_reg5;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, CV : 0x%x) ******\n",
		    RTW8852BT_RXDCK_VER, rtwdev->hal.cv);

	for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
		/* Save RSV1 and the current DCK fine-tune value */
		rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
		dck_tune = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);

		/*
		 * NOTE(review): B_P0_TSSI_TRK_EN is written 0x1 before and 0x0
		 * after the DCK — looks like it pauses TSSI tracking around the
		 * calibration; confirm the polarity against the TSSI code.
		 */
		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x1);

		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
		_set_rx_dck(rtwdev, phy, path);
		/* Restore fine-tune and RSV1 */
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
		rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x0);
	}
}
409
/*
 * RC calibration for @path: trigger the RCK engine, poll for completion,
 * then latch the resulting RR_RCKC_CA code back into the register.
 */
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u32 rf_reg5;
	u32 rck_val;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

	/* Save RSV1, then put the path into RX mode for the calibration */
	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	/* RCK trigger */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);

	/* Poll RR_RCKS bit 3 for done; timeout is logged, not fatal */
	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30,
				       false, rtwdev, path, RR_RCKS, BIT(3));

	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n",
		    rck_val, ret);

	/* Write the calibrated code back and restore RSV1 */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK));
}
444
/*
 * Digital-die RC calibration: run the DRCK engine, latch its result and
 * program the returned code as the manual DRCK value.
 */
static void _drck(struct rtw89_dev *rtwdev)
{
	u32 rck_d;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n");
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1);

	/* Wait for the DRCK done flag; timeout is logged, not fatal */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false,
				       rtwdev, R_DRCK_RES, B_DRCK_POL);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");

	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x0);
	/* Pulse the latch to capture the result */
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0);

	/* Read the calibrated code (bits 14:10) and apply it manually */
	rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, 0x7c00);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, rck_d);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n",
		    rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
}
472
/* Read back the S0 (path A) DACK results into rtwdev->dack. */
static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);

	/* 16 MSBK entries per channel; select index via DCOF0/DCOF8 */
	for (i = 0; i < 0x10; i++) {
		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
		dack->msbk_d[0][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0);

		rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
		dack->msbk_d[0][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1);
	}

	dack->biask_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00);
	dack->biask_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01);

	dack->dadck_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00);
	dack->dadck_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01);
}
500
/* Read back the S1 (path B) DACK results into rtwdev->dack. */
static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);

	/* 16 MSBK entries per channel; select index via DACK10/DACK11 */
	for (i = 0; i < 0x10; i++) {
		rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
		dack->msbk_d[1][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK10S);

		rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
		dack->msbk_d[1][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK11S);
	}

	dack->biask_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, B_DACK_BIAS10);
	dack->biask_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, B_DACK_BIAS11);

	dack->dadck_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, B_DACK_DADCK10);
	dack->dadck_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, B_DACK_DADCK11);
}
528
529 static
_dack_reset(struct rtw89_dev * rtwdev,enum rtw89_rf_path path)530 void _dack_reset(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
531 {
532 if (path == RF_PATH_A) {
533 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x0);
534 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x1);
535 } else {
536 rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, 0x0);
537 rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, 0x1);
538 }
539 }
540
541 static
_dack_reload_by_path(struct rtw89_dev * rtwdev,u8 path,u8 index)542 void _dack_reload_by_path(struct rtw89_dev *rtwdev, u8 path, u8 index)
543 {
544 struct rtw89_dack_info *dack = &rtwdev->dack;
545 u32 tmp, tmp_offset, tmp_reg;
546 u32 idx_offset, path_offset;
547 u8 i;
548
549 if (index == 0)
550 idx_offset = 0;
551 else
552 idx_offset = 0x14;
553
554 if (path == RF_PATH_A)
555 path_offset = 0;
556 else
557 path_offset = 0x28;
558
559 tmp_offset = idx_offset + path_offset;
560
561 rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_RST, 0x1);
562 rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_RST, 0x1);
563 rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_RST, 0x1);
564 rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_RST, 0x1);
565
566 /* msbk_d: 15/14/13/12 */
567 tmp = 0x0;
568 for (i = 0; i < 4; i++)
569 tmp |= dack->msbk_d[path][index][i + 12] << (i * 8);
570 tmp_reg = 0xc200 + tmp_offset;
571 rtw89_phy_write32(rtwdev, tmp_reg, tmp);
572 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
573 rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
574
575 /* msbk_d: 11/10/9/8 */
576 tmp = 0x0;
577 for (i = 0; i < 4; i++)
578 tmp |= dack->msbk_d[path][index][i + 8] << (i * 8);
579 tmp_reg = 0xc204 + tmp_offset;
580 rtw89_phy_write32(rtwdev, tmp_reg, tmp);
581 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
582 rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
583
584 /* msbk_d: 7/6/5/4 */
585 tmp = 0x0;
586 for (i = 0; i < 4; i++)
587 tmp |= dack->msbk_d[path][index][i + 4] << (i * 8);
588 tmp_reg = 0xc208 + tmp_offset;
589 rtw89_phy_write32(rtwdev, tmp_reg, tmp);
590 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
591 rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
592
593 /* msbk_d: 3/2/1/0 */
594 tmp = 0x0;
595 for (i = 0; i < 4; i++)
596 tmp |= dack->msbk_d[path][index][i] << (i * 8);
597 tmp_reg = 0xc20c + tmp_offset;
598 rtw89_phy_write32(rtwdev, tmp_reg, tmp);
599 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
600 rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
601
602 /* dadak_d/biask_d */
603 tmp = (dack->biask_d[path][index] << 22) |
604 (dack->dadck_d[path][index] << 14);
605 tmp_reg = 0xc210 + tmp_offset;
606 rtw89_phy_write32(rtwdev, tmp_reg, tmp);
607 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
608 rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
609
610 /* enable DACK result from reg */
611 rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL + tmp_offset, B_DACKN0_EN, 0x1);
612 }
613
614 static
_dack_reload(struct rtw89_dev * rtwdev,enum rtw89_rf_path path)615 void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
616 {
617 u8 i;
618
619 for (i = 0; i < 2; i++)
620 _dack_reload_by_path(rtwdev, path, i);
621 }
622
_dack_s0_poll(struct rtw89_dev * rtwdev)623 static bool _dack_s0_poll(struct rtw89_dev *rtwdev)
624 {
625 if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
626 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 ||
627 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
628 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
629 return false;
630
631 return true;
632 }
633
/*
 * Run DAC calibration on S0 (path A): force a 160M DAC clock, start the
 * engine, poll for completion, then read back and reload the results.
 */
static void _dack_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	_txck_force(rtwdev, RF_PATH_A, true, DAC_160M);

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, 0x0);
	udelay(100);
	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_VAL, 0x30);
	rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_VAL, 0x30);

	_dack_reset(rtwdev, RF_PATH_A);

	/* Start the DACK engine */
	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1);
	udelay(1);

	dack->msbk_timeout[0] = false;

	ret = read_poll_timeout_atomic(_dack_s0_poll, done, done,
				       1, 20000, false, rtwdev);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n");
		dack->msbk_timeout[0] = true;
	}

	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0);

	/* Release the forced DAC clock, then save and reload the results */
	_txck_force(rtwdev, RF_PATH_A, false, DAC_960M);
	_dack_backup_s0(rtwdev);
	_dack_reload(rtwdev, RF_PATH_A);

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
}
671
_dack_s1_poll(struct rtw89_dev * rtwdev)672 static bool _dack_s1_poll(struct rtw89_dev *rtwdev)
673 {
674 if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 ||
675 rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 ||
676 rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 ||
677 rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0)
678 return false;
679
680 return true;
681 }
682
/*
 * Run DAC calibration on S1 (path B): force a 160M DAC clock, start the
 * engine, poll for completion, then read back and reload the results.
 */
static void _dack_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	_txck_force(rtwdev, RF_PATH_B, true, DAC_160M);

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, 0x0);
	udelay(100);
	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_VAL, 0x30);
	rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_VAL, 0x30);

	_dack_reset(rtwdev, RF_PATH_B);

	/* Start the DACK engine */
	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1);
	udelay(1);

	dack->msbk_timeout[1] = false;

	ret = read_poll_timeout_atomic(_dack_s1_poll, done, done,
				       1, 10000, false, rtwdev);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n");
		dack->msbk_timeout[1] = true;
	}

	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0);

	/* Release the forced DAC clock, then save and reload the results */
	_txck_force(rtwdev, RF_PATH_B, false, DAC_960M);
	_dack_backup_s1(rtwdev);
	_dack_reload(rtwdev, RF_PATH_B);

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}
720
/* Run DAC calibration on both RF paths in sequence. */
static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
	_dack_s1(rtwdev);
}
726
_dack_dump(struct rtw89_dev * rtwdev)727 static void _dack_dump(struct rtw89_dev *rtwdev)
728 {
729 struct rtw89_dack_info *dack = &rtwdev->dack;
730 u8 i;
731 u8 t;
732
733 rtw89_debug(rtwdev, RTW89_DBG_RFK,
734 "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
735 dack->addck_d[0][0], dack->addck_d[0][1]);
736 rtw89_debug(rtwdev, RTW89_DBG_RFK,
737 "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
738 dack->addck_d[1][0], dack->addck_d[1][1]);
739 rtw89_debug(rtwdev, RTW89_DBG_RFK,
740 "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
741 dack->dadck_d[0][0], dack->dadck_d[0][1]);
742 rtw89_debug(rtwdev, RTW89_DBG_RFK,
743 "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
744 dack->dadck_d[1][0], dack->dadck_d[1][1]);
745 rtw89_debug(rtwdev, RTW89_DBG_RFK,
746 "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
747 dack->biask_d[0][0], dack->biask_d[0][1]);
748 rtw89_debug(rtwdev, RTW89_DBG_RFK,
749 "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
750 dack->biask_d[1][0], dack->biask_d[1][1]);
751
752 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
753 for (i = 0; i < 0x10; i++) {
754 t = dack->msbk_d[0][0][i];
755 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
756 }
757
758 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
759 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
760 t = dack->msbk_d[0][1][i];
761 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
762 }
763
764 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
765 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
766 t = dack->msbk_d[1][0][i];
767 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
768 }
769
770 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
771 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
772 t = dack->msbk_d[1][1][i];
773 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
774 }
775 }
776
/*
 * Run ADC DC-offset calibration (ADDCK) on both paths and store the raw
 * results in rtwdev->dack.  The S0 and S1 halves follow the same vendor
 * sequence: analog setup, trigger pulse, poll done, read back I/Q results.
 */
static void _addck_ori(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	/* --- S0 (path A) --- */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_MAN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);

	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	udelay(100);

	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
	/* Trigger pulse (1 -> 0) */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x0);
	udelay(1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);
	dack->addck_timeout[0] = false;

	/* Poll the S0 done flag; timeout is recorded but not fatal */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false,
				       rtwdev, R_ADDCKR0, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
		dack->addck_timeout[0] = true;
	}

	/* Restore S0 analog state and capture the I/Q results */
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
	dack->addck_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0);
	dack->addck_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);

	/* --- S1 (path B), same sequence on the path-B registers --- */
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);

	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	udelay(100);

	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x0);
	udelay(1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);
	dack->addck_timeout[1] = false;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false,
				       rtwdev, R_ADDCKR1, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
		dack->addck_timeout[1] = true;
	}

	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
	dack->addck_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A0);
	dack->addck_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A1);

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}
862
/*
 * Program the saved ADDCK results into the manual-reload registers and
 * switch both paths to use the reloaded values (RLS = 0x3).
 */
static void _addck_reload(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	/* S0: note [0][0] feeds RL1 and [0][1] feeds RL0 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, dack->addck_d[0][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, dack->addck_d[0][1]);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3);

	/* S1 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1, dack->addck_d[1][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0, dack->addck_d[1][1]);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3);
}
877
/* Disable manual DACK compensation: turn off the reloaded ADDCK values on
 * both paths and switch off all four DACK control blocks.
 */
static void _dack_manual_off(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x0);

	rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL, B_DACKN0_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DACKN1_CTL, B_DACKN1_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DACKN2_CTL, B_DACKN2_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DACKN3_CTL, B_DACKN3_ON, 0x0);
}
888
/* Run the full DAC calibration (DACK) sequence: DRCK, ADDCK on both paths
 * with the 960M ADC clock forced on, reload of the ADDCK results, then the
 * DACK itself followed by a result dump. dack->dack_done is cleared on
 * entry and set on completion.
 *
 * NOTE(review): @force is currently unused in this function.
 */
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	dack->dack_done = false;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");

	_drck(rtwdev);
	_dack_manual_off(rtwdev);
	/* RR_RSV1 is cleared for the duration of the calibration and set
	 * back to 0x1 below once DACK has finished.
	 */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
	/* force the 960M RX ADC clock on both paths while measuring */
	_rxck_force(rtwdev, RF_PATH_A, true, ADC_960M);
	_rxck_force(rtwdev, RF_PATH_B, true, ADC_960M);
	_addck_ori(rtwdev);

	_rxck_force(rtwdev, RF_PATH_A, false, ADC_960M);
	_rxck_force(rtwdev, RF_PATH_B, false, ADC_960M);
	_addck_reload(rtwdev);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);

	_dack(rtwdev);
	_dack_dump(rtwdev);
	dack->dack_done = true;
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RFREG_MASK, 0x1);

	dack->dack_cnt++;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}
922
/* Wait for a one-shot NCTL calibration command to complete.
 *
 * First polls R_RFK_ST byte 0 for the 0x55 handshake, then polls the
 * report-ready flag in R_RPT_COM. Only a timeout on the second poll marks
 * the calibration as "not ready"; a first-stage timeout is just logged.
 * The NCTL handshake byte is cleared before returning.
 *
 * NOTE(review): @path and @ktype are unused here.
 *
 * Returns true when the calibration report did not become ready.
 */
static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
{
	bool notready = false;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       10, 8200, false,
				       rtwdev, R_RFK_ST, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL1 IQK timeout!!!\n");

	udelay(10);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
				       10, 400, false,
				       rtwdev, R_RPT_COM, B_RPT_COM_RDY);
	if (ret) {
		notready = true;
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL2 IQK timeout!!!\n");
	}

	udelay(10);
	/* clear the handshake byte for the next NCTL command */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);

	return notready;
}
950
_iqk_one_shot(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx,u8 path,u8 ktype)951 static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
952 u8 path, u8 ktype)
953 {
954 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
955 u32 iqk_cmd;
956 bool fail;
957
958 switch (ktype) {
959 case ID_TXAGC:
960 iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
961 break;
962 case ID_FLOK_COARSE:
963 iqk_cmd = 0x108 | (1 << (4 + path));
964 break;
965 case ID_FLOK_FINE:
966 iqk_cmd = 0x208 | (1 << (4 + path));
967 break;
968 case ID_FLOK_VBUFFER:
969 iqk_cmd = 0x308 | (1 << (4 + path));
970 break;
971 case ID_TXK:
972 iqk_cmd = 0x008 | (1 << (path + 4)) |
973 (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
974 break;
975 case ID_RXAGC:
976 iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
977 break;
978 case ID_RXK:
979 iqk_cmd = 0x008 | (1 << (path + 4)) |
980 (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
981 break;
982 case ID_NBTXK:
983 iqk_cmd = 0x408 | (1 << (4 + path));
984 break;
985 case ID_NBRXK:
986 iqk_cmd = 0x608 | (1 << (4 + path));
987 break;
988 default:
989 return false;
990 }
991
992 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s, iqk_cmd = %x\n",
993 __func__, iqk_cmd + 1);
994
995 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
996 fail = _iqk_check_cal(rtwdev, path, ktype);
997
998 return fail;
999 }
1000
/* Configure the RF front end of @path for TX IQK according to the band
 * recorded in iqk_info->iqk_band[]. Bands other than 2G/5G are ignored.
 */
static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		/* zero TX attenuation, select the 2G LOK LUT entry and put
		 * the path into IQK mode with the initial TX gain indexes
		 */
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x00);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x5);
		udelay(1);
		break;
	case RTW89_BAND_5G:
		/* 5G uses a different bias/LUT entry and track range */
		rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x80);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x4);
		udelay(1);
		break;
	default:
		break;
	}
}
1032
/* 2G LO leakage calibration (LOK) for @path.
 *
 * Runs two rounds of alternating coarse / vbuffer fine one-shot commands,
 * each at a specific TX gain (RR_TXIG_TG) and KIP IQ swing setting. The
 * NCTL command word is written directly (0xX19 + path offset) rather than
 * through _iqk_one_shot(). Always returns false (timeouts are logged by
 * _iqk_check_cal() but not propagated).
 */
static bool _iqk_2g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	/* round 1: coarse LOK at TX gain 0x0 */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000119 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 1: vbuffer LOK at TX gain 0x12 */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 2: coarse again (different NCTL command, 0x219) */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000219 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 2: vbuffer fine */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	return false;
}
1077
/* 5G LO leakage calibration (LOK) for @path.
 *
 * Same two-round coarse/vbuffer structure as _iqk_2g_lok(); the sequences
 * are currently identical for both bands. Always returns false.
 */
static bool _iqk_5g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	/* round 1: coarse LOK at TX gain 0x0 */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000119 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 1: vbuffer LOK at TX gain 0x12 */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 2: coarse again (NCTL command 0x219) */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000219 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 2: vbuffer fine */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	return false;
}
1121
/* 2G TX IQK for @path.
 *
 * Sweeps four TX gain groups; for each group it programs the RF TX gain,
 * selects the CFIR LUT entry and runs a narrow-band TXK one-shot, caching
 * the NB CFIR result. Unless narrow-band-only IQK is requested it then
 * runs the wide-band TXK for the group. The final kfail check reflects
 * only the last one-shot executed.
 *
 * Returns true on calibration failure; in that case the cached NB TX CFIR
 * is reset to the 0x40000002 default and the TX CFIR result invalidated.
 */
static bool _iqk_2g_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	/* per-group TX gain settings and corresponding KIP IQ swing values */
	static const u32 g_power_range[4] = {0x0, 0x0, 0x0, 0x0};
	static const u32 g_track_range[4] = {0x4, 0x4, 0x6, 0x6};
	static const u32 g_gain_bb[4] = {0x08, 0x0e, 0x08, 0x0e};
	static const u32 g_itqt[4] = {0x09, 0x12, 0x1b, 0x24};
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool notready = false;
	bool kfail = false;
	u8 gp;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	for (gp = 0x0; gp < 0x4; gp++) {
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
			       g_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
			       g_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
			       g_gain_bb[gp]);
		/* select CFIR LUT entry for this gain group */
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000100, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000010, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000004, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000003, gp);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
				       0x009);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, g_itqt[gp]);
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);

		if (iqk_info->is_nbiqk)
			break;

		/* wide-band TXK at the same IQ swing */
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, g_itqt[gp]);
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n",
			    path, gp, 1 << path, iqk_info->nb_txcfir[path]);
	}

	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	if (kfail) {
		iqk_info->nb_txcfir[path] = 0x40000002;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_TXCFIR, 0x0);
	}

	return kfail;
}
1182
/* 5G TX IQK for @path.
 *
 * Same four-gain-group sweep as _iqk_2g_tx(), with 5G-specific gain
 * tables. Additionally writes the full KIP_IQP dword with the itqt value
 * before the per-field IQSW write. Returns true on calibration failure,
 * resetting the cached NB TX CFIR to its default.
 */
static bool _iqk_5g_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	/* per-group TX gain settings and corresponding KIP IQ swing values */
	static const u32 a_power_range[4] = {0x0, 0x0, 0x0, 0x0};
	static const u32 a_track_range[4] = {0x3, 0x3, 0x6, 0x6};
	static const u32 a_gain_bb[4] = {0x08, 0x10, 0x08, 0x0e};
	static const u32 a_itqt[4] = {0x09, 0x12, 0x1b, 0x24};
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool notready = false;
	bool kfail = false;
	u8 gp;

	for (gp = 0x0; gp < 0x4; gp++) {
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);

		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, a_itqt[gp]);
		/* select CFIR LUT entry for this gain group */
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000100, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000010, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000004, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000003, gp);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
				       0x009);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, a_itqt[gp]);

		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);

		if (iqk_info->is_nbiqk)
			break;

		/* wide-band TXK at the same IQ swing */
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, a_itqt[gp]);
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n",
			    path, gp, 1 << path, iqk_info->nb_txcfir[path]);
	}

	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	if (kfail) {
		iqk_info->nb_txcfir[path] = 0x40000002;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_TXCFIR, 0x0);
	}

	return kfail;
}
1242
/* Pulse the ADC FIFO reset: 0x0303 asserts reset, 0x3333 releases it.
 * NOTE(review): @phy_idx and @path are unused - both paths are reset.
 */
static void _iqk_adc_fifo_rst(struct rtw89_dev *rtwdev,
			      enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
	udelay(10);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333);
}
1250
/* Configure the RX ADC clocking for RX IQK on @path.
 *
 * With the ADC FIFO held in reset, forces the RX ADC clock on both paths:
 * 960M with matching sample delay (0x2) and filter bandwidth (0x8) for
 * 80 MHz channels, otherwise 480M with delay 0x3 and bandwidth 0xf. Then
 * applies the common sampling-delay fields, debug-mode and analog power
 * settings, and finally releases the ADC FIFO reset.
 */
static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);

	if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
		_rxck_force(rtwdev, RF_PATH_A, true, ADC_960M);
		_rxck_force(rtwdev, RF_PATH_B, true, ADC_960M);
		udelay(1);

		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
				       B_UPD_CLK_ADC_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
				       B_UPD_CLK_ADC_VAL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
				       B_PATH0_SAMPL_DLY_T_MSK_V1, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
				       B_PATH1_SAMPL_DLY_T_MSK_V1, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, 0x8);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
				       B_PATH1_BW_SEL_MSK_V1, 0x8);
	} else {
		_rxck_force(rtwdev, RF_PATH_A, true, ADC_480M);
		_rxck_force(rtwdev, RF_PATH_B, true, ADC_480M);
		udelay(1);

		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
				       B_UPD_CLK_ADC_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
				       B_UPD_CLK_ADC_VAL, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
				       B_PATH0_SAMPL_DLY_T_MSK_V1, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
				       B_PATH1_SAMPL_DLY_T_MSK_V1, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, 0xf);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
				       B_PATH1_BW_SEL_MSK_V1, 0xf);
	}

	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, 0x00000780, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x00000780, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, 0x00007800, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x00007800, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_MUL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333);
}
1308
_iqk_2g_rx(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx,u8 path)1309 static bool _iqk_2g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1310 {
1311 static const u32 g_idxrxgain[2] = {0x212, 0x310};
1312 static const u32 g_idxattc2[2] = {0x00, 0x20};
1313 static const u32 g_idxattc1[2] = {0x3, 0x2};
1314 static const u32 g_idxrxagc[2] = {0x0, 0x2};
1315 static const u32 g_idx[2] = {0x0, 0x2};
1316 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1317 bool notready = false;
1318 bool kfail = false;
1319 u32 rf_18, tmp;
1320 u8 gp;
1321
1322 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1323
1324 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
1325 rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
1326 rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
1327 rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
1328
1329 for (gp = 0x0; gp < 0x2; gp++) {
1330 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]);
1331 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, g_idxattc2[gp]);
1332 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, g_idxattc1[gp]);
1333
1334 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1335 0x00000100, 0x1);
1336 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1337 0x00000010, 0x0);
1338 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1339 0x00000007, g_idx[gp]);
1340 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
1341 udelay(100);
1342 udelay(100);
1343
1344 tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
1345 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, tmp);
1346 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]);
1347 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1348 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
1349
1350 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
1351 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1352 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, rf rxbb = %x\n", path,
1353 rtw89_read_rf(rtwdev, path, RR_MOD, 0x003c0));
1354
1355 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
1356 udelay(100);
1357 udelay(100);
1358 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1359 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
1360
1361 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
1362 iqk_info->nb_rxcfir[path] =
1363 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
1364 MASKDWORD) | 0x2;
1365 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1366 "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n", path,
1367 g_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]);
1368
1369 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
1370 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1371
1372 if (iqk_info->is_nbiqk)
1373 break;
1374
1375 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1376 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
1377 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
1378 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1379 }
1380
1381 if (!notready)
1382 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
1383
1384 if (kfail) {
1385 iqk_info->nb_txcfir[path] = 0x40000002;
1386 rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
1387 B_IQK_RES_RXCFIR, 0x0);
1388 }
1389 rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
1390
1391 return kfail;
1392 }
1393
_iqk_5g_rx(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx,u8 path)1394 static bool _iqk_5g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1395 {
1396 static const u32 a_idxrxgain[2] = {0x110, 0x290};
1397 static const u32 a_idxattc2[2] = {0x0f, 0x0f};
1398 static const u32 a_idxattc1[2] = {0x2, 0x2};
1399 static const u32 a_idxrxagc[2] = {0x4, 0x6};
1400 static const u32 a_idx[2] = {0x0, 0x2};
1401 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1402 bool notready = false;
1403 bool kfail = false;
1404 u32 rf_18, tmp;
1405 u8 gp;
1406
1407 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1408
1409 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
1410 rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
1411 rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
1412 rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
1413
1414 for (gp = 0x0; gp < 0x2; gp++) {
1415 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]);
1416 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT, a_idxattc2[gp]);
1417 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2, a_idxattc1[gp]);
1418
1419 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1420 0x00000100, 0x1);
1421 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1422 0x00000010, 0x0);
1423 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1424 0x00000007, a_idx[gp]);
1425 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
1426 udelay(100);
1427 udelay(100);
1428
1429 tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
1430 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, tmp);
1431 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]);
1432 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1433 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
1434
1435 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
1436 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1437 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, rf rxbb = %x\n", path,
1438 rtw89_read_rf(rtwdev, path, RR_MOD, 0x003c0));
1439
1440 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
1441 udelay(200);
1442 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1443 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
1444 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
1445 iqk_info->nb_rxcfir[path] =
1446 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
1447 MASKDWORD) | 0x2;
1448 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1449 "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n",
1450 path, a_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]);
1451 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
1452 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1453
1454 if (iqk_info->is_nbiqk)
1455 break;
1456
1457 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1458 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
1459 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
1460 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1461 }
1462
1463 if (!notready)
1464 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
1465
1466 if (kfail) {
1467 iqk_info->nb_txcfir[path] = 0x40000002;
1468 rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
1469 B_IQK_RES_RXCFIR, 0x0);
1470 }
1471 rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
1472
1473 return kfail;
1474 }
1475
/* Run the complete IQK sequence for one path: LOK (retried up to three
 * times, falling back to fixed LOK register values if it keeps failing),
 * then TX IQK, then RX clock setup / ADC FIFO reset and RX IQK. Band
 * selection between the 2G and 5G variants follows iqk_info->iqk_band[].
 */
static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool lok_result = false;
	bool txk_result = false;
	bool rxk_result = false;
	u8 i;

	/* LOK: retry up to 3 times until it reports success (false) */
	for (i = 0; i < 3; i++) {
		_iqk_txk_setting(rtwdev, path);
		if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
			lok_result = _iqk_2g_lok(rtwdev, phy_idx, path);
		else
			lok_result = _iqk_5g_lok(rtwdev, phy_idx, path);

		if (!lok_result)
			break;
	}

	if (lok_result) {
		/* LOK kept failing: program fixed fallback values */
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]!!!!!!!!!!LOK by Pass !!!!!!!!!!!\n");
		rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_LOKVB, RFREG_MASK, 0x80200);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x08[00:19] = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK));
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x09[00:19] = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RSV2, RFREG_MASK));
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x0a[00:19] = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK));

	if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
		txk_result = _iqk_2g_tx(rtwdev, phy_idx, path);
	else
		txk_result = _iqk_5g_tx(rtwdev, phy_idx, path);

	_iqk_rxclk_setting(rtwdev, path);
	_iqk_adc_fifo_rst(rtwdev, phy_idx, path);

	if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
		rxk_result = _iqk_2g_rx(rtwdev, phy_idx, path);
	else
		rxk_result = _iqk_5g_rx(rtwdev, phy_idx, path);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]result : lok_= %x, txk_= %x, rxk_= %x\n",
		    lok_result, txk_result, rxk_result);
}
1527
/* Record the current channel info for IQK on @path and choose a
 * calibration table slot.
 *
 * Scans iqk_mcc_ch[] for an unused slot (channel == 0); when none is
 * free, rotates to the next table index (wrapping 0/1). The chosen slot
 * and the per-path band/bandwidth/channel fields are then filled from the
 * channel of @chanctx_idx.
 *
 * NOTE(review): @phy is currently unused.
 */
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
			     enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool get_empty_table = false;	/* was u8; this is a boolean flag */
	u32 reg_rf18;
	u32 reg_35c;
	u8 idx;

	for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
		if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
			get_empty_table = true;
			break;
		}
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (1)idx = %x\n", idx);

	if (!get_empty_table) {
		/* no free slot: reuse the table entries round-robin */
		idx = iqk_info->iqk_table_idx[path] + 1;
		if (idx > 1)
			idx = 0;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (2)idx = %x\n", idx);

	reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
	reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN);

	iqk_info->iqk_band[path] = chan->band_type;
	iqk_info->iqk_bw[path] = chan->band_width;
	iqk_info->iqk_ch[path] = chan->channel;
	iqk_info->iqk_mcc_ch[idx][path] = chan->channel;
	iqk_info->iqk_table_idx[path] = idx;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x, idx = %x\n",
		    path, reg_rf18, idx);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x\n",
		    path, reg_rf18);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x35c= 0x%x\n",
		    path, reg_35c);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n",
		    iqk_info->iqk_times, idx);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_mcc_ch[%x][%x] = 0x%x\n",
		    idx, path, iqk_info->iqk_mcc_ch[idx][path]);
}
1573
/* Thin wrapper kicking off the per-path IQK sequence. */
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	_iqk_by_path(rtwdev, phy_idx, path);
}
1578
/* Restore the path after IQK.
 *
 * Writes back the cached narrow-band CFIR results (or the 0x40000000
 * defaults for wide-band IQK), issues the KIP restore NCTL command, then
 * returns the KIP/NCTL registers and the RF path to normal operation.
 */
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);

	if (iqk_info->is_nbiqk) {
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, iqk_info->nb_txcfir[path]);
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       MASKDWORD, iqk_info->nb_rxcfir[path]);
	} else {
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, 0x40000000);
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       MASKDWORD, 0x40000000);
	}
	/* issue the restore NCTL command and wait for it */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
			       0x00000e19 + (path << 4));

	_iqk_check_cal(rtwdev, path, 0x0);

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);

	rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K2, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), BIT(28), 0x0);

	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	/* NOTE(review): the line below duplicates the write above with the
	 * same field and value - looks redundant; confirm against the
	 * vendor reference before removing.
	 */
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0x3);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
}
1615
/* Undo the AFE/BB forcing applied for IQK (counterpart of the macbb
 * setup): release forced clocks, debug modes, ADC FIFO and power-down /
 * CCA overrides on both paths.
 *
 * NOTE(review): @phy_idx and @path are unused - both paths are restored.
 */
static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x0000000);
	rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x0000001f, 0x03);
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x000003e0, 0x03);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
			       B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0000);
	rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x0);
}
1640
/* Preset per-path KIP/CFIR state before IQK: select coefficient/LUT
 * table 0, program identity (0x40000000) TX/RX IQ-correction defaults,
 * release the RF reset, and configure the NCTL/KIP system registers.
 */
static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	u8 idx = 0;	/* coefficient/LUT table index 0 is used for IQK */

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), 0x00000001, idx);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 0x00000008, idx);
	/* 0x40000000 is the unity IQC coefficient (no correction). */
	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 0x40000000);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000000);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
}
1655
/* Force the MAC/BB/AFE into the state required for IQK on both paths:
 * freeze the RX ADC FIFOs, force clocks on, force DAC/ADC rates
 * (960M TX / 1920M RX), and power-cycle the analog blocks. Restored
 * afterwards by _iqk_afebb_restore(). The write/delay sequence follows
 * the vendor calibration flow; do not reorder.
 *
 * Note: @phy_idx and @path are unused — both paths are configured.
 */
static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_GOT_TXRX, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_GOT_TXRX, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x3);
	/* NOTE(review): the P0 mask is used on the P1 register here; the
	 * P0/P1 CLKG_FORCE fields may share the same bit position — confirm
	 * against the register definitions (_iqk_afebb_restore() uses
	 * B_P1_CLKG_FORCE for this register).
	 */
	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P0_CLKG_FORCE, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x1ffffff);
	rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x3ff);
	rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, B_DAC_CLK_IDX, 0x1);

	/* Force converter clocks on both paths for calibration. */
	_txck_force(rtwdev, RF_PATH_A, true, DAC_960M);
	_txck_force(rtwdev, RF_PATH_B, true, DAC_960M);
	_rxck_force(rtwdev, RF_PATH_A, true, ADC_1920M);
	_rxck_force(rtwdev, RF_PATH_B, true, ADC_1920M);

	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x2);

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	udelay(10);
	/* Analog power-up sequence with required settling delays. */
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
	udelay(10);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
	udelay(10);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333);
}
1698
_iqk_init(struct rtw89_dev * rtwdev)1699 static void _iqk_init(struct rtw89_dev *rtwdev)
1700 {
1701 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1702 u8 idx, path;
1703
1704 rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
1705
1706 if (iqk_info->is_iqk_init)
1707 return;
1708
1709 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1710 iqk_info->is_iqk_init = true;
1711 iqk_info->is_nbiqk = false;
1712 iqk_info->iqk_fft_en = false;
1713 iqk_info->iqk_sram_en = false;
1714 iqk_info->iqk_cfir_en = false;
1715 iqk_info->iqk_xym_en = false;
1716 iqk_info->iqk_times = 0x0;
1717
1718 for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
1719 iqk_info->iqk_channel[idx] = 0x0;
1720 for (path = 0; path < RTW8852BT_SS; path++) {
1721 iqk_info->lok_cor_fail[idx][path] = false;
1722 iqk_info->lok_fin_fail[idx][path] = false;
1723 iqk_info->iqk_tx_fail[idx][path] = false;
1724 iqk_info->iqk_rx_fail[idx][path] = false;
1725 iqk_info->iqk_mcc_ch[idx][path] = 0x0;
1726 iqk_info->iqk_table_idx[path] = 0x0;
1727 }
1728 }
1729 }
1730
/* Wait for every path selected in @kpath (bitmask of RF paths) to leave
 * TX mode: polls RR_MOD until it differs from 2 (RF_TX), checking every
 * 2 us for up to 5 ms per path. A timeout is only logged — the caller
 * proceeds regardless.
 */
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
	u32 rf_mode;
	u8 path;
	int ret;

	for (path = 0; path < RF_PATH_MAX; path++) {
		if (!(kpath & BIT(path)))
			continue;

		ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode,
					       rf_mode != 2, 2, 5000, false,
					       rtwdev, path, RR_MOD, RR_MOD_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", path, ret);
	}
}
1748
/* When pausing TX for calibration, wait until every path of @band_idx
 * has left TX mode. Resuming (@is_pause == false) requires no action.
 */
static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx,
			   bool is_pause)
{
	if (is_pause)
		_wait_rx_mode(rtwdev, _kpath(rtwdev, band_idx));
}
1757
/* Run the complete IQK sequence for one RF path:
 *  1. notify BTC (coexistence) that a one-shot RFK starts,
 *  2. record channel info and back up the affected BB/RF registers,
 *  3. force MAC/BB/AFE into calibration state and preset the KIP,
 *  4. run the IQK itself, then restore KIP, AFE/BB and the backups,
 *  5. notify BTC that the one-shot RFK finished.
 * The step order is mandatory — each stage undoes or depends on the
 * previous one.
 */
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
		   enum rtw89_phy_idx phy_idx, u8 path,
		   enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]==========IQK start!!!!!==========\n");
	iqk_info->iqk_times++;
	iqk_info->version = RTW8852BT_IQK_VER;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
	_iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);

	_rfk_backup_bb_reg(rtwdev, backup_bb_val);
	_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
	_rfk_reload_bb_reg(rtwdev, backup_bb_val);
	_rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
1789
/* Dispatch IQK to the path(s) active on @phy_idx. RF_AB calibrates
 * path A first, then path B; any other kpath value does nothing.
 */
static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
		 enum rtw89_chanctx_idx chanctx_idx)
{
	u8 kpath = _kpath(rtwdev, phy_idx);

	if (kpath == RF_A || kpath == RF_AB)
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);

	if (kpath == RF_B || kpath == RF_AB)
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
}
1810
/* Enable or disable the DPD output for the current calibration index of
 * @path. The output is enabled (BIT(24) of the per-path/per-index DPD
 * register set) only when DPK is globally enabled, the caller did not
 * request "off", and the stored calibration result for this path/index
 * is valid.
 *
 * Fix: the original computed @val twice — the first logical-AND
 * assignment was a dead store immediately overwritten by the bitwise
 * version — and used an if/else to derive off_reverse from @off.
 * Both collapsed; behavior is unchanged.
 */
static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 kidx = dpk->cur_idx[path];
	bool off_reverse = !off;
	u8 val;

	val = dpk->is_dpk_enable & off_reverse & dpk->bp[path][kidx].path_ok;

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       BIT(24), val);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
		    kidx, str_enable_disable(dpk->is_dpk_enable & off_reverse));
}
1832
/* Trigger one DPK one-shot command @id on @path and wait for completion.
 * The command word encodes the id in the high byte and the per-path
 * opcode (0x19 + path * 0x10) in the low byte. Completion is detected
 * in two stages: R_RFK_ST low byte == 0x55 (up to 30 ms), then, after
 * selecting report 0x0003 in R_KIP_RPT1, R_RPT_COM low word == 0x8000
 * (up to 2 ms). Timeouts are only logged — the flow continues.
 */
static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, enum rtw8852bt_dpk_id id)
{
	u16 dpk_cmd;
	u32 val;
	int ret;

	dpk_cmd = (id << 8) | (0x19 + (path << 4));
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       1, 30000, false,
				       rtwdev, R_RFK_ST, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 1 over 30ms!!!!\n");

	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00030000);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
				       1, 2000, false,
				       rtwdev, R_RPT_COM, MASKLWORD);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 2 over 2ms!!!!\n");

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);
	/* NOTE(review): id 0x14 is named GAIN_CAL in the enum but logged
	 * as "PWR_CAL" — presumably intentional vendor naming; confirm.
	 */
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] one-shot for %s = 0x%04x\n",
		    id == 0x06 ? "LBK_RXIQK" :
		    id == 0x10 ? "SYNC" :
		    id == 0x11 ? "MDPK_IDL" :
		    id == 0x12 ? "MDPK_MPA" :
		    id == 0x13 ? "GAIN_LOSS" :
		    id == 0x14 ? "PWR_CAL" :
		    id == 0x15 ? "DPK_RXAGC" :
		    id == 0x16 ? "KIP_PRESET" :
		    id == 0x17 ? "KIP_RESTORE" :
		    "DPK_TXAGC", dpk_cmd);
}
1872
/* Retrigger RX DC calibration on @path by toggling the RR_DCK level bit
 * 0 -> 1, then allow 600 us for it to settle. @phy is unused.
 */
static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);

	udelay(600);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d RXDCK\n", path);
}
1883
/* Snapshot the current channel parameters (band, channel, bandwidth)
 * into the DPK bookkeeping slot for @path's current calibration index,
 * and log the calibration context.
 */
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	u8 kidx = dpk->cur_idx[path];

	dpk->bp[path][kidx].band = chan->band_type;
	dpk->bp[path][kidx].ch = chan->channel;
	dpk->bp[path][kidx].bw = chan->band_width;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
		    path, dpk->cur_idx[path], phy,
		    rtwdev->is_tssi_mode[path] ? "on" : "off",
		    rtwdev->dbcc_en ? "on" : "off",
		    dpk->bp[path][kidx].band == 0 ? "2G" :
		    dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
		    dpk->bp[path][kidx].ch,
		    dpk->bp[path][kidx].bw == 0 ? "20M" :
		    dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
}
1907
/* Pause (@is_pause == true) or resume TSSI tracking on @path during DPK.
 * NOTE(review): the field is named B_P0_TSSI_TRK_EN but writing 1 pauses
 * tracking per the log text — presumably the bit gates a "hold" state;
 * confirm against the register definition.
 */
static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
			    enum rtw89_rf_path path, bool is_pause)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
			       B_P0_TSSI_TRK_EN, is_pause);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
		    is_pause ? "pause" : "resume");
}
1917
/* Restore KIP registers after DPK: clear the NCTL report selector,
 * reset the KIP system config, and (on chips newer than CAV) set the
 * per-path DPD compensation override bit.
 */
static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);

	if (rtwdev->hal.cv > CHIP_CAV)
		rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8),
				       B_DPD_COM_OF, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
}
1930
/* Run a loopback RX IQK for DPK on @path. Sets up the RF chain in DPK
 * loopback mode (attenuation chosen from the current RXBB gain
 * @cur_rxbb), restores the channel config word @rf_18 into RR_RSV4,
 * fires the LBK_RXIQK one-shot, then tears the loopback down again.
 * The RF/BB write order and the 100 us settling delay are part of the
 * vendor flow; do not reorder.
 */
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path, u8 cur_rxbb, u32 rf_18)
{
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0);

	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);

	/* Pick loopback attenuation from the current RXBB gain range. */
	if (cur_rxbb >= 0x11)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13);
	else if (cur_rxbb <= 0xa)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00);
	else
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05);

	rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014);

	/* Let the RXK PLL settle before triggering the one-shot. */
	udelay(100);

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025);

	_dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5);
}
1967
/* Configure the RF front end of @path for DPK, with band-specific
 * attenuation/LNA settings (2 GHz vs 5/6 GHz), then set the TX/RX
 * baseband bandwidth from the cached channel info.
 * Note: @gain is unused in this chip variant.
 */
static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
			    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
	} else {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
		rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC);
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0);
		rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800);
	}

	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
	/* TX BB bandwidth index is the cached bw enum + 1. */
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);
}
1992
/* Set or clear both RX CFIR bypass bits of @path's RXIQC register.
 * BYPASS2 is always written before BYPASS, matching the vendor flow.
 */
static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path path, bool is_bypass)
{
	u32 en = is_bypass ? 0x1 : 0x0;

	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
			       B_RXIQC_BYPASS2, en);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
			       B_RXIQC_BYPASS, en);
}
2008
2009 static
_dpk_tpg_sel(struct rtw89_dev * rtwdev,enum rtw89_rf_path path,u8 kidx)2010 void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
2011 {
2012 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2013
2014 if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
2015 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0);
2016 else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
2017 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
2018 else
2019 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
2020
2021 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
2022 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
2023 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
2024 }
2025
/* Program the DPD table selector byte for @path: base 0x80 plus 0x20
 * per calibration index and 0x10 per gain step.
 */
static void _dpk_table_select(struct rtw89_dev *rtwdev,
			      enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	u8 sel = 0x80 + (kidx << 5) + (gain << 4);

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, sel);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
		    gain, sel);
}
2037
/* Read back the SYNC one-shot results for @path and store them in the
 * DPK bookkeeping. Report 0x0 yields correlation index/value; report
 * 0x9 yields the signed 12-bit DC I/Q offsets (stored as magnitudes).
 *
 * Returns true when the sync FAILED — DC offset above threshold or
 * correlation below threshold — false when it passed. The report
 * selector must be written before each corresponding read; do not
 * reorder.
 */
static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
#define DPK_SYNC_TH_DC_I 200
#define DPK_SYNC_TH_DC_Q 200
#define DPK_SYNC_TH_CORR 170
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 corr_val, corr_idx;
	u16 dc_i, dc_q;
	u32 corr, dc;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);

	corr = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
	corr_idx = u32_get_bits(corr, B_PRT_COM_CORI);
	corr_val = u32_get_bits(corr, B_PRT_COM_CORV);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d Corr_idx / Corr_val = %d / %d\n",
		    path, corr_idx, corr_val);

	dpk->corr_idx[path][kidx] = corr_idx;
	dpk->corr_val[path][kidx] = corr_val;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);

	dc = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
	dc_i = u32_get_bits(dc, B_PRT_COM_DCI);
	dc_q = u32_get_bits(dc, B_PRT_COM_DCQ);

	/* Fields are signed 12-bit; keep only the magnitude. */
	dc_i = abs(sign_extend32(dc_i, 11));
	dc_q = abs(sign_extend32(dc_q, 11));

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
		    path, dc_i, dc_q);

	dpk->dc_i[path][kidx] = dc_i;
	dpk->dc_q[path][kidx] = dc_q;

	if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
	    corr_val < DPK_SYNC_TH_CORR)
		return true;
	else
		return false;
}
2082
/* Fire the SYNC one-shot for @path; results are collected afterwards by
 * _dpk_sync_check(). @kidx is unused here.
 */
static void _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, SYNC);
}
2088
/* Read the digital gain reported by the last SYNC one-shot (report
 * selector 0x0, DCI field of R_RPT_COM).
 */
static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
{
	u16 dgain;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);

	dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain);

	return dgain;
}
2101
_dpk_dgain_mapping(struct rtw89_dev * rtwdev,u16 dgain)2102 static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
2103 {
2104 static const u16 bnd[15] = {
2105 0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556,
2106 0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220
2107 };
2108 s8 offset;
2109
2110 if (dgain >= bnd[0])
2111 offset = 0x6;
2112 else if (bnd[0] > dgain && dgain >= bnd[1])
2113 offset = 0x6;
2114 else if (bnd[1] > dgain && dgain >= bnd[2])
2115 offset = 0x5;
2116 else if (bnd[2] > dgain && dgain >= bnd[3])
2117 offset = 0x4;
2118 else if (bnd[3] > dgain && dgain >= bnd[4])
2119 offset = 0x3;
2120 else if (bnd[4] > dgain && dgain >= bnd[5])
2121 offset = 0x2;
2122 else if (bnd[5] > dgain && dgain >= bnd[6])
2123 offset = 0x1;
2124 else if (bnd[6] > dgain && dgain >= bnd[7])
2125 offset = 0x0;
2126 else if (bnd[7] > dgain && dgain >= bnd[8])
2127 offset = 0xff;
2128 else if (bnd[8] > dgain && dgain >= bnd[9])
2129 offset = 0xfe;
2130 else if (bnd[9] > dgain && dgain >= bnd[10])
2131 offset = 0xfd;
2132 else if (bnd[10] > dgain && dgain >= bnd[11])
2133 offset = 0xfc;
2134 else if (bnd[11] > dgain && dgain >= bnd[12])
2135 offset = 0xfb;
2136 else if (bnd[12] > dgain && dgain >= bnd[13])
2137 offset = 0xfa;
2138 else if (bnd[13] > dgain && dgain >= bnd[14])
2139 offset = 0xf9;
2140 else if (bnd[14] > dgain)
2141 offset = 0xf8;
2142 else
2143 offset = 0x0;
2144
2145 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain offset = %d\n", offset);
2146
2147 return offset;
2148 }
2149
/* Select the gain-loss report (selector 0x6, CFG2 strobe) and return
 * the gain-loss index from R_RPT_COM.
 */
static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);

	return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
}
2157
/* Fire the GAIN_LOSS one-shot for @path and preselect the gain-loss
 * report (same selector writes as _dpk_gainloss_read(), without the
 * read) so the result can be fetched afterwards. @kidx is unused.
 */
static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
}
2166
/* Prepare the KIP for DPK on @path: select the test-pattern generator
 * mode for the cached bandwidth, then fire the KIP_PRESET one-shot.
 */
static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path, u8 kidx)
{
	_dpk_tpg_sel(rtwdev, path, kidx);
	_dpk_one_shot(rtwdev, phy, path, KIP_PRESET);
}
2173
/* Power up the KIP and enable its clocks for DPK on @path, including
 * the per-path CFIR system configuration.
 */
static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev,
				enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n");
}
2183
2184 static
_dpk_txagc_check_8852bt(struct rtw89_dev * rtwdev,enum rtw89_rf_path path,u8 txagc)2185 u8 _dpk_txagc_check_8852bt(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 txagc)
2186 {
2187 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2188
2189 if (txagc >= dpk->max_dpk_txagc[path])
2190 txagc = dpk->max_dpk_txagc[path];
2191
2192 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set TxAGC = 0x%x\n", txagc);
2193
2194 return txagc;
2195 }
2196
/* Apply @txagc (clamped to the per-path DPK maximum) to the RF TXAGC
 * register, then fire the DPK_TXAGC one-shot with the RFC trigger
 * enabled around it.
 */
static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path, u8 txagc)
{
	u8 val;

	val = _dpk_txagc_check_8852bt(rtwdev, path, txagc);
	rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, val);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_TXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* Logs the requested value; the applied value may have been capped. */
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc);
}
2210
/* Mirror the RF mode word into the KIP and fire the DPK_RXAGC one-shot
 * with the RFC trigger enabled around it.
 */
static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, 0x50220);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_RXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
}
2219
/* Lower the current TXAGC by @gain_offset, clamped to the DPK range
 * [DPK_TXAGC_LOWER, DPK_TXAGC_UPPER], and apply it via the KIP.
 *
 * Note: the incoming @txagc argument is deliberately ignored — the
 * current value is re-read from the RF register instead.
 *
 * Returns the TXAGC value actually applied.
 */
static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, u8 txagc, s8 gain_offset)
{
	int adjusted;

	txagc = rtw89_read_rf(rtwdev, path, RR_TXAGC, RFREG_MASK);
	adjusted = txagc - gain_offset;

	if (adjusted < DPK_TXAGC_LOWER)
		txagc = DPK_TXAGC_LOWER;
	else if (adjusted > DPK_TXAGC_UPPER)
		txagc = DPK_TXAGC_UPPER;
	else
		txagc = adjusted;

	_dpk_kip_set_txagc(rtwdev, phy, path, txagc);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
		    gain_offset, txagc);
	return txagc;
}
2238
/* Read the PA scan buffer.
 *
 * @is_check true: sample entries 0x00 and 0x1f, take the magnitudes of
 * their signed 12-bit I/Q halves, log the power ratio, and return true
 * when |entry0|^2 >= |entry31|^2 * 8/5 (i.e. the response droops too
 * much across the scan).
 *
 * @is_check false: dump all 32 entries to the debug log. The val1/val2
 * accumulators stay zero in this mode, so the final comparison is
 * 0 >= 0 and the function always returns true; the only caller of this
 * mode (_dpk_agc) ignores the return value.
 */
static bool _dpk_pas_read(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			  u8 is_check)
{
	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);

	if (is_check) {
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val1_i = abs(sign_extend32(val1_i, 11));
		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val1_q = abs(sign_extend32(val1_q, 11));

		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val2_i = abs(sign_extend32(val2_i, 11));
		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val2_q = abs(sign_extend32(val2_q, 11));

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
			    phy_div(val1_i * val1_i + val1_q * val1_q,
				    val2_i * val2_i + val2_q * val2_q));
	} else {
		for (i = 0; i < 32; i++) {
			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
		}
	}

	if (val1_i * val1_i + val1_q * val1_q >=
	    (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
		return true;

	return false;
}
2280
/* DPK automatic gain control state machine.
 *
 * Starting from @init_txagc, iterate SYNC / RXBB-adjust / gain-loss
 * steps until a usable TX gain is found:
 *  - SYNC_DGAIN: run SYNC; on the first pass either bypass the RX CFIR
 *    (bw < 80 MHz) or run the loopback RX IQK. A failed sync check
 *    aborts with 0xff. Otherwise map the digital gain to an RXBB
 *    offset and pick the next step.
 *  - GAIN_ADJ: apply the offset to RXBB (clamped to [0, 0x1f], setting
 *    limited_rxbb when clamped); redo the loopback IQK at 80 MHz;
 *    re-sync when dgain left the (342, 1922) window.
 *  - GAIN_LOSS_IDX: run GAIN_LOSS; index 0 with a failed PA-scan check
 *    or index >= 7 means too much loss (GT), index 0 otherwise too
 *    little (LT), anything else is directly usable.
 *  - GL_GT_CRITERION: step TXAGC down by 3 unless already at the lower
 *    bound (0x2e) or the per-path maximum; retry gain loss.
 *  - GL_LT_CRITERION: step TXAGC up by 2 (offset 0xfe == -2) unless at
 *    the upper bound (0x3f) or the per-path maximum; retry gain loss.
 *  - SET_TX_GAIN: apply the gain-loss index as the final offset and
 *    finish.
 * The loop gives up after 6 AGC iterations (or the 200-iteration hard
 * limit). If gain loss ran 6+ times, dump the PA scan for debugging.
 *
 * Returns the final TXAGC, or 0xff when sync failed.
 */
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
		   bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 goout = 0, agc_cnt = 0, limited_rxbb = 0, gl_cnt = 0;
	u8 tmp_txagc, tmp_rxbb, tmp_gl_idx = 0;
	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
	int limit = 200;	/* hard cap against a wedged state machine */
	s8 offset = 0;
	u16 dgain = 0;
	u32 rf_18;

	tmp_txagc = init_txagc;

	tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);
	rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);

	do {
		switch (step) {
		case DPK_AGC_STEP_SYNC_DGAIN:
			_dpk_sync(rtwdev, phy, path, kidx);
			if (agc_cnt == 0) {
				if (chan->band_width < 2)
					_dpk_bypass_rxcfir(rtwdev, path, true);
				else
					_dpk_lbk_rxiqk(rtwdev, phy, path,
						       tmp_rxbb, rf_18);
			}

			/* true == sync failed; abort with sentinel 0xff */
			if (_dpk_sync_check(rtwdev, path, kidx) == true) {
				tmp_txagc = 0xff;
				goout = 1;
				break;
			}

			dgain = _dpk_dgain_read(rtwdev);
			offset = _dpk_dgain_mapping(rtwdev, dgain);

			if (loss_only == 1 || limited_rxbb == 1 || offset == 0)
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			else
				step = DPK_AGC_STEP_GAIN_ADJ;
			break;
		case DPK_AGC_STEP_GAIN_ADJ:
			tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);

			/* Clamp RXBB to its 5-bit range; remember if we hit it. */
			if (tmp_rxbb + offset > 0x1f) {
				tmp_rxbb = 0x1f;
				limited_rxbb = 1;
			} else if (tmp_rxbb + offset < 0) {
				tmp_rxbb = 0;
				limited_rxbb = 1;
			} else {
				tmp_rxbb = tmp_rxbb + offset;
			}

			rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB, tmp_rxbb);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb);

			if (chan->band_width == RTW89_CHANNEL_WIDTH_80)
				_dpk_lbk_rxiqk(rtwdev, phy, path, tmp_rxbb, rf_18);
			if (dgain > 1922 || dgain < 342)
				step = DPK_AGC_STEP_SYNC_DGAIN;
			else
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;

			agc_cnt++;
			break;
		case DPK_AGC_STEP_GAIN_LOSS_IDX:
			_dpk_gainloss(rtwdev, phy, path, kidx);

			tmp_gl_idx = _dpk_gainloss_read(rtwdev);

			if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, path, true)) ||
			    tmp_gl_idx >= 7)
				step = DPK_AGC_STEP_GL_GT_CRITERION;
			else if (tmp_gl_idx == 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else
				step = DPK_AGC_STEP_SET_TX_GAIN;

			gl_cnt++;
			break;
		case DPK_AGC_STEP_GL_GT_CRITERION:
			if (tmp_txagc == 0x2e ||
			    tmp_txagc == dpk->max_dpk_txagc[path]) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@lower bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path,
							    tmp_txagc, 0x3);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_GL_LT_CRITERION:
			if (tmp_txagc == 0x3f || tmp_txagc == dpk->max_dpk_txagc[path]) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@upper bound!!\n");
			} else {
				/* 0xfe is -2 as s8: raise TXAGC by 2. */
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path,
							    tmp_txagc, 0xfe);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_SET_TX_GAIN:
			tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_txagc,
						    tmp_gl_idx);
			goout = 1;
			agc_cnt++;
			break;

		default:
			goout = 1;
			break;
		}
	} while (!goout && agc_cnt < 6 && limit-- > 0);

	if (gl_cnt >= 6)
		_dpk_pas_read(rtwdev, path, false);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc, tmp_rxbb);

	return tmp_txagc;
}
2415
/* Program the MDPD polynomial order for IDL on @path.
 *
 * Orders 0/1/2 correspond to coefficient sets (5,3,1), (5,3,0) and
 * (5,0,0); each selects the LDL PN count, the manual-sync flag and the
 * dpk_order value cached for _dpk_fill_result(). Unknown orders are
 * only logged — no registers are touched (the trailing debug lines
 * still run, matching the original flow).
 */
static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path path, u8 order)
{
	static const struct {
		u8 pn;		/* B_LDL_NORM_PN value */
		u8 sync_man;	/* B_MDPK_SYNC_MAN value */
		u8 dpk_order;	/* cached for result fill */
	} cfg[] = {
		[0] = { 0x3, 0x1, 0x3 },	/* (5,3,1) */
		[1] = { 0x0, 0x0, 0x1 },	/* (5,3,0) */
		[2] = { 0x0, 0x0, 0x0 },	/* (5,0,0) */
	};
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (order >= ARRAY_SIZE(cfg)) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Wrong MDPD order!!(0x%x)\n", order);
	} else {
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN,
				       cfg[order].pn);
		rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN,
				       cfg[order].sync_man);
		dpk->dpk_order[path] = cfg[order].dpk_order;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n",
		    order == 0x0 ? "(5,3,1)" :
		    order == 0x1 ? "(5,3,0)" : "(5,0,0)");

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set MDPD order to 0x%x for IDL\n", order);
}
2453
/* Run the MDPK IDL step for @path: pick MDPD order 0x2 (reduced set)
 * for sub-80 MHz channels on the 5 GHz band, order 0x0 otherwise, then
 * fire the MDPK_IDL one-shot. @gain is unused.
 */
static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 order = 0x0;

	if (dpk->bp[path][kidx].band == RTW89_BAND_5G &&
	    dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80)
		order = 0x2;

	_dpk_set_mdpd_para(rtwdev, path, order);

	_dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
}
2467
/* Commit the DPK results for one path/channel slot to hardware and mirror
 * them into the dpk backup struct: final TX AGC, power scaling factor (pwsf),
 * gain scaling (gs) and the MDPD order chosen earlier.
 */
static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 gs = dpk->dpk_gs[phy];
	u16 pwsf = 0x78;

	/* Select which of the two per-path coefficient banks to fill. */
	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), BIT(8), kidx);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n",
		    txagc, pwsf, gs);

	dpk->bp[path][kidx].txagc_dpk = txagc;
	/* 6-bit txagc field; position depends on gain (byte) and kidx (word). */
	rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
			       0x3F << ((gain << 3) + (kidx << 4)), txagc);

	dpk->bp[path][kidx].pwsf = pwsf;
	/* 9-bit pwsf field, shifted by 16 when gain == 1. */
	rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
			       0x1FF << (gain << 4), pwsf);

	/* Pulse the MDPD coefficient-load strobe. */
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);

	dpk->bp[path][kidx].gs = gs;
	/* 0x7f: backoff moved to BB by _set_dpd_backoff(); otherwise 0x5b. */
	if (dpk->dpk_gs[phy] == 0x7f)
		rtw89_phy_write32_mask(rtwdev,
				       R_DPD_CH0A + (path << 8) + (kidx << 2),
				       MASKDWORD, 0x007f7f7f);
	else
		rtw89_phy_write32_mask(rtwdev,
				       R_DPD_CH0A + (path << 8) + (kidx << 2),
				       MASKDWORD, 0x005b5b5b);

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       B_DPD_ORDER_V1, dpk->dpk_order[path]);

	rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, 0x0);
}
2508
_dpk_reload_check(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,enum rtw89_chanctx_idx chanctx_idx)2509 static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2510 enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
2511 {
2512 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
2513 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2514 u8 idx, cur_band, cur_ch;
2515 bool is_reload = false;
2516
2517 cur_band = chan->band_type;
2518 cur_ch = chan->channel;
2519
2520 for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
2521 if (cur_band != dpk->bp[path][idx].band ||
2522 cur_ch != dpk->bp[path][idx].ch)
2523 continue;
2524
2525 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
2526 B_COEF_SEL_MDPD, idx);
2527 dpk->cur_idx[path] = idx;
2528 is_reload = true;
2529 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2530 "[DPK] reload S%d[%d] success\n", path, idx);
2531 }
2532
2533 return is_reload;
2534 }
2535
2536 static
_rf_direct_cntrl(struct rtw89_dev * rtwdev,enum rtw89_rf_path path,bool is_bybb)2537 void _rf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb)
2538 {
2539 if (is_bybb)
2540 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
2541 else
2542 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
2543 }
2544
2545 static
_drf_direct_cntrl(struct rtw89_dev * rtwdev,enum rtw89_rf_path path,bool is_bybb)2546 void _drf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb)
2547 {
2548 if (is_bybb)
2549 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
2550 else
2551 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
2552 }
2553
_dpk_main(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,u8 gain,enum rtw89_chanctx_idx chanctx_idx)2554 static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2555 enum rtw89_rf_path path, u8 gain,
2556 enum rtw89_chanctx_idx chanctx_idx)
2557 {
2558 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2559 u8 txagc = 0x38, kidx = dpk->cur_idx[path];
2560 bool is_fail = false;
2561
2562 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2563 "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
2564
2565 _rf_direct_cntrl(rtwdev, path, false);
2566 _drf_direct_cntrl(rtwdev, path, false);
2567
2568 _dpk_kip_pwr_clk_on(rtwdev, path);
2569 _dpk_kip_set_txagc(rtwdev, phy, path, txagc);
2570 _dpk_rf_setting(rtwdev, gain, path, kidx);
2571 _dpk_rx_dck(rtwdev, phy, path);
2572 _dpk_kip_preset(rtwdev, phy, path, kidx);
2573 _dpk_kip_set_rxagc(rtwdev, phy, path);
2574 _dpk_table_select(rtwdev, path, kidx, gain);
2575
2576 txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
2577
2578 _rfk_get_thermal(rtwdev, kidx, path);
2579
2580 if (txagc == 0xff) {
2581 is_fail = true;
2582 goto _error;
2583 }
2584
2585 _dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
2586
2587 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, RF_RX);
2588 _dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc);
2589
2590 _error:
2591 if (!is_fail)
2592 dpk->bp[path][kidx].path_ok = 1;
2593 else
2594 dpk->bp[path][kidx].path_ok = 0;
2595
2596 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
2597 is_fail ? "Check" : "Success");
2598
2599 _dpk_onoff(rtwdev, path, is_fail);
2600
2601 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
2602 is_fail ? "Check" : "Success");
2603
2604 return is_fail;
2605 }
2606
static void _dpk_cal_select(struct rtw89_dev *rtwdev,
			    enum rtw89_phy_idx phy, u8 kpath,
			    enum rtw89_chanctx_idx chanctx_idx)
{
	/* Top-level DPK flow: try to reload cached results per path, back up
	 * BB/KIP/RF registers, run _dpk_main() on every path, then restore.
	 */
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u32 backup_kip_val[BACKUP_KIP_REGS_NR];
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
	bool reloaded[2] = {false};
	u8 path;

	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
		reloaded[path] = _dpk_reload_check(rtwdev, phy, path, chanctx_idx);
		/* No cached match but the slot was used before: flip to the
		 * other backup slot so the previous result is preserved.
		 */
		if (!reloaded[path] && dpk->bp[path][0].ch != 0)
			dpk->cur_idx[path] = !dpk->cur_idx[path];
		else
			_dpk_onoff(rtwdev, path, false);
	}

	_rfk_backup_bb_reg(rtwdev, backup_bb_val);
	_rfk_backup_kip_reg(rtwdev, backup_kip_val);

	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
		_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
		_dpk_information(rtwdev, phy, path, chanctx_idx);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, true);
	}

	/* NOTE(review): at this point path == DPK_RF_PATH_MAX_8852BT (loop
	 * ran to completion); the afe setting/restore and kip restore below
	 * presumably key off kpath instead — confirm those helpers ignore
	 * the path argument or handle the out-of-range value.
	 */
	_rfk_bb_afe_setting(rtwdev, phy, path, kpath);

	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++)
		_dpk_main(rtwdev, phy, path, 1, chanctx_idx);

	_rfk_bb_afe_restore(rtwdev, phy, path, kpath);

	_dpk_kip_restore(rtwdev, path);
	_rfk_reload_bb_reg(rtwdev, backup_bb_val);
	_rfk_reload_kip_reg(rtwdev, backup_kip_val);

	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
		_rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, false);
	}
}
2653
_dpk_bypass_check(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_chanctx_idx chanctx_idx)2654 static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2655 enum rtw89_chanctx_idx chanctx_idx)
2656 {
2657 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
2658 struct rtw89_fem_info *fem = &rtwdev->fem;
2659
2660 if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
2661 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2662 "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
2663 return true;
2664 } else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
2665 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2666 "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
2667 return true;
2668 } else if (fem->epa_6g && chan->band_type == RTW89_BAND_6G) {
2669 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2670 "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
2671 return true;
2672 }
2673
2674 return false;
2675 }
2676
_dpk_force_bypass(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy)2677 static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2678 {
2679 u8 path, kpath;
2680
2681 kpath = _kpath(rtwdev, phy);
2682
2683 for (path = 0; path < RTW8852BT_SS; path++) {
2684 if (kpath & BIT(path))
2685 _dpk_onoff(rtwdev, path, true);
2686 }
2687 }
2688
static void _dpk_track(struct rtw89_dev *rtwdev)
{
	/* Periodic DPK tracking: compensate the stored pwsf for thermal drift
	 * since calibration, per path and per current backup slot.
	 */
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst;
	s8 delta_ther[2] = {};
	u8 trk_idx, txagc_rf;
	u8 path, kidx;
	u16 pwsf[2];
	u8 cur_ther;
	u32 tmp;

	for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
		kidx = dpk->cur_idx[path];

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
			    path, kidx, dpk->bp[path][kidx].ch);

		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] thermal now = %d\n", cur_ther);

		/* Delta vs. thermal captured at cal time; only when the slot
		 * holds a valid channel and a thermal reading is available.
		 */
		if (dpk->bp[path][kidx].ch && cur_ther)
			delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;

		/* Band-dependent scaling: x1.5 for 2G, x2.5 otherwise. */
		if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
			delta_ther[path] = delta_ther[path] * 3 / 2;
		else
			delta_ther[path] = delta_ther[path] * 5 / 2;

		txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						 B_TXAGC_RF);

		if (rtwdev->is_tssi_mode[path]) {
			trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
				    txagc_rf, trk_idx);

			txagc_bb =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE2);
			txagc_bb_tp =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13),
						      B_TXAGC_TP);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
				    txagc_bb_tp, txagc_bb);

			txagc_ofst =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE3);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
				    txagc_ofst, delta_ther[path]);
			/* When HW handles the txagc offset itself, SW must
			 * not apply it a second time.
			 */
			tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
						    B_DPD_COM_OF);
			if (tmp == 0x1) {
				txagc_ofst = 0;
				rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
					    "[DPK_TRK] HW txagc offset mode\n");
			}

			if (txagc_rf && cur_ther)
				ini_diff = txagc_ofst + (delta_ther[path]);

			tmp = rtw89_phy_read32_mask(rtwdev,
						    R_P0_TXDPD + (path << 13),
						    B_P0_TXDPD);
			if (tmp == 0x0) {
				pwsf[0] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
			} else {
				pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff;
			}
		} else {
			/* Non-TSSI mode: thermal delta only, 9-bit wrap. */
			pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
			pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
		}

		/* Only write back when tracking isn't disabled and TX AGC
		 * readback is non-zero.
		 */
		tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS);
		if (!tmp && txagc_rf) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
				    pwsf[0], pwsf[1]);

			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_0, pwsf[0]);
			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_1, pwsf[1]);
		}
	}
}
2791
_set_dpd_backoff(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy)2792 static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2793 {
2794 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2795 u8 tx_scale, ofdm_bkof, path, kpath;
2796
2797 kpath = _kpath(rtwdev, phy);
2798
2799 ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM);
2800 tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA);
2801
2802 if (ofdm_bkof + tx_scale >= 44) {
2803 /* move dpd backoff to bb, and set dpd backoff to 0 */
2804 dpk->dpk_gs[phy] = 0x7f;
2805 for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
2806 if (!(kpath & BIT(path)))
2807 continue;
2808
2809 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8),
2810 B_DPD_CFG, 0x7f7f7f);
2811 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2812 "[RFK] Set S%d DPD backoff to 0dB\n", path);
2813 }
2814 } else {
2815 dpk->dpk_gs[phy] = 0x5b;
2816 }
2817 }
2818
/* Turn off DPD output on both channel-0 coefficient banks while TSSI runs.
 * NOTE(review): bit 24 is written as a raw BIT(24) rather than a named mask.
 */
static void _tssi_dpk_off(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A, BIT(24), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0B, BIT(24), 0x0);
}
2824
_tssi_rf_setting(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)2825 static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2826 enum rtw89_rf_path path, const struct rtw89_chan *chan)
2827 {
2828 enum rtw89_band band = chan->band_type;
2829
2830 if (band == RTW89_BAND_2G)
2831 rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
2832 else
2833 rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
2834 }
2835
/* Apply the common TSSI system table, set the 80 MHz async-reset flag per the
 * current bandwidth, then apply the per-path band-specific system table.
 */
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
	enum rtw89_band band = chan->band_type;

	rtw89_rfk_parser(rtwdev, &rtw8852bt_tssi_sys_defs_tbl);

	if (chan->band_width == RTW89_CHANNEL_WIDTH_80)
		rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, 0x1);
	else
		rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, 0x0);

	if (path == RF_PATH_A)
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852bt_tssi_sys_a_defs_2g_tbl,
					 &rtw8852bt_tssi_sys_a_defs_5g_tbl);
	else
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852bt_tssi_sys_b_defs_2g_tbl,
					 &rtw8852bt_tssi_sys_b_defs_5g_tbl);
}
2857
/* Apply the per-path initial TX power control table (BB side). */
static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev,
				    enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852bt_tssi_init_txpwr_defs_a_tbl,
				 &rtw8852bt_tssi_init_txpwr_defs_b_tbl);
}
2866
/* Apply the per-path initial TX power control table for HE trigger-based
 * (UL OFDMA) transmissions.
 */
static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852bt_tssi_init_txpwr_he_tb_defs_a_tbl,
				 &rtw8852bt_tssi_init_txpwr_he_tb_defs_b_tbl);
}
2875
/* Apply the per-path TSSI DCK (DC calibration) register table. */
static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852bt_tssi_dck_defs_a_tbl,
				 &rtw8852bt_tssi_dck_defs_b_tbl);
}
2883
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
/* Pack four consecutive s8 offsets (starting at idx) into one little-endian
 * u32 word for the 64-entry thermal-offset table write below.
 */
#define RTW8852BT_TSSI_GET_VAL(ptr, idx)			\
({								\
	s8 *__ptr = (ptr);					\
	u8 __idx = (idx), __i, __v;				\
	u32 __val = 0;						\
	for (__i = 0; __i < 4; __i++) {				\
		__v = (__ptr[__idx + __i]);			\
		__val |= (__v << (8 * __i));			\
	}							\
	__val;							\
})
	/* Build and program the per-path thermal-compensation table from the
	 * firmware's TX power tracking deltas for the current subband. If no
	 * thermal reading exists (0xff), a neutral midpoint (32) and an
	 * all-zero table are programmed instead.
	 */
	struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	u8 subband = chan->subband_type;
	const s8 *thm_up_a = NULL;
	const s8 *thm_down_a = NULL;
	const s8 *thm_up_b = NULL;
	const s8 *thm_down_b = NULL;
	u8 thermal = 0xff;
	s8 thm_ofst[64] = {0};
	u32 tmp = 0;
	u8 i, j;

	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
		break;
	}

	if (path == RF_PATH_A) {
		thermal = tssi_info->thermal[RF_PATH_A];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			/* No calibrated thermal: neutral anchor, zero table. */
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    R_P0_TSSI_BASE + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER,
					       thermal);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
					       thermal);

			/* Lower half: negated "down" deltas; the last delta is
			 * repeated once the firmware table is exhausted.
			 */
			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_a[i++] :
					      -thm_down_a[DELTA_SWINGIDX_SIZE - 1];

			/* Upper half, filled downward from j=63 with "up"
			 * deltas starting at index 1.
			 */
			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_a[i++] :
					      thm_up_a[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);

				/* NOTE(review): debug prints literal 0x5c00,
				 * presumably the value of R_P0_TSSI_BASE —
				 * confirm they match.
				 */
				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, tmp);
			}
		}
		/* Pulse the table-ready strobe. */
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);

	} else {
		/* Path B mirrors path A with the P1/THOF registers. */
		thermal = tssi_info->thermal[RF_PATH_B];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER,
					       thermal);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_b[i++] :
					      -thm_down_b[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_b[i++] :
					      thm_up_b[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, tmp);
			}
		}
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
	}
#undef RTW8852BT_TSSI_GET_VAL
}
3043
/* Apply the per-path TSSI DAC gain register table. */
static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				   enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852bt_tssi_dac_gain_defs_a_tbl,
				 &rtw8852bt_tssi_dac_gain_defs_b_tbl);
}
3051
_tssi_slope_cal_org(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)3052 static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3053 enum rtw89_rf_path path, const struct rtw89_chan *chan)
3054 {
3055 enum rtw89_band band = chan->band_type;
3056
3057 if (path == RF_PATH_A)
3058 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
3059 &rtw8852bt_tssi_slope_a_defs_2g_tbl,
3060 &rtw8852bt_tssi_slope_a_defs_5g_tbl);
3061 else
3062 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
3063 &rtw8852bt_tssi_slope_b_defs_2g_tbl,
3064 &rtw8852bt_tssi_slope_b_defs_5g_tbl);
3065 }
3066
/* Apply the per-path default TSSI alignment table for the current channel
 * range. @all is accepted but unused here — presumably kept for signature
 * parity with other chip variants; confirm before removing.
 * NOTE(review): path A selects 2G by band_type while path B uses ch 1..14 —
 * likely equivalent, but verify the asymmetry is intentional.
 */
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path, bool all,
				    const struct rtw89_chan *chan)
{
	enum rtw89_band band = chan->band_type;
	const struct rtw89_rfk_tbl *tbl = NULL;
	u8 ch = chan->channel;

	if (path == RF_PATH_A) {
		if (band == RTW89_BAND_2G)
			tbl = &rtw8852bt_tssi_align_a_2g_all_defs_tbl;
		else if (ch >= 36 && ch <= 64)
			tbl = &rtw8852bt_tssi_align_a_5g1_all_defs_tbl;
		else if (ch >= 100 && ch <= 144)
			tbl = &rtw8852bt_tssi_align_a_5g2_all_defs_tbl;
		else if (ch >= 149 && ch <= 177)
			tbl = &rtw8852bt_tssi_align_a_5g3_all_defs_tbl;
	} else {
		if (ch >= 1 && ch <= 14)
			tbl = &rtw8852bt_tssi_align_b_2g_all_defs_tbl;
		else if (ch >= 36 && ch <= 64)
			tbl = &rtw8852bt_tssi_align_b_5g1_all_defs_tbl;
		else if (ch >= 100 && ch <= 144)
			tbl = &rtw8852bt_tssi_align_b_5g2_all_defs_tbl;
		else if (ch >= 149 && ch <= 177)
			tbl = &rtw8852bt_tssi_align_b_5g3_all_defs_tbl;
	}

	/* Channels outside all known ranges leave tbl NULL: nothing applied. */
	if (tbl)
		rtw89_rfk_parser(rtwdev, tbl);
}
3098
/* Apply the per-path TSSI slope register table. */
static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852bt_tssi_slope_defs_a_tbl,
				 &rtw8852bt_tssi_slope_defs_b_tbl);
}
3106
_tssi_set_tssi_track(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)3107 static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3108 enum rtw89_rf_path path)
3109 {
3110 if (path == RF_PATH_A)
3111 rtw89_phy_write32_mask(rtwdev, R_P0_TSSIC, B_P0_TSSIC_BYPASS, 0x0);
3112 else
3113 rtw89_phy_write32_mask(rtwdev, R_P1_TSSIC, B_P1_TSSIC_BYPASS, 0x0);
3114 }
3115
/* Configure the TSSI moving-average mix value (0x010) for @path.
 * NOTE(review): path B writes mask B_P1_RFCTM_DEL into R_P1_TSSI_MV_AVG —
 * likely the path-B counterpart of B_P0_TSSI_MV_MIX despite the name;
 * confirm against the register map.
 */
static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "======>%s path=%d\n", __func__,
		    path);

	if (path == RF_PATH_A)
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
				       B_P0_TSSI_MV_MIX, 0x010);
	else
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
				       B_P1_RFCTM_DEL, 0x010);
}
3130
static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	/* Enable TSSI closed-loop power tracking on both paths: clear the
	 * moving-average reset, toggle TSSI enable, enable RF-side gain
	 * tracking and re-arm the offset engine; finally mark tssi mode on.
	 */
	u8 i;

	for (i = 0; i < RF_PATH_NUM_8852BT; i++) {
		_tssi_set_tssi_track(rtwdev, phy, i);
		_tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);

		if (i == RF_PATH_A) {
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
					       B_P0_TSSI_MV_CLR, 0x0);
			/* Toggle enable 0 -> 1 to restart the TSSI engine. */
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
					       B_P0_TSSI_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
					       B_P0_TSSI_EN, 0x1);
			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
				       RR_TXGA_V1_TRK_EN, 0x1);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_RFC, 0x3);

			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT, 0xc0);
			/* Toggle offset enable 0 -> 1 to latch the value. */
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT_EN, 0x1);

			rtwdev->is_tssi_mode[RF_PATH_A] = true;
		} else {
			/* Path B mirrors path A with the P1 registers. */
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
					       B_P1_TSSI_MV_CLR, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
					       B_P1_TSSI_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
					       B_P1_TSSI_EN, 0x1);
			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
				       RR_TXGA_V1_TRK_EN, 0x1);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_RFC, 0x3);

			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT, 0xc0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT_EN, 0x1);

			rtwdev->is_tssi_mode[RF_PATH_B] = true;
		}
	}
}
3182
/* Disable TSSI tracking on both paths and clear the driver-side mode flags. */
static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_RFC, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_CLR, 0x1);

	rtwdev->is_tssi_mode[RF_PATH_A] = false;
	rtwdev->is_tssi_mode[RF_PATH_B] = false;
}
3195
_tssi_get_cck_group(struct rtw89_dev * rtwdev,u8 ch)3196 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
3197 {
3198 switch (ch) {
3199 case 1 ... 2:
3200 return 0;
3201 case 3 ... 5:
3202 return 1;
3203 case 6 ... 8:
3204 return 2;
3205 case 9 ... 11:
3206 return 3;
3207 case 12 ... 13:
3208 return 4;
3209 case 14:
3210 return 5;
3211 }
3212
3213 return 0;
3214 }
3215
/* An "extra" TSSI group sits between two adjacent OFDM groups: bit 31 flags
 * it and the low bits carry the first of the two groups whose DE values are
 * averaged (IDX1 and IDX1 + 1).
 */
#define TSSI_EXTRA_GROUP_BIT (BIT(31))
#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
3221
/* Map a channel to its OFDM TSSI DE group (0..18). Channels that sit between
 * two groups return TSSI_EXTRA_GROUP(n), telling the caller to average the
 * DE values of groups n and n+1. Unknown channels fall back to group 0.
 */
static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}
3289
_tssi_get_trim_group(struct rtw89_dev * rtwdev,u8 ch)3290 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
3291 {
3292 switch (ch) {
3293 case 1 ... 8:
3294 return 0;
3295 case 9 ... 14:
3296 return 1;
3297 case 36 ... 48:
3298 return 2;
3299 case 52 ... 64:
3300 return 3;
3301 case 100 ... 112:
3302 return 4;
3303 case 116 ... 128:
3304 return 5;
3305 case 132 ... 144:
3306 return 6;
3307 case 149 ... 177:
3308 return 7;
3309 }
3310
3311 return 0;
3312 }
3313
/* Return the OFDM (MCS) TSSI DE value for @path on the current channel.
 * For "extra" groups the DE is the average of the two neighbouring groups.
 */
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	u32 gidx, gidx_1st, gidx_2nd;
	s8 de_1st;
	s8 de_2nd;
	s8 val;

	gidx = _tssi_get_ofdm_group(rtwdev, ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx);

	if (IS_TSSI_EXTRA_GROUP(gidx)) {
		gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
		gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
		val = (de_1st + de_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
			    path, val, de_1st, de_2nd);
	} else {
		val = tssi_info->tssi_mcs[path][gidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
	}

	return val;
}
3348
_tssi_get_ofdm_trim_de(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)3349 static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3350 enum rtw89_rf_path path, const struct rtw89_chan *chan)
3351 {
3352 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3353 u8 ch = chan->channel;
3354 u32 tgidx, tgidx_1st, tgidx_2nd;
3355 s8 tde_1st;
3356 s8 tde_2nd;
3357 s8 val;
3358
3359 tgidx = _tssi_get_trim_group(rtwdev, ch);
3360
3361 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3362 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
3363 path, tgidx);
3364
3365 if (IS_TSSI_EXTRA_GROUP(tgidx)) {
3366 tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
3367 tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
3368 tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
3369 tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
3370 val = (tde_1st + tde_2nd) / 2;
3371
3372 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3373 "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
3374 path, val, tde_1st, tde_2nd);
3375 } else {
3376 val = tssi_info->tssi_trim[path][tgidx];
3377
3378 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3379 "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
3380 path, val);
3381 }
3382
3383 return val;
3384 }
3385
/* Program TSSI DE (power-offset) values derived from efuse calibration
 * into the BB registers of every RF path: CCK (long and short preamble)
 * and all OFDM/MCS bandwidth variants.  Each value is the raw efuse DE
 * plus the per-path OFDM trim offset.
 */
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				  const struct rtw89_chan *chan)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	u8 gidx;
	s8 ofdm_de;
	s8 trim_de;
	s32 val;
	u32 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
		/* CCK: efuse DE + trim, written to both preamble variants */
		gidx = _tssi_get_cck_group(rtwdev, ch);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
		val = tssi_info->tssi_cck[i][gidx] + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_cck_long[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
						  _TSSI_DE_MASK));

		/* OFDM/MCS: one value fans out to every bandwidth register */
		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
		val = ofdm_de + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
			    i, ofdm_de, trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i],
				       _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_mcs_20m[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
						  _TSSI_DE_MASK));
	}
}
3441
/* Dump the eight TSSI PA-K / alignment result registers of @path for
 * debugging.  (path << 13) is the per-path register block offset: path B
 * registers live 0x2000 above their path A counterparts.
 */
static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n"
		    "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n",
		    R_TSSI_PA_K1 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K1 + (path << 13)),
		    R_TSSI_PA_K2 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K2 + (path << 13)),
		    R_P0_TSSI_ALIM1 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM1 + (path << 13)),
		    R_P0_TSSI_ALIM3 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM3 + (path << 13)),
		    R_TSSI_PA_K5 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K5 + (path << 13)),
		    R_P0_TSSI_ALIM2 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM2 + (path << 13)),
		    R_P0_TSSI_ALIM4 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM4 + (path << 13)),
		    R_TSSI_PA_K8 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K8 + (path << 13)));
}
3464
_tssi_alimentk_done(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)3465 static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
3466 enum rtw89_phy_idx phy, enum rtw89_rf_path path,
3467 const struct rtw89_chan *chan)
3468 {
3469 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3470 u8 channel = chan->channel;
3471 u8 band;
3472
3473 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3474 "======>%s phy=%d path=%d\n", __func__, phy, path);
3475
3476 if (channel >= 1 && channel <= 14)
3477 band = TSSI_ALIMK_2G;
3478 else if (channel >= 36 && channel <= 64)
3479 band = TSSI_ALIMK_5GL;
3480 else if (channel >= 100 && channel <= 144)
3481 band = TSSI_ALIMK_5GM;
3482 else if (channel >= 149 && channel <= 177)
3483 band = TSSI_ALIMK_5GH;
3484 else
3485 band = TSSI_ALIMK_2G;
3486
3487 if (tssi_info->alignment_done[path][band]) {
3488 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
3489 tssi_info->alignment_value[path][band][0]);
3490 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
3491 tssi_info->alignment_value[path][band][1]);
3492 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
3493 tssi_info->alignment_value[path][band][2]);
3494 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
3495 tssi_info->alignment_value[path][band][3]);
3496 }
3497
3498 _tssi_alimentk_dump_result(rtwdev, path);
3499 }
3500
/* Start or stop PMAC hardware packet TX used during TSSI alignment.
 * When @enable is set, the PLCP, TX path, RX path and TX power are
 * configured first; in both cases PMAC packet TX is (re)programmed.
 */
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
			u8 enable, const struct rtw89_chan *chan)
{
	enum rtw89_rf_path_bit rx_path;

	switch (path) {
	case RF_PATH_A:
		rx_path = RF_A;
		break;
	case RF_PATH_B:
		rx_path = RF_B;
		break;
	case RF_PATH_AB:
		rx_path = RF_AB;
		break;
	default:
		rx_path = RF_ABCD; /* don't change path, but still set others */
		break;
	}

	if (enable) {
		rtw8852bx_bb_set_plcp_tx(rtwdev);
		rtw8852bx_bb_cfg_tx_path(rtwdev, path);
		rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
		rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
	}

	rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy, chan);
}
3525
/* Snapshot @reg_num BB registers from @reg into @reg_backup so they can
 * be restored by _tssi_reload_bb_registers() after calibration.
 */
static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_idx phy, const u32 reg[],
				      u32 reg_backup[], u32 reg_num)
{
	u32 idx;

	for (idx = 0; idx < reg_num; idx++) {
		reg_backup[idx] = rtw89_phy_read32_mask(rtwdev, reg[idx], MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI] Backup BB 0x%x = 0x%x\n", reg[idx],
			    reg_backup[idx]);
	}
}
3540
/* Write back the BB register values captured by
 * _tssi_backup_bb_registers(); inverse of the backup step.
 */
static void _tssi_reload_bb_registers(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_idx phy, const u32 reg[],
				      u32 reg_backup[], u32 reg_num)
{
	u32 idx;

	for (idx = 0; idx < reg_num; idx++) {
		rtw89_phy_write32_mask(rtwdev, reg[idx], MASKDWORD, reg_backup[idx]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI] Reload BB 0x%x = 0x%x\n", reg[idx],
			    reg_backup[idx]);
	}
}
3556
/* Map a channel number to the per-channel TSSI backup table index:
 * 2 GHz ch 1-14 -> 0-13, then the three 5 GHz sub-bands (in steps of
 * two channels) follow contiguously.  Unknown channels map to index 0.
 */
static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
{
	if (channel >= 1 && channel <= 14)
		return channel - 1;
	if (channel >= 36 && channel <= 64)
		return (channel - 36) / 2 + 14;
	if (channel >= 100 && channel <= 144)
		return (channel - 100) / 2 + 15 + 14;
	if (channel >= 149 && channel <= 177)
		return (channel - 149) / 2 + 38 + 14;

	return 0;
}
3574
/* Run the TSSI codeword measurement rounds and collect the CW reports.
 * For each round j:
 *   - pulse B_P0_TSSI_EN to restart the TSSI engine,
 *   - transmit PMAC packets at power[j] (round 0 on @path only, later
 *     rounds on RF_PATH_ABCD),
 *   - poll B_TSSI_CWRPT_RDY (up to 100 polls, 30 us apart) and latch
 *     B_TSSI_CWRPT into tssi_cw_rpt[j].
 * Returns false if the ready bit never asserts, true on success.
 */
static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, const s16 *power,
				u32 *tssi_cw_rpt, const struct rtw89_chan *chan)
{
	u32 tx_counter, tx_counter_tmp;
	const int retry = 100;
	u32 tmp;
	int j, k;

	for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) {
		/* restart the TSSI engine so a fresh report is produced */
		rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x1);

		/* baseline TX counter, used only for debug deltas below */
		tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);

		tmp = rtw89_phy_read32_mask(rtwdev, _tssi_trigger[path], MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] 0x%x = 0x%08x path=%d\n",
			    _tssi_trigger[path], tmp, path);

		if (j == 0)
			_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true,
				    chan);
		else
			_tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true,
				    chan);

		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
		tx_counter_tmp -= tx_counter;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] First HWTXcounter=%d path=%d\n",
			    tx_counter_tmp, path);

		/* wait for the codeword report to become ready */
		for (k = 0; k < retry; k++) {
			tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
						    B_TSSI_CWRPT_RDY);
			if (tmp)
				break;

			udelay(30);

			tx_counter_tmp =
				rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
			tx_counter_tmp -= tx_counter;

			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[TSSI PA K] Flow k = %d HWTXcounter=%d path=%d\n",
				    k, tx_counter_tmp, path);
		}

		/* ready bit never asserted: stop hw TX and give up */
		if (k >= retry) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
				    k, path);

			_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
			return false;
		}

		tssi_cw_rpt[j] =
			rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
					      B_TSSI_CWRPT);

		/* stop hw TX before starting the next round */
		_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);

		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
		tx_counter_tmp -= tx_counter;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] Final HWTXcounter=%d path=%d\n",
			    tx_counter_tmp, path);
	}

	return true;
}
3651
/* TSSI alignment calibration (PA K) for one RF path.
 *
 * Transmits at a fixed set of power levels, reads back the TSSI codeword
 * reports, derives per-level alignment offsets relative to the hardware
 * default codewords and writes them into the ALIM registers.  The results
 * are cached per band and per channel so _tssi_alimentk_done() can
 * restore them later without re-running the calibration.
 *
 * BB state touched during the run is backed up first and restored on all
 * exit paths.
 */
static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
	/* BB registers clobbered by the HW-TX sequence; saved/restored below */
	static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
				      0x78e4, 0x49c0, 0x0d18, 0x0d80};
	static const s16 power_2g[4] = {48, 20, 4, -8};
	static const s16 power_5g[4] = {48, 20, 4, 4};
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
	u32 tssi_cw_rpt[RTW8852BT_TSSI_PATH_NR] = {};
	u8 channel = chan->channel;
	u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
	struct rtw8852bx_bb_tssi_bak tssi_bak;
	s32 aliment_diff, tssi_cw_default;
	u32 start_time, finish_time;
	u32 bb_reg_backup[8] = {};
	const s16 *power;
	u8 band;
	bool ok;
	u32 tmp;
	u8 j;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======> %s channel=%d path=%d\n", __func__, channel,
		    path);

	/* NOTE(review): ktime_get_ns() returns u64 nanoseconds but is stored
	 * in u32 locals, and the accumulated value is printed as "ms" below —
	 * the units/width look inconsistent; confirm against the debugfs
	 * consumer of tssi_alimk_time before changing.
	 */
	start_time = ktime_get_ns();

	if (chan->band_type == RTW89_BAND_2G)
		power = power_2g;
	else
		power = power_5g;

	/* map channel to the ALIMK band bucket used for result caching */
	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	rtw8852bx_bb_backup_tssi(rtwdev, phy, &tssi_bak);
	_tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup,
				  ARRAY_SIZE(bb_reg_backup));

	/* averaging configuration used while sampling the TSSI reports */
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);

	ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt, chan);
	if (!ok)
		goto out;

	for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] power[%d]=%d tssi_cw_rpt[%d]=%d\n", j,
			    power[j], j, tssi_cw_rpt[j]);
	}

	/* offset 1: measured CW delta vs. the expected delta implied by the
	 * power step (power is in 0.5 dB units, hence the * 2), referenced
	 * to the sign-extended 9-bit hardware default codeword
	 */
	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][1],
				    _tssi_cw_default_mask[1]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_1 = tssi_cw_rpt[0] - ((power[0] - power[1]) * 2) -
			     tssi_cw_rpt[1] + tssi_cw_default;
	aliment_diff = tssi_alim_offset_1 - tssi_cw_default;

	/* offsets 2 and 3: their defaults shifted by the same diff */
	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][2],
				    _tssi_cw_default_mask[2]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_2 = tssi_cw_default + aliment_diff;

	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][3],
				    _tssi_cw_default_mask[3]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_3 = tssi_cw_default + aliment_diff;

	if (path == RF_PATH_A) {
		/* NOTE(review): B_P1_* field macros are used to pack the
		 * path A value too — presumably the field layouts are
		 * identical for both paths; confirm against reg.h.
		 */
		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);

		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM1, tmp);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2, B_P0_TSSI_ALIM2, tmp);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3, B_P0_TSSI_ALIM31),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM11),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM12),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM13));
	} else {
		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);

		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM1, tmp);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM2, B_P1_TSSI_ALIM2, tmp);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM3, B_P1_TSSI_ALIM31),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM11),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM12),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM13));
	}

	/* cache the programmed ALIM registers per band ... */
	tssi_info->alignment_done[path][band] = true;
	tssi_info->alignment_value[path][band][0] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][1] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][2] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][3] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);

	/* ... and per channel */
	tssi_info->check_backup_aligmk[path][ch_idx] = true;
	tssi_info->alignment_backup_by_ch[path][ch_idx][0] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][1] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][2] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][3] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][0], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM1 + (path << 13),
		    tssi_info->alignment_value[path][band][0]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][1], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM3 + (path << 13),
		    tssi_info->alignment_value[path][band][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][2], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM2 + (path << 13),
		    tssi_info->alignment_value[path][band][2]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][3], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM4 + (path << 13),
		    tssi_info->alignment_value[path][band][3]);

out:
	/* restore everything the HW-TX sequence touched */
	_tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup,
				  ARRAY_SIZE(bb_reg_backup));
	rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak);
	rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0);

	finish_time = ktime_get_ns();
	tssi_info->tssi_alimk_time += finish_time - start_time;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] %s processing time = %d ms\n", __func__,
		    tssi_info->tssi_alimk_time);
}
3812
rtw8852bt_dpk_init(struct rtw89_dev * rtwdev)3813 void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev)
3814 {
3815 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
3816
3817 u8 path;
3818
3819 for (path = 0; path < 2; path++) {
3820 dpk->cur_idx[path] = 0;
3821 dpk->max_dpk_txagc[path] = 0x3F;
3822 }
3823
3824 dpk->is_dpk_enable = true;
3825 dpk->is_dpk_reload_en = false;
3826 _set_dpd_backoff(rtwdev, RTW89_PHY_0);
3827 }
3828
rtw8852bt_rck(struct rtw89_dev * rtwdev)3829 void rtw8852bt_rck(struct rtw89_dev *rtwdev)
3830 {
3831 u8 path;
3832
3833 for (path = 0; path < RF_PATH_NUM_8852BT; path++)
3834 _rck(rtwdev, path);
3835 }
3836
/* Run DAC calibration (DACK) on PHY 0, bracketed by BT-coexistence
 * notifications so Bluetooth traffic yields while the RF-K runs.
 */
void rtw8852bt_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
	_dac_cal(rtwdev, false); /* 'false' flag semantics defined by _dac_cal — TODO confirm */
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
3845
/* Run IQ calibration (IQK) on @phy_idx.  Scheduled TX is stopped and RX
 * mode is awaited before the calibration so the RF front end is quiet;
 * both are restored afterwards, with BT-coex notified around the run.
 */
void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		   enum rtw89_chanctx_idx chanctx_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_iqk_init(rtwdev);
	_iqk(rtwdev, phy_idx, false, chanctx_idx);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
3862
/* Run RX DC-offset calibration (RX DCK) on @phy_idx.  Same quiescing
 * pattern as IQK: stop scheduled TX, wait for RX mode, calibrate, then
 * resume TX; BT-coex is notified around the run.
 */
void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		      enum rtw89_chanctx_idx chanctx_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_rx_dck(rtwdev, phy_idx);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
3878
rtw8852bt_dpk(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx,enum rtw89_chanctx_idx chanctx_idx)3879 void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
3880 enum rtw89_chanctx_idx chanctx_idx)
3881 {
3882 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3883 "[DPK] ****** DPK Start (Ver: 0x%x) ******\n", RTW8852BT_DPK_VER);
3884
3885 if (_dpk_bypass_check(rtwdev, phy_idx, chanctx_idx))
3886 _dpk_force_bypass(rtwdev, phy_idx);
3887 else
3888 _dpk_cal_select(rtwdev, phy_idx, RF_AB, chanctx_idx);
3889 }
3890
/* Periodic DPK tracking entry point; thin wrapper around _dpk_track(). */
void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev)
{
	_dpk_track(rtwdev);
}
3895
/* Full TSSI (transmit signal strength indicator) bring-up for @phy.
 *
 * Disables TSSI and DPK, then per path programs the whole TSSI chain
 * (RF settings, system regs, TX-power control, DCK, thermal table, DAC
 * gain, slope, alignment defaults) and — when @hwtx_en allows hardware
 * TX — runs the alignment calibration with scheduled TX paused.
 * Finally re-enables TSSI and loads the efuse DE values.  The two DPD
 * channel registers are saved up front and restored at the end.
 */
void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		    bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	static const u32 reg[2] = {R_DPD_CH0A, R_DPD_CH0B};
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, chanctx_idx);
	u32 reg_backup[2] = {};
	u32 tx_en;
	u8 i;

	_tssi_backup_bb_registers(rtwdev, phy, reg, reg_backup, 2);
	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	_tssi_dpk_off(rtwdev, phy);
	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
		_tssi_rf_setting(rtwdev, phy, i, chan);
		_tssi_set_sys(rtwdev, phy, i, chan);
		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
		_tssi_set_dck(rtwdev, phy, i);
		_tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
		_tssi_set_dac_gain_tbl(rtwdev, phy, i);
		_tssi_slope_cal_org(rtwdev, phy, i, chan);
		_tssi_alignment_default(rtwdev, phy, i, true, chan);
		_tssi_set_tssi_slope(rtwdev, phy, i);

		/* alignment K needs hardware TX: pause scheduled traffic */
		rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
		_tmac_tx_pause(rtwdev, phy, true);
		if (hwtx_en)
			_tssi_alimentk(rtwdev, phy, i, chan);
		_tmac_tx_pause(rtwdev, phy, false);
		rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy, chan);

	_tssi_reload_bb_registers(rtwdev, phy, reg, reg_backup, 2);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
3940
/* Lightweight TSSI reconfiguration used on channel switches during scan:
 * reprogram only the channel-dependent parts and reuse a cached
 * alignment result for the new band when one exists, falling back to
 * the alignment defaults otherwise.
 */
void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 const struct rtw89_chan *chan)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 channel = chan->channel;
	u8 band;
	u32 i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s phy=%d channel=%d\n", __func__, phy, channel);

	/* map channel to the ALIMK band bucket used for cached results */
	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RTW8852BT_TSSI_PATH_NR; i++) {
		_tssi_rf_setting(rtwdev, phy, i, chan);
		_tssi_set_sys(rtwdev, phy, i, chan);
		_tssi_set_tmeter_tbl(rtwdev, phy, i, chan);

		/* restore cached alignment if we calibrated this band before */
		if (tssi_info->alignment_done[i][band])
			_tssi_alimentk_done(rtwdev, phy, i, chan);
		else
			_tssi_alignment_default(rtwdev, phy, i, true, chan);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy, chan);
}
3979
/* Restore the default TSSI TX-AGC offsets when a scan ends.
 *
 * @enable true means "scan start" — nothing to do then, the function
 * returns early.  On scan end the TSSI tracking offset of both paths is
 * reset to 0xc0 and the offset engine is pulsed (EN 0 -> 1) to latch it,
 * then any cached alignment results are re-applied.
 */
static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
					 enum rtw89_phy_idx phy, bool enable,
					 enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	u8 channel = chan->channel;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
		    __func__, channel);

	if (enable)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));

	/* reset the tracking offset and pulse the enable bit to latch it */
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT, 0xc0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);

	_tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
	_tssi_alimentk_done(rtwdev, phy, RF_PATH_B, chan);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======> %s SCAN_END\n", __func__);
}
4018
/* Scan start/stop notification: forward to the TSSI default-TXAGC
 * handler.  The original if/else passed identical arguments in both
 * branches apart from the enable flag, which is exactly @scan_start —
 * collapsed to a single call.
 */
void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
				enum rtw89_phy_idx phy_idx,
				enum rtw89_chanctx_idx chanctx_idx)
{
	rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, scan_start, chanctx_idx);
}
4028
/* Program the channel-bandwidth field of RF register 0x18 on @path.
 * @dav selects which copy of the register is written: RR_CFGCH when
 * true, RR_CFGCH_V1 otherwise.
 */
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			enum rtw89_bandwidth bw, bool dav)
{
	u32 rf_reg18;
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
	if (rf_reg18 == INV_RF_DATA) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]Invalid RF_0x18 for Path-%d\n", path);
		return;
	}
	rf_reg18 &= ~RR_CFGCH_BW;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_5:
	case RTW89_CHANNEL_WIDTH_10:
	case RTW89_CHANNEL_WIDTH_20:
		/* 5/10 MHz share the 20 MHz RF bandwidth setting */
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
		break;
	default:
		/* unsupported width: leave BW field cleared, log and continue */
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set CH\n");
	}

	/* precedence note: ~(...) & RFREG_MASK is evaluated first, so this
	 * also clears any bits outside RFREG_MASK before BW2 is re-asserted
	 */
	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;
	rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n",
		    bw, path, reg18_addr,
		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
}
4070
_ctrl_bw(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_bandwidth bw)4071 static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4072 enum rtw89_bandwidth bw)
4073 {
4074 _bw_setting(rtwdev, RF_PATH_A, bw, true);
4075 _bw_setting(rtwdev, RF_PATH_B, bw, true);
4076 _bw_setting(rtwdev, RF_PATH_A, bw, false);
4077 _bw_setting(rtwdev, RF_PATH_B, bw, false);
4078 }
4079
/* Write RF 0x18 (channel configuration) on S0 with the LCK trigger
 * armed, then poll the RR_LPF busy bit (1 us interval, 1 ms budget)
 * until the synthesizer settles.
 *
 * Returns true when the poll timed out, false on success.
 */
static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val)
{
	u32 tmp;
	int ret;

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val);

	ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000,
				       false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n");

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
	return !!ret;
}
4096
/* Verify the synthesizer lock (RR_SYNFB_LK) and escalate through three
 * recovery stages while it stays unlocked:
 *   1. reset the SYN MMD block,
 *   2. rewrite RF 0x18 with the LCK trigger armed,
 *   3. power-cycle the synthesizer (via the SYN LUT) and rewrite 0x18.
 * Each stage is only attempted if the previous one left the PLL unlocked.
 */
static void _lck_check(struct rtw89_dev *rtwdev)
{
	u32 tmp;

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n");

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0);
	}

	udelay(10);

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n");

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, tmp);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
	}

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n");

		/* read-back/write-back of POW and SX refreshes the registers */
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp);

		/* cycle synthesizer power through the LUT access mode */
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0);

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, tmp);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n",
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK),
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK));
	}
}
4144
/* Program RF 0x18 on S0 with the LDO override selected, then restore the
 * LDO register and run the lock-check recovery sequence.
 *
 * NOTE(review): _lck_check() is invoked only when the settle poll did
 * NOT time out (!timeout) — confirm that skipping recovery on timeout
 * is intentional.
 */
static void _set_ch(struct rtw89_dev *rtwdev, u32 val)
{
	bool timeout;
	u32 bak;

	bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1);
	timeout = _set_s0_arfc18(rtwdev, val);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak);
	if (!timeout)
		_lck_check(rtwdev);
}
4157
/* Program the channel number (and 2G/5G band bits) into RF register 0x18
 * on @path.  @dav selects the register copy (RR_CFGCH vs RR_CFGCH_V1).
 * The S0 DAV write goes through _set_ch() so the synthesizer lock is
 * supervised; afterwards the lock-state latch is pulsed via RR_LCKST.
 */
static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			u8 central_ch, bool dav)
{
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
	bool is_2g_ch = central_ch <= 14;
	u32 rf_reg18;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
	rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH |
		      RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH);
	rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);

	/* band bits stay cleared for 2 GHz channels */
	if (!is_2g_ch)
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) |
			    FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);

	/* precedence note: ~(...) & RFREG_MASK is evaluated first, so this
	 * also clears any bits outside RFREG_MASK before BW2 is re-asserted
	 */
	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;

	/* only the S0 DAV copy needs the supervised LDO/LCK write path */
	if (path == RF_PATH_A && dav)
		_set_ch(rtwdev, rf_reg18);
	else
		rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);

	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0);
	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n",
		    central_ch, path, reg18_addr,
		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
}
4193
/* Apply the channel setting to both RF paths: first the DAV copy of
 * reg 0x18 on each path, then the DDV copy on each path. Relies on
 * RF_PATH_B immediately following RF_PATH_A in the path enum.
 */
static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch)
{
	enum rtw89_rf_path path;

	for (path = RF_PATH_A; path <= RF_PATH_B; path++)
		_ch_setting(rtwdev, path, central_ch, true);

	for (path = RF_PATH_A; path <= RF_PATH_B; path++)
		_ch_setting(rtwdev, path, central_ch, false);
}
4201
_set_rxbb_bw(struct rtw89_dev * rtwdev,enum rtw89_bandwidth bw,enum rtw89_rf_path path)4202 static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
4203 enum rtw89_rf_path path)
4204 {
4205 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
4206 rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12);
4207
4208 if (bw == RTW89_CHANNEL_WIDTH_20)
4209 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b);
4210 else if (bw == RTW89_CHANNEL_WIDTH_40)
4211 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13);
4212 else if (bw == RTW89_CHANNEL_WIDTH_80)
4213 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb);
4214 else
4215 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3);
4216
4217 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n",
4218 path, rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB));
4219
4220 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
4221 }
4222
_rxbb_bw(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_bandwidth bw)4223 static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4224 enum rtw89_bandwidth bw)
4225 {
4226 u8 kpath, path;
4227
4228 kpath = _kpath(rtwdev, phy);
4229
4230 for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
4231 if (!(kpath & BIT(path)))
4232 continue;
4233
4234 _set_rxbb_bw(rtwdev, bw, path);
4235 }
4236 }
4237
/* Sequence the RF-side channel change: program the channel, then the
 * synthesizer bandwidth, then the RX baseband filter bandwidth.
 *
 * NOTE(review): @band is not used in this body — presumably kept so the
 * signature mirrors the other rtw8852x variants; confirm before removing.
 */
static void rtw8852bt_ctrl_bw_ch(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy, u8 central_ch,
				 enum rtw89_band band, enum rtw89_bandwidth bw)
{
	_ctrl_ch(rtwdev, central_ch);
	_ctrl_bw(rtwdev, phy, bw);
	_rxbb_bw(rtwdev, phy, bw);
}
4246
/* Public entry point: apply @chan's channel, band and bandwidth to the
 * RF front end for the given PHY. Thin wrapper around
 * rtw8852bt_ctrl_bw_ch(), unpacking the rtw89_chan descriptor.
 */
void rtw8852bt_set_channel_rf(struct rtw89_dev *rtwdev,
			      const struct rtw89_chan *chan,
			      enum rtw89_phy_idx phy_idx)
{
	rtw8852bt_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type,
			     chan->band_width);
}
4254