xref: /freebsd/sys/contrib/dev/rtw89/rtw8852a_rfk.c (revision e5b786625f7f82a1fa91e41823332459ea5550f9)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020  Realtek Corporation
3  */
4 
5 #include "coex.h"
6 #include "debug.h"
7 #include "mac.h"
8 #include "phy.h"
9 #include "reg.h"
10 #include "rtw8852a.h"
11 #include "rtw8852a_rfk.h"
12 #include "rtw8852a_rfk_table.h"
13 #include "rtw8852a_table.h"
14 
15 static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
16 {
17 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,  PHY%d\n",
18 		    rtwdev->dbcc_en, phy_idx);
19 
20 	if (!rtwdev->dbcc_en)
21 		return RF_AB;
22 
23 	if (phy_idx == RTW89_PHY_0)
24 		return RF_A;
25 	else
26 		return RF_B;
27 }
28 
/* BB and RF registers saved before RF calibration and restored afterwards
 * by the _rfk_backup_*/_rfk_restore_* helpers below.
 */
static const u32 rtw8852a_backup_bb_regs[] = {0x2344, 0x58f0, 0x78f0};
static const u32 rtw8852a_backup_rf_regs[] = {0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5};
#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852a_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852a_backup_rf_regs)
33 
34 static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
35 {
36 	u32 i;
37 
38 	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
39 		backup_bb_reg_val[i] =
40 			rtw89_phy_read32_mask(rtwdev, rtw8852a_backup_bb_regs[i],
41 					      MASKDWORD);
42 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
43 			    "[IQK]backup bb reg : %x, value =%x\n",
44 			    rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]);
45 	}
46 }
47 
48 static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
49 			       u8 rf_path)
50 {
51 	u32 i;
52 
53 	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
54 		backup_rf_reg_val[i] =
55 			rtw89_read_rf(rtwdev, rf_path,
56 				      rtw8852a_backup_rf_regs[i], RFREG_MASK);
57 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
58 			    "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path,
59 			    rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]);
60 	}
61 }
62 
63 static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev,
64 				u32 backup_bb_reg_val[])
65 {
66 	u32 i;
67 
68 	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
69 		rtw89_phy_write32_mask(rtwdev, rtw8852a_backup_bb_regs[i],
70 				       MASKDWORD, backup_bb_reg_val[i]);
71 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
72 			    "[IQK]restore bb reg : %x, value =%x\n",
73 			    rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]);
74 	}
75 }
76 
77 static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
78 				u32 backup_rf_reg_val[], u8 rf_path)
79 {
80 	u32 i;
81 
82 	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
83 		rtw89_write_rf(rtwdev, rf_path, rtw8852a_backup_rf_regs[i],
84 			       RFREG_MASK, backup_rf_reg_val[i]);
85 
86 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
87 			    "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path,
88 			    rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]);
89 	}
90 }
91 
/* Wait for every RF path selected in @kpath to settle into Rx mode.
 *
 * Polls the RF mode field (reg 0x00, RR_MOD_MASK) of each selected path
 * until it leaves value 2, up to 5 ms per path. The result is only
 * logged; a timeout is not propagated to the caller.
 */
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
	u8 path;
	u32 rf_mode;
	int ret;

	for (path = 0; path < RF_PATH_MAX; path++) {
		if (!(kpath & BIT(path)))
			continue;

		/* poll every 2 us, time out after 5000 us */
		ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
					       2, 5000, false, rtwdev, path, 0x00,
					       RR_MOD_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
			    path, ret);
	}
}
110 
/* Dump all stored DACK results (ADDCK, DADCK, bias and per-code MSBK
 * values for both paths) to the RFK debug log. Read-only; no hardware
 * access.
 */
static void _dack_dump(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;
	u8 t;

	/* ADC and DAC DC offsets plus bias, per path (S0/S1), I then Q */
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->addck_d[0][0], dack->addck_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->addck_d[1][0], dack->addck_d[1][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->dadck_d[0][0], dack->dadck_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->dadck_d[1][0], dack->dadck_d[1][1]);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
		    dack->biask_d[0][0], dack->biask_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
		    dack->biask_d[1][0], dack->biask_d[1][1]);

	/* Per-code MSBK tables: [path][0]=I, [path][1]=Q */
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[0][0][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[0][1][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[1][0][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[1][1][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}
}
158 
/* Apply the AFE (analog front end) init register table. */
static void _afe_init(struct rtw89_dev *rtwdev)
{
	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_afe_init_defs_tbl);
}
163 
/* Latch the S0/S1 ADDCK (ADC DC calibration) results into
 * dack->addck_d[path][0..1] for later reload by _addck_reload().
 *
 * NOTE(review): index [0] is filled from the *_ADDCK_Q field and [1]
 * from *_ADDCK_I, while _dack_dump() labels [0] "ic" and [1] "qc" —
 * likely a register-mask naming quirk; confirm against the vendor
 * reference before relabelling.
 */
static void _addck_backup(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_clr(rtwdev, R_S0_RXDC2, B_S0_RXDC2_SEL);
	dack->addck_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK,
							 B_S0_ADDCK_Q);
	dack->addck_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK,
							 B_S0_ADDCK_I);

	rtw89_phy_write32_clr(rtwdev, R_S1_RXDC2, B_S1_RXDC2_SEL);
	dack->addck_d[1][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK,
							 B_S1_ADDCK_Q);
	dack->addck_d[1][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK,
							 B_S1_ADDCK_I);
}
180 
/* Write the ADDCK results saved by _addck_backup() into the Rx DC
 * compensation registers of both paths and set the enable bits.
 *
 * The second value of each pair is split across two registers: bits
 * above 6 go to *_RXDC2_Q2, the low 6 bits to *_RXDC_Q.
 * NOTE(review): S0 sets B_S0_RXDC2_MEN while S1 sets B_S1_RXDC2_EN —
 * differently named masks; presumably the same function per path.
 */
static void _addck_reload(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_I, dack->addck_d[0][0]);
	rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2, B_S0_RXDC2_Q2,
			       (dack->addck_d[0][1] >> 6));
	rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_Q,
			       (dack->addck_d[0][1] & 0x3f));
	rtw89_phy_write32_set(rtwdev, R_S0_RXDC2, B_S0_RXDC2_MEN);
	rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_I, dack->addck_d[1][0]);
	rtw89_phy_write32_mask(rtwdev, R_S1_RXDC2, B_S1_RXDC2_Q2,
			       (dack->addck_d[1][1] >> 6));
	rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_Q,
			       (dack->addck_d[1][1] & 0x3f));
	rtw89_phy_write32_set(rtwdev, R_S1_RXDC2, B_S1_RXDC2_EN);
}
198 
/* Read back the S0 DACK results (per-code MSBK values, bias, DADCK)
 * into dack->*_d[0][..]. Leaves P0 debug mode (B_P0_NRBW_DBG) enabled;
 * _dack_s0() clears it after reload.
 */
static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_set(rtwdev, R_S0_DACKI, B_S0_DACKI_EN);
	rtw89_phy_write32_set(rtwdev, R_S0_DACKQ, B_S0_DACKQ_EN);
	rtw89_phy_write32_set(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG);

	/* select each code index via the *_AR field, then read its result */
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, R_S0_DACKI, B_S0_DACKI_AR, i);
		dack->msbk_d[0][0][i] =
			(u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI7, B_S0_DACKI7_K);
		rtw89_phy_write32_mask(rtwdev, R_S0_DACKQ, B_S0_DACKQ_AR, i);
		dack->msbk_d[0][1][i] =
			(u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ7, B_S0_DACKQ7_K);
	}
	dack->biask_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI2,
							 B_S0_DACKI2_K);
	dack->biask_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ2,
							 B_S0_DACKQ2_K);
	/* DADCK readings carry a fixed +8 offset which is subtracted here */
	dack->dadck_d[0][0] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI8,
							B_S0_DACKI8_K) - 8;
	dack->dadck_d[0][1] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ8,
							B_S0_DACKQ8_K) - 8;
}
225 
/* S1 counterpart of _dack_backup_s0(): read back MSBK, bias and DADCK
 * results into dack->*_d[1][..]. Leaves P1 debug mode (B_P1_DBGMOD_ON)
 * enabled; _dack_s1() clears it after reload.
 *
 * NOTE(review): the I-path MSBK read uses B_S1_DACKI_K against
 * R_S1_DACKI7 (S0 uses B_S0_DACKI7_K) — presumably equivalent masks;
 * verify against the register definitions.
 */
static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_set(rtwdev, R_S1_DACKI, B_S1_DACKI_EN);
	rtw89_phy_write32_set(rtwdev, R_S1_DACKQ, B_S1_DACKQ_EN);
	rtw89_phy_write32_set(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON);

	/* select each code index via the *_AR field, then read its result */
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, R_S1_DACKI, B_S1_DACKI_AR, i);
		dack->msbk_d[1][0][i] =
			(u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI7, B_S1_DACKI_K);
		rtw89_phy_write32_mask(rtwdev, R_S1_DACKQ, B_S1_DACKQ_AR, i);
		dack->msbk_d[1][1][i] =
			(u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ7, B_S1_DACKQ7_K);
	}
	dack->biask_d[1][0] =
		(u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI2, B_S1_DACKI2_K);
	dack->biask_d[1][1] =
		(u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ2, B_S1_DACKQ2_K);
	/* DADCK readings carry a fixed +8 offset which is subtracted here */
	dack->dadck_d[1][0] =
		(u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI8, B_S1_DACKI8_K) - 8;
	dack->dadck_d[1][1] =
		(u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ8, B_S1_DACKQ8_K) - 8;
}
252 
253 static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
254 				 enum rtw89_rf_path path, u8 index)
255 {
256 	struct rtw89_dack_info *dack = &rtwdev->dack;
257 	u32 tmp = 0, tmp_offset, tmp_reg;
258 	u8 i;
259 	u32 idx_offset, path_offset;
260 
261 	if (index == 0)
262 		idx_offset = 0;
263 	else
264 		idx_offset = 0x50;
265 
266 	if (path == RF_PATH_A)
267 		path_offset = 0;
268 	else
269 		path_offset = 0x2000;
270 
271 	tmp_offset = idx_offset + path_offset;
272 	/* msbk_d: 15/14/13/12 */
273 	tmp = 0x0;
274 	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
275 		tmp |= dack->msbk_d[path][index][i + 12] << (i * 8);
276 	tmp_reg = 0x5e14 + tmp_offset;
277 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
278 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
279 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
280 	/* msbk_d: 11/10/9/8 */
281 	tmp = 0x0;
282 	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
283 		tmp |= dack->msbk_d[path][index][i + 8] << (i * 8);
284 	tmp_reg = 0x5e18 + tmp_offset;
285 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
286 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
287 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
288 	/* msbk_d: 7/6/5/4 */
289 	tmp = 0x0;
290 	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
291 		tmp |= dack->msbk_d[path][index][i + 4] << (i * 8);
292 	tmp_reg = 0x5e1c + tmp_offset;
293 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
294 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
295 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
296 	/* msbk_d: 3/2/1/0 */
297 	tmp = 0x0;
298 	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
299 		tmp |= dack->msbk_d[path][index][i] << (i * 8);
300 	tmp_reg = 0x5e20 + tmp_offset;
301 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
302 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
303 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
304 	/* dadak_d/biask_d */
305 	tmp = 0x0;
306 	tmp = (dack->biask_d[path][index] << 22) |
307 	       (dack->dadck_d[path][index] << 14);
308 	tmp_reg = 0x5e24 + tmp_offset;
309 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
310 }
311 
312 static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
313 {
314 	u8 i;
315 
316 	for (i = 0; i < 2; i++)
317 		_dack_reload_by_path(rtwdev, path, i);
318 
319 	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
320 				 &rtw8852a_rfk_dack_reload_defs_a_tbl,
321 				 &rtw8852a_rfk_dack_reload_defs_b_tbl);
322 }
323 
324 #define ADDC_T_AVG 100
325 static void _check_addc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
326 {
327 	s32 dc_re = 0, dc_im = 0;
328 	u32 tmp;
329 	u32 i;
330 
331 	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
332 				 &rtw8852a_rfk_check_addc_defs_a_tbl,
333 				 &rtw8852a_rfk_check_addc_defs_b_tbl);
334 
335 	for (i = 0; i < ADDC_T_AVG; i++) {
336 		tmp = rtw89_phy_read32_mask(rtwdev, R_DBG32_D, MASKDWORD);
337 		dc_re += sign_extend32(FIELD_GET(0xfff000, tmp), 11);
338 		dc_im += sign_extend32(FIELD_GET(0xfff, tmp), 11);
339 	}
340 
341 	dc_re /= ADDC_T_AVG;
342 	dc_im /= ADDC_T_AVG;
343 
344 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
345 		    "[DACK]S%d,dc_re = 0x%x,dc_im =0x%x\n", path, dc_re, dc_im);
346 }
347 
/* Run ADC DC calibration (ADDCK) on both paths.
 *
 * Per path: apply the reset table, log the pre-cal DC, trigger the
 * calibration, poll the done bit (0x1e00 / 0x3e00 bit 0, up to 10 ms),
 * log the post-cal DC and apply the restore table. A timeout is
 * recorded in dack->addck_timeout[] but does not abort the sequence.
 */
static void _addck(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	/* S0 */
	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_a_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_a_tbl);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x1e00, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
		dack->addck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_a_tbl);

	/* S1 */
	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_b_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_b_tbl);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x3e00, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
		dack->addck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_b_tbl);
}
394 
/* Log the DC seen after DACK for @path: apply the per-path "f" table,
 * reuse _check_addc() for the averaged DC readout, then apply the "r"
 * table (presumably setup/restore pairs — named _f_/_r_ in the table
 * definitions).
 */
static void _check_dadc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_rfk_check_dadc_defs_f_a_tbl,
				 &rtw8852a_rfk_check_dadc_defs_f_b_tbl);

	_check_addc(rtwdev, path);

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_rfk_check_dadc_defs_r_a_tbl,
				 &rtw8852a_rfk_check_dadc_defs_r_b_tbl);
}
407 
/* Run DAC DC calibration on S0: MSBK stage then DADCK stage, each
 * polled for completion (10 ms max per register; timeouts are recorded
 * in dack-> but not fatal). Results are checked, backed up into
 * dack->*_d[0][..] and reloaded into hardware.
 */
static void _dack_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_a_tbl);

	/* MSBK done: 0x5e28/0x5e78 bit 15 */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x5e28, BIT(15));
	ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
					false, rtwdev, 0x5e78, BIT(15));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK timeout\n");
		dack->msbk_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_a_tbl);

	/* DADCK done: 0x5e48/0x5e98 bit 17 */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x5e48, BIT(17));
	ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
					false, rtwdev, 0x5e98, BIT(17));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADACK timeout\n");
		dack->dadck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_a_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");
	_check_dadc(rtwdev, RF_PATH_A);

	_dack_backup_s0(rtwdev);
	_dack_reload(rtwdev, RF_PATH_A);

	/* leave the debug mode enabled by _dack_backup_s0() */
	rtw89_phy_write32_clr(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG);
}
448 
/* S1 counterpart of _dack_s0(): MSBK then DADCK stages with completion
 * polling, verification, backup into dack->*_d[1][..] and reload.
 */
static void _dack_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_b_tbl);

	/* MSBK done: 0x7e28/0x7e78 bit 15 */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x7e28, BIT(15));
	ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
					false, rtwdev, 0x7e78, BIT(15));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK timeout\n");
		dack->msbk_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_b_tbl);

	/* DADCK done: 0x7e48/0x7e98 bit 17 */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x7e48, BIT(17));
	ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
					false, rtwdev, 0x7e98, BIT(17));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DADCK timeout\n");
		dack->dadck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_b_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");
	_check_dadc(rtwdev, RF_PATH_B);

	_dack_backup_s1(rtwdev);
	_dack_reload(rtwdev, RF_PATH_B);

	/* leave the debug mode enabled by _dack_backup_s1() */
	rtw89_phy_write32_clr(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON);
}
489 
/* Run the full DAC DC calibration (MSBK + DADCK) on both paths. */
static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
	_dack_s1(rtwdev);
}
495 
/* Top-level DAC calibration flow: AFE init, ADDCK, then DACK on both
 * paths, with BTC one-shot notifications around each hardware stage.
 * RF mode (RR_MOD) and reset (RR_RSV1) state is saved/forced around the
 * calibration and restored afterwards.
 *
 * NOTE(review): @force is currently unused — the calibration always
 * runs; confirm whether a skip-if-already-done check was intended.
 */
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 rf0_0, rf1_0;
	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);

	dack->dack_done = false;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
	/* save current RF mode of both paths for restore at the end */
	rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
	rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
	_afe_init(rtwdev);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
	/* force mode 0x30001 for ADDCK */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x30001);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x30001);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
	_addck(rtwdev);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
	_addck_backup(rtwdev);
	_addck_reload(rtwdev);
	/* force mode 0x40001 for DACK */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x40001);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x40001);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
	_dack(rtwdev);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
	_dack_dump(rtwdev);
	dack->dack_done = true;
	/* restore RF mode and release reset */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
	dack->dack_cnt++;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}
533 
#define RTW8852A_NCTL_VER 0xd
#define RTW8852A_IQK_VER 0x2a
/* number of RF paths handled by IQK; sizes the CFIR dump tables */
#define RTW8852A_IQK_SS 2
#define RTW8852A_IQK_THR_REK 8
/* CFIR coefficient groups per path */
#define RTW8852A_IQK_CFIR_GROUP_NR 4

/* One-shot IQK sub-command selector; each ID maps to a distinct NCTL
 * command word in _iqk_one_shot().
 */
enum rtw8852a_iqk_type {
	ID_TXAGC,
	ID_FLOK_COARSE,
	ID_FLOK_FINE,
	ID_TXK,
	ID_RXAGC,
	ID_RXK,
	ID_NBTXK,
	ID_NBRXK,
};
550 
551 static void _iqk_read_fft_dbcc0(struct rtw89_dev *rtwdev, u8 path)
552 {
553 	u8 i = 0x0;
554 	u32 fft[6] = {0x0};
555 
556 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
557 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00160000);
558 	fft[0] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
559 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00170000);
560 	fft[1] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
561 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00180000);
562 	fft[2] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
563 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00190000);
564 	fft[3] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
565 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001a0000);
566 	fft[4] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
567 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001b0000);
568 	fft[5] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
569 	for (i = 0; i < 6; i++)
570 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x,fft[%x]= %x\n",
571 			    path, i, fft[i]);
572 }
573 
/* Dump the 0x18 IQK X/Y/M debug words for @path, then restore the
 * NCTL/TXIQC registers to their default values.
 */
static void _iqk_read_xym_dbcc0(struct rtw89_dev *rtwdev, u8 path)
{
	u8 i = 0x0;
	u32 tmp = 0x0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX, 0x1);

	/* step the NCTL pointer through entries 0xc0..0xd7 and read each
	 * result from the per-path TXIQC register (path selects 0x8x38)
	 */
	for (i = 0x0; i < 0x18; i++) {
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N2, MASKDWORD, 0x000000c0 + i);
		rtw89_phy_write32_clr(rtwdev, R_NCTL_N2, MASKDWORD);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = %x\n",
			    path, BIT(path), tmp);
		udelay(1);
	}
	rtw89_phy_write32_clr(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX);
	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 0x40000000);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N2, MASKDWORD, 0x80010100);
	udelay(1);
}
596 
/* Dump the Tx CFIR coefficients of @path/@group to the debug log.
 *
 * Reads 0x0d consecutive words from the per-path/per-group base address,
 * then the four fixed per-path coefficient registers, and finally the
 * report register selected via B_KIP_RPT_SEL. Out-of-range @path/@group
 * are rejected with a warning (they would index past base_addrs[]).
 */
static void _iqk_read_txcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path,
				   u8 group)
{
	static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = {
		{0x8f20, 0x8f54, 0x8f88, 0x8fbc},
		{0x9320, 0x9354, 0x9388, 0x93bc},
	};
	u8 idx = 0x0;
	u32 tmp = 0x0;
	u32 base_addr;

	if (path >= RTW8852A_IQK_SS) {
		rtw89_warn(rtwdev, "cfir path %d out of range\n", path);
		return;
	}
	if (group >= RTW8852A_IQK_CFIR_GROUP_NR) {
		rtw89_warn(rtwdev, "cfir group %d out of range\n", group);
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	/* enable coefficient readout */
	rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001);

	base_addr = base_addrs[path][group];

	for (idx = 0; idx < 0x0d; idx++) {
		tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] %x = %x\n",
			    base_addr + (idx << 2), tmp);
	}

	if (path == 0x0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C0, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f50 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C1, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f84 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C2, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fb8 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C3, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fec = %x\n", tmp);
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C0, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9350 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C1, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9384 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C2, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93b8 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C3, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93ec = %x\n", tmp);
	}
	/* disable readout, then fetch the 0xc report */
	rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xc);
	udelay(1);
	tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path,
		    BIT(path), tmp);
}
657 
/* Rx counterpart of _iqk_read_txcfir_dbcc0(): dump the Rx CFIR
 * coefficients of @path/@group (0x10 words here, report selector 0xd,
 * and no udelay before the report read — mirrors the original flow).
 */
static void _iqk_read_rxcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path,
				   u8 group)
{
	static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = {
		{0x8d00, 0x8d44, 0x8d88, 0x8dcc},
		{0x9100, 0x9144, 0x9188, 0x91cc},
	};
	u8 idx = 0x0;
	u32 tmp = 0x0;
	u32 base_addr;

	if (path >= RTW8852A_IQK_SS) {
		rtw89_warn(rtwdev, "cfir path %d out of range\n", path);
		return;
	}
	if (group >= RTW8852A_IQK_CFIR_GROUP_NR) {
		rtw89_warn(rtwdev, "cfir group %d out of range\n", group);
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	/* enable coefficient readout */
	rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001);

	base_addr = base_addrs[path][group];
	for (idx = 0; idx < 0x10; idx++) {
		tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]%x = %x\n",
			    base_addr + (idx << 2), tmp);
	}

	if (path == 0x0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C0, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d40 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C1, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d84 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C2, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8dc8 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C3, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8e0c = %x\n", tmp);
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C0, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9140 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C1, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9184 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C2, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x91c8 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C3, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x920c = %x\n", tmp);
	}
	/* disable readout, then fetch the 0xd report */
	rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xd);
	tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path,
		    BIT(path), tmp);
}
716 
/* Dump the IQK Rx SRAM DC contents: addresses 0x00..0x9f are swept
 * twice, first reading the DCI field, then DCQ, from R_RPT_COM.
 * The SRAM access registers are cleared on exit.
 * NOTE(review): @path only labels the log context here; the register
 * addresses are not path-offset — confirm this is intended for DBCC0.
 */
static void _iqk_sram(struct rtw89_dev *rtwdev, u8 path)
{
	u32 tmp = 0x0;
	u32 i = 0x0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00020000);
	rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);

	for (i = 0; i <= 0x9f; i++) {
		rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 + i);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp);
	}

	for (i = 0; i <= 0x9f; i++) {
		rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 + i);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp);
	}
	rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX2, MASKDWORD);
	rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX, MASKDWORD);
}
742 
/* Prepare @path for Rx IQK: reset the analog front end and ADC FIFO,
 * select the band-specific RXK RF configuration, mirror the channel
 * register into RR_RSV4 and power-cycle the RXK PLL, then let it settle.
 */
static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp = 0x0;

	rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
	udelay(1);
	/* pulse the analog filter reset */
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x0);
	udelay(1);
	/* toggle the ADC FIFO reset */
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000);

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2);
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2);
		rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x5);
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
		break;
	default:
		break;
	}
	/* copy the channel config into RR_RSV4 for the RXK engine */
	tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1);
	/* give the RXK PLL time to lock */
	fsleep(128);
}
781 
/* Wait for a one-shot IQK command to complete.
 *
 * Polls the NCTL handshake byte at 0xbff8 until it reads 0x55 (up to
 * ~8.2 ms), clears it, and logs the 0x8008 report word.
 *
 * NOTE(review): always returns false ("no failure"), even on timeout —
 * callers cannot see a timed-out calibration through the return value;
 * only the debug log records it. Confirm before changing, since callers
 * may rely on this.
 */
static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
{
	u32 tmp;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 1, 8200,
				       false, rtwdev, 0xbff8, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n");
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
	tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp);

	return false;
}
800 
/* Issue a single one-shot NCTL calibration command of type @ktype on @path
 * and wait for it to complete. Per-type front-end setup (RFC control bit,
 * IQK DIF4 tone settings) is applied before triggering; optional XYM/FFT/
 * SRAM/CFIR debug dumps run afterwards when enabled in iqk_info.
 * Returns the completion-poll failure flag (see _iqk_check_cal).
 */
static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
			  enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail = false;
	u32 iqk_cmd = 0x0;
	u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path);
	u32 addr_rfc_ctl = 0x0;

	/* Per-path RFC control register: 0x5864 (A) / 0x7864 (B). */
	if (path == RF_PATH_A)
		addr_rfc_ctl = 0x5864;
	else
		addr_rfc_ctl = 0x7864;

	/* Tell the BT-coex core a WLAN RFK one-shot is starting. */
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
	/* Build the NCTL command word; low nibble selects the command class,
	 * bit (4 + path) selects the RF path.
	 */
	switch (ktype) {
	case ID_TXAGC:
		iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_FLOK_COARSE:
		rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
		iqk_cmd = 0x108 | (1 << (4 + path));
		break;
	case ID_FLOK_FINE:
		rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
		iqk_cmd = 0x208 | (1 << (4 + path));
		break;
	case ID_TXK:
		rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025);
		/* bits[11:8]: 0x8 + bandwidth index. */
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_RXAGC:
		iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_RXK:
		rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
		/* bits[11:8]: 0xb + bandwidth index. */
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_NBTXK:
		rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025);
		iqk_cmd = 0x308 | (1 << (4 + path));
		break;
	case ID_NBRXK:
		rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
		iqk_cmd = 0x608 | (1 << (4 + path));
		break;
	default:
		/* Unknown calibration type: nothing was triggered. */
		return false;
	}

	/* Trigger the command (bit 0 = go) and poll for completion. */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
	rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN);
	udelay(1);
	fail = _iqk_check_cal(rtwdev, path, ktype);
	/* Optional debug captures, gated by iqk_info enables. */
	if (iqk_info->iqk_xym_en)
		_iqk_read_xym_dbcc0(rtwdev, path);
	if (iqk_info->iqk_fft_en)
		_iqk_read_fft_dbcc0(rtwdev, path);
	if (iqk_info->iqk_sram_en)
		_iqk_sram(rtwdev, path);
	if (iqk_info->iqk_cfir_en) {
		if (ktype == ID_TXK) {
			_iqk_read_txcfir_dbcc0(rtwdev, path, 0x0);
			_iqk_read_txcfir_dbcc0(rtwdev, path, 0x1);
			_iqk_read_txcfir_dbcc0(rtwdev, path, 0x2);
			_iqk_read_txcfir_dbcc0(rtwdev, path, 0x3);
		} else {
			_iqk_read_rxcfir_dbcc0(rtwdev, path, 0x0);
			_iqk_read_rxcfir_dbcc0(rtwdev, path, 0x1);
			_iqk_read_rxcfir_dbcc0(rtwdev, path, 0x2);
			_iqk_read_rxcfir_dbcc0(rtwdev, path, 0x3);
		}
	}

	/* Always drop the RFC control bit again before returning. */
	rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);

	return fail;
}
889 
/* Wideband RX IQK: run the one-shot RXK over four RX gain groups,
 * recording each group's fail bit in R_IQKINF. Gain/attenuator tables
 * differ per band (2G vs 5G). Marks the path as wideband-RX-calibrated
 * and always reports overall success.
 */
static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	/* Per-group RX gain and attenuator settings, 5G (a) and 2G (g). */
	static const u32 rxgn_a[4] = {0x18C, 0x1A0, 0x28C, 0x2A0};
	static const u32 attc2_a[4] = {0x0, 0x0, 0x07, 0x30};
	static const u32 attc1_a[4] = {0x7, 0x5, 0x1, 0x1};
	static const u32 rxgn_g[4] = {0x1CC, 0x1E0, 0x2CC, 0x2E0};
	static const u32 attc2_g[4] = {0x0, 0x15, 0x3, 0x1a};
	static const u32 attc1_g[4] = {0x1, 0x0, 0x1, 0x0};
	u8 gp = 0x0;
	bool fail = false;
	u32 rf0 = 0x0;

	for (gp = 0; gp < 0x4; gp++) {
		/* Program the band-specific RX gain/attenuation for this group. */
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_g[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, attc2_g[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, attc1_g[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_a[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, attc2_a[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, attc1_a[gp]);
			break;
		default:
			break;
		}
		/* Mirror the RF mode word into the IQK engine and select the
		 * CFIR LUT entry for this group, then fire the one-shot RXK.
		 */
		rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
		rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI,
				       rf0 | iqk_info->syn1to2);
		rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100);
		rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR);
		rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
		rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
		rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN, 0x1);
		rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
		/* Record this group's fail bit in the status register. */
		rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
	}

	/* Power down the band-specific RXK blocks again. */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0);
		break;
	default:
		break;
	}
	/* Wideband result: unity NB coefficient, enable hardware RX CFIR. */
	iqk_info->nb_rxcfir[path] = 0x40000000;
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
			       B_IQK_RES_RXCFIR, 0x5);
	iqk_info->is_wb_rxiqk[path] = true;
	return false;
}
953 
/* Narrow-band RX IQK: single one-shot RXK at one fixed gain setting
 * (group 0). On success, the resulting RX IQC coefficient is cached in
 * nb_rxcfir (low bits forced to 0x2); on failure a unity default is used.
 * Returns the one-shot fail flag.
 */
static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
		       enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 group = 0x0;
	u32 rf0 = 0x0, tmp = 0x0;
	/* Fixed single-point gain/attenuation, 5G (a) and 2G (g). */
	u32 idxrxgain_a = 0x1a0;
	u32 idxattc2_a = 0x00;
	u32 idxattc1_a = 0x5;
	u32 idxrxgain_g = 0x1E0;
	u32 idxattc2_g = 0x15;
	u32 idxattc1_g = 0x0;
	bool fail = false;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_g);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, idxattc2_g);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, idxattc1_g);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_a);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, idxattc2_a);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, idxattc1_a);
		break;
	default:
		break;
	}
	/* Mirror RF mode into the IQK engine, select LUT group 0 and fire. */
	rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
	rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI,
			       rf0 | iqk_info->syn1to2);
	rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100);
	rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR);
	rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
	rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
			       B_CFIR_LUT_GP, group);
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);

	/* Power down the band-specific RXK blocks again. */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0);
		break;
	default:
		break;
	}
	if (!fail) {
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
		iqk_info->nb_rxcfir[path] = tmp | 0x2;
	} else {
		/* Fall back to a unity coefficient. */
		iqk_info->nb_rxcfir[path] = 0x40000002;
	}
	return fail;
}
1017 
/* Configure CFIR system and RX ADC clocking for RX IQK.
 * 80 MHz channels use a faster ADC clock setting than 20/40 MHz.
 */
static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8),
				       MASKDWORD, 0x4d000a08);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
				       B_P0_RXCK_VAL, 0x2);
		rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
		rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x1);
	} else {
		/* 20/40 MHz: slower RX clock divider, ADC value bit cleared. */
		rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8),
				       MASKDWORD, 0x44000a08);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
				       B_P0_RXCK_VAL, 0x1);
		rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
		rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON);
		rtw89_phy_write32_clr(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL);
	}
}
1041 
/* Wideband TX IQK: run the one-shot TXK over four TX gain groups,
 * recording each group's fail bit in R_IQKINF. Marks the path as
 * wideband-TX-calibrated and always reports overall success.
 */
static bool _txk_group_sel(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy_idx, u8 path)
{
	/* Per-group TX gain, IQ swing (itqt) and 2G attenuator settings. */
	static const u32 a_txgain[4] = {0xE466, 0x646D, 0xE4E2, 0x64ED};
	static const u32 g_txgain[4] = {0x60e8, 0x60f0, 0x61e8, 0x61ED};
	static const u32 a_itqt[4] = {0x12, 0x12, 0x12, 0x1b};
	static const u32 g_itqt[4] = {0x09, 0x12, 0x12, 0x12};
	static const u32 g_attsmxr[4] = {0x0, 0x1, 0x1, 0x1};
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail = false;
	u8 gp = 0x0;
	u32 tmp = 0x0;

	for (gp = 0x0; gp < 0x4; gp++) {
		/* Band-specific TX gain/attenuation and IQ swing setup. */
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
					       B_RFGAIN_BND, 0x08);
			rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL,
				       g_txgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1,
				       g_attsmxr[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0,
				       g_attsmxr[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, g_itqt[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
					       B_RFGAIN_BND, 0x04);
			rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL,
				       a_txgain[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, a_itqt[gp]);
			break;
		default:
			break;
		}
		/* Select the CFIR LUT entry for this group and fire the TXK. */
		rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
		rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
		rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP, gp);
		rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
		/* Record this group's fail bit in the status register. */
		rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(8 + gp + path * 4), fail);
	}

	/* Wideband result: unity NB coefficient, enable hardware TX CFIR. */
	iqk_info->nb_txcfir[path] = 0x40000000;
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
			       B_IQK_RES_TXCFIR, 0x5);
	iqk_info->is_wb_txiqk[path] = true;
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n", path,
		    BIT(path), tmp);
	return false;
}
1099 
/* Narrow-band TX IQK: single one-shot TXK at one fixed gain (LUT group 2).
 * On success, the resulting TX IQC coefficient is cached in nb_txcfir
 * (low bits forced to 0x2); on failure a unity default is used.
 * Returns the one-shot fail flag.
 */
static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
		       enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 group = 0x2;
	/* Fixed single-point TX gain and IQ swing per band. */
	u32 a_mode_txgain = 0x64e2;
	u32 g_mode_txgain = 0x61e8;
	u32 attsmxr = 0x1;
	u32 itqt = 0x12;
	u32 tmp = 0x0;
	bool fail = false;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
				       B_RFGAIN_BND, 0x08);
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, g_mode_txgain);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, attsmxr);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, attsmxr);
		break;
	case RTW89_BAND_5G:
		rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
				       B_RFGAIN_BND, 0x04);
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, a_mode_txgain);
		break;
	default:
		break;
	}
	/* Select CFIR LUT group 2, program IQ swing and fire the NB TXK. */
	rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
	rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
	rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, group);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
	if (!fail) {
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
		iqk_info->nb_txcfir[path] = tmp | 0x2;
	} else {
		/* Fall back to a unity coefficient. */
		iqk_info->nb_txcfir[path] = 0x40000002;
	}
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n", path,
		    BIT(path), tmp);
	return fail;
}
1146 
1147 static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias)
1148 {
1149 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1150 
1151 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ibias = %x\n", path, ibias);
1152 	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x2);
1153 	if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
1154 		rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x0);
1155 	else
1156 		rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x1);
1157 	rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ibias);
1158 	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
1159 }
1160 
1161 static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
1162 {
1163 	bool is_fail = false;
1164 	u32 tmp = 0x0;
1165 	u32 core_i = 0x0;
1166 	u32 core_q = 0x0;
1167 
1168 	tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
1169 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK][FineLOK] S%x, 0x58 = 0x%x\n",
1170 		    path, tmp);
1171 	core_i = FIELD_GET(RR_TXMO_COI, tmp);
1172 	core_q = FIELD_GET(RR_TXMO_COQ, tmp);
1173 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, i = 0x%x\n", path, core_i);
1174 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, q = 0x%x\n", path, core_q);
1175 
1176 	if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
1177 		is_fail = true;
1178 	return is_fail;
1179 }
1180 
/* LO leakage calibration (LOK) for one path: run a coarse then a fine
 * FLOK one-shot with a band-specific TX gain and IQ swing, recording each
 * stage's fail flag, then sanity-check the resulting mixer offset codes.
 * Returns true when the fine-tune result is out of range.
 */
static bool _iqk_lok(struct rtw89_dev *rtwdev,
		     enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 rf0 = 0x0;
	u8 itqt = 0x12;
	bool fail = false;
	bool tmp = false;

	/* Band-specific TX gain and IQ swing for the LOK tone. */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe5e0);
		itqt = 0x09;
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe4e0);
		itqt = 0x12;
		break;
	default:
		break;
	}
	/* Mirror the RF mode word into the IQK engine, select LUT group 0. */
	rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
	rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF1, B_IQK_DIF1_TXPI,
			       rf0 | iqk_info->syn1to2);
	rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, 0x0);
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
	/* Coarse pass, then settle, then fine pass. */
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
	iqk_info->lok_cor_fail[0][path] = tmp;
	fsleep(10);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
	iqk_info->lok_fin_fail[0][path] = tmp;
	fail = _lok_finetune_check(rtwdev, path);
	return fail;
}
1222 
/* Prepare the analog front end and RF chain of one path for TX IQK/LOK:
 * power the ADC/DAC path, reset the ADC FIFO, then apply the band-specific
 * LNA/attenuator/LOK-LUT setup and put the RF chain into the TXK mode
 * (RR_MOD = 0x403e0 | synthesizer selection).
 */
static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	/* Debug-mode NRBW and analog power-up sequence for the data path. */
	rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
	udelay(1);
	/* Pulse the ADC FIFO reset. */
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000);
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RR_XALNA2_SW, 0x00);
		rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x000);
		rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200)<
		rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x403e0 | iqk_info->syn1to2);
		udelay(1);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x00);
		rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f);
		rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x7);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x100);
		rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x403e0 | iqk_info->syn1to2);
		udelay(1);
		break;
	default:
		break;
	}
}
1274 
/* Configure the CFIR system clocking of one path for TX IQK. */
static void _iqk_txclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
}
1279 
/* Publish per-path IQK results: log the LOK/TXK/RXK fail flags, mirror
 * them into R_IQKINF (one nibble per path), snapshot the resulting
 * enable/TX/RX coefficient registers into iqk_info, and maintain the
 * run and fail counters in R_IQKINF2.
 */
static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp = 0x0;
	bool flag = 0x0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %lu\n", path,
		    ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]));
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail= %d\n", path,
		    iqk_info->lok_cor_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail= %d\n", path,
		    iqk_info->lok_fin_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path,
		    iqk_info->iqk_tx_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail= %d,\n", path,
		    iqk_info->iqk_rx_fail[0][path]);
	/* Fail-flag nibble layout per path: bit0 LOK-coarse, bit1 LOK-fine,
	 * bit2 TX IQK, bit3 RX IQK.
	 */
	flag = iqk_info->lok_cor_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(0) << (path * 4), flag);
	flag = iqk_info->lok_fin_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(1) << (path * 4), flag);
	flag = iqk_info->iqk_tx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(2) << (path * 4), flag);
	flag = iqk_info->iqk_rx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(3) << (path * 4), flag);

	/* Snapshot the calibration results for later restore/compare. */
	tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
	iqk_info->bp_iqkenable[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_txkresult[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_rxkresult[path] = tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT,
			       (u8)iqk_info->iqk_times);

	/* Any set fail bit for this path bumps the fail counter. */
	tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, 0x0000000f << (path * 4));
	if (tmp != 0x0)
		iqk_info->iqk_fail_cnt++;
	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x00ff0000 << (path * 4),
			       iqk_info->iqk_fail_cnt);
}
1322 
/* Run the full IQK sequence for one path: LOK (up to three attempts with
 * increasing bias), then TX IQK, then RX IQK. Narrow-band variants are
 * used when configured, and for RX also under DBCC or on 2 GHz.
 * Results are published via _iqk_info_iqk().
 */
static
void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool lok_is_fail = false;
	u8 ibias = 0x1;
	u8 i = 0;

	_iqk_txclk_setting(rtwdev, path);

	/* Retry LOK up to three times, stepping the bias each attempt. */
	for (i = 0; i < 3; i++) {
		_lok_res_table(rtwdev, path, ibias++);
		_iqk_txk_setting(rtwdev, path);
		lok_is_fail = _iqk_lok(rtwdev, phy_idx, path);
		if (!lok_is_fail)
			break;
	}
	if (iqk_info->is_nbiqk)
		iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
	else
		iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);

	_iqk_rxclk_setting(rtwdev, path);
	_iqk_rxk_setting(rtwdev, path);
	/* Wideband RX IQK is only run in non-DBCC 5 GHz operation. */
	if (iqk_info->is_nbiqk || rtwdev->dbcc_en || iqk_info->iqk_band[path] == RTW89_BAND_2G)
		iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
	else
		iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);

	_iqk_info_iqk(rtwdev, phy_idx, path);
}
1354 
1355 static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
1356 			     enum rtw89_phy_idx phy, u8 path)
1357 {
1358 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1359 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
1360 	u32 reg_rf18 = 0x0, reg_35c = 0x0;
1361 	u8 idx = 0;
1362 	u8 get_empty_table = false;
1363 
1364 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1365 	for  (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
1366 		if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
1367 			get_empty_table = true;
1368 			break;
1369 		}
1370 	}
1371 	if (!get_empty_table) {
1372 		idx = iqk_info->iqk_table_idx[path] + 1;
1373 		if (idx > RTW89_IQK_CHS_NR - 1)
1374 			idx = 0;
1375 	}
1376 	reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
1377 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]cfg ch = %d\n", reg_rf18);
1378 	reg_35c = rtw89_phy_read32_mask(rtwdev, 0x35c, 0x00000c00);
1379 
1380 	iqk_info->iqk_band[path] = chan->band_type;
1381 	iqk_info->iqk_bw[path] = chan->band_width;
1382 	iqk_info->iqk_ch[path] = chan->channel;
1383 
1384 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1385 		    "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
1386 		    iqk_info->iqk_band[path]);
1387 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n",
1388 		    path, iqk_info->iqk_bw[path]);
1389 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n",
1390 		    path, iqk_info->iqk_ch[path]);
1391 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1392 		    "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy,
1393 		    rtwdev->dbcc_en ? "on" : "off",
1394 		    iqk_info->iqk_band[path] == 0 ? "2G" :
1395 		    iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
1396 		    iqk_info->iqk_ch[path],
1397 		    iqk_info->iqk_bw[path] == 0 ? "20M" :
1398 		    iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");
1399 	if (reg_35c == 0x01)
1400 		iqk_info->syn1to2 = 0x1;
1401 	else
1402 		iqk_info->syn1to2 = 0x0;
1403 
1404 	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852A_IQK_VER);
1405 	rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x000f << (path * 16),
1406 			       (u8)iqk_info->iqk_band[path]);
1407 	rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x00f0 << (path * 16),
1408 			       (u8)iqk_info->iqk_bw[path]);
1409 	rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0xff00 << (path * 16),
1410 			       (u8)iqk_info->iqk_ch[path]);
1411 
1412 	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x000000ff, RTW8852A_NCTL_VER);
1413 }
1414 
/* Entry point for running the IQK sequence on one path. */
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			   u8 path)
{
	_iqk_by_path(rtwdev, phy_idx, path);
}
1420 
/* Restore KIP/NCTL and RF state after an IQK run: program the cached
 * narrow-band TX/RX coefficients, reset the NCTL/KIP bookkeeping
 * registers, and put the RF chain back into normal RX mode.
 */
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	/* Apply the calibrated (or fallback) IQ correction coefficients. */
	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_txcfir[path]);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_rxcfir[path]);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_phy_write32_clr(rtwdev, R_MDPK_RX_DCK, MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
	rtw89_phy_write32_clr(rtwdev, R_KPATH_CFG, MASKDWORD);
	rtw89_phy_write32_clr(rtwdev, R_GAPK, B_GAPK_ADR);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0x10010000);
	rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_MAP + (path << 8), MASKDWORD, 0xe4e4e4e4);
	rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
	rtw89_phy_write32_clr(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD, 0x00000002);
	/* RF side: close LOK LUT access and return to normal RX.
	 * NOTE(review): RR_LUTWE/RR_LUTWE_LOK is written twice — matches the
	 * vendor sequence, presumably intentional; confirm before merging.
	 */
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
	rtw89_write_rf(rtwdev, path, RR_TXRSV, RR_TXRSV_GAPK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_BIAS, RR_BIAS_GAPK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
}
1448 
1449 static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
1450 			       enum rtw89_phy_idx phy_idx, u8 path)
1451 {
1452 	const struct rtw89_rfk_tbl *tbl;
1453 
1454 	switch (_kpath(rtwdev, phy_idx)) {
1455 	case RF_A:
1456 		tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path0_tbl;
1457 		break;
1458 	case RF_B:
1459 		tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path1_tbl;
1460 		break;
1461 	default:
1462 		tbl = &rtw8852a_rfk_iqk_restore_defs_nondbcc_path01_tbl;
1463 		break;
1464 	}
1465 
1466 	rtw89_rfk_parser(rtwdev, tbl);
1467 }
1468 
/* Pre-IQK setup for one path: select the coefficient/CFIR LUT bank
 * (per-path under DBCC, otherwise the path's table index), take the RF
 * chain out of reset, and initialize the NCTL/KIP configuration registers.
 */
static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 idx = iqk_info->iqk_table_idx[path];

	if (rtwdev->dbcc_en) {
		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
				       B_COEF_SEL_IQC, path & 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_G2, path & 0x1);
	} else {
		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
				       B_COEF_SEL_IQC, idx);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_G2, idx);
	}
	/* Release RF reset, then prime the NCTL/KIP engine. */
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_RW, MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, MASKDWORD, 0x00200000);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, MASKDWORD, 0x80000000);
	rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD);
}
1493 
1494 static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
1495 			       enum rtw89_phy_idx phy_idx, u8 path)
1496 {
1497 	const struct rtw89_rfk_tbl *tbl;
1498 
1499 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__);
1500 
1501 	switch (_kpath(rtwdev, phy_idx)) {
1502 	case RF_A:
1503 		tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path0_tbl;
1504 		break;
1505 	case RF_B:
1506 		tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path1_tbl;
1507 		break;
1508 	default:
1509 		tbl = &rtw8852a_rfk_iqk_set_defs_nondbcc_path01_tbl;
1510 		break;
1511 	}
1512 
1513 	rtw89_rfk_parser(rtwdev, tbl);
1514 }
1515 
1516 static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path)
1517 {
1518 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1519 	u8 phy_idx = 0x0;
1520 
1521 	iqk_info->iqk_times++;
1522 
1523 	if (path == 0x0)
1524 		phy_idx = RTW89_PHY_0;
1525 	else
1526 		phy_idx = RTW89_PHY_1;
1527 
1528 	_iqk_get_ch_info(rtwdev, phy_idx, path);
1529 	_iqk_macbb_setting(rtwdev, phy_idx, path);
1530 	_iqk_preset(rtwdev, path);
1531 	_iqk_start_iqk(rtwdev, phy_idx, path);
1532 	_iqk_restore(rtwdev, path);
1533 	_iqk_afebb_restore(rtwdev, phy_idx, path);
1534 }
1535 
/* RC calibration (RCK) for one RF path: trigger the hardware RCK, poll
 * RF reg 0x1c bit 3 for completion (~20 us), write the resulting CA code
 * back, set the ADC offset, pulse the RFC clock and restore RSV1.
 */
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u32 rf_reg5, rck_val = 0;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

	/* Save RSV1 so the reset bit can be restored afterwards. */
	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	/* RCK trigger */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240)<
	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20,
				       false, rtwdev, path, 0x1c, BIT(3));
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n");

	/* Commit the calibrated CA code. */
	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);

	/* RCK_ADC_OFFSET */
	rtw89_write_rf(rtwdev, path, RR_RCKO, RR_RCKO_OFF, 0x4);

	/* Pulse the RFC clock enable to latch the new settings. */
	rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x1);
	rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x0);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RCK] RF 0x1b / 0x1c / 0x1d = 0x%x / 0x%x / 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_RCKO, RFREG_MASK));
}
1577 
1578 static void _iqk_init(struct rtw89_dev *rtwdev)
1579 {
1580 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1581 	u8 ch, path;
1582 
1583 	rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD);
1584 	if (iqk_info->is_iqk_init)
1585 		return;
1586 
1587 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1588 	iqk_info->is_iqk_init = true;
1589 	iqk_info->is_nbiqk = false;
1590 	iqk_info->iqk_fft_en = false;
1591 	iqk_info->iqk_sram_en = false;
1592 	iqk_info->iqk_cfir_en = false;
1593 	iqk_info->iqk_xym_en = false;
1594 	iqk_info->iqk_times = 0x0;
1595 
1596 	for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) {
1597 		iqk_info->iqk_channel[ch] = 0x0;
1598 		for (path = 0; path < RTW8852A_IQK_SS; path++) {
1599 			iqk_info->lok_cor_fail[ch][path] = false;
1600 			iqk_info->lok_fin_fail[ch][path] = false;
1601 			iqk_info->iqk_tx_fail[ch][path] = false;
1602 			iqk_info->iqk_rx_fail[ch][path] = false;
1603 			iqk_info->iqk_mcc_ch[ch][path] = 0x0;
1604 			iqk_info->iqk_table_idx[path] = 0x0;
1605 		}
1606 	}
1607 }
1608 
/* Run one full IQ-calibration pass on a single RF path: back up BB and RF
 * registers, configure MAC/BB for calibration, execute the IQK, then restore
 * everything.  BT coexistence is notified around the one-shot window.
 * NOTE(review): @force is accepted but not used anywhere in this sequence.
 */
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
		   enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852A_IQK_SS][BACKUP_RF_REGS_NR];
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]==========IQK start!!!!!==========\n");
	iqk_info->iqk_times++;
	iqk_info->version = RTW8852A_IQK_VER;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
	/* Record current channel info, then snapshot registers before touching HW. */
	_iqk_get_ch_info(rtwdev, phy_idx, path);
	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
	_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path);
	/* Undo calibration-time settings in reverse order of setup. */
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
	_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
1637 
1638 static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
1639 {
1640 	switch (_kpath(rtwdev, phy_idx)) {
1641 	case RF_A:
1642 		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
1643 		break;
1644 	case RF_B:
1645 		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
1646 		break;
1647 	case RF_AB:
1648 		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
1649 		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
1650 		break;
1651 	default:
1652 		break;
1653 	}
1654 }
1655 
1656 #define RXDCK_VER_8852A 0xe
1657 
/* Perform RX DC offset calibration (DCK) on one path, driven either by the
 * AFE or by the RF controller.  In AFE mode the per-path RX clock/DC blocks
 * are configured first and restored afterwards.
 * (path << 13): per-path register stride — paths appear to be 0x2000 apart
 * in the BB address map (TODO confirm against register map).
 */
static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path, bool is_afe)
{
	u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path);
	u32 ori_val;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ==== S%d RX DCK (by %s)====\n",
		    path, is_afe ? "AFE" : "RFC");

	/* Save the RX clock register so it can be restored in AFE mode. */
	ori_val = rtw89_phy_read32_mask(rtwdev, R_P0_RXCK + (path << 13), MASKDWORD);

	if (is_afe) {
		rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
		rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
				       B_P0_RXCK_VAL, 0x3);
		rtw89_phy_write32_set(rtwdev, R_S0_RXDC2 + (path << 13), B_S0_RXDC2_MEN);
		rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2 + (path << 13),
				       B_S0_RXDC2_AVG, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
		rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK);
		/* Toggle the filter reset bit (clear then set). */
		rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST);
		rtw89_phy_write32_set(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_CRXBB, 0x1);
	}

	rtw89_write_rf(rtwdev, path, RR_DCK2, RR_DCK2_CYCLE, 0x3f);
	rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_SEL, is_afe);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_START);

	/* Rising edge on RR_DCK_LV triggers the calibration. */
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);

	/* Fixed wait for the calibration to complete. */
	fsleep(600);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_STOP);

	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);

	if (is_afe) {
		rtw89_phy_write32_clr(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
				       MASKDWORD, ori_val);
	}
}
1705 
/* Run RX DC offset calibration on every path selected by the current DBCC
 * configuration.  Per path: save RSV1 and the fine-tune value, pause TSSI
 * tracking if active, force RX mode, calibrate, then restore everything.
 */
static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		    bool is_afe)
{
	u8 path, kpath, dck_tune;
	u32 rf_reg5;
	u32 addr;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n",
		    RXDCK_VER_8852A, rtwdev->hal.cv);

	kpath = _kpath(rtwdev, phy);

	for (path = 0; path < 2; path++) {
		/* Skip paths not selected by the kpath bitmap. */
		if (!(kpath & BIT(path)))
			continue;

		rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
		dck_tune = (u8)rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);

		if (rtwdev->is_tssi_mode[path]) {
			addr = 0x5818 + (path << 13);
			/* TSSI pause */
			rtw89_phy_write32_set(rtwdev, addr, BIT(30));
		}

		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
		_set_rx_dck(rtwdev, phy, path, is_afe);
		/* Restore the saved fine-tune and RSV1 values. */
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
		rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

		if (rtwdev->is_tssi_mode[path]) {
			addr = 0x5818 + (path << 13);
			/* TSSI resume */
			rtw89_phy_write32_clr(rtwdev, addr, BIT(30));
		}
	}
}
1746 
1747 #define RTW8852A_RF_REL_VERSION 34
1748 #define RTW8852A_DPK_VER 0x10
1749 #define RTW8852A_DPK_TH_AVG_NUM 4
1750 #define RTW8852A_DPK_RF_PATH 2
1751 #define RTW8852A_DPK_KIP_REG_NUM 2
1752 
/* One-shot command IDs written to the NCTL register by _dpk_one_shot(). */
enum rtw8852a_dpk_id {
	LBK_RXIQK	= 0x06, /* loopback RX IQ calibration */
	SYNC		= 0x10, /* sync / DC + correlation report */
	MDPK_IDL	= 0x11, /* model DPK, idle pattern */
	MDPK_MPA	= 0x12,
	GAIN_LOSS	= 0x13,
	GAIN_CAL	= 0x14,
};
1761 
1762 static void _rf_direct_cntrl(struct rtw89_dev *rtwdev,
1763 			     enum rtw89_rf_path path, bool is_bybb)
1764 {
1765 	if (is_bybb)
1766 		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
1767 	else
1768 		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1769 }
1770 
1771 static void _dpk_onoff(struct rtw89_dev *rtwdev,
1772 		       enum rtw89_rf_path path, bool off);
1773 
1774 static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, u32 *reg,
1775 			  u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM],
1776 			  u8 path)
1777 {
1778 	u8 i;
1779 
1780 	for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) {
1781 		reg_bkup[path][i] = rtw89_phy_read32_mask(rtwdev,
1782 							  reg[i] + (path << 8),
1783 							  MASKDWORD);
1784 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
1785 			    reg[i] + (path << 8), reg_bkup[path][i]);
1786 	}
1787 }
1788 
1789 static void _dpk_reload_kip(struct rtw89_dev *rtwdev, u32 *reg,
1790 			    u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM], u8 path)
1791 {
1792 	u8 i;
1793 
1794 	for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) {
1795 		rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8),
1796 				       MASKDWORD, reg_bkup[path][i]);
1797 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
1798 			    reg[i] + (path << 8), reg_bkup[path][i]);
1799 	}
1800 }
1801 
/* Issue one DPK sub-command (@id) to the NCTL engine and wait for completion.
 * Returns 0 on success, 1 if the engine did not signal done (0x55) within
 * 20 ms.  BT coexistence is notified around the one-shot window.
 */
static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path, enum rtw8852a_dpk_id id)
{
	u8 phy_map  = rtw89_btc_path_phymap(rtwdev, phy, path);
	u16 dpk_cmd = 0x0;
	u32 val;
	int ret;

	/* Command word: sub-command in the high byte, path selector in the low. */
	dpk_cmd = (u16)((id << 8) | (0x19 + (path << 4)));

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_START);

	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
	rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN);

	/* Poll for the 0x55 "done" marker, 10 us interval, 20 ms budget. */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       10, 20000, false, rtwdev, 0xbff8, MASKBYTE0);

	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_STOP);

	/* NOTE(review): ids above 0x13 (e.g. GAIN_CAL 0x14) all print "PWR_CAL". */
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] one-shot for %s = 0x%x (ret=%d)\n",
		    id == 0x06 ? "LBK_RXIQK" :
		    id == 0x10 ? "SYNC" :
		    id == 0x11 ? "MDPK_IDL" :
		    id == 0x12 ? "MDPK_MPA" :
		    id == 0x13 ? "GAIN_LOSS" : "PWR_CAL",
		    dpk_cmd, ret);

	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] one-shot over 20ms!!!!\n");
		return 1;
	}

	return 0;
}
1841 
/* RX DC calibration as used inside the DPK flow: raise the TIA/IDA enable
 * bits first, then run the common RFC-driven RX DCK.
 */
static void _dpk_rx_dck(struct rtw89_dev *rtwdev,
			enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
	_set_rx_dck(rtwdev, phy, path, false);
}
1849 
/* Record the current band/channel/bandwidth into the DPK backup slot for
 * this path (indexed by dpk->cur_idx) and log the context.
 */
static void _dpk_information(struct rtw89_dev *rtwdev,
			     enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 kidx = dpk->cur_idx[path];

	dpk->bp[path][kidx].band = chan->band_type;
	dpk->bp[path][kidx].ch = chan->channel;
	dpk->bp[path][kidx].bw = chan->band_width;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
		    path, dpk->cur_idx[path], phy,
		    rtwdev->is_tssi_mode[path] ? "on" : "off",
		    rtwdev->dbcc_en ? "on" : "off",
		    dpk->bp[path][kidx].band == 0 ? "2G" :
		    dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
		    dpk->bp[path][kidx].ch,
		    dpk->bp[path][kidx].bw == 0 ? "20M" :
		    dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
}
1873 
/* Apply the BB/AFE configuration tables for DPK on the selected path(s).
 * For single-path cases, RX CCA is additionally disabled when the 2.4G band
 * selector does not match the path (per the hard-coded 0x0/0x1 checks).
 */
static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	switch (kpath) {
	case RF_A:
		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_a_tbl);

		if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x0)
			rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);

		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_a_tbl);
		break;
	case RF_B:
		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_b_tbl);

		if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x1)
			rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);

		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_b_tbl);
		break;
	case RF_AB:
		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_s_defs_ab_tbl);
		break;
	default:
		break;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
}
1904 
/* Restore the BB/AFE configuration after DPK using the per-kpath restore
 * tables (counterpart of _dpk_bb_afe_setting()).
 */
static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	switch (kpath) {
	case RF_A:
		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_a_tbl);
		break;
	case RF_B:
		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_b_tbl);
		break;
	case RF_AB:
		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_ab_tbl);
		break;
	default:
		break;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
}
1925 
/* Pause (is_pause == true) or resume TSSI tracking on @path by toggling the
 * per-path tracking-enable bit.
 */
static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
			    enum rtw89_rf_path path, bool is_pause)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
			       B_P0_TSSI_TRK_EN, is_pause);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
		    is_pause ? "pause" : "resume");
}
1935 
/* Program the KIP (calibration IP) registers with the fixed DPK setup values
 * for path @path and table slot @kidx.  The magic constants come from the
 * vendor calibration parameters.
 */
static void _dpk_kip_setting(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path, u8 kidx)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, 0x00093f3f);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG, B_DPK_CFG_IDX, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path); /*subpage_id*/
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8) + (kidx << 2),
			       MASKDWORD, 0x003f2e2e);
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       MASKDWORD, 0x005b5b5b);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP setting for S%d[%d]!!\n",
		    path, kidx);
}
1953 
/* Return the KIP registers to their post-DPK operating values (counterpart
 * of _dpk_kip_setting()).  On chips newer than CBV an extra DPD bit is set.
 */
static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path)
{
	rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0x10010000);
	rtw89_phy_write32_clr(rtwdev, R_KIP_CLK, MASKDWORD);

	/* Cut-version specific quirk for chips after CBV. */
	if (rtwdev->hal.cv > CHIP_CBV)
		rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), BIT(15), 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
}
1967 
/* Loopback RX IQ calibration used by the DPK AGC loop.  Configures the RF
 * chain for internal loopback, picks an attenuation based on the current
 * RXBB gain, fires the LBK_RXIQK one-shot, then restores the chain to DPK
 * mode.  The statement order follows the vendor sequence and must not be
 * rearranged.
 */
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path)
{
	u8 cur_rxbb;

	cur_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_f_tbl);

	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);
	rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x2);
	/* Mirror the channel configuration into RSV4 for the loopback. */
	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK,
		       rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK));
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
	/* Power-cycle the RXK PLL. */
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1);

	/* Allow the PLL to settle. */
	fsleep(70);

	rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTL, 0x1f);

	/* Choose high attenuation by current RXBB gain range. */
	if (cur_rxbb <= 0xa)
		rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x3);
	else if (cur_rxbb <= 0x10 && cur_rxbb >= 0xb)
		rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x0);

	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);

	_dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
		    rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));

	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); /*POW IQKPLL*/
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_DPK);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_r_tbl);
}
2012 
/* Sample the running-average thermal value for @path and store it in the
 * DPK backup slot so later tracking can compare against the value at
 * calibration time.
 */
static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx,
			     enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	dpk->bp[path][kidx].ther_dpk =
		ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
		    dpk->bp[path][kidx].ther_dpk);
}
2024 
2025 static u8 _dpk_set_tx_pwr(struct rtw89_dev *rtwdev, u8 gain,
2026 			  enum rtw89_rf_path path)
2027 {
2028 	u8 txagc_ori = 0x38;
2029 
2030 	rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc_ori);
2031 
2032 	return txagc_ori;
2033 }
2034 
/* Configure the RF front end for DPK: band-specific mode/attenuation values
 * for 2G vs 5G/6G, then common TX/RX bandwidth settings derived from the
 * stored channel bandwidth.  NOTE(review): @gain is unused here.
 */
static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
			    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x280b);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4);
		rtw89_write_rf(rtwdev, path, RR_MIXER, RR_MIXER_GN, 0x0);
	} else {
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x282e);
		rtw89_write_rf(rtwdev, path, RR_BIASA2, RR_BIASA2_LB, 0x7);
		rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW, 0x3);
		rtw89_write_rf(rtwdev, path, RR_RXA, RR_RXA_DPK, 0x3);
	}
	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
	/* TX BB filter bandwidth: bw enum value + 1. */
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] RF 0x0/0x1/0x1a = 0x%x/ 0x%x/ 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK));
}
2061 
/* Enable or disable manual TX CFIR gain selection.  When enabled, the
 * current PAD/TXBB gains are read from RF and copied into the BB manual
 * gain registers, then the CFIR coefficients are latched via a set/clear
 * pulse on the load-coefficient bit.
 */
static void _dpk_manual_txcfir(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path path, bool is_manual)
{
	u8 tmp_pad, tmp_txbb;

	if (is_manual) {
		rtw89_phy_write32_mask(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN, 0x1);
		tmp_pad = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_PAD);
		rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8),
				       B_RFGAIN_PAD, tmp_pad);

		tmp_txbb = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_BB);
		rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8),
				       B_RFGAIN_TXBB, tmp_txbb);

		/* Pulse the coefficient-load strobe (set then clear). */
		rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8),
				       B_LOAD_COEF_CFIR, 0x1);
		rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8),
				      B_LOAD_COEF_CFIR);

		rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), BIT(1), 0x1);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] PAD_man / TXBB_man = 0x%x / 0x%x\n", tmp_pad,
			    tmp_txbb);
	} else {
		rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] disable manual switch TXCFIR\n");
	}
}
2093 
/* Bypass (or re-enable) the RX CFIR filter for @path by setting/clearing
 * both bypass bits, logging the resulting RXIQC register value.
 */
static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path path, bool is_bypass)
{
	if (is_bypass) {
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       B_RXIQC_BYPASS2, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       B_RXIQC_BYPASS, 0x1);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path,
			    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
						  MASKDWORD));
	} else {
		rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS2);
		rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path,
			    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
						  MASKDWORD));
	}
}
2115 
2116 static
2117 void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
2118 {
2119 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2120 
2121 	if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
2122 		rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F);
2123 	else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
2124 		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
2125 	else
2126 		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
2127 
2128 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
2129 		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
2130 		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
2131 }
2132 
2133 static void _dpk_table_select(struct rtw89_dev *rtwdev,
2134 			      enum rtw89_rf_path path, u8 kidx, u8 gain)
2135 {
2136 	u8 val;
2137 
2138 	val = 0x80 + kidx * 0x20 + gain * 0x10;
2139 	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val);
2140 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2141 		    "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
2142 		    gain, val);
2143 }
2144 
/* Evaluate the SYNC one-shot result: read the correlation index/value and
 * the DC offset report, store them in the dpk info, and return true (= fail)
 * when the DC offset or correlation exceeds the thresholds.
 */
static bool _dpk_sync_check(struct rtw89_dev *rtwdev,
			    enum rtw89_rf_path path)
{
#define DPK_SYNC_TH_DC_I 200
#define DPK_SYNC_TH_DC_Q 200
#define DPK_SYNC_TH_CORR 170
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u16 dc_i, dc_q;
	u8 corr_val, corr_idx;

	/* Report page 0: correlation index/value. */
	rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);

	corr_idx = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
	corr_val = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d Corr_idx / Corr_val = %d / %d\n", path, corr_idx,
		    corr_val);

	dpk->corr_idx[path][0] = corr_idx;
	dpk->corr_val[path][0] = corr_val;

	/* Report page 9: DC I/Q offsets. */
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);

	dc_i = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
	dc_q = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);

	/* Fields are 12-bit two's-complement; keep only the magnitude. */
	dc_i = abs(sign_extend32(dc_i, 11));
	dc_q = abs(sign_extend32(dc_q, 11));

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
		    path, dc_i, dc_q);

	dpk->dc_i[path][0] = dc_i;
	dpk->dc_q[path][0] = dc_q;

	if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
	    corr_val < DPK_SYNC_TH_CORR)
		return true;
	else
		return false;
}
2187 
/* Select the TPG mode for the current bandwidth, run the SYNC one-shot,
 * and check its report.  Returns true on failure.
 */
static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 kidx)
{
	_dpk_tpg_sel(rtwdev, path, kidx);
	_dpk_one_shot(rtwdev, phy, path, SYNC);
	return _dpk_sync_check(rtwdev, path); /*1= fail*/
}
2195 
/* Read the digital gain from the KIP report (page 0).  The SYNERR field is
 * read and discarded first — presumably a required dummy read before the
 * DCI field is valid (TODO confirm against vendor reference).
 */
static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
{
	u16 dgain = 0x0;

	rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);

	rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);

	dgain = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain,
		    dgain);

	return dgain;
}
2211 
2212 static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
2213 {
2214 	s8 offset;
2215 
2216 	if (dgain >= 0x783)
2217 		offset = 0x6;
2218 	else if (dgain <= 0x782 && dgain >= 0x551)
2219 		offset = 0x3;
2220 	else if (dgain <= 0x550 && dgain >= 0x3c4)
2221 		offset = 0x0;
2222 	else if (dgain <= 0x3c3 && dgain >= 0x2aa)
2223 		offset = -3;
2224 	else if (dgain <= 0x2a9 && dgain >= 0x1e3)
2225 		offset = -6;
2226 	else if (dgain <= 0x1e2 && dgain >= 0x156)
2227 		offset = -9;
2228 	else if (dgain <= 0x155)
2229 		offset = -12;
2230 	else
2231 		offset = 0x0;
2232 
2233 	return offset;
2234 }
2235 
/* Read the gain-loss index from the KIP report (page 6) after triggering
 * the report latch.
 */
static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
	return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
}
2242 
/* Run the GAIN_LOSS one-shot against table gain slot 1; the result is read
 * separately via _dpk_gainloss_read().
 */
static void _dpk_gainloss(struct rtw89_dev *rtwdev,
			  enum rtw89_phy_idx phy, enum rtw89_rf_path path,
			  u8 kidx)
{
	_dpk_table_select(rtwdev, path, kidx, 1);
	_dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
}
2250 
2251 #define DPK_TXAGC_LOWER 0x2e
2252 #define DPK_TXAGC_UPPER 0x3f
2253 #define DPK_TXAGC_INVAL 0xff
2254 
2255 static u8 _dpk_set_offset(struct rtw89_dev *rtwdev,
2256 			  enum rtw89_rf_path path, s8 gain_offset)
2257 {
2258 	u8 txagc;
2259 
2260 	txagc = (u8)rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK);
2261 
2262 	if (txagc - gain_offset < DPK_TXAGC_LOWER)
2263 		txagc = DPK_TXAGC_LOWER;
2264 	else if (txagc - gain_offset > DPK_TXAGC_UPPER)
2265 		txagc = DPK_TXAGC_UPPER;
2266 	else
2267 		txagc = txagc - gain_offset;
2268 
2269 	rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc);
2270 
2271 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
2272 		    gain_offset, txagc);
2273 	return txagc;
2274 }
2275 
/* States of the _dpk_agc() gain-adjustment state machine. */
enum dpk_agc_step {
	DPK_AGC_STEP_SYNC_DGAIN,	/* run SYNC, read digital gain */
	DPK_AGC_STEP_GAIN_ADJ,		/* adjust RXBB from the dgain mapping */
	DPK_AGC_STEP_GAIN_LOSS_IDX,	/* run gain-loss measurement */
	DPK_AGC_STEP_GL_GT_CRITERION,	/* gain loss too high: lower TX AGC */
	DPK_AGC_STEP_GL_LT_CRITERION,	/* gain loss too low: raise TX AGC */
	DPK_AGC_STEP_SET_TX_GAIN,	/* apply final TX gain and finish */
};
2284 
/* Read the PA-scan report.  In check mode, compare the power of sample 0x00
 * against sample 0x1f and return 1 when the first exceeds 8/5 of the
 * second; otherwise dump all 32 samples for debugging.
 * NOTE(review): when !is_check, val1/val2 stay zero, so the final
 * comparison (0 >= 0) always returns 1 — callers appear to only use the
 * return value with is_check == true; confirm before relying on it.
 */
static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
{
	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
	u8 i;

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_pas_read_defs_tbl);

	if (is_check) {
		/* Sample index 0x00: I/Q are 12-bit signed; take magnitudes. */
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val1_i = abs(sign_extend32(val1_i, 11));
		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val1_q = abs(sign_extend32(val1_q, 11));
		/* Sample index 0x1f. */
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val2_i = abs(sign_extend32(val2_i, 11));
		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val2_q = abs(sign_extend32(val2_q, 11));

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
			    phy_div(val1_i * val1_i + val1_q * val1_q,
				    val2_i * val2_i + val2_q * val2_q));

	} else {
		for (i = 0; i < 32; i++) {
			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
		}
	}
	if ((val1_i * val1_i + val1_q * val1_q) >=
	    ((val2_i * val2_i + val2_q * val2_q) * 8 / 5))
		return 1;
	else
		return 0;
}
2322 
/* DPK automatic gain-control loop.  Starting from @init_txagc, iterate the
 * state machine (see enum dpk_agc_step) until a suitable TX AGC is found or
 * the adjustment budget (DPK_AGC_ADJ_LMT) is exhausted.  Returns the final
 * TX AGC, or DPK_TXAGC_INVAL when SYNC fails.
 */
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
		   bool loss_only)
{
#define DPK_AGC_ADJ_LMT 6
#define DPK_DGAIN_UPPER 1922
#define DPK_DGAIN_LOWER 342
#define DPK_RXBB_UPPER 0x1f
#define DPK_RXBB_LOWER 0
#define DPK_GL_CRIT 7
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
	u8 agc_cnt = 0;
	bool limited_rxbb = false;
	s8 offset = 0;
	u16 dgain = 0;
	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
	bool goout = false;

	tmp_txagc = init_txagc;

	do {
		switch (step) {
		case DPK_AGC_STEP_SYNC_DGAIN:
			/* SYNC failure aborts the whole AGC loop. */
			if (_dpk_sync(rtwdev, phy, path, kidx)) {
				tmp_txagc = DPK_TXAGC_INVAL;
				goout = true;
				break;
			}

			dgain = _dpk_dgain_read(rtwdev);

			/* Skip RXBB adjustment once it has hit a bound. */
			if (loss_only || limited_rxbb)
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			else
				step = DPK_AGC_STEP_GAIN_ADJ;
			break;

		case DPK_AGC_STEP_GAIN_ADJ:
			tmp_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
			offset = _dpk_dgain_mapping(rtwdev, dgain);

			/* Clamp RXBB and remember that we hit a bound. */
			if (tmp_rxbb + offset > DPK_RXBB_UPPER) {
				tmp_rxbb = DPK_RXBB_UPPER;
				limited_rxbb = true;
			} else if (tmp_rxbb + offset < DPK_RXBB_LOWER) {
				tmp_rxbb = DPK_RXBB_LOWER;
				limited_rxbb = true;
			} else {
				tmp_rxbb = tmp_rxbb + offset;
			}

			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] Adjust RXBB (%d) = 0x%x\n", offset,
				    tmp_rxbb);
			/* After changing RXBB (or on first pass), redo RX IQ comp. */
			if (offset != 0 || agc_cnt == 0) {
				if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
					_dpk_bypass_rxcfir(rtwdev, path, true);
				else
					_dpk_lbk_rxiqk(rtwdev, phy, path);
			}
			/* Re-sync when dgain is out of the target window. */
			if (dgain > DPK_DGAIN_UPPER || dgain < DPK_DGAIN_LOWER)
				step = DPK_AGC_STEP_SYNC_DGAIN;
			else
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;

			agc_cnt++;
			break;

		case DPK_AGC_STEP_GAIN_LOSS_IDX:
			_dpk_gainloss(rtwdev, phy, path, kidx);
			tmp_gl_idx = _dpk_gainloss_read(rtwdev);

			if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
			    tmp_gl_idx > DPK_GL_CRIT)
				step = DPK_AGC_STEP_GL_GT_CRITERION;
			else if (tmp_gl_idx == 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else
				step = DPK_AGC_STEP_SET_TX_GAIN;
			break;

		case DPK_AGC_STEP_GL_GT_CRITERION:
			/* Too much gain loss: step TX AGC down by 3, unless at floor. */
			if (tmp_txagc == DPK_TXAGC_LOWER) {
				goout = true;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@lower bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, path, 3);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_GL_LT_CRITERION:
			/* No gain loss: step TX AGC up by 2, unless at ceiling. */
			if (tmp_txagc == DPK_TXAGC_UPPER) {
				goout = true;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@upper bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, path, -2);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_SET_TX_GAIN:
			tmp_txagc = _dpk_set_offset(rtwdev, path, tmp_gl_idx);
			goout = true;
			agc_cnt++;
			break;

		default:
			goout = true;
			break;
		}
	} while (!goout && (agc_cnt < DPK_AGC_ADJ_LMT));

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc,
		    tmp_rxbb);

	return tmp_txagc;
}
2448 
2449 static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
2450 {
2451 	switch (order) {
2452 	case 0:
2453 		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
2454 		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
2455 		rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
2456 		break;
2457 	case 1:
2458 		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
2459 		rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
2460 		rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
2461 		break;
2462 	case 2:
2463 		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
2464 		rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
2465 		rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
2466 		break;
2467 	default:
2468 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2469 			    "[DPK] Wrong MDPD order!!(0x%x)\n", order);
2470 		break;
2471 	}
2472 
2473 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2474 		    "[DPK] Set MDPD order to 0x%x for IDL\n", order);
2475 }
2476 
/* Run the MDPK idle-pattern one-shot with MDPD order 0 and table gain
 * slot 1.  NOTE(review): @kidx is passed to the table select but @gain is
 * unused here.
 */
static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	_dpk_set_mdpd_para(rtwdev, 0x0);
	_dpk_table_select(rtwdev, path, kidx, 1);
	_dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
}
2484 
/* Commit the DPK result for (@path, @kidx): store txagc / pwsf / gs in the
 * backup slot and program them into the corresponding BB registers, then
 * latch the MDPD coefficients.  pwsf and gs are fixed vendor values here.
 */
static void _dpk_fill_result(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path, u8 kidx, u8 gain,
			     u8 txagc)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	u16 pwsf = 0x78;
	u8 gs = 0x5b;

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc,
		    pwsf, gs);

	dpk->bp[path][kidx].txagc_dpk = txagc;
	/* Field position depends on gain (byte) and kidx (half-word). */
	rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
			       0x3F << ((gain << 3) + (kidx << 4)), txagc);

	dpk->bp[path][kidx].pwsf = pwsf;
	rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
			       0x1FF << (gain << 4), pwsf);

	/* Pulse the MDPD coefficient-load strobe (set then clear). */
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
	rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD);

	dpk->bp[path][kidx].gs = gs;
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       MASKDWORD, 0x065b5b5b);

	rtw89_phy_write32_clr(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD);

	rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL);
}
2519 
2520 static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2521 			      enum rtw89_rf_path path)
2522 {
2523 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2524 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2525 	bool is_reload = false;
2526 	u8 idx, cur_band, cur_ch;
2527 
2528 	cur_band = chan->band_type;
2529 	cur_ch = chan->channel;
2530 
2531 	for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
2532 		if (cur_band != dpk->bp[path][idx].band ||
2533 		    cur_ch != dpk->bp[path][idx].ch)
2534 			continue;
2535 
2536 		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
2537 				       B_COEF_SEL_MDPD, idx);
2538 		dpk->cur_idx[path] = idx;
2539 		is_reload = true;
2540 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2541 			    "[DPK] reload S%d[%d] success\n", path, idx);
2542 	}
2543 
2544 	return is_reload;
2545 }
2546 
2547 static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2548 		      enum rtw89_rf_path path, u8 gain)
2549 {
2550 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2551 	u8 txagc = 0, kidx = dpk->cur_idx[path];
2552 	bool is_fail = false;
2553 
2554 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2555 		    "[DPK] ========= S%d[%d] DPK Start =========\n", path,
2556 		    kidx);
2557 
2558 	_rf_direct_cntrl(rtwdev, path, false);
2559 	txagc = _dpk_set_tx_pwr(rtwdev, gain, path);
2560 	_dpk_rf_setting(rtwdev, gain, path, kidx);
2561 	_dpk_rx_dck(rtwdev, phy, path);
2562 
2563 	_dpk_kip_setting(rtwdev, path, kidx);
2564 	_dpk_manual_txcfir(rtwdev, path, true);
2565 	txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
2566 	if (txagc == DPK_TXAGC_INVAL)
2567 		is_fail = true;
2568 	_dpk_get_thermal(rtwdev, kidx, path);
2569 
2570 	_dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
2571 	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
2572 	_dpk_fill_result(rtwdev, path, kidx, gain, txagc);
2573 	_dpk_manual_txcfir(rtwdev, path, false);
2574 
2575 	if (!is_fail)
2576 		dpk->bp[path][kidx].path_ok = true;
2577 	else
2578 		dpk->bp[path][kidx].path_ok = false;
2579 
2580 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
2581 		    is_fail ? "Check" : "Success");
2582 
2583 	return is_fail;
2584 }
2585 
/* Run DPK for every path selected by @kpath, with an optional reload
 * shortcut: when is_dpk_reload_en is set and a stored result matches the
 * current band/channel, the stored result is re-selected instead of
 * recalibrating that path. BB, RF and KIP registers are backed up around
 * the calibration and restored afterwards, and TSSI is paused per path
 * while its DPK runs.
 * NOTE(review): @force is currently unused here — confirm intent.
 */
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
			    enum rtw89_phy_idx phy, u8 kpath)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852A_DPK_RF_PATH][BACKUP_RF_REGS_NR];
	u32 kip_bkup[RTW8852A_DPK_RF_PATH][RTW8852A_DPK_KIP_REG_NUM] = {{0}};
	u32 kip_reg[] = {R_RXIQC, R_IQK_RES};
	u8 path;
	bool is_fail = true, reloaded[RTW8852A_DPK_RF_PATH] = {false};

	if (dpk->is_dpk_reload_en) {
		for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
			if (!(kpath & BIT(path)))
				continue;

			reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
			/* no stored match but a previous result exists: flip
			 * to the other backup slot so a fresh calibration
			 * does not clobber the current one
			 */
			if (!reloaded[path] && dpk->bp[path][0].ch != 0)
				dpk->cur_idx[path] = !dpk->cur_idx[path];
			else
				_dpk_onoff(rtwdev, path, false);
		}
	} else {
		for (path = 0; path < RTW8852A_DPK_RF_PATH; path++)
			dpk->cur_idx[path] = 0;
	}

	/* every requested path was reloaded: nothing left to calibrate */
	if ((kpath == RF_A && reloaded[RF_PATH_A]) ||
	    (kpath == RF_B && reloaded[RF_PATH_B]) ||
	    (kpath == RF_AB && reloaded[RF_PATH_A] && reloaded[RF_PATH_B]))
		return;

	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);

	/* back up per-path state before touching the hardware */
	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
		if (!(kpath & BIT(path)) || reloaded[path])
			continue;
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, true);
		_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		_dpk_information(rtwdev, phy, path);
	}

	/* NOTE(review): "path" here holds its post-loop value
	 * (RTW8852A_DPK_RF_PATH); the helper presumably keys off "kpath"
	 * instead — confirm.
	 */
	_dpk_bb_afe_setting(rtwdev, phy, path, kpath);

	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
		if (!(kpath & BIT(path)) || reloaded[path])
			continue;

		is_fail = _dpk_main(rtwdev, phy, path, 1);
		_dpk_onoff(rtwdev, path, is_fail);
	}

	_dpk_bb_afe_restore(rtwdev, phy, path, kpath);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);

	/* restore per-path state in reverse of the backup above */
	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
		if (!(kpath & BIT(path)) || reloaded[path])
			continue;

		_dpk_kip_restore(rtwdev, path);
		_dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, false);
	}
}
2654 
2655 static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2656 {
2657 	struct rtw89_fem_info *fem = &rtwdev->fem;
2658 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2659 
2660 	if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
2661 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2662 			    "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
2663 		return true;
2664 	} else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
2665 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2666 			    "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
2667 		return true;
2668 	}
2669 
2670 	return false;
2671 }
2672 
2673 static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2674 {
2675 	u8 path, kpath;
2676 
2677 	kpath = _kpath(rtwdev, phy);
2678 
2679 	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
2680 		if (kpath & BIT(path))
2681 			_dpk_onoff(rtwdev, path, true);
2682 	}
2683 }
2684 
2685 static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
2686 {
2687 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2688 		    "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
2689 		    RTW8852A_DPK_VER, rtwdev->hal.cv,
2690 		    RTW8852A_RF_REL_VERSION);
2691 
2692 	if (_dpk_bypass_check(rtwdev, phy))
2693 		_dpk_force_bypass(rtwdev, phy);
2694 	else
2695 		_dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
2696 }
2697 
2698 static void _dpk_onoff(struct rtw89_dev *rtwdev,
2699 		       enum rtw89_rf_path path, bool off)
2700 {
2701 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2702 	u8 val, kidx = dpk->cur_idx[path];
2703 
2704 	val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;
2705 
2706 	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
2707 			       MASKBYTE3, 0x6 | val);
2708 
2709 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
2710 		    kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
2711 }
2712 
/* Periodic DPK thermal tracking: for each path, derive a pwsf correction
 * from the thermal drift since calibration (plus TXAGC offsets when TSSI
 * mode is active) and write it into the R_DPD_BND fields, unless tracking
 * is disabled via R_DPK_TRK or the current txagc_rf reads as zero.
 */
static void _dpk_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 path, kidx;
	u8 trk_idx = 0, txagc_rf = 0;
	s8 txagc_bb = 0, txagc_bb_tp = 0, ini_diff = 0, txagc_ofst = 0;
	u16 pwsf[2];
	u8 cur_ther;
	s8 delta_ther[2] = {0};

	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
		kidx = dpk->cur_idx[path];

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
			    path, kidx, dpk->bp[path][kidx].ch);

		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] thermal now = %d\n", cur_ther);

		/* drift relative to the thermal value captured at DPK time;
		 * only when this slot holds a valid calibration
		 */
		if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0)
			delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;

		/* integer scaling: 1.5x on 2 GHz, 2.5x otherwise */
		if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
			delta_ther[path] = delta_ther[path] * 3 / 2;
		else
			delta_ther[path] = delta_ther[path] * 5 / 2;

		txagc_rf = (u8)rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB  + (path << 13),
						     RR_MODOPT_M_TXPWR);

		if (rtwdev->is_tssi_mode[path]) {
			trk_idx = (u8)rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
				    txagc_rf, trk_idx);

			txagc_bb =
				(s8)rtw89_phy_read32_mask(rtwdev,
							  R_TXAGC_BB + (path << 13),
							  MASKBYTE2);
			/* NOTE(review): cast is (u8) although txagc_bb_tp is
			 * s8, unlike the (s8) cast used for txagc_bb above —
			 * confirm this asymmetry is intentional.
			 */
			txagc_bb_tp =
				(u8)rtw89_phy_read32_mask(rtwdev,
							  R_TXAGC_TP + (path << 13),
							  B_TXAGC_TP);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
				    txagc_bb_tp, txagc_bb);

			txagc_ofst =
				(s8)rtw89_phy_read32_mask(rtwdev,
							  R_TXAGC_BB + (path << 13),
							  MASKBYTE3);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
				    txagc_ofst, delta_ther[path]);

			/* BIT(15) of R_DPD_COM set: ignore the TXAGC offset */
			if (rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
						  BIT(15)) == 0x1)
				txagc_ofst = 0;

			if (txagc_rf != 0 && cur_ther != 0)
				ini_diff = txagc_ofst + delta_ther[path];

			/* choose between the BB-compensated and plain pwsf
			 * depending on the TXDPD state
			 */
			if (rtw89_phy_read32_mask(rtwdev, R_P0_TXDPD + (path << 13),
						  B_P0_TXDPD) == 0x0) {
				pwsf[0] = dpk->bp[path][kidx].pwsf + txagc_bb_tp -
					  txagc_bb + ini_diff +
					  tssi_info->extra_ofst[path];
				pwsf[1] = dpk->bp[path][kidx].pwsf + txagc_bb_tp -
					  txagc_bb + ini_diff +
					  tssi_info->extra_ofst[path];
			} else {
				pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff +
					  tssi_info->extra_ofst[path];
				pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff +
					  tssi_info->extra_ofst[path];
			}

		} else {
			/* non-TSSI: thermal-only correction, 9-bit wrap */
			pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
			pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
		}

		/* apply only when tracking is enabled and TX power is nonzero */
		if (rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS) == 0x0 &&
		    txagc_rf != 0) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
				    pwsf[0], pwsf[1]);

			rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
					       0x000001FF, pwsf[0]);
			rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
					       0x01FF0000, pwsf[1]);
		}
	}
}
2816 
2817 static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2818 			     enum rtw89_rf_path path)
2819 {
2820 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2821 	enum rtw89_band band = chan->band_type;
2822 
2823 	if (band == RTW89_BAND_2G)
2824 		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
2825 	else
2826 		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
2827 }
2828 
/* Apply the TSSI system-level register tables: the common defs first,
 * then the 2G or 5G variant depending on the current band.
 */
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;

	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl);
	rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
				 &rtw8852a_tssi_sys_defs_2g_tbl,
				 &rtw8852a_tssi_sys_defs_5g_tbl);
}
2839 
/* Initialize BB TX-power control for TSSI: per-path table (A or B), then
 * the band-specific (2G/5G) overrides.
 */
static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_a_tbl,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_b_tbl);
	rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_2g_tbl,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_5g_tbl);
}
2853 
/* Apply the per-path HE trigger-based TX-power control table (A or B). */
static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl,
				 &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl);
}
2862 
/* Apply the per-path TSSI DCK register table (A or B). */
static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_dck_defs_a_tbl,
				 &rtw8852a_tssi_dck_defs_b_tbl);
}
2870 
/* Build and program the 64-entry thermal-offset LUT for @path.
 *
 * The delta-swing tables for the current subband provide per-step gain
 * offsets; entries [0..31] are filled from the "down" table (negated) and
 * [63..32] from the "up" table, each clamped to the table's last value
 * past DELTA_SWINGIDX_SIZE. A stored thermal of 0xff means no valid
 * calibration reading: the base is set to the neutral value 32 and the
 * LUT zeroed. The A and B halves are identical except for the path's
 * register set and table pointers.
 */
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
/* Pack four consecutive s8 LUT entries into one little-endian u32 word. */
#define __get_val(ptr, idx)				\
({							\
	s8 *__ptr = (ptr);				\
	u8 __idx = (idx), __i, __v;			\
	u32 __val = 0;					\
	for (__i = 0; __i < 4; __i++) {			\
		__v = (__ptr[__idx + __i]);		\
		__val |= (__v << (8 * __i));		\
	}						\
	__val;						\
})
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u8 subband = chan->subband_type;
	const s8 *thm_up_a = NULL;
	const s8 *thm_down_a = NULL;
	const s8 *thm_up_b = NULL;
	const s8 *thm_down_b = NULL;
	u8 thermal = 0xff;
	s8 thm_ofst[64] = {0};
	u32 tmp = 0;
	u8 i, j;

	/* select the delta-swing tables for the current subband */
	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_p;
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_n;
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_p;
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_n;
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[0];
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[0];
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[0];
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[1];
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[1];
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[1];
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[2];
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[2];
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[2];
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[2];
		break;
	}

	if (path == RF_PATH_A) {
		thermal = tssi_info->thermal[RF_PATH_A];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			/* no valid reading: neutral base and a zeroed LUT */
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
					       thermal);

			/* lower half: negated "down" deltas, clamped past the
			 * end of the table
			 */
			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_a[i++] :
					      -thm_down_a[DELTA_SWINGIDX_SIZE - 1];

			/* upper half, filled backwards: "up" deltas from
			 * index 1, clamped likewise
			 */
			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_a[i++] :
					      thm_up_a[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = __get_val(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, tmp);
			}
		}
		/* pulse the ready strobe to latch the new table */
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);

	} else {
		thermal = tssi_info->thermal[RF_PATH_B];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			/* no valid reading: neutral base and a zeroed LUT */
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
					       thermal);

			/* same construction as path A, using the B tables */
			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_b[i++] :
					      -thm_down_b[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_b[i++] :
					      thm_up_b[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = __get_val(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, tmp);
			}
		}
		/* pulse the ready strobe to latch the new table */
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
	}
#undef __get_val
}
3028 
/* Apply the per-path TSSI DAC gain table (A or B). */
static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				   enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_dac_gain_tbl_defs_a_tbl,
				 &rtw8852a_tssi_dac_gain_tbl_defs_b_tbl);
}
3036 
/* Apply the per-path TSSI slope-calibration origin table (A or B). */
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_slope_cal_org_defs_a_tbl,
				 &rtw8852a_tssi_slope_cal_org_defs_b_tbl);
}
3044 
/* Apply the per-path TSSI RF gap table (A or B). */
static void _tssi_set_rf_gap_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_rf_gap_tbl_defs_a_tbl,
				 &rtw8852a_tssi_rf_gap_tbl_defs_b_tbl);
}
3052 
/* Apply the per-path TSSI slope table (A or B). */
static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_slope_defs_a_tbl,
				 &rtw8852a_tssi_slope_defs_b_tbl);
}
3060 
/* Apply the per-path TSSI tracking table (A or B). */
static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_track_defs_a_tbl,
				 &rtw8852a_tssi_track_defs_b_tbl);
}
3068 
/* Apply the per-path TXAGC offset / moving-average table (A or B). */
static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_txagc_ofst_mv_avg_defs_a_tbl,
				 &rtw8852a_tssi_txagc_ofst_mv_avg_defs_b_tbl);
}
3077 
/* Apply the TSSI PA-K table matching the current subband, choosing the
 * A or B variant by path. An unknown subband falls back to the 2G tables
 * (default case falls through to RTW89_CH_2G).
 */
static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 subband = chan->subband_type;

	switch (subband) {
	default:
	case RTW89_CH_2G:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_2g_tbl,
					 &rtw8852a_tssi_pak_defs_b_2g_tbl);
		break;
	case RTW89_CH_5G_BAND_1:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_5g_1_tbl,
					 &rtw8852a_tssi_pak_defs_b_5g_1_tbl);
		break;
	case RTW89_CH_5G_BAND_3:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_5g_3_tbl,
					 &rtw8852a_tssi_pak_defs_b_5g_3_tbl);
		break;
	case RTW89_CH_5G_BAND_4:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_5g_4_tbl,
					 &rtw8852a_tssi_pak_defs_b_5g_4_tbl);
		break;
	}
}
3108 
3109 static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3110 {
3111 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3112 	u8 i;
3113 
3114 	for (i = 0; i < RF_PATH_NUM_8852A; i++) {
3115 		_tssi_set_track(rtwdev, phy, i);
3116 		_tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);
3117 
3118 		rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A,
3119 					 &rtw8852a_tssi_enable_defs_a_tbl,
3120 					 &rtw8852a_tssi_enable_defs_b_tbl);
3121 
3122 		tssi_info->base_thermal[i] =
3123 			ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]);
3124 		rtwdev->is_tssi_mode[i] = true;
3125 	}
3126 }
3127 
/* Disable TSSI via the shared register table and clear both paths'
 * TSSI-mode flags.
 */
static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);

	rtwdev->is_tssi_mode[RF_PATH_A] = false;
	rtwdev->is_tssi_mode[RF_PATH_B] = false;
}
3135 
3136 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
3137 {
3138 	switch (ch) {
3139 	case 1 ... 2:
3140 		return 0;
3141 	case 3 ... 5:
3142 		return 1;
3143 	case 6 ... 8:
3144 		return 2;
3145 	case 9 ... 11:
3146 		return 3;
3147 	case 12 ... 13:
3148 		return 4;
3149 	case 14:
3150 		return 5;
3151 	}
3152 
3153 	return 0;
3154 }
3155 
3156 #define TSSI_EXTRA_GROUP_BIT (BIT(31))
3157 #define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
3158 #define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
3159 #define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
3160 #define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
3161 
/* Map a channel to its OFDM/MCS TSSI DE group index. Channels that sit
 * between two calibrated groups return TSSI_EXTRA_GROUP(n), telling the
 * caller to interpolate between groups n and n+1. Unknown channels map
 * to group 0.
 */
static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}
3229 
3230 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
3231 {
3232 	switch (ch) {
3233 	case 1 ... 8:
3234 		return 0;
3235 	case 9 ... 14:
3236 		return 1;
3237 	case 36 ... 48:
3238 		return 2;
3239 	case 52 ... 64:
3240 		return 3;
3241 	case 100 ... 112:
3242 		return 4;
3243 	case 116 ... 128:
3244 		return 5;
3245 	case 132 ... 144:
3246 		return 6;
3247 	case 149 ... 177:
3248 		return 7;
3249 	}
3250 
3251 	return 0;
3252 }
3253 
3254 static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3255 			    enum rtw89_rf_path path)
3256 {
3257 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3258 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3259 	u8 ch = chan->channel;
3260 	u32 gidx, gidx_1st, gidx_2nd;
3261 	s8 de_1st = 0;
3262 	s8 de_2nd = 0;
3263 	s8 val;
3264 
3265 	gidx = _tssi_get_ofdm_group(rtwdev, ch);
3266 
3267 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3268 		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
3269 		    path, gidx);
3270 
3271 	if (IS_TSSI_EXTRA_GROUP(gidx)) {
3272 		gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
3273 		gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
3274 		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
3275 		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
3276 		val = (de_1st + de_2nd) / 2;
3277 
3278 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3279 			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
3280 			    path, val, de_1st, de_2nd);
3281 	} else {
3282 		val = tssi_info->tssi_mcs[path][gidx];
3283 
3284 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3285 			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
3286 	}
3287 
3288 	return val;
3289 }
3290 
3291 static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
3292 				 enum rtw89_phy_idx phy,
3293 				 enum rtw89_rf_path path)
3294 {
3295 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3296 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3297 	u8 ch = chan->channel;
3298 	u32 tgidx, tgidx_1st, tgidx_2nd;
3299 	s8 tde_1st = 0;
3300 	s8 tde_2nd = 0;
3301 	s8 val;
3302 
3303 	tgidx = _tssi_get_trim_group(rtwdev, ch);
3304 
3305 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3306 		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
3307 		    path, tgidx);
3308 
3309 	if (IS_TSSI_EXTRA_GROUP(tgidx)) {
3310 		tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
3311 		tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
3312 		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
3313 		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
3314 		val = (tde_1st + tde_2nd) / 2;
3315 
3316 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3317 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
3318 			    path, val, tde_1st, tde_2nd);
3319 	} else {
3320 		val = tssi_info->tssi_trim[path][tgidx];
3321 
3322 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3323 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
3324 			    path, val);
3325 	}
3326 
3327 	return val;
3328 }
3329 
/* Program the per-path TSSI DE register fields from the stored
 * efuse-derived values plus the trim offset: CCK long/short preamble
 * registers get the CCK group value, and all OFDM bandwidth registers
 * (5M/10M/20M/40M/80M/80+80) get the MCS value. __DE_MASK is the 10-bit
 * DE field at bits [21:12] of each register.
 */
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy)
{
#define __DE_MASK 0x003ff000
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	/* register addresses indexed by path (A, B) */
	static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858};
	static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860};
	static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838};
	static const u32 r_mcs_40m[RF_PATH_NUM_8852A] = {0x5840, 0x7840};
	static const u32 r_mcs_80m[RF_PATH_NUM_8852A] = {0x5848, 0x7848};
	static const u32 r_mcs_80m_80m[RF_PATH_NUM_8852A] = {0x5850, 0x7850};
	static const u32 r_mcs_5m[RF_PATH_NUM_8852A] = {0x5828, 0x7828};
	static const u32 r_mcs_10m[RF_PATH_NUM_8852A] = {0x5830, 0x7830};
	u8 ch = chan->channel;
	u8 i, gidx;
	s8 ofdm_de;
	s8 trim_de;
	s32 val;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = 0; i < RF_PATH_NUM_8852A; i++) {
		/* CCK: stored group value plus trim */
		gidx = _tssi_get_cck_group(rtwdev, ch);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = tssi_info->tssi_cck[i][gidx] + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);

		rtw89_phy_write32_mask(rtwdev, r_cck_long[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_cck_short[i], __DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
			    r_cck_long[i],
			    rtw89_phy_read32_mask(rtwdev, r_cck_long[i],
						  __DE_MASK));

		/* OFDM/MCS: interpolated DE plus trim */
		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = ofdm_de + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
			    i, ofdm_de, trim_de);

		rtw89_phy_write32_mask(rtwdev, r_mcs_20m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_40m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_80m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_80m_80m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_5m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_10m[i], __DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
			    r_mcs_20m[i],
			    rtw89_phy_read32_mask(rtwdev, r_mcs_20m[i],
						  __DE_MASK));
	}
#undef __DE_MASK
}
3394 
/* Periodic TSSI thermal tracking: for each path with tracking enabled,
 * convert the thermal delta since enable time into a gain offset (1.5 per
 * thermal step, in 1/8 dB units), split it into a clamped integer part
 * and a fractional part looked up in tx_gain_scale_table, and program
 * both into the DPD offset / TX gain-scale registers. Runs only when
 * both paths are in TSSI mode.
 */
static void _tssi_track(struct rtw89_dev *rtwdev)
{
	/* fractional gain scale: entries 0-7 for positive offsets,
	 * 8-15 for negative offsets (index = low 3 bits of gain_offset)
	 */
	static const u32 tx_gain_scale_table[] = {
		0x400, 0x40e, 0x41d, 0x427, 0x43c, 0x44c, 0x45c, 0x46c,
		0x400, 0x39d, 0x3ab, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f1
	};
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 path;
	u8 cur_ther;
	s32 delta_ther = 0, gain_offset_int, gain_offset_float;
	s8 gain_offset;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRK] %s:\n",
		    __func__);

	if (!rtwdev->is_tssi_mode[RF_PATH_A])
		return;
	if (!rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	for (path = RF_PATH_A; path < RF_PATH_NUM_8852A; path++) {
		if (!tssi_info->tssi_tracking_check[path]) {
			rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRK] return!!!\n");
			continue;
		}

		cur_ther = (u8)rtw89_phy_read32_mask(rtwdev,
						  R_TSSI_THER + (path << 13),
						  B_TSSI_THER);

		/* skip when either reading is unavailable */
		if (cur_ther == 0 || tssi_info->base_thermal[path] == 0)
			continue;

		delta_ther = cur_ther - tssi_info->base_thermal[path];

		/* NOTE(review): the (s8) cast truncates delta_ther before the
		 * *15/10 scaling — presumably safe for realistic thermal
		 * deltas, but confirm.
		 */
		gain_offset = (s8)delta_ther * 15 / 10;

		tssi_info->extra_ofst[path] = gain_offset;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRK] base_thermal=%d gain_offset=0x%x path=%d\n",
			    tssi_info->base_thermal[path], gain_offset, path);

		/* split into integer (>>3, clamped to [-16, 15]) and
		 * fractional (&7) parts
		 */
		gain_offset_int = gain_offset >> 3;
		gain_offset_float = gain_offset & 7;

		if (gain_offset_int > 15)
			gain_offset_int = 15;
		else if (gain_offset_int < -16)
			gain_offset_int = -16;

		rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN + (path << 13),
				       B_DPD_OFT_EN, 0x1);

		rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13),
				       B_TXGAIN_SCALE_EN, 0x1);

		rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_ADDR + (path << 13),
				       B_DPD_OFT_ADDR, gain_offset_int);

		rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13),
				       B_TXGAIN_SCALE_OFT,
				       tx_gain_scale_table[gain_offset_float]);
	}
}
3460 
3461 static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3462 {
3463 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3464 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3465 	u8 ch = chan->channel, ch_tmp;
3466 	u8 bw = chan->band_width;
3467 	u8 band = chan->band_type;
3468 	u8 subband = chan->subband_type;
3469 	s8 power;
3470 	s32 xdbm;
3471 
3472 	if (bw == RTW89_CHANNEL_WIDTH_40)
3473 		ch_tmp = ch - 2;
3474 	else if (bw == RTW89_CHANNEL_WIDTH_80)
3475 		ch_tmp = ch - 6;
3476 	else
3477 		ch_tmp = ch;
3478 
3479 	power = rtw89_phy_read_txpwr_limit(rtwdev, band, bw, RTW89_1TX,
3480 					   RTW89_RS_MCS, RTW89_NONBF, ch_tmp);
3481 
3482 	xdbm = power * 100 / 4;
3483 
3484 	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d xdbm=%d\n",
3485 		    __func__, phy, xdbm);
3486 
3487 	if (xdbm > 1800 && subband == RTW89_CH_2G) {
3488 		tssi_info->tssi_tracking_check[RF_PATH_A] = true;
3489 		tssi_info->tssi_tracking_check[RF_PATH_B] = true;
3490 	} else {
3491 		rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_tracking_defs_tbl);
3492 		tssi_info->extra_ofst[RF_PATH_A] = 0;
3493 		tssi_info->extra_ofst[RF_PATH_B] = 0;
3494 		tssi_info->tssi_tracking_check[RF_PATH_A] = false;
3495 		tssi_info->tssi_tracking_check[RF_PATH_B] = false;
3496 	}
3497 }
3498 
/*
 * Start (enable != 0) or stop (enable == 0) PMAC-driven packet TX on
 * @path at @pwr_dbm, after configuring PLCP, TX path and power.
 */
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			u8 path, s16 pwr_dbm, u8 enable)
{
	rtw8852a_bb_set_plcp_tx(rtwdev);
	rtw8852a_bb_cfg_tx_path(rtwdev, path);
	rtw8852a_bb_set_power(rtwdev, pwr_dbm, phy);
	/* NOTE(review): the 20/5000/0 magic arguments look like packet
	 * count / period / extra parameters of
	 * rtw8852a_bb_set_pmac_pkt_tx() - confirm against its prototype. */
	rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy);
}
3507 
/*
 * Transmit a short fixed-power burst on both paths and snapshot the
 * resulting TXAGC offsets into tssi_info->default_txagc_offset[].
 * Scheduled TX is stopped and BTC is notified for the duration.
 */
static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	/* NOTE(review): despite the name, this holds the chip info. */
	const struct rtw89_chip_info *mac_reg = rtwdev->chip;
	u8 ch = chan->channel, ch_tmp;
	u8 bw = chan->band_width;
	u8 band = chan->band_type;
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0);
	s8 power;
	s16 xdbm;
	u32 i, tx_counter = 0;

	/* Map the operating channel to its limit-table channel index. */
	if (bw == RTW89_CHANNEL_WIDTH_40)
		ch_tmp = ch - 2;
	else if (bw == RTW89_CHANNEL_WIDTH_80)
		ch_tmp = ch - 6;
	else
		ch_tmp = ch;

	power = rtw89_phy_read_txpwr_limit(rtwdev, band, RTW89_CHANNEL_WIDTH_20,
					   RTW89_1TX, RTW89_RS_OFDM,
					   RTW89_NONBF, ch_tmp);

	/* Threshold check only: this xdbm value is overwritten by both
	 * branches below.  NOTE(review): the scale here (power * 100 >>
	 * txpwr_factor_mac) differs from the value assigned below -
	 * confirm the intended units. */
	xdbm = (power * 100) >> mac_reg->txpwr_factor_mac;

	if (xdbm > 1800)
		xdbm = 68;
	else
		xdbm = power * 2;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] %s: phy=%d org_power=%d xdbm=%d\n",
		    __func__, phy, power, xdbm);

	/* Quiesce: notify BTC, stop scheduled TX, wait for RX idle, and
	 * record the TX counter so the burst size can be reported. */
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy));
	tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);

	/* Fire the burst on both paths for 15 ms, then stop it. */
	_tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true);
	mdelay(15);
	_tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false);

	/* Frames actually sent during the burst. */
	tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD) -
		    tx_counter;

	/* Snapshot path A's TXAGC offset unless the register reads 0xc000
	 * (NOTE(review): presumably a reset/idle pattern - confirm) or 0;
	 * retry up to six times for a non-zero value. */
	if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0xc000 &&
	    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0x0) {
		for (i = 0; i < 6; i++) {
			tssi_info->default_txagc_offset[RF_PATH_A] =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
						      MASKBYTE3);

			if (tssi_info->default_txagc_offset[RF_PATH_A] != 0x0)
				break;
		}
	}

	/* Same procedure for path B. */
	if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0xc000 &&
	    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0x0) {
		for (i = 0; i < 6; i++) {
			tssi_info->default_txagc_offset[RF_PATH_B] =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
						      MASKBYTE3);

			if (tssi_info->default_txagc_offset[RF_PATH_B] != 0x0)
				break;
		}
	}

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] %s: tx counter=%d\n",
		    __func__, tx_counter);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] Backup R_TXAGC_BB=0x%x R_TXAGC_BB_S1=0x%x\n",
		    tssi_info->default_txagc_offset[RF_PATH_A],
		    tssi_info->default_txagc_offset[RF_PATH_B]);

	/* Leave PMAC TX mode and resume normal operation. */
	rtw8852a_bb_tx_mode_switch(rtwdev, phy, 0);

	rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}
3594 
3595 void rtw8852a_rck(struct rtw89_dev *rtwdev)
3596 {
3597 	u8 path;
3598 
3599 	for (path = 0; path < 2; path++)
3600 		_rck(rtwdev, path);
3601 }
3602 
3603 void rtw8852a_dack(struct rtw89_dev *rtwdev)
3604 {
3605 	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
3606 
3607 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
3608 	_dac_cal(rtwdev, false);
3609 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
3610 }
3611 
3612 void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
3613 {
3614 	u32 tx_en;
3615 	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
3616 
3617 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
3618 	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3619 	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3620 
3621 	_iqk_init(rtwdev);
3622 	if (rtwdev->dbcc_en)
3623 		_iqk_dbcc(rtwdev, phy_idx);
3624 	else
3625 		_iqk(rtwdev, phy_idx, false);
3626 
3627 	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
3628 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
3629 }
3630 
3631 void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
3632 		     bool is_afe)
3633 {
3634 	u32 tx_en;
3635 	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
3636 
3637 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
3638 	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3639 	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3640 
3641 	_rx_dck(rtwdev, phy_idx, is_afe);
3642 
3643 	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
3644 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
3645 }
3646 
3647 void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
3648 {
3649 	u32 tx_en;
3650 	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
3651 
3652 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
3653 	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3654 	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3655 
3656 	rtwdev->dpk.is_dpk_enable = true;
3657 	rtwdev->dpk.is_dpk_reload_en = false;
3658 	_dpk(rtwdev, phy_idx, false);
3659 
3660 	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
3661 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
3662 }
3663 
/* Periodic DPK tracking hook; thin wrapper around _dpk_track(). */
void rtw8852a_dpk_track(struct rtw89_dev *rtwdev)
{
	_dpk_track(rtwdev);
}
3668 
3669 void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3670 {
3671 	u8 i;
3672 
3673 	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
3674 		    __func__, phy);
3675 
3676 	_tssi_disable(rtwdev, phy);
3677 
3678 	for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
3679 		_tssi_rf_setting(rtwdev, phy, i);
3680 		_tssi_set_sys(rtwdev, phy);
3681 		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
3682 		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
3683 		_tssi_set_dck(rtwdev, phy, i);
3684 		_tssi_set_tmeter_tbl(rtwdev, phy, i);
3685 		_tssi_set_dac_gain_tbl(rtwdev, phy, i);
3686 		_tssi_slope_cal_org(rtwdev, phy, i);
3687 		_tssi_set_rf_gap_tbl(rtwdev, phy, i);
3688 		_tssi_set_slope(rtwdev, phy, i);
3689 		_tssi_pak(rtwdev, phy, i);
3690 	}
3691 
3692 	_tssi_enable(rtwdev, phy);
3693 	_tssi_set_efuse_to_de(rtwdev, phy);
3694 	_tssi_high_power(rtwdev, phy);
3695 	_tssi_pre_tx(rtwdev, phy);
3696 }
3697 
3698 void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3699 {
3700 	u8 i;
3701 
3702 	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
3703 		    __func__, phy);
3704 
3705 	if (!rtwdev->is_tssi_mode[RF_PATH_A])
3706 		return;
3707 	if (!rtwdev->is_tssi_mode[RF_PATH_B])
3708 		return;
3709 
3710 	_tssi_disable(rtwdev, phy);
3711 
3712 	for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
3713 		_tssi_rf_setting(rtwdev, phy, i);
3714 		_tssi_set_sys(rtwdev, phy);
3715 		_tssi_set_tmeter_tbl(rtwdev, phy, i);
3716 		_tssi_pak(rtwdev, phy, i);
3717 	}
3718 
3719 	_tssi_enable(rtwdev, phy);
3720 	_tssi_set_efuse_to_de(rtwdev, phy);
3721 }
3722 
/* Periodic TSSI thermal tracking hook; thin wrapper around _tssi_track(). */
void rtw8852a_tssi_track(struct rtw89_dev *rtwdev)
{
	_tssi_track(rtwdev);
}
3727 
/*
 * Scan-time TSSI averaging setup: disable TSSI, zero the averaging and
 * moving-average windows on both paths, then re-enable TSSI.
 * NOTE: @phy is unused; both paths are always programmed.
 */
static
void _rtw8852a_tssi_avg_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	/* disable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);

	/* Clear averaging windows for both paths. */
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x0);

	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x0);

	/* enable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl);
}
3746 
/*
 * Post-scan TSSI averaging restore: disable TSSI, reprogram the normal
 * averaging (0x4) and moving-average (0x2) windows on both paths, then
 * re-enable TSSI.  NOTE: @phy is unused; both paths are always programmed.
 */
static
void _rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	/* disable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);

	/* Restore averaging windows for both paths. */
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x4);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);

	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x4);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);

	/* enable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl);
}
3765 
3766 static void rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev,
3767 				  enum rtw89_phy_idx phy, bool enable)
3768 {
3769 	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
3770 		return;
3771 
3772 	if (enable) {
3773 		/* SCAN_START */
3774 		_rtw8852a_tssi_avg_scan(rtwdev, phy);
3775 	} else {
3776 		/* SCAN_END */
3777 		_rtw8852a_tssi_set_avg(rtwdev, phy);
3778 	}
3779 }
3780 
3781 static void rtw8852a_tssi_default_txagc(struct rtw89_dev *rtwdev,
3782 					enum rtw89_phy_idx phy, bool enable)
3783 {
3784 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3785 	u8 i;
3786 
3787 	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
3788 		return;
3789 
3790 	if (enable) {
3791 		if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 &&
3792 		    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) {
3793 			for (i = 0; i < 6; i++) {
3794 				tssi_info->default_txagc_offset[RF_PATH_A] =
3795 					rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
3796 							      B_TXAGC_BB);
3797 				if (tssi_info->default_txagc_offset[RF_PATH_A])
3798 					break;
3799 			}
3800 		}
3801 
3802 		if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 &&
3803 		    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) {
3804 			for (i = 0; i < 6; i++) {
3805 				tssi_info->default_txagc_offset[RF_PATH_B] =
3806 					rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
3807 							      B_TXAGC_BB_S1);
3808 				if (tssi_info->default_txagc_offset[RF_PATH_B])
3809 					break;
3810 			}
3811 		}
3812 	} else {
3813 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT,
3814 				       tssi_info->default_txagc_offset[RF_PATH_A]);
3815 		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,
3816 				       tssi_info->default_txagc_offset[RF_PATH_B]);
3817 
3818 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
3819 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
3820 
3821 		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
3822 		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
3823 	}
3824 }
3825 
3826 void rtw8852a_wifi_scan_notify(struct rtw89_dev *rtwdev,
3827 			       bool scan_start, enum rtw89_phy_idx phy_idx)
3828 {
3829 	if (scan_start) {
3830 		rtw8852a_tssi_default_txagc(rtwdev, phy_idx, true);
3831 		rtw8852a_tssi_set_avg(rtwdev, phy_idx, true);
3832 	} else {
3833 		rtw8852a_tssi_default_txagc(rtwdev, phy_idx, false);
3834 		rtw8852a_tssi_set_avg(rtwdev, phy_idx, false);
3835 	}
3836 }
3837