xref: /freebsd/sys/contrib/dev/rtw89/rtw8852bt_rfk.c (revision 3a56015a2f5d630910177fa79a522bb95511ccf7)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2024 Realtek Corporation
3  */
4 
5 #include "coex.h"
6 #include "debug.h"
7 #include "fw.h"
8 #include "mac.h"
9 #include "phy.h"
10 #include "reg.h"
11 #include "rtw8852bt.h"
12 #include "rtw8852bt_rfk.h"
13 #include "rtw8852bt_rfk_table.h"
14 #include "rtw8852b_common.h"
15 
/* Calibration algorithm versions (reported in RFK debug logs). */
#define RTW8852BT_RXDCK_VER 0x1
#define RTW8852BT_IQK_VER 0x2a
#define RTW8852BT_SS 2
#define RTW8852BT_TSSI_PATH_NR 2
#define RTW8852BT_DPK_VER 0x06
#define DPK_RF_PATH_MAX_8852BT 2

/* TSSI DE field within its register. */
#define _TSSI_DE_MASK GENMASK(21, 12)
/* DPK TX AGC index limits and the "invalid" sentinel. */
#define DPK_TXAGC_LOWER 0x2e
#define DPK_TXAGC_UPPER 0x3f
#define DPK_TXAGC_INVAL 0xff
/* RF register field masks (RX BB gain and mode fields). */
#define RFREG_MASKRXBB 0x003e0
#define RFREG_MASKMODE 0xf0000
29 
/* RF front-end operating modes programmed into the RF mode field. */
enum rf_mode {
	RF_SHUT_DOWN = 0x0,
	RF_STANDBY = 0x1,
	RF_TX = 0x2,
	RF_RX = 0x3,
	RF_TXIQK = 0x4,
	RF_DPK = 0x5,
	RF_RXK1 = 0x6,
	RF_RXK2 = 0x7,
};
40 
/* One-hot command IDs for the DPK (digital pre-distortion) engine;
 * the D_* variants are the "direct" command set (0x28 and up).
 */
enum rtw8852bt_dpk_id {
	LBK_RXIQK	= 0x06,
	SYNC		= 0x10,
	MDPK_IDL	= 0x11,
	MDPK_MPA	= 0x12,
	GAIN_LOSS	= 0x13,
	GAIN_CAL	= 0x14,
	DPK_RXAGC	= 0x15,
	KIP_PRESET	= 0x16,
	KIP_RESTORE	= 0x17,
	DPK_TXAGC	= 0x19,
	D_KIP_PRESET	= 0x28,
	D_TXAGC		= 0x29,
	D_RXAGC		= 0x2a,
	D_SYNC		= 0x2b,
	D_GAIN_LOSS	= 0x2c,
	D_MDPK_IDL	= 0x2d,
	D_GAIN_NORM	= 0x2f,
	D_KIP_THERMAL	= 0x30,
	D_KIP_RESTORE	= 0x31
};
62 
/* States of the DPK automatic gain control state machine. */
enum dpk_agc_step {
	DPK_AGC_STEP_SYNC_DGAIN,
	DPK_AGC_STEP_GAIN_ADJ,
	DPK_AGC_STEP_GAIN_LOSS_IDX,
	DPK_AGC_STEP_GL_GT_CRITERION,
	DPK_AGC_STEP_GL_LT_CRITERION,
	DPK_AGC_STEP_SET_TX_GAIN,
};
71 
/* IQK sub-operation IDs (TX/RX IQ calibration, LO leakage (FLOK)
 * coarse/fine steps per band, narrow-band variants, and restore).
 */
enum rtw8852bt_iqk_type {
	ID_TXAGC = 0x0,
	ID_FLOK_COARSE = 0x1,
	ID_FLOK_FINE = 0x2,
	ID_TXK = 0x3,
	ID_RXAGC = 0x4,
	ID_RXK = 0x5,
	ID_NBTXK = 0x6,
	ID_NBRXK = 0x7,
	ID_FLOK_VBUFFER = 0x8,
	ID_A_FLOK_COARSE = 0x9,
	ID_G_FLOK_COARSE = 0xa,
	ID_A_FLOK_FINE = 0xb,
	ID_G_FLOK_FINE = 0xc,
	ID_IQK_RESTORE = 0x10,
};
88 
/* ADC sampling clock selections written by _rxck_force(). */
enum adc_ck {
	ADC_NA = 0,
	ADC_480M = 1,
	ADC_960M = 2,
	ADC_1920M = 3,
};

/* DAC clock selections written by _txck_force(). */
enum dac_ck {
	DAC_40M = 0,
	DAC_80M = 1,
	DAC_120M = 2,
	DAC_160M = 3,
	DAC_240M = 4,
	DAC_320M = 5,
	DAC_480M = 6,
	DAC_960M = 7,
};
106 
/* Per-path register addresses (index 0 = S0/path A at 0x5xxx/0x1xxx,
 * index 1 = S1/path B at 0x7xxx/0x3xxx) used by the TSSI routines.
 */
static const u32 _tssi_trigger[RTW8852BT_TSSI_PATH_NR] = {0x5820, 0x7820};
static const u32 _tssi_cw_rpt_addr[RTW8852BT_TSSI_PATH_NR] = {0x1c18, 0x3c18};
static const u32 _tssi_cw_default_addr[RTW8852BT_TSSI_PATH_NR][4] = {
	{0x5634, 0x5630, 0x5630, 0x5630},
	{0x7634, 0x7630, 0x7630, 0x7630} };
/* Field masks matching the four entries of _tssi_cw_default_addr. */
static const u32 _tssi_cw_default_mask[4] = {
	0x000003ff, 0x3ff00000, 0x000ffc00, 0x000003ff};
/* Per-path TSSI DE (delta) registers for each rate class / bandwidth. */
static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852BT] = {0x5858, 0x7858};
static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852BT] = {0x5860, 0x7860};
static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852BT] = {0x5838, 0x7838};
static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852BT] = {0x5840, 0x7840};
static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852BT] = {0x5848, 0x7848};
static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852BT] = {0x5850, 0x7850};
static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852BT] = {0x5828, 0x7828};
static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852BT] = {0x5830, 0x7830};

/* Registers saved before and restored after calibration by the
 * _rfk_backup_*/_rfk_reload_* helpers below.
 */
static const u32 rtw8852bt_backup_bb_regs[] = {0x2344, 0x5800, 0x7800, 0x0704};
static const u32 rtw8852bt_backup_rf_regs[] = {
	0xde, 0xdf, 0x8b, 0x90, 0x97, 0x85, 0x5, 0x10005};
static const u32 rtw8852bt_backup_kip_regs[] = {
	0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec,
	0x823c, 0x8224, 0x8220, 0xc1d4, 0xc1d8, 0xc1c4, 0xc1ec};
129 
130 #define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852bt_backup_bb_regs)
131 #define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852bt_backup_rf_regs)
132 #define BACKUP_KIP_REGS_NR ARRAY_SIZE(rtw8852bt_backup_kip_regs)
133 
/* Latch the RF thermal sensor on @path and cache the raw reading in the
 * DPK back-up info for calibration slot @kidx.
 */
static void _rfk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	/* Re-arm the thermal meter trigger (1 -> 0 -> 1 pulse). */
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x0);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);

	/* Give the sensor time before reading the value back. */
	udelay(200);

	dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, path, RR_TM, RR_TM_VAL);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
		    dpk->bp[path][kidx].ther_dpk);
}
149 
150 static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
151 {
152 	u32 i;
153 
154 	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
155 		backup_bb_reg_val[i] =
156 		rtw89_phy_read32_mask(rtwdev, rtw8852bt_backup_bb_regs[i], MASKDWORD);
157 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
158 			    "[RFK]backup bb reg : %x, value =%x\n",
159 			    rtw8852bt_backup_bb_regs[i], backup_bb_reg_val[i]);
160 	}
161 }
162 
163 static void _rfk_backup_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[])
164 {
165 	u32 i;
166 
167 	for (i = 0; i < BACKUP_KIP_REGS_NR; i++) {
168 		backup_kip_reg_val[i] =
169 			rtw89_phy_read32_mask(rtwdev, rtw8852bt_backup_kip_regs[i],
170 					      MASKDWORD);
171 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
172 			    rtw8852bt_backup_kip_regs[i], backup_kip_reg_val[i]);
173 	}
174 }
175 
176 static
177 void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], u8 rf_path)
178 {
179 	u32 i;
180 
181 	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
182 		backup_rf_reg_val[i] =
183 			rtw89_read_rf(rtwdev, rf_path, rtw8852bt_backup_rf_regs[i],
184 				      RFREG_MASK);
185 
186 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup RF S%d 0x%x = %x\n",
187 			    rf_path, rtw8852bt_backup_rf_regs[i], backup_rf_reg_val[i]);
188 	}
189 }
190 
191 static void _rfk_reload_bb_reg(struct rtw89_dev *rtwdev, const u32 backup_bb_reg_val[])
192 {
193 	u32 i;
194 
195 	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
196 		rtw89_phy_write32_mask(rtwdev, rtw8852bt_backup_bb_regs[i],
197 				       MASKDWORD, backup_bb_reg_val[i]);
198 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
199 			    "[RFK]restore bb reg : %x, value =%x\n",
200 			    rtw8852bt_backup_bb_regs[i], backup_bb_reg_val[i]);
201 	}
202 }
203 
204 static void _rfk_reload_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[])
205 {
206 	u32 i;
207 
208 	for (i = 0; i < BACKUP_KIP_REGS_NR; i++) {
209 		rtw89_phy_write32_mask(rtwdev, rtw8852bt_backup_kip_regs[i],
210 				       MASKDWORD, backup_kip_reg_val[i]);
211 
212 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
213 			    "[RFK]restore kip reg : %x, value =%x\n",
214 			    rtw8852bt_backup_kip_regs[i], backup_kip_reg_val[i]);
215 	}
216 }
217 
218 static void _rfk_reload_rf_reg(struct rtw89_dev *rtwdev,
219 			       const u32 backup_rf_reg_val[], u8 rf_path)
220 {
221 	u32 i;
222 
223 	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
224 		rtw89_write_rf(rtwdev, rf_path, rtw8852bt_backup_rf_regs[i],
225 			       RFREG_MASK, backup_rf_reg_val[i]);
226 
227 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
228 			    "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path,
229 			    rtw8852bt_backup_rf_regs[i], backup_rf_reg_val[i]);
230 	}
231 }
232 
233 static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
234 {
235 	u8 val;
236 
237 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,PHY%d\n",
238 		    rtwdev->dbcc_en, phy_idx);
239 
240 	if (!rtwdev->dbcc_en) {
241 		val = RF_AB;
242 	} else {
243 		if (phy_idx == RTW89_PHY_0)
244 			val = RF_A;
245 		else
246 			val = RF_B;
247 	}
248 	return val;
249 }
250 
/* Force (or release) the TX DAC clock override of @path at rate @ck.
 * The override is always gated off first; when @force is false it is
 * left off.  The per-path register is addressed by folding the path
 * into the base address via (path << 13).
 */
static
void _txck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force,
		 enum dac_ck ck)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0);

	if (!force)
		return;

	/* Program the rate before re-enabling the override. */
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1);
}
263 
/* Force (or release) the RX ADC clock override of @path at rate @ck,
 * then reconfigure the ADC for the bandwidth implied by that rate.
 * When @force is false the override is simply disabled.
 */
static
void _rxck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force,
		 enum adc_ck ck)
{
	u32 bw = 0;

	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);

	if (!force)
		return;

	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);

	/* Map the ADC clock to a channel bandwidth for rtw8852bx_adc_cfg();
	 * on an unexpected @ck, bw stays 0 and a debug message is logged.
	 */
	switch (ck) {
	case ADC_480M:
		bw = RTW89_CHANNEL_WIDTH_40;
		break;
	case ADC_960M:
		bw = RTW89_CHANNEL_WIDTH_80;
		break;
	case ADC_1920M:
		bw = RTW89_CHANNEL_WIDTH_160;
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s==>Invalid ck", __func__);
		break;
	}

	rtw8852bx_adc_cfg(rtwdev, bw, path);
}
295 
/* Prepare the BB/AFE for calibration: enable ADC FIFOs and clock
 * forcing on both paths, force the TX/RX converter clocks, release
 * resets, and disable RX detectors (PD hit / CCA) that would react to
 * the calibration signals.  Undone by _rfk_bb_afe_restore().
 */
static void _rfk_bb_afe_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x1ffffff);
	rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x3ff);
	rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, B_DAC_CLK_IDX, 0x1);

	/* Force both paths' converter clocks: DAC 960M, ADC 1920M. */
	_txck_force(rtwdev, RF_PATH_A, true, DAC_960M);
	_txck_force(rtwdev, RF_PATH_B, true, DAC_960M);
	_rxck_force(rtwdev, RF_PATH_A, true, ADC_1920M);
	_rxck_force(rtwdev, RF_PATH_B, true, ADC_1920M);

	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
			       B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x5);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	/* Analog power sequencing with short settle delays. */
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x3333);

	rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN, MASKLWORD, 0x0000);
	rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TXAGC_TH, MASKLWORD, 0x0000);
}
339 
/* Undo _rfk_bb_afe_setting(): release all clock forcing, re-enable the
 * RX detectors, and put the BB/AFE back into normal operation.
 */
static void _rfk_bb_afe_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x63);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
			       B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0000);
	rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);

	/* Release TX power forcing and pulse the per-path TXPW reset
	 * (0x1 then 0x2) on both paths.
	 */
	rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, 0x2);
}
368 
/* Kick one RX DC offset calibration run on @path: clear the DCK result
 * latch, then pulse RR_DCK_LV 0 -> 1 to start the engine and give it
 * 1 ms to run.
 */
static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
	mdelay(1);
}
377 
/* Run RX DC offset calibration (RX DCK) on every RF path.  For each
 * path, the RSV1 and DCK fine-tune settings are saved and restored
 * around the calibration, and the TSSI tracking enable bit is toggled
 * (0x1 before, 0x0 after) when TSSI mode is active on that path.
 */
static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 path, dck_tune;
	u32 rf_reg5;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, CV : 0x%x) ******\n",
		    RTW8852BT_RXDCK_VER, rtwdev->hal.cv);

	for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
		/* Save state to restore after calibration. */
		rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
		dck_tune = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x1);

		/* Put the path in RX mode with fine-tune cleared, run DCK. */
		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
		_set_rx_dck(rtwdev, phy, path);
		/* Restore the saved fine-tune code and RSV1 register. */
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
		rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x0);
	}
}
409 
/* RC calibration on @path: trigger the HW RCK engine, poll for
 * completion (bit 3 of RR_RCKS, up to 30 us), then write the resulting
 * capacitor code back as the final value.  On poll timeout the code
 * that was read is still programmed; only the debug log reflects @ret.
 */
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u32 rf_reg5;
	u32 rck_val;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

	/* Save RSV1 so it can be restored after calibration. */
	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	/* RCK trigger */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);

	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30,
				       false, rtwdev, path, RR_RCKS, BIT(3));

	/* Read back the calibrated cap code and latch it explicitly. */
	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n",
		    rck_val, ret);

	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK));
}
444 
/* Digital-die RC calibration (DRCK): start the HW engine, poll up to
 * 10 ms for the done flag, latch the result, then copy the calibrated
 * code into the manual-value field with the engine idled.
 */
static void _drck(struct rtw89_dev *rtwdev)
{
	u32 rck_d;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n");
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false,
				       rtwdev, R_DRCK_RES, B_DRCK_POL);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");

	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x0);
	/* Pulse the latch to capture the calibration result. */
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0);

	/* Read the result code (bits 14:10 of R_DRCK_RES) and apply it. */
	rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, 0x7c00);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, rck_d);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n",
		    rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
}
472 
/* Read back the S0 (path A) DACK results into the dack info cache:
 * sixteen MSBK codes per I/Q plus the bias and DA DCK values.
 */
static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	/* Debug-mode access is needed to read the result registers. */
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);

	for (i = 0; i < 0x10; i++) {
		/* Select index i, then read the MSBK code for I and Q. */
		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
		dack->msbk_d[0][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0);

		rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
		dack->msbk_d[0][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1);
	}

	dack->biask_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00);
	dack->biask_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01);

	dack->dadck_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00);
	dack->dadck_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01);
}
500 
/* Read back the S1 (path B) DACK results into the dack info cache;
 * mirrors _dack_backup_s0() with the path-B register set.
 */
static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	/* Debug-mode access is needed to read the result registers. */
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);

	for (i = 0; i < 0x10; i++) {
		/* Select index i, then read the MSBK code for I and Q. */
		rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
		dack->msbk_d[1][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK10S);

		rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
		dack->msbk_d[1][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK11S);
	}

	dack->biask_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, B_DACK_BIAS10);
	dack->biask_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, B_DACK_BIAS11);

	dack->dadck_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, B_DACK_DADCK10);
	dack->dadck_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, B_DACK_DADCK11);
}
528 
529 static
530 void _dack_reset(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
531 {
532 	if (path == RF_PATH_A) {
533 		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x0);
534 		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x1);
535 	} else {
536 		rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, 0x0);
537 		rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, 0x1);
538 	}
539 }
540 
541 static
542 void _dack_reload_by_path(struct rtw89_dev *rtwdev, u8 path, u8 index)
543 {
544 	struct rtw89_dack_info *dack = &rtwdev->dack;
545 	u32 tmp, tmp_offset, tmp_reg;
546 	u32 idx_offset, path_offset;
547 	u8 i;
548 
549 	if (index == 0)
550 		idx_offset = 0;
551 	else
552 		idx_offset = 0x14;
553 
554 	if (path == RF_PATH_A)
555 		path_offset = 0;
556 	else
557 		path_offset = 0x28;
558 
559 	tmp_offset = idx_offset + path_offset;
560 
561 	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_RST, 0x1);
562 	rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_RST, 0x1);
563 	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_RST, 0x1);
564 	rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_RST, 0x1);
565 
566 	/* msbk_d: 15/14/13/12 */
567 	tmp = 0x0;
568 	for (i = 0; i < 4; i++)
569 		tmp |= dack->msbk_d[path][index][i + 12] << (i * 8);
570 	tmp_reg = 0xc200 + tmp_offset;
571 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
572 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
573 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
574 
575 	/* msbk_d: 11/10/9/8 */
576 	tmp = 0x0;
577 	for (i = 0; i < 4; i++)
578 		tmp |= dack->msbk_d[path][index][i + 8] << (i * 8);
579 	tmp_reg = 0xc204 + tmp_offset;
580 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
581 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
582 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
583 
584 	/* msbk_d: 7/6/5/4 */
585 	tmp = 0x0;
586 	for (i = 0; i < 4; i++)
587 		tmp |= dack->msbk_d[path][index][i + 4] << (i * 8);
588 	tmp_reg = 0xc208 + tmp_offset;
589 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
590 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
591 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
592 
593 	/* msbk_d: 3/2/1/0 */
594 	tmp = 0x0;
595 	for (i = 0; i < 4; i++)
596 		tmp |= dack->msbk_d[path][index][i] << (i * 8);
597 	tmp_reg = 0xc20c + tmp_offset;
598 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
599 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
600 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
601 
602 	/* dadak_d/biask_d */
603 	tmp = (dack->biask_d[path][index] << 22) |
604 	      (dack->dadck_d[path][index] << 14);
605 	tmp_reg = 0xc210 + tmp_offset;
606 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
607 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
608 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
609 
610 	/* enable DACK result from reg */
611 	rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL + tmp_offset, B_DACKN0_EN, 0x1);
612 }
613 
614 static
615 void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
616 {
617 	u8 i;
618 
619 	for (i = 0; i < 2; i++)
620 		_dack_reload_by_path(rtwdev, path, i);
621 }
622 
623 static bool _dack_s0_poll(struct rtw89_dev *rtwdev)
624 {
625 	if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
626 	    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 ||
627 	    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
628 	    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
629 		return false;
630 
631 	return true;
632 }
633 
/* Run DAC calibration for S0 (path A): force the DAC clock to 160M,
 * power-sequence the analog block, reset and start the DACK engine,
 * poll up to 20 ms for completion, then back up and reload the results.
 */
static void _dack_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	_txck_force(rtwdev, RF_PATH_A, true, DAC_160M);

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, 0x0);
	udelay(100);
	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_VAL, 0x30);
	rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_VAL, 0x30);

	_dack_reset(rtwdev, RF_PATH_A);

	/* Start the DACK engine. */
	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1);
	udelay(1);

	dack->msbk_timeout[0] = false;

	ret = read_poll_timeout_atomic(_dack_s0_poll, done, done,
				       1, 20000, false, rtwdev);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n");
		dack->msbk_timeout[0] = true;
	}

	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0);

	/* Release the clock override, then cache and apply the results. */
	_txck_force(rtwdev, RF_PATH_A, false, DAC_960M);
	_dack_backup_s0(rtwdev);
	_dack_reload(rtwdev, RF_PATH_A);

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
}
671 
672 static bool _dack_s1_poll(struct rtw89_dev *rtwdev)
673 {
674 	if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 ||
675 	    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 ||
676 	    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 ||
677 	    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0)
678 		return false;
679 
680 	return true;
681 }
682 
/* Run DAC calibration for S1 (path B); mirrors _dack_s0() with the
 * path-B register set and a 10 ms poll budget.
 */
static void _dack_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	_txck_force(rtwdev, RF_PATH_B, true, DAC_160M);

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, 0x0);
	udelay(100);
	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_VAL, 0x30);
	rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_VAL, 0x30);

	_dack_reset(rtwdev, RF_PATH_B);

	/* Start the DACK engine. */
	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1);
	udelay(1);

	dack->msbk_timeout[1] = false;

	ret = read_poll_timeout_atomic(_dack_s1_poll, done, done,
				       1, 10000, false, rtwdev);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n");
		dack->msbk_timeout[1] = true;
	}

	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0);

	/* Release the clock override, then cache and apply the results. */
	_txck_force(rtwdev, RF_PATH_B, false, DAC_960M);
	_dack_backup_s1(rtwdev);
	_dack_reload(rtwdev, RF_PATH_B);

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}
720 
/* Run DAC calibration on both paths, S0 first then S1. */
static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
	_dack_s1(rtwdev);
}
726 
727 static void _dack_dump(struct rtw89_dev *rtwdev)
728 {
729 	struct rtw89_dack_info *dack = &rtwdev->dack;
730 	u8 i;
731 	u8 t;
732 
733 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
734 		    "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
735 		    dack->addck_d[0][0], dack->addck_d[0][1]);
736 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
737 		    "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
738 		    dack->addck_d[1][0], dack->addck_d[1][1]);
739 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
740 		    "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
741 		    dack->dadck_d[0][0], dack->dadck_d[0][1]);
742 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
743 		    "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
744 		    dack->dadck_d[1][0], dack->dadck_d[1][1]);
745 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
746 		    "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
747 		    dack->biask_d[0][0], dack->biask_d[0][1]);
748 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
749 		    "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
750 		    dack->biask_d[1][0], dack->biask_d[1][1]);
751 
752 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
753 	for (i = 0; i < 0x10; i++) {
754 		t = dack->msbk_d[0][0][i];
755 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
756 	}
757 
758 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
759 	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
760 		t = dack->msbk_d[0][1][i];
761 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
762 	}
763 
764 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
765 	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
766 		t = dack->msbk_d[1][0][i];
767 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
768 	}
769 
770 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
771 	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
772 		t = dack->msbk_d[1][1][i];
773 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
774 	}
775 }
776 
/* ADC DC offset calibration (ADDCK) for both paths.  For each path the
 * sequence is: disable manual mode, power-sequence the analog block,
 * pulse the trigger, poll up to 10 ms for the done flag, restore the
 * analog settings, then cache the two result codes (I and Q) in the
 * dack info for later reload by _addck_reload().
 */
static void _addck_ori(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	/* ---- S0 (path A) ---- */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_MAN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);

	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	udelay(100);

	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
	/* Pulse the ADDCK trigger, then start the calibration. */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x0);
	udelay(1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);
	dack->addck_timeout[0] = false;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false,
				       rtwdev, R_ADDCKR0, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
		dack->addck_timeout[0] = true;
	}

	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);

	/* Stop the engine and cache the S0 I/Q result codes. */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
	dack->addck_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0);
	dack->addck_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);

	/* ---- S1 (path B), same sequence with path-B registers ---- */
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);

	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	udelay(100);

	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x0);
	udelay(1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);
	dack->addck_timeout[1] = false;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false,
				       rtwdev, R_ADDCKR1, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
		dack->addck_timeout[1] = true;
	}

	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
	dack->addck_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A0);
	dack->addck_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A1);

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}
862 
/* Program the cached ADDCK results back as manual override values and
 * enable the override (RLS = 0x3) on both paths.
 */
static void _addck_reload(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, dack->addck_d[0][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, dack->addck_d[0][1]);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1, dack->addck_d[1][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0, dack->addck_d[1][1]);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3);
}
877 
/* Disable all manual DACK/ADDCK overrides so the hardware-calibrated
 * values take effect again.
 */
static void _dack_manual_off(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x0);

	rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL, B_DACKN0_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DACKN1_CTL, B_DACKN1_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DACKN2_CTL, B_DACKN2_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DACKN3_CTL, B_DACKN3_ON, 0x0);
}
888 
/* Top-level DAC calibration (DACK) sequence for both RF paths.
 *
 * Order matters: DRCK first, manual overrides off, RF paths parked in a
 * calibration mode, ADC clocks forced to 960M for the original ADDCK
 * measurement, then the measured offsets are reloaded before the main
 * _dack() run.  Updates dack->dack_done and dack->dack_cnt.
 *
 * NOTE(review): @force is currently unused — calibration always runs.
 */
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	dack->dack_done = false;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");

	_drck(rtwdev);
	_dack_manual_off(rtwdev);
	/* park both RF paths: RSV1 = 0, MOD = 0x337e1 (calibration setting) */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
	/* force 960M ADC clock while measuring the ADC DC offsets */
	_rxck_force(rtwdev, RF_PATH_A, true, ADC_960M);
	_rxck_force(rtwdev, RF_PATH_B, true, ADC_960M);
	_addck_ori(rtwdev);

	/* release the forced clock and load the measured offsets back */
	_rxck_force(rtwdev, RF_PATH_A, false, ADC_960M);
	_rxck_force(rtwdev, RF_PATH_B, false, ADC_960M);
	_addck_reload(rtwdev);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);

	_dack(rtwdev);
	_dack_dump(rtwdev);
	dack->dack_done = true;
	/* un-park the RF paths */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RFREG_MASK, 0x1);

	dack->dack_cnt++;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}
922 
/* Wait for a one-shot NCTL calibration command to finish.
 *
 * Two polls: first R_RFK_ST until its low byte reads 0x55 (completion
 * handshake value; timeout is only logged), then R_RPT_COM until the
 * report-ready flag equals 0x8000.  Only the second timeout marks the
 * calibration as not ready.  Finally the NCTL command byte is cleared.
 *
 * NOTE(review): @path and @ktype are currently unused in the body; kept
 * for call-site symmetry.
 *
 * Returns: true if the result report never became ready, false on success.
 */
static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
{
	bool notready = false;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       10, 8200, false,
				       rtwdev, R_RFK_ST, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL1 IQK timeout!!!\n");

	udelay(10);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
				       10, 400, false,
				       rtwdev, R_RPT_COM, B_RPT_COM_RDY);
	if (ret) {
		notready = true;
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL2 IQK timeout!!!\n");
	}

	udelay(10);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);

	return notready;
}
950 
951 static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
952 			  u8 path, u8 ktype)
953 {
954 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
955 	u32 iqk_cmd;
956 	bool fail;
957 
958 	switch (ktype) {
959 	case ID_TXAGC:
960 		iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
961 		break;
962 	case ID_FLOK_COARSE:
963 		iqk_cmd = 0x108 | (1 << (4 + path));
964 		break;
965 	case ID_FLOK_FINE:
966 		iqk_cmd = 0x208 | (1 << (4 + path));
967 		break;
968 	case ID_FLOK_VBUFFER:
969 		iqk_cmd = 0x308 | (1 << (4 + path));
970 		break;
971 	case ID_TXK:
972 		iqk_cmd = 0x008 | (1 << (path + 4)) |
973 			  (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
974 		break;
975 	case ID_RXAGC:
976 		iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
977 		break;
978 	case ID_RXK:
979 		iqk_cmd = 0x008 | (1 << (path + 4)) |
980 			  (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
981 		break;
982 	case ID_NBTXK:
983 		iqk_cmd = 0x408 | (1 << (4 + path));
984 		break;
985 	case ID_NBRXK:
986 		iqk_cmd = 0x608 | (1 << (4 + path));
987 		break;
988 	default:
989 		return false;
990 	}
991 
992 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s, iqk_cmd = %x\n",
993 		    __func__, iqk_cmd + 1);
994 
995 	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
996 	fail = _iqk_check_cal(rtwdev, path, ktype);
997 
998 	return fail;
999 }
1000 
/* Configure the RF front-end of @path for TX IQK according to the band
 * recorded in iqk_info->iqk_band[path].  Both branches end in IQK mode
 * (RR_MOD_IQK = 0x403e) with band-specific attenuation, LOK LUT entry
 * (2G: 0x00, 5G: 0x80) and TX gain tracking range (2G: 0x5, 5G: 0x4).
 */
static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x00);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x5);
		udelay(1);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x80);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x4);
		udelay(1);
		break;
	default:
		/* other bands are not supported by this chip's IQK */
		break;
	}
}
1032 
/* 2G LO-leakage calibration (LOK) on @path.
 *
 * Runs the coarse / vbuffer one-shot pair twice, alternating between two
 * TX gain settings (TG 0x0 with IQSW 0x09 → coarse, TG 0x12 with IQSW
 * 0x24 → vbuffer).  The NCTL command written to R_NCTL_CFG encodes the
 * step (0x1xx coarse, 0x2xx coarse re-run, 0x3xx vbuffer) plus the path.
 *
 * NOTE(review): @phy_idx is unused, the _iqk_check_cal() results are
 * deliberately ignored, and the function always reports success (false).
 */
static bool _iqk_2g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	/* round 1: coarse LOK at TX gain 0 */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000119 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 1: vbuffer LOK at TX gain 0x12 */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 2: coarse again (0x2xx command this time) */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000219 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 2: vbuffer again */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	return false;
}
1077 
/* 5G LO-leakage calibration (LOK) on @path.
 *
 * Same two-round coarse/vbuffer sequence as _iqk_2g_lok(); the RF setup
 * difference lives in _iqk_txk_setting(), not here.
 *
 * NOTE(review): @phy_idx is unused, _iqk_check_cal() results are ignored,
 * and the function always reports success (false).
 */
static bool _iqk_5g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	/* round 1: coarse LOK at TX gain 0 */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000119 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 1: vbuffer LOK at TX gain 0x12 */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 2: coarse again (0x2xx command) */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000219 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 2: vbuffer again */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	return false;
}
1121 
/* 2G TX IQK on @path.
 *
 * Sweeps four gain groups; for each group it programs the RF TX gain,
 * selects the CFIR LUT entry, runs a narrow-band TXK (ID_NBTXK) and
 * caches the resulting CFIR in iqk_info->nb_txcfir[path].  In wide-band
 * mode it additionally runs the full TXK; narrow-band mode (is_nbiqk)
 * stops after the first group.  On failure the cached CFIR is replaced
 * with the neutral value 0x40000002 and the hardware TX CFIR is cleared.
 *
 * Returns: true if the calibration failed.
 */
static bool _iqk_2g_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	/* per-group RF TX gain programming: power range, tracking range,
	 * baseband gain, and the matching KIP IQ swing value
	 */
	static const u32 g_power_range[4] = {0x0, 0x0, 0x0, 0x0};
	static const u32 g_track_range[4] = {0x4, 0x4, 0x6, 0x6};
	static const u32 g_gain_bb[4] = {0x08, 0x0e, 0x08, 0x0e};
	static const u32 g_itqt[4] = {0x09, 0x12, 0x1b, 0x24};
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool notready = false;
	bool kfail = false;
	u8 gp;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	for (gp = 0x0; gp < 0x4; gp++) {
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
			       g_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
			       g_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
			       g_gain_bb[gp]);
		/* select the CFIR LUT slot for this gain group */
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000100, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000010, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000004, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000003, gp);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
				       0x009);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, g_itqt[gp]);
		/* narrow-band TXK first; cache its CFIR result */
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);

		if (iqk_info->is_nbiqk)
			break;

		/* wide-band TXK for this group */
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, g_itqt[gp]);
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n",
			    path, gp, 1 << path, iqk_info->nb_txcfir[path]);
	}

	/* only trust the hardware fail flag when the engine finished */
	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	if (kfail) {
		iqk_info->nb_txcfir[path] = 0x40000002;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_TXCFIR, 0x0);
	}

	return kfail;
}
1182 
/* 5G TX IQK on @path — same four-group sweep as _iqk_2g_tx() with
 * 5G-specific gain tables (and an extra full-dword KIP_IQP preload).
 * On failure the cached CFIR becomes 0x40000002 and the hardware TX
 * CFIR is cleared.
 *
 * Returns: true if the calibration failed.
 */
static bool _iqk_5g_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	static const u32 a_power_range[4] = {0x0, 0x0, 0x0, 0x0};
	static const u32 a_track_range[4] = {0x3, 0x3, 0x6, 0x6};
	static const u32 a_gain_bb[4] = {0x08, 0x10, 0x08, 0x0e};
	static const u32 a_itqt[4] = {0x09, 0x12, 0x1b, 0x24};
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool notready = false;
	bool kfail = false;
	u8 gp;

	for (gp = 0x0; gp < 0x4; gp++) {
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);

		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, a_itqt[gp]);
		/* select the CFIR LUT slot for this gain group */
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000100, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000010, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000004, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000003, gp);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
				       0x009);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, a_itqt[gp]);

		/* narrow-band TXK first; cache its CFIR result */
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);

		if (iqk_info->is_nbiqk)
			break;

		/* wide-band TXK for this group */
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, a_itqt[gp]);
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n",
			    path, gp, 1 << path, iqk_info->nb_txcfir[path]);
	}

	/* only trust the hardware fail flag when the engine finished */
	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	if (kfail) {
		iqk_info->nb_txcfir[path] = 0x40000002;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_TXCFIR, 0x0);
	}

	return kfail;
}
1242 
/* Pulse the ADC FIFO reset for both paths (0x0303 -> settle -> 0x3333).
 *
 * NOTE(review): @phy_idx and @path are unused — the write always covers
 * both paths; the parameters exist for call-site symmetry.
 */
static void _iqk_adc_fifo_rst(struct rtw89_dev *rtwdev,
			      enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
	udelay(10);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333);
}
1250 
/* Configure the RX ADC clock chain for RX IQK.
 *
 * 80 MHz channels force a 960M ADC clock (sample delay 0x2, BW select
 * 0x8); everything narrower uses 480M (delay 0x3, BW select 0xf).  The
 * tail sequence then sets common sample-delay fields, enables both-path
 * debug mode and walks the analog power/clock registers through the
 * documented bring-up order before releasing the ADC FIFO.
 */
static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	/* hold the ADC FIFO while reclocking */
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);

	if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
		_rxck_force(rtwdev, RF_PATH_A, true, ADC_960M);
		_rxck_force(rtwdev, RF_PATH_B, true, ADC_960M);
		udelay(1);

		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
				       B_UPD_CLK_ADC_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
				       B_UPD_CLK_ADC_VAL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
				       B_PATH0_SAMPL_DLY_T_MSK_V1, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
				       B_PATH1_SAMPL_DLY_T_MSK_V1, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, 0x8);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
				       B_PATH1_BW_SEL_MSK_V1, 0x8);
	} else {
		_rxck_force(rtwdev, RF_PATH_A, true, ADC_480M);
		_rxck_force(rtwdev, RF_PATH_B, true, ADC_480M);
		udelay(1);

		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
				       B_UPD_CLK_ADC_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
				       B_UPD_CLK_ADC_VAL, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
				       B_PATH0_SAMPL_DLY_T_MSK_V1, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
				       B_PATH1_SAMPL_DLY_T_MSK_V1, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, 0xf);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
				       B_PATH1_BW_SEL_MSK_V1, 0xf);
	}

	/* bandwidth-independent sample-delay fields for both paths */
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, 0x00000780, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x00000780, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, 0x00007800, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x00007800, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_MUL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	udelay(1);
	/* analog power bring-up sequence; the udelay()s respect settle time */
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
	/* release the ADC FIFO */
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333);
}
1308 
1309 static bool _iqk_2g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1310 {
1311 	static const u32 g_idxrxgain[2] = {0x212, 0x310};
1312 	static const u32 g_idxattc2[2] = {0x00, 0x20};
1313 	static const u32 g_idxattc1[2] = {0x3, 0x2};
1314 	static const u32 g_idxrxagc[2] = {0x0, 0x2};
1315 	static const u32 g_idx[2] = {0x0, 0x2};
1316 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1317 	bool notready = false;
1318 	bool kfail = false;
1319 	u32 rf_18, tmp;
1320 	u8 gp;
1321 
1322 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1323 
1324 	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
1325 	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
1326 	rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
1327 	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
1328 
1329 	for (gp = 0x0; gp < 0x2; gp++) {
1330 		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]);
1331 		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, g_idxattc2[gp]);
1332 		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, g_idxattc1[gp]);
1333 
1334 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1335 				       0x00000100, 0x1);
1336 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1337 				       0x00000010, 0x0);
1338 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1339 				       0x00000007, g_idx[gp]);
1340 		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
1341 		udelay(100);
1342 		udelay(100);
1343 
1344 		tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
1345 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, tmp);
1346 		rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]);
1347 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1348 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
1349 
1350 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
1351 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1352 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, rf rxbb  = %x\n", path,
1353 			    rtw89_read_rf(rtwdev, path, RR_MOD, 0x003c0));
1354 
1355 		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
1356 		udelay(100);
1357 		udelay(100);
1358 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1359 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
1360 
1361 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
1362 		iqk_info->nb_rxcfir[path] =
1363 			rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
1364 					      MASKDWORD) | 0x2;
1365 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1366 			    "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n", path,
1367 			    g_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]);
1368 
1369 		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
1370 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1371 
1372 		if (iqk_info->is_nbiqk)
1373 			break;
1374 
1375 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1376 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
1377 		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1,  B_NCTL_N1_CIP, 0x00);
1378 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1379 	}
1380 
1381 	if (!notready)
1382 		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
1383 
1384 	if (kfail) {
1385 		iqk_info->nb_txcfir[path] = 0x40000002;
1386 		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
1387 				       B_IQK_RES_RXCFIR, 0x0);
1388 	}
1389 	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
1390 
1391 	return kfail;
1392 }
1393 
1394 static bool _iqk_5g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1395 {
1396 	static const u32 a_idxrxgain[2] = {0x110, 0x290};
1397 	static const u32 a_idxattc2[2] = {0x0f, 0x0f};
1398 	static const u32 a_idxattc1[2] = {0x2, 0x2};
1399 	static const u32 a_idxrxagc[2] = {0x4, 0x6};
1400 	static const u32 a_idx[2] = {0x0, 0x2};
1401 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1402 	bool notready = false;
1403 	bool kfail = false;
1404 	u32 rf_18, tmp;
1405 	u8 gp;
1406 
1407 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1408 
1409 	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
1410 	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
1411 	rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
1412 	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
1413 
1414 	for (gp = 0x0; gp < 0x2; gp++) {
1415 		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]);
1416 		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT, a_idxattc2[gp]);
1417 		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2, a_idxattc1[gp]);
1418 
1419 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1420 				       0x00000100, 0x1);
1421 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1422 				       0x00000010, 0x0);
1423 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1424 				       0x00000007, a_idx[gp]);
1425 		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
1426 		udelay(100);
1427 		udelay(100);
1428 
1429 		tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
1430 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, tmp);
1431 		rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]);
1432 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1433 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
1434 
1435 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
1436 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1437 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, rf rxbb  = %x\n", path,
1438 			    rtw89_read_rf(rtwdev, path, RR_MOD, 0x003c0));
1439 
1440 		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
1441 		udelay(200);
1442 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1443 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
1444 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
1445 		iqk_info->nb_rxcfir[path] =
1446 			rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
1447 					      MASKDWORD) | 0x2;
1448 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1449 			    "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n",
1450 			    path, a_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]);
1451 		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
1452 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1453 
1454 		if (iqk_info->is_nbiqk)
1455 			break;
1456 
1457 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1458 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
1459 		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
1460 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1461 	}
1462 
1463 	if (!notready)
1464 		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
1465 
1466 	if (kfail) {
1467 		iqk_info->nb_txcfir[path] = 0x40000002;
1468 		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
1469 				       B_IQK_RES_RXCFIR, 0x0);
1470 	}
1471 	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
1472 
1473 	return kfail;
1474 }
1475 
/* Run the full per-path IQK sequence: LOK (up to 3 attempts), TX IQK,
 * RX clock reconfiguration + ADC FIFO reset, then RX IQK.
 *
 * If LOK still fails after 3 tries, the LOK-related RF registers are
 * forced to the fallback value 0x80200 ("LOK by Pass").  The individual
 * results are only logged; callers do not receive a status.
 */
static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool lok_result = false;
	bool txk_result = false;
	bool rxk_result = false;
	u8 i;

	/* LOK: retry up to 3 times; a false return means success */
	for (i = 0; i < 3; i++) {
		_iqk_txk_setting(rtwdev, path);
		if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
			lok_result = _iqk_2g_lok(rtwdev, phy_idx, path);
		else
			lok_result = _iqk_5g_lok(rtwdev, phy_idx, path);

		if (!lok_result)
			break;
	}

	if (lok_result) {
		/* all attempts failed: force fallback LOK register values */
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]!!!!!!!!!!LOK by Pass !!!!!!!!!!!\n");
		rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_LOKVB, RFREG_MASK, 0x80200);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x08[00:19] = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK));
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x09[00:19] = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RSV2, RFREG_MASK));
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x0a[00:19] = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK));

	if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
		txk_result = _iqk_2g_tx(rtwdev, phy_idx, path);
	else
		txk_result = _iqk_5g_tx(rtwdev, phy_idx, path);

	/* reclock the RX ADC before the RX calibration */
	_iqk_rxclk_setting(rtwdev, path);
	_iqk_adc_fifo_rst(rtwdev, phy_idx, path);

	if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
		rxk_result = _iqk_2g_rx(rtwdev, phy_idx, path);
	else
		rxk_result = _iqk_5g_rx(rtwdev, phy_idx, path);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]result  : lok_= %x, txk_= %x, rxk_= %x\n",
		    lok_result, txk_result, rxk_result);
}
1527 
1528 static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path)
1529 {
1530 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
1531 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1532 	u8 get_empty_table = false;
1533 	u32 reg_rf18;
1534 	u32 reg_35c;
1535 	u8 idx;
1536 
1537 	for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
1538 		if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
1539 			get_empty_table = true;
1540 			break;
1541 		}
1542 	}
1543 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (1)idx = %x\n", idx);
1544 
1545 	if (!get_empty_table) {
1546 		idx = iqk_info->iqk_table_idx[path] + 1;
1547 		if (idx > 1)
1548 			idx = 0;
1549 	}
1550 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (2)idx = %x\n", idx);
1551 
1552 	reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
1553 	reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN);
1554 
1555 	iqk_info->iqk_band[path] = chan->band_type;
1556 	iqk_info->iqk_bw[path] = chan->band_width;
1557 	iqk_info->iqk_ch[path] = chan->channel;
1558 	iqk_info->iqk_mcc_ch[idx][path] = chan->channel;
1559 	iqk_info->iqk_table_idx[path] = idx;
1560 
1561 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x, idx = %x\n",
1562 		    path, reg_rf18, idx);
1563 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x\n",
1564 		    path, reg_rf18);
1565 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x35c= 0x%x\n",
1566 		    path, reg_35c);
1567 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n",
1568 		    iqk_info->iqk_times, idx);
1569 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_mcc_ch[%x][%x] = 0x%x\n",
1570 		    idx, path, iqk_info->iqk_mcc_ch[idx][path]);
1571 }
1572 
/* Thin entry point for starting the per-path IQK sequence; kept as a
 * wrapper so the caller-facing name matches the other chips' drivers.
 */
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	_iqk_by_path(rtwdev, phy_idx, path);
}
1577 
/* Restore normal operation on @path after IQK.
 *
 * Narrow-band mode writes the cached CFIR results back; wide-band mode
 * resets both CFIRs to the neutral 0x40000000.  A final NCTL restore
 * command (0xe19 + path offset) is issued and waited on, KIP state is
 * cleared, and the RF path is returned to its normal mode.
 */
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);

	if (iqk_info->is_nbiqk) {
		/* narrow-band: apply the CFIRs captured during calibration */
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, iqk_info->nb_txcfir[path]);
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       MASKDWORD, iqk_info->nb_rxcfir[path]);
	} else {
		/* wide-band: hardware holds the result; reset to neutral */
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, 0x40000000);
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       MASKDWORD, 0x40000000);
	}
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
			       0x00000e19 + (path << 4));

	_iqk_check_cal(rtwdev, path, 0x0);

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);

	rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K2, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), BIT(28), 0x0);

	/* NOTE(review): the next write is duplicated (harmless but
	 * probably a leftover) — verify against the vendor sequence
	 * before removing.
	 */
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0x3);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
}
1614 
/* Undo the AFE/baseband overrides applied by _iqk_macbb_setting():
 * release forced clocks, debug modes, collision-state overrides and the
 * ADC FIFO/power enables so the paths return to normal operation.
 *
 * NOTE(review): @phy_idx and @path are unused — all writes cover both
 * paths unconditionally.
 */
static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x0000000);
	rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x0000001f, 0x03);
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x000003e0, 0x03);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
			       B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0000);
	rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x0);
}
1639 
/* Preset the KIP/NCTL state of @path before an IQK run: select table
 * slot 0, reset both CFIRs to neutral 0x40000000, release the RF reset
 * and BBDC select, and program the KIP report/system configuration.
 */
static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	u8 idx = 0;

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), 0x00000001, idx);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 0x00000008, idx);
	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 0x40000000);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000000);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
}
1654 
1655 static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
1656 			       enum rtw89_phy_idx phy_idx, u8 path)
1657 {
1658 	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
1659 	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_GOT_TXRX, 0x3);
1660 	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_GOT_TXRX, 0x3);
1661 	rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x1);
1662 	rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x1);
1663 	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x3);
1664 	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P0_CLKG_FORCE, 0x3);
1665 	rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x1ffffff);
1666 	rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x1);
1667 	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x1);
1668 	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x3ff);
1669 	rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN,  0x3);
1670 	rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
1671 	rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, 0x1);
1672 	rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, 0x1);
1673 	rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1);
1674 	rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1);
1675 	rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, B_DAC_CLK_IDX, 0x1);
1676 
1677 	_txck_force(rtwdev, RF_PATH_A, true, DAC_960M);
1678 	_txck_force(rtwdev, RF_PATH_B, true, DAC_960M);
1679 	_rxck_force(rtwdev, RF_PATH_A, true, ADC_1920M);
1680 	_rxck_force(rtwdev, RF_PATH_B, true, ADC_1920M);
1681 
1682 	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
1683 	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x2);
1684 
1685 	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
1686 	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
1687 	udelay(10);
1688 	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
1689 	udelay(10);
1690 	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
1691 	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
1692 	udelay(10);
1693 	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
1694 	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, 0x1);
1695 	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333);
1696 }
1697 
1698 static void _iqk_init(struct rtw89_dev *rtwdev)
1699 {
1700 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1701 	u8 idx, path;
1702 
1703 	rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
1704 
1705 	if (iqk_info->is_iqk_init)
1706 		return;
1707 
1708 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1709 	iqk_info->is_iqk_init = true;
1710 	iqk_info->is_nbiqk = false;
1711 	iqk_info->iqk_fft_en = false;
1712 	iqk_info->iqk_sram_en = false;
1713 	iqk_info->iqk_cfir_en = false;
1714 	iqk_info->iqk_xym_en = false;
1715 	iqk_info->iqk_times = 0x0;
1716 
1717 	for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
1718 		iqk_info->iqk_channel[idx] = 0x0;
1719 		for (path = 0; path < RTW8852BT_SS; path++) {
1720 			iqk_info->lok_cor_fail[idx][path] = false;
1721 			iqk_info->lok_fin_fail[idx][path] = false;
1722 			iqk_info->iqk_tx_fail[idx][path] = false;
1723 			iqk_info->iqk_rx_fail[idx][path] = false;
1724 			iqk_info->iqk_mcc_ch[idx][path] = 0x0;
1725 			iqk_info->iqk_table_idx[path] = 0x0;
1726 		}
1727 	}
1728 }
1729 
/* Poll RR_MOD on every RF path selected in the @kpath bitmap until the path
 * leaves TX mode (RR_MOD_MASK != 2, see enum rf_mode RF_TX) or a 5ms
 * timeout expires.  The result is only logged; a timeout is not fatal.
 */
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
	u32 rf_mode;
	u8 path;
	int ret;

	for (path = 0; path < RF_PATH_MAX; path++) {
		if (!(kpath & BIT(path)))
			continue;

		/* atomic poll: 2us interval, 5000us total budget */
		ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode,
					       rf_mode != 2, 2, 5000, false,
					       rtwdev, path, RR_MOD, RR_MOD_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", path, ret);
	}
}
1747 
1748 static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx,
1749 			   bool is_pause)
1750 {
1751 	if (!is_pause)
1752 		return;
1753 
1754 	_wait_rx_mode(rtwdev, _kpath(rtwdev, band_idx));
1755 }
1756 
/* Run one complete IQK pass on @path: notify BTC, back up the BB/RF state,
 * force the MAC/BB into calibration mode, execute the IQK, then restore
 * everything in reverse order.
 */
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
		   enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);

	/* Tell the BT-coex core a one-shot WLAN RF calibration starts. */
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]==========IQK start!!!!!==========\n");
	iqk_info->iqk_times++;
	iqk_info->version = RTW8852BT_IQK_VER;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
	_iqk_get_ch_info(rtwdev, phy_idx, path);

	/* Save state, calibrate, then restore - order is the mirror of setup. */
	_rfk_backup_bb_reg(rtwdev, backup_bb_val);
	_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
	_rfk_reload_bb_reg(rtwdev, backup_bb_val);
	_rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
1787 
1788 static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
1789 {
1790 	u8 kpath = _kpath(rtwdev, phy_idx);
1791 
1792 	switch (kpath) {
1793 	case RF_A:
1794 		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
1795 		break;
1796 	case RF_B:
1797 		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
1798 		break;
1799 	case RF_AB:
1800 		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
1801 		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
1802 		break;
1803 	default:
1804 		break;
1805 	}
1806 }
1807 
1808 static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off)
1809 {
1810 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
1811 	u8 val, kidx = dpk->cur_idx[path];
1812 	bool off_reverse;
1813 
1814 	val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;
1815 
1816 	if (off)
1817 		off_reverse = false;
1818 	else
1819 		off_reverse = true;
1820 
1821 	val = dpk->is_dpk_enable & off_reverse & dpk->bp[path][kidx].path_ok;
1822 
1823 	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
1824 			       BIT(24), val);
1825 
1826 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
1827 		    kidx, dpk->is_dpk_enable & off_reverse ? "enable" : "disable");
1828 }
1829 
1830 static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
1831 			  enum rtw89_rf_path path, enum rtw8852bt_dpk_id id)
1832 {
1833 	u16 dpk_cmd;
1834 	u32 val;
1835 	int ret;
1836 
1837 	dpk_cmd = (id << 8) | (0x19 + (path << 4));
1838 	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
1839 
1840 	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
1841 				       1, 30000, false,
1842 				       rtwdev, R_RFK_ST, MASKBYTE0);
1843 	if (ret)
1844 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 1 over 30ms!!!!\n");
1845 
1846 	udelay(1);
1847 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00030000);
1848 
1849 	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
1850 				       1, 2000, false,
1851 				       rtwdev, R_RPT_COM, MASKLWORD);
1852 	if (ret)
1853 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 2 over 2ms!!!!\n");
1854 
1855 	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);
1856 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1857 		    "[DPK] one-shot for %s = 0x%04x\n",
1858 		    id == 0x06 ? "LBK_RXIQK" :
1859 		    id == 0x10 ? "SYNC" :
1860 		    id == 0x11 ? "MDPK_IDL" :
1861 		    id == 0x12 ? "MDPK_MPA" :
1862 		    id == 0x13 ? "GAIN_LOSS" :
1863 		    id == 0x14 ? "PWR_CAL" :
1864 		    id == 0x15 ? "DPK_RXAGC" :
1865 		    id == 0x16 ? "KIP_PRESET" :
1866 		    id == 0x17 ? "KIP_RESOTRE" :
1867 		    "DPK_TXAGC", dpk_cmd);
1868 }
1869 
/* Retrigger RX DC calibration on @path by pulsing RR_DCK_LV low then high,
 * then wait 600us for it to settle - TODO confirm settling time against
 * vendor reference.
 */
static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);

	udelay(600);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d RXDCK\n", path);
}
1880 
/* Record the current channel parameters (band/channel/bandwidth) into the
 * DPK backup slot for @path so later passes can detect channel changes
 * (see _dpk_reload_check()).
 */
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	u8 kidx = dpk->cur_idx[path];

	dpk->bp[path][kidx].band = chan->band_type;
	dpk->bp[path][kidx].ch = chan->channel;
	dpk->bp[path][kidx].bw = chan->band_width;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
		    path, dpk->cur_idx[path], phy,
		    rtwdev->is_tssi_mode[path] ? "on" : "off",
		    rtwdev->dbcc_en ? "on" : "off",
		    dpk->bp[path][kidx].band == 0 ? "2G" :
		    dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
		    dpk->bp[path][kidx].ch,
		    dpk->bp[path][kidx].bw == 0 ? "20M" :
		    dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
}
1904 
/* Pause or resume the TSSI tracking loop on @path during DPK.
 * NOTE(review): is_pause is written directly to B_P0_TSSI_TRK_EN, i.e.
 * pause == bit set - presumably the bit disables tracking when set;
 * TODO confirm register polarity.
 */
static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
			    enum rtw89_rf_path path, bool is_pause)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
			       B_P0_TSSI_TRK_EN, is_pause);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
		    is_pause ? "pause" : "resume");
}
1914 
/* Return the KIP to its idle configuration after DPK; on silicon newer
 * than CAV also sets the DPD output-format bit for @path.
 */
static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);

	/* CAV (first cut) silicon does not take this bit. */
	if (rtwdev->hal.cv > CHIP_CAV)
		rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8),
				       B_DPD_COM_OF, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
}
1927 
/* Run a loopback RX IQK for DPK: configure the RF into loopback/RXK mode,
 * pick a TX attenuation matched to the current RXBB gain, fire the
 * LBK_RXIQK one-shot, then undo the RF overrides.  @rf_18 is the saved
 * RR_CFGCH (channel config) word to restore into RR_RSV4.
 */
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path, u8 cur_rxbb, u32 rf_18)
{
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0);

	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);

	/* TX attenuation stepped by RXBB gain region - thresholds
	 * presumably tuned by the vendor; TODO confirm.
	 */
	if (cur_rxbb >= 0x11)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13);
	else if (cur_rxbb <= 0xa)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00);
	else
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05);

	rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014);

	/* Let the RXK PLL settle before triggering the one-shot. */
	udelay(100);

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025);

	_dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* Undo the loopback overrides and return RF to standby-like mode. */
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5);
}
1964 
/* Configure the RF front end of @path for DPK: band-specific RX
 * attenuation/LNA settings, then a TX bandwidth matched to the stored
 * channel bandwidth (RR_BTC_TXBB = bw + 1).
 */
static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
			    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
	} else {
		/* 5G/6G path additionally reconfigures the external LNA
		 * and IQ generator.
		 */
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
		rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC);
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0);
		rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800);
	}

	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
	/* TXBB filter follows channel bandwidth: 20M->1, 40M->2, 80M->3. */
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);
}
1989 
1990 static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
1991 			       enum rtw89_rf_path path, bool is_bypass)
1992 {
1993 	if (is_bypass) {
1994 		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
1995 				       B_RXIQC_BYPASS2, 0x1);
1996 		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
1997 				       B_RXIQC_BYPASS, 0x1);
1998 	} else {
1999 		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
2000 				       B_RXIQC_BYPASS2, 0x0);
2001 		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
2002 				       B_RXIQC_BYPASS, 0x0);
2003 	}
2004 }
2005 
2006 static
2007 void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
2008 {
2009 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2010 
2011 	if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
2012 		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0);
2013 	else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
2014 		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
2015 	else
2016 		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
2017 
2018 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
2019 		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
2020 		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
2021 }
2022 
2023 static void _dpk_table_select(struct rtw89_dev *rtwdev,
2024 			      enum rtw89_rf_path path, u8 kidx, u8 gain)
2025 {
2026 	u8 val;
2027 
2028 	val = 0x80 + kidx * 0x20 + gain * 0x10;
2029 	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val);
2030 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2031 		    "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
2032 		    gain, val);
2033 }
2034 
2035 static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
2036 {
2037 #define DPK_SYNC_TH_DC_I 200
2038 #define DPK_SYNC_TH_DC_Q 200
2039 #define DPK_SYNC_TH_CORR 170
2040 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2041 	u8 corr_val, corr_idx;
2042 	u16 dc_i, dc_q;
2043 	u32 corr, dc;
2044 
2045 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
2046 
2047 	corr = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
2048 	corr_idx = u32_get_bits(corr, B_PRT_COM_CORI);
2049 	corr_val = u32_get_bits(corr, B_PRT_COM_CORV);
2050 
2051 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2052 		    "[DPK] S%d Corr_idx / Corr_val = %d / %d\n",
2053 		    path, corr_idx, corr_val);
2054 
2055 	dpk->corr_idx[path][kidx] = corr_idx;
2056 	dpk->corr_val[path][kidx] = corr_val;
2057 
2058 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
2059 
2060 	dc = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
2061 	dc_i = u32_get_bits(dc, B_PRT_COM_DCI);
2062 	dc_q = u32_get_bits(dc, B_PRT_COM_DCQ);
2063 
2064 	dc_i = abs(sign_extend32(dc_i, 11));
2065 	dc_q = abs(sign_extend32(dc_q, 11));
2066 
2067 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
2068 		    path, dc_i, dc_q);
2069 
2070 	dpk->dc_i[path][kidx] = dc_i;
2071 	dpk->dc_q[path][kidx] = dc_q;
2072 
2073 	if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
2074 	    corr_val < DPK_SYNC_TH_CORR)
2075 		return true;
2076 	else
2077 		return false;
2078 }
2079 
/* Trigger the SYNC one-shot for @path; results are fetched separately via
 * _dpk_sync_check().
 */
static void _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, SYNC);
}
2085 
/* Read the digital gain reported by the KIP (report page 0, DCI field)
 * after a SYNC one-shot.
 */
static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
{
	u16 dgain;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);

	dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain);

	return dgain;
}
2098 
2099 static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
2100 {
2101 	static const u16 bnd[15] = {
2102 		0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556,
2103 		0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220
2104 	};
2105 	s8 offset;
2106 
2107 	if (dgain >= bnd[0])
2108 		offset = 0x6;
2109 	else if (bnd[0] > dgain && dgain >= bnd[1])
2110 		offset = 0x6;
2111 	else if (bnd[1] > dgain && dgain >= bnd[2])
2112 		offset = 0x5;
2113 	else if (bnd[2] > dgain && dgain >= bnd[3])
2114 		offset = 0x4;
2115 	else if (bnd[3] > dgain && dgain >= bnd[4])
2116 		offset = 0x3;
2117 	else if (bnd[4] > dgain && dgain >= bnd[5])
2118 		offset = 0x2;
2119 	else if (bnd[5] > dgain && dgain >= bnd[6])
2120 		offset = 0x1;
2121 	else if (bnd[6] > dgain && dgain >= bnd[7])
2122 		offset = 0x0;
2123 	else if (bnd[7] > dgain && dgain >= bnd[8])
2124 		offset = 0xff;
2125 	else if (bnd[8] > dgain && dgain >= bnd[9])
2126 		offset = 0xfe;
2127 	else if (bnd[9] > dgain && dgain >= bnd[10])
2128 		offset = 0xfd;
2129 	else if (bnd[10] > dgain && dgain >= bnd[11])
2130 		offset = 0xfc;
2131 	else if (bnd[11] > dgain && dgain >= bnd[12])
2132 		offset = 0xfb;
2133 	else if (bnd[12] > dgain && dgain >= bnd[13])
2134 		offset = 0xfa;
2135 	else if (bnd[13] > dgain && dgain >= bnd[14])
2136 		offset = 0xf9;
2137 	else if (bnd[14] > dgain)
2138 		offset = 0xf8;
2139 	else
2140 		offset = 0x0;
2141 
2142 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain offset = %d\n", offset);
2143 
2144 	return offset;
2145 }
2146 
/* Select report page 6 and latch it, then return the gain-loss index
 * produced by the last GAIN_LOSS one-shot.
 */
static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);

	return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
}
2154 
/* Run the GAIN_LOSS one-shot for @path and pre-select report page 6 so a
 * following _dpk_gainloss_read()/_dpk_pas_read() sees the fresh result.
 */
static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
}
2163 
/* Program the tone-pattern generator for the current bandwidth, then run
 * the KIP_PRESET one-shot for @path/@kidx.
 */
static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path, u8 kidx)
{
	_dpk_tpg_sel(rtwdev, path, kidx);
	_dpk_one_shot(rtwdev, phy, path, KIP_PRESET);
}
2170 
/* Power/clock the KIP for DPK and load the per-path CFIR system word.
 * The inverse is _dpk_kip_restore().
 */
static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev,
				enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n");
}
2180 
2181 static
2182 u8 _dpk_txagc_check_8852bt(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 txagc)
2183 {
2184 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2185 
2186 	if (txagc >= dpk->max_dpk_txagc[path])
2187 		txagc = dpk->max_dpk_txagc[path];
2188 
2189 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set TxAGC = 0x%x\n", txagc);
2190 
2191 	return txagc;
2192 }
2193 
/* Write the (bound-checked) TXAGC into the RF and notify the KIP via the
 * DPK_TXAGC one-shot, briefly enabling RF control-timing for the command.
 */
static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path, u8 txagc)
{
	u8 val;

	val = _dpk_txagc_check_8852bt(rtwdev, path, txagc);
	rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, val);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_TXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* Logs the requested value; the written one may have been clamped. */
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc);
}
2207 
/* Mirror the RF mode word into the KIP and run the DPK_RXAGC one-shot,
 * bracketed by the RF control-timing enable like _dpk_kip_set_txagc().
 */
static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, 0x50220);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_RXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
}
2216 
/* Lower the TXAGC by @gain_offset (negative offsets raise it), clamped to
 * [DPK_TXAGC_LOWER, DPK_TXAGC_UPPER], apply it via the KIP, and return the
 * value used.
 * NOTE(review): the @txagc parameter is immediately overwritten by the
 * current RF value, so callers' txagc argument is effectively unused -
 * verify against other chip variants before relying on it.
 */
static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, u8 txagc, s8 gain_offset)
{
	txagc = rtw89_read_rf(rtwdev, path, RR_TXAGC, RFREG_MASK);

	if ((txagc - gain_offset) < DPK_TXAGC_LOWER)
		txagc = DPK_TXAGC_LOWER;
	else if ((txagc - gain_offset) > DPK_TXAGC_UPPER)
		txagc = DPK_TXAGC_UPPER;
	else
		txagc = txagc - gain_offset;

	_dpk_kip_set_txagc(rtwdev, phy, path, txagc);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
		    gain_offset, txagc);
	return txagc;
}
2235 
/* Read the PA-scan report.  When @is_check is set, sample the first (0x00)
 * and last (0x1f) bins as 12-bit signed I/Q pairs and return true when the
 * first bin's power is >= 1.6x the last bin's (delta criterion).  When
 * @is_check is clear, just dump all 32 bins for debugging.
 * NOTE(review): in the dump path val1/val2 stay zero, so the final
 * comparison is 0 >= 0 and the function always returns true - callers
 * appear to ignore the return in that mode; confirm before relying on it.
 */
static bool _dpk_pas_read(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			  u8 is_check)
{
	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
	u8 i;

	/* Select the PA-scan report page and reset the bin pointer. */
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);

	if (is_check) {
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val1_i = abs(sign_extend32(val1_i, 11));
		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val1_q = abs(sign_extend32(val1_q, 11));

		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val2_i = abs(sign_extend32(val2_i, 11));
		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val2_q = abs(sign_extend32(val2_q, 11));

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
			    phy_div(val1_i * val1_i + val1_q * val1_q,
				    val2_i * val2_i + val2_q * val2_q));
	} else {
		/* Dump all 32 PA-scan bins. */
		for (i = 0; i < 32; i++) {
			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
		}
	}

	/* Power(first bin) >= 1.6 * power(last bin) -> gain-loss criterion hit. */
	if (val1_i * val1_i + val1_q * val1_q >=
	    (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
		return true;

	return false;
}
2277 
/* DPK AGC state machine: starting from @init_txagc, iterate SYNC/digital
 * gain reads, RXBB adjustments, and gain-loss measurements until a TXAGC
 * that satisfies the gain-loss criterion is found.  Returns the selected
 * TXAGC, or 0xff (DPK_TXAGC_INVAL) when SYNC failed.  Bounded by at most
 * 6 AGC iterations and a hard 200-step limit.
 */
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
		   bool loss_only)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 goout = 0, agc_cnt = 0, limited_rxbb = 0, gl_cnt = 0;
	u8 tmp_txagc, tmp_rxbb, tmp_gl_idx = 0;
	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
	int limit = 200;
	s8 offset = 0;
	u16 dgain = 0;
	u32 rf_18;

	tmp_txagc = init_txagc;

	/* Snapshot current RXBB gain and channel config for loopback IQK. */
	tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);
	rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);

	do {
		switch (step) {
		case DPK_AGC_STEP_SYNC_DGAIN:
			_dpk_sync(rtwdev, phy, path, kidx);
			/* First pass only: 20/40M bypasses the RX CFIR,
			 * 80M needs a loopback RX IQK instead.
			 */
			if (agc_cnt == 0) {
				if (chan->band_width < 2)
					_dpk_bypass_rxcfir(rtwdev, path, true);
				else
					_dpk_lbk_rxiqk(rtwdev, phy, path,
						       tmp_rxbb, rf_18);
			}

			/* SYNC failure aborts with the invalid TXAGC marker. */
			if (_dpk_sync_check(rtwdev, path, kidx) == true) {
				tmp_txagc = 0xff;
				goout = 1;
				break;
			}

			dgain = _dpk_dgain_read(rtwdev);
			offset = _dpk_dgain_mapping(rtwdev, dgain);

			/* Skip RXBB adjustment when not needed or not allowed. */
			if (loss_only == 1 || limited_rxbb == 1 || offset == 0)
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			else
				step = DPK_AGC_STEP_GAIN_ADJ;
			break;
		case DPK_AGC_STEP_GAIN_ADJ:
			tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);

			/* Apply the offset, saturating at the 5-bit RXBB
			 * range; saturation disables further adjustments.
			 */
			if (tmp_rxbb + offset > 0x1f) {
				tmp_rxbb = 0x1f;
				limited_rxbb = 1;
			} else if (tmp_rxbb + offset < 0) {
				tmp_rxbb = 0;
				limited_rxbb = 1;
			} else {
				tmp_rxbb = tmp_rxbb + offset;
			}

			rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB, tmp_rxbb);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb);

			/* 80M must redo the loopback IQK after an RXBB change. */
			if (chan->band_width == RTW89_CHANNEL_WIDTH_80)
				_dpk_lbk_rxiqk(rtwdev, phy, path, tmp_rxbb, rf_18);
			/* Re-sync when the digital gain is far out of range. */
			if (dgain > 1922 || dgain < 342)
				step = DPK_AGC_STEP_SYNC_DGAIN;
			else
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;

			agc_cnt++;
			break;
		case DPK_AGC_STEP_GAIN_LOSS_IDX:
			_dpk_gainloss(rtwdev, phy, path, kidx);

			tmp_gl_idx = _dpk_gainloss_read(rtwdev);

			/* idx 0 with PAS criterion hit, or idx >= 7, means
			 * too much gain: back the TXAGC off.
			 */
			if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, path, true)) ||
			    tmp_gl_idx >= 7)
				step = DPK_AGC_STEP_GL_GT_CRITERION;
			else if (tmp_gl_idx == 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else
				step = DPK_AGC_STEP_SET_TX_GAIN;

			gl_cnt++;
			break;
		case DPK_AGC_STEP_GL_GT_CRITERION:
			/* Too much gain loss: step TXAGC down by 3, unless
			 * already at the lower/maximum bound.
			 */
			if (tmp_txagc == 0x2e ||
			    tmp_txagc == dpk->max_dpk_txagc[path]) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@lower bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path,
							    tmp_txagc, 0x3);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_GL_LT_CRITERION:
			/* Too little gain loss: step TXAGC up by 2 (offset
			 * 0xfe == -2), unless at the upper/maximum bound.
			 */
			if (tmp_txagc == 0x3f || tmp_txagc == dpk->max_dpk_txagc[path]) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@upper bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path,
							    tmp_txagc, 0xfe);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_SET_TX_GAIN:
			/* In-range gain-loss index: apply it and finish. */
			tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_txagc,
						    tmp_gl_idx);
			goout = 1;
			agc_cnt++;
			break;

		default:
			goout = 1;
			break;
		}
	} while (!goout && agc_cnt < 6 && limit-- > 0);

	/* Persistent gain-loss trouble: dump the PA-scan bins for debug. */
	if (gl_cnt >= 6)
		_dpk_pas_read(rtwdev, path, false);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc, tmp_rxbb);

	return tmp_txagc;
}
2412 
/* Program the MDPD polynomial order for IDL on @path and remember it in
 * dpk->dpk_order[] for _dpk_fill_result():
 *   order 0 -> taps (5,3,1), order 1 -> (5,3,0), order 2 -> (5,0,0).
 * Unknown orders are logged and ignored.
 */
static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path path, u8 order)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	switch (order) {
	case 0: /* (5,3,1) */
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
		dpk->dpk_order[path] = 0x3;
		break;
	case 1: /* (5,3,0) */
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x0);
		dpk->dpk_order[path] = 0x1;
		break;
	case 2: /* (5,0,0) */
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x0);
		dpk->dpk_order[path] = 0x0;
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Wrong MDPD order!!(0x%x)\n", order);
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n",
		    order == 0x0 ? "(5,3,1)" :
		    order == 0x1 ? "(5,3,0)" : "(5,0,0)");

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set MDPD order to 0x%x for IDL\n", order);
}
2450 
2451 static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2452 			 enum rtw89_rf_path path, u8 kidx, u8 gain)
2453 {
2454 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2455 
2456 	if (dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80 &&
2457 	    dpk->bp[path][kidx].band == RTW89_BAND_5G)
2458 		_dpk_set_mdpd_para(rtwdev, path, 0x2);
2459 	else
2460 		_dpk_set_mdpd_para(rtwdev, path, 0x0);
2461 
2462 	_dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
2463 }
2464 
/* Commit the DPK result for @path/@kidx into both hardware and the backup
 * structure: TXAGC and power-scaling-factor words (at bit positions derived
 * from gain/kidx), the gain-scale, the MDPD order, and a MDPD coefficient
 * load pulse.
 */
static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 gs = dpk->dpk_gs[phy];
	u16 pwsf = 0x78;	/* fixed power scaling factor */

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), BIT(8), kidx);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n",
		    txagc, pwsf, gs);

	dpk->bp[path][kidx].txagc_dpk = txagc;
	rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
			       0x3F << ((gain << 3) + (kidx << 4)), txagc);

	dpk->bp[path][kidx].pwsf = pwsf;
	rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
			       0x1FF << (gain << 4), pwsf);

	/* Pulse the MDPD coefficient load strobe. */
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);

	dpk->bp[path][kidx].gs = gs;
	if (dpk->dpk_gs[phy] == 0x7f)
		rtw89_phy_write32_mask(rtwdev,
				       R_DPD_CH0A + (path << 8) + (kidx << 2),
				       MASKDWORD, 0x007f7f7f);
	else
		rtw89_phy_write32_mask(rtwdev,
				       R_DPD_CH0A + (path << 8) + (kidx << 2),
				       MASKDWORD, 0x005b5b5b);

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       B_DPD_ORDER_V1, dpk->dpk_order[path]);

	rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, 0x0);
}
2505 
2506 static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2507 			      enum rtw89_rf_path path)
2508 {
2509 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2510 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2511 	u8 idx, cur_band, cur_ch;
2512 	bool is_reload = false;
2513 
2514 	cur_band = chan->band_type;
2515 	cur_ch = chan->channel;
2516 
2517 	for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
2518 		if (cur_band != dpk->bp[path][idx].band ||
2519 		    cur_ch != dpk->bp[path][idx].ch)
2520 			continue;
2521 
2522 		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
2523 				       B_COEF_SEL_MDPD, idx);
2524 		dpk->cur_idx[path] = idx;
2525 		is_reload = true;
2526 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2527 			    "[DPK] reload S%d[%d] success\n", path, idx);
2528 	}
2529 
2530 	return is_reload;
2531 }
2532 
2533 static
2534 void _rf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb)
2535 {
2536 	if (is_bybb)
2537 		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
2538 	else
2539 		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
2540 }
2541 
2542 static
2543 void _drf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb)
2544 {
2545 	if (is_bybb)
2546 		rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
2547 	else
2548 		rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
2549 }
2550 
2551 static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2552 		      enum rtw89_rf_path path, u8 gain)
2553 {
2554 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2555 	u8 txagc = 0x38, kidx = dpk->cur_idx[path];
2556 	bool is_fail = false;
2557 
2558 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2559 		    "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
2560 
2561 	_rf_direct_cntrl(rtwdev, path, false);
2562 	_drf_direct_cntrl(rtwdev, path, false);
2563 
2564 	_dpk_kip_pwr_clk_on(rtwdev, path);
2565 	_dpk_kip_set_txagc(rtwdev, phy, path, txagc);
2566 	_dpk_rf_setting(rtwdev, gain, path, kidx);
2567 	_dpk_rx_dck(rtwdev, phy, path);
2568 	_dpk_kip_preset(rtwdev, phy, path, kidx);
2569 	_dpk_kip_set_rxagc(rtwdev, phy, path);
2570 	_dpk_table_select(rtwdev, path, kidx, gain);
2571 
2572 	txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
2573 
2574 	_rfk_get_thermal(rtwdev, kidx, path);
2575 
2576 	if (txagc == 0xff) {
2577 		is_fail = true;
2578 		goto _error;
2579 	}
2580 
2581 	_dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
2582 
2583 	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, RF_RX);
2584 	_dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc);
2585 
2586 _error:
2587 	if (!is_fail)
2588 		dpk->bp[path][kidx].path_ok = 1;
2589 	else
2590 		dpk->bp[path][kidx].path_ok = 0;
2591 
2592 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
2593 		    is_fail ? "Check" : "Success");
2594 
2595 	_dpk_onoff(rtwdev, path, is_fail);
2596 
2597 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
2598 		    is_fail ? "Check" : "Success");
2599 
2600 	return is_fail;
2601 }
2602 
/* Run DPK on all paths of this PHY, with backup/restore of BB, KIP and RF
 * registers around the calibration.
 *
 * Per path: if a stored result matches the current channel it is reloaded;
 * otherwise, when the slot is already in use, the working slot index is
 * flipped (double-buffered backup) before a fresh calibration. TSSI
 * tracking is paused while DPK runs and resumed afterwards.
 */
static void _dpk_cal_select(struct rtw89_dev *rtwdev,
			    enum rtw89_phy_idx phy, u8 kpath)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u32 backup_kip_val[BACKUP_KIP_REGS_NR];
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
	bool reloaded[2] = {false};
	u8 path;

	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
		reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
		/* no reload and slot already used: switch to the other slot */
		if (!reloaded[path] && dpk->bp[path][0].ch != 0)
			dpk->cur_idx[path] = !dpk->cur_idx[path];
		else
			_dpk_onoff(rtwdev, path, false);
	}

	_rfk_backup_bb_reg(rtwdev, backup_bb_val);
	_rfk_backup_kip_reg(rtwdev, backup_kip_val);

	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
		_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
		_dpk_information(rtwdev, phy, path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, true);
	}

	/* NOTE(review): 'path' equals DPK_RF_PATH_MAX_8852BT (one past the
	 * last path) here and in the restore calls below — confirm the
	 * AFE/KIP helpers key off 'kpath' and ignore this argument.
	 */
	_rfk_bb_afe_setting(rtwdev, phy, path, kpath);

	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++)
		_dpk_main(rtwdev, phy, path, 1);

	_rfk_bb_afe_restore(rtwdev, phy, path, kpath);

	_dpk_kip_restore(rtwdev, path);
	_rfk_reload_bb_reg(rtwdev, backup_bb_val);
	_rfk_reload_kip_reg(rtwdev, backup_kip_val);

	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
		_rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, false);
	}
}
2648 
2649 static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2650 {
2651 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2652 	struct rtw89_fem_info *fem = &rtwdev->fem;
2653 
2654 	if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
2655 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2656 			    "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
2657 		return true;
2658 	} else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
2659 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2660 			    "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
2661 		return true;
2662 	} else if (fem->epa_6g && chan->band_type == RTW89_BAND_6G) {
2663 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2664 			    "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
2665 		return true;
2666 	}
2667 
2668 	return false;
2669 }
2670 
2671 static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2672 {
2673 	u8 path, kpath;
2674 
2675 	kpath = _kpath(rtwdev, phy);
2676 
2677 	for (path = 0; path < RTW8852BT_SS; path++) {
2678 		if (kpath & BIT(path))
2679 			_dpk_onoff(rtwdev, path, true);
2680 	}
2681 }
2682 
/* Thermal tracking for DPK: periodically adjust the per-path power scaling
 * factor (pwsf) to compensate the stored DPK result for temperature drift
 * since calibration. No calibration is redone here — only the pwsf fields
 * in R_DPD_BND are rewritten.
 */
static void _dpk_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst;
	s8 delta_ther[2] = {};
	u8 trk_idx, txagc_rf;
	u8 path, kidx;
	u16 pwsf[2];
	u8 cur_ther;
	u32 tmp;

	for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
		kidx = dpk->cur_idx[path];

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
			    path, kidx, dpk->bp[path][kidx].ch);

		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] thermal now = %d\n", cur_ther);

		/* delta vs. the thermal reading captured at DPK time */
		if (dpk->bp[path][kidx].ch && cur_ther)
			delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;

		/* band-dependent weighting of the thermal delta */
		if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
			delta_ther[path] = delta_ther[path] * 3 / 2;
		else
			delta_ther[path] = delta_ther[path] * 5 / 2;

		txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						 B_TXAGC_RF);

		if (rtwdev->is_tssi_mode[path]) {
			trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
				    txagc_rf, trk_idx);

			txagc_bb =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE2);
			txagc_bb_tp =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13),
						      B_TXAGC_TP);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
				    txagc_bb_tp, txagc_bb);

			txagc_ofst =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE3);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
				    txagc_ofst, delta_ther[path]);
			tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
						    B_DPD_COM_OF);
			/* HW applies the txagc offset itself; avoid double count */
			if (tmp == 0x1) {
				txagc_ofst = 0;
				rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
					    "[DPK_TRK] HW txagc offset mode\n");
			}

			if (txagc_rf && cur_ther)
				ini_diff = txagc_ofst + (delta_ther[path]);

			tmp = rtw89_phy_read32_mask(rtwdev,
						    R_P0_TXDPD + (path << 13),
						    B_P0_TXDPD);
			if (tmp == 0x0) {
				pwsf[0] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
			} else {
				pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff;
			}
		} else {
			pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
			pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
		}

		/* write back only if tracking enabled and TX gain is valid */
		tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS);
		if (!tmp && txagc_rf) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
				    pwsf[0], pwsf[1]);

			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_0, pwsf[0]);
			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_1, pwsf[1]);
		}
	}
}
2785 
2786 static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2787 {
2788 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2789 	u8 tx_scale, ofdm_bkof, path, kpath;
2790 
2791 	kpath = _kpath(rtwdev, phy);
2792 
2793 	ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM);
2794 	tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA);
2795 
2796 	if (ofdm_bkof + tx_scale >= 44) {
2797 		/* move dpd backoff to bb, and set dpd backoff to 0 */
2798 		dpk->dpk_gs[phy] = 0x7f;
2799 		for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
2800 			if (!(kpath & BIT(path)))
2801 				continue;
2802 
2803 			rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8),
2804 					       B_DPD_CFG, 0x7f7f7f);
2805 			rtw89_debug(rtwdev, RTW89_DBG_RFK,
2806 				    "[RFK] Set S%d DPD backoff to 0dB\n", path);
2807 		}
2808 	} else {
2809 		dpk->dpk_gs[phy] = 0x5b;
2810 	}
2811 }
2812 
/* Clear bit 24 of both DPD channel config registers (CH0A/CH0B),
 * disabling DPD while TSSI is being (re)configured.
 */
static void _tssi_dpk_off(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A, BIT(24), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0B, BIT(24), 0x0);
}
2818 
2819 static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2820 			     enum rtw89_rf_path path)
2821 {
2822 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2823 	enum rtw89_band band = chan->band_type;
2824 
2825 	if (band == RTW89_BAND_2G)
2826 		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
2827 	else
2828 		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
2829 }
2830 
/* Apply TSSI system-level defaults: common table, 80 MHz async-reset bit,
 * then the per-path, per-band (2G vs 5G) parameter tables.
 */
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;

	rtw89_rfk_parser(rtwdev, &rtw8852bt_tssi_sys_defs_tbl);

	if (chan->band_width == RTW89_CHANNEL_WIDTH_80)
		rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, 0x1);
	else
		rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, 0x0);

	if (path == RF_PATH_A)
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852bt_tssi_sys_a_defs_2g_tbl,
					 &rtw8852bt_tssi_sys_a_defs_5g_tbl);
	else
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852bt_tssi_sys_b_defs_2g_tbl,
					 &rtw8852bt_tssi_sys_b_defs_5g_tbl);
}
2853 
/* Load the per-path BB TX-power-control init table for TSSI. */
static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev,
				    enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852bt_tssi_init_txpwr_defs_a_tbl,
				 &rtw8852bt_tssi_init_txpwr_defs_b_tbl);
}
2862 
/* Load the per-path BB TX-power-control init table for HE trigger-based
 * (TB) PPDUs.
 */
static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852bt_tssi_init_txpwr_he_tb_defs_a_tbl,
				 &rtw8852bt_tssi_init_txpwr_he_tb_defs_b_tbl);
}
2871 
/* Load the per-path TSSI DCK (DC calibration) parameter table. */
static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852bt_tssi_dck_defs_a_tbl,
				 &rtw8852bt_tssi_dck_defs_b_tbl);
}
2879 
/* Program the per-path 64-entry thermal-compensation table used by TSSI
 * tracking, derived from the firmware tx-power-track deltas for the
 * current subband.
 *
 * Entries 0..31 hold negated "down" deltas, entries 63..32 hold "up"
 * deltas; four s8 entries are packed into each 32-bit register write.
 * When no thermal reading is available (0xff) a flat zero table is
 * written and the reference thermal is forced to 32.
 */
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
/* pack four consecutive s8 table entries into one little-endian u32 */
#define RTW8852BT_TSSI_GET_VAL(ptr, idx)			\
({							\
	s8 *__ptr = (ptr);				\
	u8 __idx = (idx), __i, __v;			\
	u32 __val = 0;					\
	for (__i = 0; __i < 4; __i++) {			\
		__v = (__ptr[__idx + __i]);		\
		__val |= (__v << (8 * __i));		\
	}						\
	__val;						\
})
	struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u8 subband = chan->subband_type;
	const s8 *thm_up_a = NULL;
	const s8 *thm_down_a = NULL;
	const s8 *thm_up_b = NULL;
	const s8 *thm_down_b = NULL;
	u8 thermal = 0xff;
	s8 thm_ofst[64] = {0};
	u32 tmp = 0;
	u8 i, j;

	/* select the firmware delta tables for the current subband */
	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
		break;
	}

	if (path == RF_PATH_A) {
		thermal = tssi_info->thermal[RF_PATH_A];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			/* no reading: neutral reference (32), zero table */
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    R_P0_TSSI_BASE + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER,
					       thermal);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
					       thermal);

			/* lower half: negated down-deltas, clamped to last */
			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_a[i++] :
					      -thm_down_a[DELTA_SWINGIDX_SIZE - 1];

			/* upper half (filled downward): up-deltas from index 1 */
			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_a[i++] :
					      thm_up_a[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);

				/* 0x5c00 is presumably R_P0_TSSI_BASE - confirm */
				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, tmp);
			}
		}
		/* pulse RFCTM ready to latch the new table */
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);

	} else {
		thermal = tssi_info->thermal[RF_PATH_B];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER,
					       thermal);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_b[i++] :
					      -thm_down_b[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_b[i++] :
					      thm_up_b[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, tmp);
			}
		}
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
	}
#undef RTW8852BT_TSSI_GET_VAL
}
3040 
/* Load the per-path TSSI DAC gain table. */
static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				   enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852bt_tssi_dac_gain_defs_a_tbl,
				 &rtw8852bt_tssi_dac_gain_defs_b_tbl);
}
3048 
/* Load the per-path, per-band (2G vs 5G) TSSI slope calibration table. */
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;

	if (path == RF_PATH_A)
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852bt_tssi_slope_a_defs_2g_tbl,
					 &rtw8852bt_tssi_slope_a_defs_5g_tbl);
	else
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852bt_tssi_slope_b_defs_2g_tbl,
					 &rtw8852bt_tssi_slope_b_defs_5g_tbl);
}
3064 
/* Load the TSSI alignment table matching the current band/channel range
 * for one path; no table is applied for channels outside the known ranges.
 *
 * NOTE(review): the 'all' parameter is unused here, and path A keys its
 * 2G case off band_type while path B checks channels 1-14 — presumably
 * equivalent, but confirm against the vendor tables.
 */
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path, bool all)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;
	const struct rtw89_rfk_tbl *tbl = NULL;
	u8 ch = chan->channel;

	if (path == RF_PATH_A) {
		if (band == RTW89_BAND_2G)
			tbl = &rtw8852bt_tssi_align_a_2g_all_defs_tbl;
		else if (ch >= 36 && ch <= 64)
			tbl = &rtw8852bt_tssi_align_a_5g1_all_defs_tbl;
		else if (ch >= 100 && ch <= 144)
			tbl = &rtw8852bt_tssi_align_a_5g2_all_defs_tbl;
		else if (ch >= 149 && ch <= 177)
			tbl = &rtw8852bt_tssi_align_a_5g3_all_defs_tbl;
	} else {
		if (ch >= 1 && ch <= 14)
			tbl = &rtw8852bt_tssi_align_b_2g_all_defs_tbl;
		else if (ch >= 36 && ch <= 64)
			tbl = &rtw8852bt_tssi_align_b_5g1_all_defs_tbl;
		else if (ch >= 100 && ch <= 144)
			tbl = &rtw8852bt_tssi_align_b_5g2_all_defs_tbl;
		else if (ch >= 149 && ch <= 177)
			tbl = &rtw8852bt_tssi_align_b_5g3_all_defs_tbl;
	}

	if (tbl)
		rtw89_rfk_parser(rtwdev, tbl);
}
3096 
/* Load the per-path TSSI slope default table. */
static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852bt_tssi_slope_defs_a_tbl,
				 &rtw8852bt_tssi_slope_defs_b_tbl);
}
3104 
3105 static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3106 				 enum rtw89_rf_path path)
3107 {
3108 	if (path == RF_PATH_A)
3109 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSIC, B_P0_TSSIC_BYPASS, 0x0);
3110 	else
3111 		rtw89_phy_write32_mask(rtwdev, R_P1_TSSIC, B_P1_TSSIC_BYPASS, 0x0);
3112 }
3113 
3114 static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
3115 					  enum rtw89_phy_idx phy,
3116 					  enum rtw89_rf_path path)
3117 {
3118 	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "======>%s   path=%d\n", __func__,
3119 		    path);
3120 
3121 	if (path == RF_PATH_A)
3122 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
3123 				       B_P0_TSSI_MV_MIX, 0x010);
3124 	else
3125 		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
3126 				       B_P1_RFCTM_DEL, 0x010);
3127 }
3128 
/* Enable TSSI tracking on all paths: clear the moving-average state,
 * pulse the averaging enable, turn on RF-side gain tracking, set RFC
 * mode 0x3, re-latch the TSSI offset (0xc0), and flag the path as
 * running in TSSI mode.
 */
static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 i;

	for (i = 0; i < RF_PATH_NUM_8852BT; i++) {
		_tssi_set_tssi_track(rtwdev, phy, i);
		_tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);

		if (i == RF_PATH_A) {
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
					       B_P0_TSSI_MV_CLR, 0x0);
			/* toggle averaging enable to restart it cleanly */
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
					       B_P0_TSSI_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
					       B_P0_TSSI_EN, 0x1);
			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
				       RR_TXGA_V1_TRK_EN, 0x1);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_RFC, 0x3);

			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT, 0xc0);
			/* toggle offset enable to latch the new offset */
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT_EN, 0x1);

			rtwdev->is_tssi_mode[RF_PATH_A] = true;
		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
					       B_P1_TSSI_MV_CLR, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
					       B_P1_TSSI_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
					       B_P1_TSSI_EN, 0x1);
			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
				       RR_TXGA_V1_TRK_EN, 0x1);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_RFC, 0x3);

			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT, 0xc0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT_EN, 0x1);

			rtwdev->is_tssi_mode[RF_PATH_B] = true;
		}
	}
}
3180 
/* Stop TSSI tracking on both paths: disable averaging, set RFC mode to
 * 0x1, clear the moving-average state, and drop the TSSI-mode flags.
 */
static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_RFC, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_CLR, 0x1);

	rtwdev->is_tssi_mode[RF_PATH_A] = false;
	rtwdev->is_tssi_mode[RF_PATH_B] = false;
}
3193 
3194 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
3195 {
3196 	switch (ch) {
3197 	case 1 ... 2:
3198 		return 0;
3199 	case 3 ... 5:
3200 		return 1;
3201 	case 6 ... 8:
3202 		return 2;
3203 	case 9 ... 11:
3204 		return 3;
3205 	case 12 ... 13:
3206 		return 4;
3207 	case 14:
3208 		return 5;
3209 	}
3210 
3211 	return 0;
3212 }
3213 
3214 #define TSSI_EXTRA_GROUP_BIT (BIT(31))
3215 #define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
3216 #define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
3217 #define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
3218 #define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
3219 
3220 static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
3221 {
3222 	switch (ch) {
3223 	case 1 ... 2:
3224 		return 0;
3225 	case 3 ... 5:
3226 		return 1;
3227 	case 6 ... 8:
3228 		return 2;
3229 	case 9 ... 11:
3230 		return 3;
3231 	case 12 ... 14:
3232 		return 4;
3233 	case 36 ... 40:
3234 		return 5;
3235 	case 41 ... 43:
3236 		return TSSI_EXTRA_GROUP(5);
3237 	case 44 ... 48:
3238 		return 6;
3239 	case 49 ... 51:
3240 		return TSSI_EXTRA_GROUP(6);
3241 	case 52 ... 56:
3242 		return 7;
3243 	case 57 ... 59:
3244 		return TSSI_EXTRA_GROUP(7);
3245 	case 60 ... 64:
3246 		return 8;
3247 	case 100 ... 104:
3248 		return 9;
3249 	case 105 ... 107:
3250 		return TSSI_EXTRA_GROUP(9);
3251 	case 108 ... 112:
3252 		return 10;
3253 	case 113 ... 115:
3254 		return TSSI_EXTRA_GROUP(10);
3255 	case 116 ... 120:
3256 		return 11;
3257 	case 121 ... 123:
3258 		return TSSI_EXTRA_GROUP(11);
3259 	case 124 ... 128:
3260 		return 12;
3261 	case 129 ... 131:
3262 		return TSSI_EXTRA_GROUP(12);
3263 	case 132 ... 136:
3264 		return 13;
3265 	case 137 ... 139:
3266 		return TSSI_EXTRA_GROUP(13);
3267 	case 140 ... 144:
3268 		return 14;
3269 	case 149 ... 153:
3270 		return 15;
3271 	case 154 ... 156:
3272 		return TSSI_EXTRA_GROUP(15);
3273 	case 157 ... 161:
3274 		return 16;
3275 	case 162 ... 164:
3276 		return TSSI_EXTRA_GROUP(16);
3277 	case 165 ... 169:
3278 		return 17;
3279 	case 170 ... 172:
3280 		return TSSI_EXTRA_GROUP(17);
3281 	case 173 ... 177:
3282 		return 18;
3283 	}
3284 
3285 	return 0;
3286 }
3287 
3288 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
3289 {
3290 	switch (ch) {
3291 	case 1 ... 8:
3292 		return 0;
3293 	case 9 ... 14:
3294 		return 1;
3295 	case 36 ... 48:
3296 		return 2;
3297 	case 52 ... 64:
3298 		return 3;
3299 	case 100 ... 112:
3300 		return 4;
3301 	case 116 ... 128:
3302 		return 5;
3303 	case 132 ... 144:
3304 		return 6;
3305 	case 149 ... 177:
3306 		return 7;
3307 	}
3308 
3309 	return 0;
3310 }
3311 
/* Look up the OFDM/MCS TSSI diff-error (DE) value for the current channel.
 *
 * Channels in an "extra" group lie between two calibrated groups; their DE
 * is the average of the two neighboring entries.
 */
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u32 gidx, gidx_1st, gidx_2nd;
	s8 de_1st;
	s8 de_2nd;
	s8 val;

	gidx = _tssi_get_ofdm_group(rtwdev, ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx);

	if (IS_TSSI_EXTRA_GROUP(gidx)) {
		gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
		gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
		val = (de_1st + de_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
			    path, val, de_1st, de_2nd);
	} else {
		val = tssi_info->tssi_mcs[path][gidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
	}

	return val;
}
3347 
3348 static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3349 				 enum rtw89_rf_path path)
3350 {
3351 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3352 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3353 	u8 ch = chan->channel;
3354 	u32 tgidx, tgidx_1st, tgidx_2nd;
3355 	s8 tde_1st;
3356 	s8 tde_2nd;
3357 	s8 val;
3358 
3359 	tgidx = _tssi_get_trim_group(rtwdev, ch);
3360 
3361 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3362 		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
3363 		    path, tgidx);
3364 
3365 	if (IS_TSSI_EXTRA_GROUP(tgidx)) {
3366 		tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
3367 		tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
3368 		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
3369 		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
3370 		val = (tde_1st + tde_2nd) / 2;
3371 
3372 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3373 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
3374 			    path, val, tde_1st, tde_2nd);
3375 	} else {
3376 		val = tssi_info->tssi_trim[path][tgidx];
3377 
3378 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3379 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
3380 			    path, val);
3381 	}
3382 
3383 	return val;
3384 }
3385 
/* Write efuse-derived TSSI DE (diff-error) compensation values, adjusted
 * by the per-channel trim offsets, into the CCK and MCS DE registers of
 * every path for the current channel.
 */
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u8 gidx;
	s8 ofdm_de;
	s8 trim_de;
	s32 val;
	u32 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
		/* CCK DE = efuse CCK value + trim offset */
		gidx = _tssi_get_cck_group(rtwdev, ch);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = tssi_info->tssi_cck[i][gidx] + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_cck_long[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
						  _TSSI_DE_MASK));

		/* OFDM/MCS DE = efuse MCS value + trim offset, fanned out to
		 * every bandwidth variant of the register
		 */
		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = ofdm_de + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
			    i, ofdm_de, trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i],
				       _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_mcs_20m[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
						  _TSSI_DE_MASK));
	}
}
3441 
3442 static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
3443 {
3444 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3445 		    "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n"
3446 		    "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n",
3447 		    R_TSSI_PA_K1 + (path << 13),
3448 		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K1 + (path << 13)),
3449 		    R_TSSI_PA_K2 + (path << 13),
3450 		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K2 + (path << 13)),
3451 		    R_P0_TSSI_ALIM1 + (path << 13),
3452 		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM1 + (path << 13)),
3453 		    R_P0_TSSI_ALIM3 + (path << 13),
3454 		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM3 + (path << 13)),
3455 		    R_TSSI_PA_K5 + (path << 13),
3456 		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K5 + (path << 13)),
3457 		    R_P0_TSSI_ALIM2 + (path << 13),
3458 		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM2 + (path << 13)),
3459 		    R_P0_TSSI_ALIM4 + (path << 13),
3460 		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM4 + (path << 13)),
3461 		    R_TSSI_PA_K8 + (path << 13),
3462 		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K8 + (path << 13)));
3463 }
3464 
3465 static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
3466 				enum rtw89_phy_idx phy, enum rtw89_rf_path path)
3467 {
3468 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3469 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3470 	u8 channel = chan->channel;
3471 	u8 band;
3472 
3473 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3474 		    "======>%s   phy=%d   path=%d\n", __func__, phy, path);
3475 
3476 	if (channel >= 1 && channel <= 14)
3477 		band = TSSI_ALIMK_2G;
3478 	else if (channel >= 36 && channel <= 64)
3479 		band = TSSI_ALIMK_5GL;
3480 	else if (channel >= 100 && channel <= 144)
3481 		band = TSSI_ALIMK_5GM;
3482 	else if (channel >= 149 && channel <= 177)
3483 		band = TSSI_ALIMK_5GH;
3484 	else
3485 		band = TSSI_ALIMK_2G;
3486 
3487 	if (tssi_info->alignment_done[path][band]) {
3488 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
3489 				       tssi_info->alignment_value[path][band][0]);
3490 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
3491 				       tssi_info->alignment_value[path][band][1]);
3492 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
3493 				       tssi_info->alignment_value[path][band][2]);
3494 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
3495 				       tssi_info->alignment_value[path][band][3]);
3496 	}
3497 
3498 	_tssi_alimentk_dump_result(rtwdev, path);
3499 }
3500 
3501 static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3502 			enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
3503 			u8 enable)
3504 {
3505 	enum rtw89_rf_path_bit rx_path;
3506 
3507 	if (path == RF_PATH_A)
3508 		rx_path = RF_A;
3509 	else if (path == RF_PATH_B)
3510 		rx_path = RF_B;
3511 	else if (path == RF_PATH_AB)
3512 		rx_path = RF_AB;
3513 	else
3514 		rx_path = RF_ABCD; /* don't change path, but still set others */
3515 
3516 	if (enable) {
3517 		rtw8852bx_bb_set_plcp_tx(rtwdev);
3518 		rtw8852bx_bb_cfg_tx_path(rtwdev, path);
3519 		rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
3520 		rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
3521 	}
3522 
3523 	rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
3524 }
3525 
3526 static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
3527 				      enum rtw89_phy_idx phy, const u32 reg[],
3528 				      u32 reg_backup[], u32 reg_num)
3529 {
3530 	u32 i;
3531 
3532 	for (i = 0; i < reg_num; i++) {
3533 		reg_backup[i] = rtw89_phy_read32_mask(rtwdev, reg[i], MASKDWORD);
3534 
3535 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3536 			    "[TSSI] Backup BB 0x%x = 0x%x\n", reg[i],
3537 			    reg_backup[i]);
3538 	}
3539 }
3540 
3541 static void _tssi_reload_bb_registers(struct rtw89_dev *rtwdev,
3542 				      enum rtw89_phy_idx phy, const u32 reg[],
3543 				      u32 reg_backup[], u32 reg_num)
3544 
3545 {
3546 	u32 i;
3547 
3548 	for (i = 0; i < reg_num; i++) {
3549 		rtw89_phy_write32_mask(rtwdev, reg[i], MASKDWORD, reg_backup[i]);
3550 
3551 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3552 			    "[TSSI] Reload BB 0x%x = 0x%x\n", reg[i],
3553 			    reg_backup[i]);
3554 	}
3555 }
3556 
3557 static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
3558 {
3559 	u8 channel_index;
3560 
3561 	if (channel >= 1 && channel <= 14)
3562 		channel_index = channel - 1;
3563 	else if (channel >= 36 && channel <= 64)
3564 		channel_index = (channel - 36) / 2 + 14;
3565 	else if (channel >= 100 && channel <= 144)
3566 		channel_index = ((channel - 100) / 2) + 15 + 14;
3567 	else if (channel >= 149 && channel <= 177)
3568 		channel_index = ((channel - 149) / 2) + 38 + 14;
3569 	else
3570 		channel_index = 0;
3571 
3572 	return channel_index;
3573 }
3574 
/* Transmit at two test power levels and collect the TSSI codeword reports.
 *
 * For each of the RTW8852BT_TSSI_PATH_NR measurements: re-arm the TSSI
 * trigger, start PMAC packet TX at power[j], poll (up to 100 * 30us) for
 * the codeword-ready bit, then read the report into @tssi_cw_rpt[j].
 *
 * Returns false if any report never becomes ready (TX is stopped before
 * returning); true when both reports were captured.
 */
static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, const s16 *power,
				u32 *tssi_cw_rpt)
{
	u32 tx_counter, tx_counter_tmp;
	const int retry = 100;
	u32 tmp;
	int j, k;

	for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) {
		/* Pulse the TSSI enable bit to re-arm report generation */
		rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x1);

		tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);

		tmp = rtw89_phy_read32_mask(rtwdev, _tssi_trigger[path], MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] 0x%x = 0x%08x   path=%d\n",
			    _tssi_trigger[path], tmp, path);

		/* First measurement configures the target path; subsequent
		 * ones pass RF_PATH_ABCD, which _tssi_hw_tx() treats as
		 * "leave the path configuration unchanged".
		 */
		if (j == 0)
			_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true);
		else
			_tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true);

		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
		tx_counter_tmp -= tx_counter;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] First HWTXcounter=%d path=%d\n",
			    tx_counter_tmp, path);

		/* Poll for the codeword-ready flag, logging TX progress */
		for (k = 0; k < retry; k++) {
			tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
						    B_TSSI_CWRPT_RDY);
			if (tmp)
				break;

			udelay(30);

			tx_counter_tmp =
				rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
			tx_counter_tmp -= tx_counter;

			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[TSSI PA K] Flow k = %d HWTXcounter=%d path=%d\n",
				    k, tx_counter_tmp, path);
		}

		/* Timed out: stop TX and report failure to the caller */
		if (k >= retry) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
				    k, path);

			_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
			return false;
		}

		tssi_cw_rpt[j] =
			rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
					      B_TSSI_CWRPT);

		_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);

		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
		tx_counter_tmp -= tx_counter;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] Final HWTXcounter=%d path=%d\n",
			    tx_counter_tmp, path);
	}

	return true;
}
3649 
/* TSSI alignment calibration ("aliment K") for one RF path.
 *
 * Backs up the relevant BB registers, performs hardware TX at two test power
 * levels to collect TSSI codeword reports, derives three alignment offsets
 * from the reports and the per-path codeword defaults, programs them into the
 * ALIM registers, and caches the resulting register values per band and per
 * channel so later runs can restore them without re-measuring. On any
 * failure to obtain the reports, the BB state is restored and the function
 * returns without updating the cached results.
 */
static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path)
{
	static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
				      0x78e4, 0x49c0, 0x0d18, 0x0d80};
	static const s16 power_2g[4] = {48, 20, 4, -8};
	static const s16 power_5g[4] = {48, 20, 4, 4};
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
	u32 tssi_cw_rpt[RTW8852BT_TSSI_PATH_NR] = {};
	u8 channel = chan->channel;
	u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
	struct rtw8852bx_bb_tssi_bak tssi_bak;
	s32 aliment_diff, tssi_cw_default;
	u32 start_time, finish_time;
	u32 bb_reg_backup[8] = {};
	const s16 *power;
	u8 band;
	bool ok;
	u32 tmp;
	u8 j;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======> %s   channel=%d   path=%d\n", __func__, channel,
		    path);

	start_time = ktime_get_ns();

	if (chan->band_type == RTW89_BAND_2G)
		power = power_2g;
	else
		power = power_5g;

	/* Map channel to an alignment-K band group; unknown channels fall
	 * back to the 2G group.
	 */
	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	/* Preserve BB/TSSI state so it can be restored unconditionally */
	rtw8852bx_bb_backup_tssi(rtwdev, phy, &tssi_bak);
	_tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup,
				  ARRAY_SIZE(bb_reg_backup));

	/* Configure TSSI averaging for the measurement */
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);

	ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt);
	if (!ok)
		goto out;

	for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] power[%d]=%d  tssi_cw_rpt[%d]=%d\n", j,
			    power[j], j, tssi_cw_rpt[j]);
	}

	/* Derive offset 1 from the two codeword reports relative to the
	 * power delta; offsets 2 and 3 reuse the same alignment difference
	 * on top of their own codeword defaults.
	 */
	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][1],
				    _tssi_cw_default_mask[1]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_1 = tssi_cw_rpt[0] - ((power[0] - power[1]) * 2) -
			     tssi_cw_rpt[1] + tssi_cw_default;
	aliment_diff = tssi_alim_offset_1 - tssi_cw_default;

	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][2],
				    _tssi_cw_default_mask[2]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_2 = tssi_cw_default + aliment_diff;

	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][3],
				    _tssi_cw_default_mask[3]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_3 = tssi_cw_default + aliment_diff;

	if (path == RF_PATH_A) {
		/* NOTE(review): the P1 ALIM field masks are used to compose
		 * the value written to the P0 registers here; presumably the
		 * P0/P1 field layouts are identical — confirm against the
		 * register definitions.
		 */
		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);

		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM1, tmp);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2, B_P0_TSSI_ALIM2, tmp);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] tssi_alim_offset = 0x%x   0x%x   0x%x   0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3, B_P0_TSSI_ALIM31),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM11),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM12),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM13));
	} else {
		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);

		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM1, tmp);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM2, B_P1_TSSI_ALIM2, tmp);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] tssi_alim_offset = 0x%x   0x%x   0x%x   0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM3, B_P1_TSSI_ALIM31),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM11),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM12),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM13));
	}

	/* Cache the programmed register values per band for _tssi_alimentk_done() */
	tssi_info->alignment_done[path][band] = true;
	tssi_info->alignment_value[path][band][0] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][1] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][2] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][3] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);

	/* ... and also per channel index */
	tssi_info->check_backup_aligmk[path][ch_idx] = true;
	tssi_info->alignment_backup_by_ch[path][ch_idx][0] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][1] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][2] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][3] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][0], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM1 + (path << 13),
		    tssi_info->alignment_value[path][band][0]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][1], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM3 + (path << 13),
		    tssi_info->alignment_value[path][band][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][2], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM2 + (path << 13),
		    tssi_info->alignment_value[path][band][2]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][3], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM4 + (path << 13),
		    tssi_info->alignment_value[path][band][3]);

out:
	_tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup,
				  ARRAY_SIZE(bb_reg_backup));
	rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak);
	rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0);

	/* NOTE(review): the accumulated delta is in nanoseconds (from
	 * ktime_get_ns()) and stored in a u32, but the log label says
	 * "ms" — confirm the intended unit/width.
	 */
	finish_time = ktime_get_ns();
	tssi_info->tssi_alimk_time += finish_time - start_time;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] %s processing time = %d ms\n", __func__,
		    tssi_info->tssi_alimk_time);
}
3811 
3812 void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev)
3813 {
3814 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
3815 
3816 	u8 path;
3817 
3818 	for (path = 0; path < 2; path++) {
3819 		dpk->cur_idx[path] = 0;
3820 		dpk->max_dpk_txagc[path] = 0x3F;
3821 	}
3822 
3823 	dpk->is_dpk_enable = true;
3824 	dpk->is_dpk_reload_en = false;
3825 	_set_dpd_backoff(rtwdev, RTW89_PHY_0);
3826 }
3827 
3828 void rtw8852bt_rck(struct rtw89_dev *rtwdev)
3829 {
3830 	u8 path;
3831 
3832 	for (path = 0; path < RF_PATH_NUM_8852BT; path++)
3833 		_rck(rtwdev, path);
3834 }
3835 
3836 void rtw8852bt_dack(struct rtw89_dev *rtwdev)
3837 {
3838 	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
3839 
3840 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
3841 	_dac_cal(rtwdev, false);
3842 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
3843 }
3844 
3845 void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
3846 {
3847 	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
3848 	u32 tx_en;
3849 
3850 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
3851 	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3852 	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3853 
3854 	_iqk_init(rtwdev);
3855 	_iqk(rtwdev, phy_idx, false);
3856 
3857 	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
3858 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
3859 }
3860 
3861 void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
3862 {
3863 	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
3864 	u32 tx_en;
3865 
3866 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
3867 	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3868 	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3869 
3870 	_rx_dck(rtwdev, phy_idx);
3871 
3872 	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
3873 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
3874 }
3875 
3876 void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
3877 {
3878 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3879 		    "[DPK] ****** DPK Start (Ver: 0x%x) ******\n", RTW8852BT_DPK_VER);
3880 
3881 	if (_dpk_bypass_check(rtwdev, phy_idx))
3882 		_dpk_force_bypass(rtwdev, phy_idx);
3883 	else
3884 		_dpk_cal_select(rtwdev, phy_idx, RF_AB);
3885 }
3886 
/* Periodic DPK tracking entry point; delegates to the shared tracker. */
void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev)
{
	_dpk_track(rtwdev);
}
3891 
/* Full TSSI (TX power tracking) setup for @phy.
 *
 * With DPD registers backed up and BT coex notified, TSSI and DPK output
 * are disabled, every per-path TSSI sub-block is configured, and (when
 * @hwtx_en) the hardware-TX alignment calibration is run with scheduled
 * TX paused. Finally TSSI is re-enabled, the efuse DE values are applied
 * and the backed-up registers are restored.
 */
void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
{
	static const u32 reg[2] = {R_DPD_CH0A, R_DPD_CH0B};
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB);
	u32 reg_backup[2] = {};
	u32 tx_en;
	u8 i;

	_tssi_backup_bb_registers(rtwdev, phy, reg, reg_backup, 2);
	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	_tssi_dpk_off(rtwdev, phy);
	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
		/* Configure every TSSI sub-block for this path */
		_tssi_rf_setting(rtwdev, phy, i);
		_tssi_set_sys(rtwdev, phy, i);
		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
		_tssi_set_dck(rtwdev, phy, i);
		_tssi_set_tmeter_tbl(rtwdev, phy, i);
		_tssi_set_dac_gain_tbl(rtwdev, phy, i);
		_tssi_slope_cal_org(rtwdev, phy, i);
		_tssi_alignment_default(rtwdev, phy, i, true);
		_tssi_set_tssi_slope(rtwdev, phy, i);

		/* Alignment calibration needs scheduled TX paused */
		rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
		_tmac_tx_pause(rtwdev, phy, true);
		if (hwtx_en)
			_tssi_alimentk(rtwdev, phy, i);
		_tmac_tx_pause(rtwdev, phy, false);
		rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy);

	_tssi_reload_bb_registers(rtwdev, phy, reg, reg_backup, 2);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
3934 
/* Lightweight TSSI reconfiguration used during channel changes/scans.
 *
 * Unlike rtw8852bt_tssi(), no hardware-TX calibration is run: previously
 * stored alignment results for the current band are restored when present,
 * otherwise the default alignment is applied.
 */
void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 channel = chan->channel;
	u8 band;
	u32 i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s   phy=%d  channel=%d\n", __func__, phy, channel);

	/* Map channel to an alignment-K band group; unknown channels fall
	 * back to the 2G group.
	 */
	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RTW8852BT_TSSI_PATH_NR; i++) {
		_tssi_rf_setting(rtwdev, phy, i);
		_tssi_set_sys(rtwdev, phy, i);
		_tssi_set_tmeter_tbl(rtwdev, phy, i);

		/* Reuse cached alignment results when this band was calibrated */
		if (tssi_info->alignment_done[i][band])
			_tssi_alimentk_done(rtwdev, phy, i);
		else
			_tssi_alignment_default(rtwdev, phy, i, true);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy);
}
3973 
/* Restore the default TX AGC offset after a scan ends.
 *
 * Called from rtw8852bt_wifi_scan_notify(); only the scan-end case
 * (@enable == false) does any work: both paths' TSSI tracking offsets are
 * reset to 0xc0, the offset-enable bits are toggled to latch the value,
 * and the cached alignment results are re-applied.
 */
static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
					 enum rtw89_phy_idx phy, bool enable)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 channel = chan->channel;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s   ch=%d\n",
		    __func__, channel);

	/* Nothing to do at scan start */
	if (enable)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,  0xc0);
	/* Toggle offset-enable on each path to latch the new offset */
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);

	_tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
	_tssi_alimentk_done(rtwdev, phy, RF_PATH_B);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======> %s   SCAN_END\n", __func__);
}
4011 
4012 void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
4013 				enum rtw89_phy_idx phy_idx)
4014 {
4015 	if (scan_start)
4016 		rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, true);
4017 	else
4018 		rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, false);
4019 }
4020