xref: /linux/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2022  Realtek Corporation
3  */
4 
5 #include "chan.h"
6 #include "coex.h"
7 #include "debug.h"
8 #include "mac.h"
9 #include "phy.h"
10 #include "reg.h"
11 #include "rtw8852b.h"
12 #include "rtw8852b_common.h"
13 #include "rtw8852b_rfk.h"
14 #include "rtw8852b_rfk_table.h"
15 #include "rtw8852b_table.h"
16 
/* Calibration-flow version tags (printed in debug logs) and sizing/limit
 * constants for the RX DCK / IQK / TSSI / DPK routines below.
 */
#define RTW8852B_RXDCK_VER 0x1
#define RTW8852B_IQK_VER 0x2a
#define RTW8852B_IQK_SS 2
#define RTW8852B_RXK_GROUP_NR 4
#define RTW8852B_TSSI_PATH_NR 2
#define RTW8852B_RF_REL_VERSION 34
#define RTW8852B_DPK_VER 0x0d
#define RTW8852B_DPK_RF_PATH 2
#define RTW8852B_DPK_KIP_REG_NUM 3

#define _TSSI_DE_MASK GENMASK(21, 12)
#define ADDC_T_AVG 100	/* number of samples averaged in _check_addc() */
#define DPK_TXAGC_LOWER 0x2e
#define DPK_TXAGC_UPPER 0x3f
#define DPK_TXAGC_INVAL 0xff
#define RFREG_MASKRXBB 0x003e0
#define RFREG_MASKMODE 0xf0000
34 
/* One-shot DPK command IDs written to the calibration engine.
 * The D_* entries are presumably the "direct" variants of the plain
 * commands above them -- confirm against the firmware command set.
 */
enum rtw8852b_dpk_id {
	LBK_RXIQK	= 0x06,
	SYNC		= 0x10,
	MDPK_IDL	= 0x11,
	MDPK_MPA	= 0x12,
	GAIN_LOSS	= 0x13,
	GAIN_CAL	= 0x14,
	DPK_RXAGC	= 0x15,
	KIP_PRESET	= 0x16,
	KIP_RESTORE	= 0x17,
	DPK_TXAGC	= 0x19,
	D_KIP_PRESET	= 0x28,
	D_TXAGC		= 0x29,
	D_RXAGC		= 0x2a,
	D_SYNC		= 0x2b,
	D_GAIN_LOSS	= 0x2c,
	D_MDPK_IDL	= 0x2d,
	D_GAIN_NORM	= 0x2f,
	D_KIP_THERMAL	= 0x30,
	D_KIP_RESTORE	= 0x31
};
56 
/* States of the DPK AGC state machine (gain search during DPK). */
enum dpk_agc_step {
	DPK_AGC_STEP_SYNC_DGAIN,
	DPK_AGC_STEP_GAIN_ADJ,
	DPK_AGC_STEP_GAIN_LOSS_IDX,
	DPK_AGC_STEP_GL_GT_CRITERION,	/* gain loss greater than criterion */
	DPK_AGC_STEP_GL_LT_CRITERION,	/* gain loss less than criterion */
	DPK_AGC_STEP_SET_TX_GAIN,
};
65 
/* One-shot IQK command types dispatched by _iqk_one_shot(). */
enum rtw8852b_iqk_type {
	ID_TXAGC = 0x0,
	ID_FLOK_COARSE = 0x1,	/* LO leakage cal, coarse step */
	ID_FLOK_FINE = 0x2,	/* LO leakage cal, fine step */
	ID_TXK = 0x3,
	ID_RXAGC = 0x4,
	ID_RXK = 0x5,
	ID_NBTXK = 0x6,		/* narrow-band TXK */
	ID_NBRXK = 0x7,		/* narrow-band RXK */
	ID_FLOK_VBUFFER = 0x8,
	ID_A_FLOK_COARSE = 0x9,
	ID_G_FLOK_COARSE = 0xa,
	ID_A_FLOK_FINE = 0xb,
	ID_G_FLOK_FINE = 0xc,
	ID_IQK_RESTORE = 0x10,
};
82 
/* Per-path register addresses used by the TSSI flow
 * (index 0 = path A, index 1 = path B).
 */
static const u32 _tssi_trigger[RTW8852B_TSSI_PATH_NR] = {0x5820, 0x7820};
static const u32 _tssi_cw_rpt_addr[RTW8852B_TSSI_PATH_NR] = {0x1c18, 0x3c18};
static const u32 _tssi_cw_default_addr[RTW8852B_TSSI_PATH_NR][4] = {
	{0x5634, 0x5630, 0x5630, 0x5630},
	{0x7634, 0x7630, 0x7630, 0x7630} };
static const u32 _tssi_cw_default_mask[4] = {
	0x000003ff, 0x3ff00000, 0x000ffc00, 0x000003ff};
/* TSSI delta (DE) register addresses per rate/bandwidth group, per path */
static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852B] = {0x5858, 0x7858};
static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852B] = {0x5860, 0x7860};
static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852B] = {0x5838, 0x7838};
static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852B] = {0x5840, 0x7840};
static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852B] = {0x5848, 0x7848};
static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852B] = {0x5850, 0x7850};
static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852B] = {0x5828, 0x7828};
static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852B] = {0x5830, 0x7830};
/* RXK per-gain-group settings; _g_* tables are used for the 2 GHz band
 * (see _rxk_group_sel), _a_* are presumably the 5 GHz counterparts.
 */
static const u32 _a_idxrxgain[RTW8852B_RXK_GROUP_NR] = {0x190, 0x198, 0x350, 0x352};
static const u32 _a_idxattc2[RTW8852B_RXK_GROUP_NR] = {0x0f, 0x0f, 0x3f, 0x7f};
static const u32 _a_idxattc1[RTW8852B_RXK_GROUP_NR] = {0x3, 0x1, 0x0, 0x0};
static const u32 _g_idxrxgain[RTW8852B_RXK_GROUP_NR] = {0x212, 0x21c, 0x350, 0x360};
static const u32 _g_idxattc2[RTW8852B_RXK_GROUP_NR] = {0x00, 0x00, 0x28, 0x5f};
static const u32 _g_idxattc1[RTW8852B_RXK_GROUP_NR] = {0x3, 0x3, 0x2, 0x1};
static const u32 _a_power_range[RTW8852B_RXK_GROUP_NR] = {0x0, 0x0, 0x0, 0x0};
static const u32 _a_track_range[RTW8852B_RXK_GROUP_NR] = {0x3, 0x3, 0x6, 0x6};
static const u32 _a_gain_bb[RTW8852B_RXK_GROUP_NR] = {0x08, 0x0e, 0x06, 0x0e};
static const u32 _a_itqt[RTW8852B_RXK_GROUP_NR] = {0x12, 0x12, 0x12, 0x1b};
static const u32 _g_power_range[RTW8852B_RXK_GROUP_NR] = {0x0, 0x0, 0x0, 0x0};
static const u32 _g_track_range[RTW8852B_RXK_GROUP_NR] = {0x4, 0x4, 0x6, 0x6};
static const u32 _g_gain_bb[RTW8852B_RXK_GROUP_NR] = {0x08, 0x0e, 0x06, 0x0e};
static const u32 _g_itqt[RTW8852B_RXK_GROUP_NR] = {0x09, 0x12, 0x1b, 0x24};
112 
/* BB and RF registers saved before, and restored after, calibration
 * (see _rfk_backup_*_reg() / _rfk_restore_*_reg()).
 */
static const u32 rtw8852b_backup_bb_regs[] = {0x2344, 0x5800, 0x7800};
static const u32 rtw8852b_backup_rf_regs[] = {
	0xde, 0xdf, 0x8b, 0x90, 0x97, 0x85, 0x1e, 0x0, 0x2, 0x5, 0x10005
};

#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852b_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852b_backup_rf_regs)
120 
/* Register writes ({addr, mask, value}) that put both paths (0/1) into the
 * non-DBCC calibration configuration; undone by
 * rtw8852b_restore_nondbcc_path01 below.
 */
static const struct rtw89_reg3_def rtw8852b_set_nondbcc_path01[] = {
	{0x20fc, 0xffff0000, 0x0303},
	{0x5864, 0x18000000, 0x3},
	{0x7864, 0x18000000, 0x3},
	{0x12b8, 0x40000000, 0x1},
	{0x32b8, 0x40000000, 0x1},
	{0x030c, 0xff000000, 0x13},
	{0x032c, 0xffff0000, 0x0041},
	{0x12b8, 0x10000000, 0x1},
	{0x58c8, 0x01000000, 0x1},
	{0x78c8, 0x01000000, 0x1},
	{0x5864, 0xc0000000, 0x3},
	{0x7864, 0xc0000000, 0x3},
	{0x2008, 0x01ffffff, 0x1ffffff},
	{0x0c1c, 0x00000004, 0x1},
	{0x0700, 0x08000000, 0x1},
	{0x0c70, 0x000003ff, 0x3ff},
	{0x0c60, 0x00000003, 0x3},
	{0x0c6c, 0x00000001, 0x1},
	{0x58ac, 0x08000000, 0x1},
	{0x78ac, 0x08000000, 0x1},
	{0x0c3c, 0x00000200, 0x1},
	{0x2344, 0x80000000, 0x1},
	{0x4490, 0x80000000, 0x1},
	{0x12a0, 0x00007000, 0x7},
	{0x12a0, 0x00008000, 0x1},
	{0x12a0, 0x00070000, 0x3},
	{0x12a0, 0x00080000, 0x1},
	{0x32a0, 0x00070000, 0x3},
	{0x32a0, 0x00080000, 0x1},
	{0x0700, 0x01000000, 0x1},
	{0x0700, 0x06000000, 0x2},
	{0x20fc, 0xffff0000, 0x3333},
};
155 
/* Register writes ({addr, mask, value}) that undo
 * rtw8852b_set_nondbcc_path01 and return both paths to normal operation.
 */
static const struct rtw89_reg3_def rtw8852b_restore_nondbcc_path01[] = {
	{0x20fc, 0xffff0000, 0x0303},
	{0x12b8, 0x40000000, 0x0},
	{0x32b8, 0x40000000, 0x0},
	{0x5864, 0xc0000000, 0x0},
	{0x7864, 0xc0000000, 0x0},
	{0x2008, 0x01ffffff, 0x0000000},
	{0x0c1c, 0x00000004, 0x0},
	{0x0700, 0x08000000, 0x0},
	{0x0c70, 0x0000001f, 0x03},
	{0x0c70, 0x000003e0, 0x03},
	{0x12a0, 0x000ff000, 0x00},
	{0x32a0, 0x000ff000, 0x00},
	{0x0700, 0x07000000, 0x0},
	{0x20fc, 0xffff0000, 0x0000},
	{0x58c8, 0x01000000, 0x0},
	{0x78c8, 0x01000000, 0x0},
	{0x0c3c, 0x00000200, 0x0},
	{0x2344, 0x80000000, 0x0},
};
176 
/* Save the BB registers listed in rtw8852b_backup_bb_regs into
 * backup_bb_reg_val so they can be restored after calibration.
 */
static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
	u32 idx;

	for (idx = 0; idx < BACKUP_BB_REGS_NR; idx++) {
		u32 reg = rtw8852b_backup_bb_regs[idx];
		u32 val = rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD);

		backup_bb_reg_val[idx] = val;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]backup bb reg : %x, value =%x\n",
			    reg, val);
	}
}
190 
/* Save the RF registers listed in rtw8852b_backup_rf_regs for the given
 * RF path into backup_rf_reg_val.
 */
static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
			       u8 rf_path)
{
	u32 idx;

	for (idx = 0; idx < BACKUP_RF_REGS_NR; idx++) {
		u32 reg = rtw8852b_backup_rf_regs[idx];
		u32 val = rtw89_read_rf(rtwdev, rf_path, reg, RFREG_MASK);

		backup_rf_reg_val[idx] = val;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]backup rf S%d reg : %x, value =%x\n", rf_path,
			    reg, val);
	}
}
205 
_rfk_restore_bb_reg(struct rtw89_dev * rtwdev,const u32 backup_bb_reg_val[])206 static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev,
207 				const u32 backup_bb_reg_val[])
208 {
209 	u32 i;
210 
211 	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
212 		rtw89_phy_write32_mask(rtwdev, rtw8852b_backup_bb_regs[i],
213 				       MASKDWORD, backup_bb_reg_val[i]);
214 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
215 			    "[RFK]restore bb reg : %x, value =%x\n",
216 			    rtw8852b_backup_bb_regs[i], backup_bb_reg_val[i]);
217 	}
218 }
219 
/* Write the values saved by _rfk_backup_rf_reg() back to the RF registers
 * of the given path.
 */
static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
				const u32 backup_rf_reg_val[], u8 rf_path)
{
	u32 idx;

	for (idx = 0; idx < BACKUP_RF_REGS_NR; idx++) {
		u32 reg = rtw8852b_backup_rf_regs[idx];

		rtw89_write_rf(rtwdev, rf_path, reg, RFREG_MASK,
			       backup_rf_reg_val[idx]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path,
			    reg, backup_rf_reg_val[idx]);
	}
}
234 
/* Select whether the RF path is controlled by the BB (is_bybb) or
 * directly, via the RR_RSV1 reset bit.
 */
static void _rfk_rf_direct_cntrl(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path path, bool is_bybb)
{
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST,
		       is_bybb ? 0x1 : 0x0);
}
243 
/* Select whether the digital RF block is controlled by the BB (is_bybb)
 * or directly, via the RR_BBDC select bit.
 */
static void _rfk_drf_direct_cntrl(struct rtw89_dev *rtwdev,
				  enum rtw89_rf_path path, bool is_bybb)
{
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL,
		       is_bybb ? 0x1 : 0x0);
}
252 
/* Wait for a one-shot NCTL calibration command to complete and report the
 * result. Returns true on failure (timeout, or the HW fail flag was set).
 */
static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path)
{
	bool fail = true;
	u32 val;
	int ret;

	/* HW sets 0xbff8[7:0] to 0x55 when the one-shot command is done */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       1, 8200, false, rtwdev, 0xbff8, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL1 IQK timeout!!!\n");

	udelay(200);

	/* Only read the fail flag if the command actually completed;
	 * on timeout keep the pessimistic default (fail = true).
	 */
	if (!ret)
		fail = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
	/* clear the handshake byte for the next one-shot command */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
	val = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8008 = 0x%x\n", path, val);

	return fail;
}
276 
_kpath(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx)277 static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
278 {
279 	u8 val;
280 
281 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,PHY%d\n",
282 		    rtwdev->dbcc_en, phy_idx);
283 
284 	if (!rtwdev->dbcc_en) {
285 		val = RF_AB;
286 	} else {
287 		if (phy_idx == RTW89_PHY_0)
288 			val = RF_A;
289 		else
290 			val = RF_B;
291 	}
292 	return val;
293 }
294 
/* Trigger one RX DC offset calibration on the given path: clear the
 * previous result, then pulse the DCK level bit 0 -> 1 to start, and let
 * the hardware settle for 1 ms.
 */
static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
	mdelay(1);
}
303 
/* Run RX DC offset calibration (RX DCK) on every RF path, preserving the
 * RR_RSV1 and DCK fine-tune settings around the calibration.
 */
static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 path, dck_tune;
	u32 rf_reg5;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, CV : 0x%x) ******\n",
		    RTW8852B_RXDCK_VER, rtwdev->hal.cv);

	for (path = 0; path < RF_PATH_NUM_8852B; path++) {
		/* save settings overridden below */
		rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
		dck_tune = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);

		/* NOTE(review): TSSI_TRK_EN is written 1 before and 0 after
		 * the calibration - presumably pausing TSSI tracking while
		 * DCK runs; confirm the bit polarity against the TSSI code.
		 */
		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x1);

		/* direct RF control, clear fine tune, force RX mode, then DCK */
		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
		_set_rx_dck(rtwdev, phy, path);
		/* restore the saved settings */
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
		rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x0);
	}
}
335 
/* Run RC calibration (RCK) on one RF path: trigger via RR_RCKC, poll the
 * done bit, then latch the resulting RC code back into RR_RCKC.
 */
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u32 rf_reg5;
	u32 rck_val;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

	/* save RR_RSV1; restored at the end */
	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

	/* take direct RF control and force RX mode for the calibration */
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	/* RCK trigger */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);

	/* poll the RCK done bit; on timeout the last read value is still
	 * latched below and only the debug log reports the failure
	 */
	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30,
				       false, rtwdev, path, RR_RCKS, BIT(3));

	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n",
		    rck_val, ret);

	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK));
}
370 
/* Initialize the analog front end: enable PHY register access and replay
 * the chip-specific AFE init table.
 */
static void _afe_init(struct rtw89_dev *rtwdev)
{
	rtw89_write32(rtwdev, R_AX_PHYREG_SET, 0xf);

	rtw89_rfk_parser(rtwdev, &rtw8852b_afe_init_defs_tbl);
}
377 
/* Run digital-die RC calibration (DRCK): kick the calibration, poll for
 * completion, latch the result and program it back as the manual value.
 */
static void _drck(struct rtw89_dev *rtwdev)
{
	u32 rck_d;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n");
	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_KICK, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, R_DRCK_RS, B_DRCK_RS_DONE);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");

	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_KICK, 0x0);
	/* pulse the latch bit to capture the calibration result */
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0);
	rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RS, B_DRCK_RS_LPS);
	/* switch to the manual code and write the calibrated value */
	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_SEL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_CV, rck_d);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0cc = 0x%x\n",
		    rtw89_phy_read32_mask(rtwdev, R_DRCK_V1, MASKDWORD));
}
403 
/* Read back the ADC DC offset calibration (ADDCK) results for both paths
 * into the dack info, for later reload by _addck_reload().
 */
static void _addck_backup(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	/* S0: stop the calibration engine, then read the two channel results */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
	dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0);
	dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1);

	/* S1 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
	dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A0);
	dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A1);
}
416 
/* Program the ADDCK results saved by _addck_backup() back into the
 * hardware and switch both paths to manual-value mode (MAN = 0x3).
 * The second channel value is split: its upper bits go through the
 * B_ADDCK*_VAL field and its low 6 bits through B_ADDCK*D_VAL2.
 */
static void _addck_reload(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	/* S0 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK0D_VAL, dack->addck_d[0][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_VAL, dack->addck_d[0][1] >> 6);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK0D_VAL2, dack->addck_d[0][1] & 0x3f);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x3);

	/* S1 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1D, B_ADDCK1D_VAL, dack->addck_d[1][0]);
	/* NOTE(review): B_ADDCK0_VAL mask is used on R_ADDCK1 here -
	 * presumably the S0/S1 field layouts match; confirm against
	 * B_ADDCK1_VAL in reg.h.
	 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK0_VAL, dack->addck_d[1][1] >> 6);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1D, B_ADDCK1D_VAL2, dack->addck_d[1][1] & 0x3f);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_MAN, 0x3);
}
433 
/* Save the S0 (path A) DAC calibration results - per-index MSBK codes for
 * both channels, plus bias and DADCK codes - into the dack info.
 */
static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	/* enable debug readback mode */
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		/* select index i, then read the MSBK code for each channel */
		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
		dack->msbk_d[0][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0);
		rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
		dack->msbk_d[0][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1);
	}

	dack->biask_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00);
	dack->biask_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01);

	dack->dadck_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00);
	dack->dadck_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01);
}
460 
/* Save the S1 (path B) DAC calibration results - per-index MSBK codes for
 * both channels, plus bias and DADCK codes - into the dack info.
 */
static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	/* enable debug readback mode */
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		/* select index i, then read the MSBK code for each channel */
		rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
		dack->msbk_d[1][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK10S);
		rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
		dack->msbk_d[1][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK11S);
	}

	dack->biask_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, B_DACK_BIAS10);
	dack->biask_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, B_DACK_BIAS11);

	dack->dadck_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, B_DACK_DADCK10);
	dack->dadck_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, B_DACK_DADCK11);
}
487 
/* Measure the residual ADC DC offset on one path by averaging
 * ADDC_T_AVG samples from the debug port and logging the result.
 * Each sample packs two 12-bit signed values: I in bits [23:12],
 * Q in bits [11:0].
 */
static void _check_addc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	s32 dc_re = 0, dc_im = 0;
	u32 tmp;
	u32 i;

	/* route the debug port to the selected path's ADC output */
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_check_addc_defs_a_tbl,
				 &rtw8852b_check_addc_defs_b_tbl);

	for (i = 0; i < ADDC_T_AVG; i++) {
		tmp = rtw89_phy_read32_mask(rtwdev, R_DBG32_D, MASKDWORD);
		dc_re += sign_extend32(FIELD_GET(0xfff000, tmp), 11);
		dc_im += sign_extend32(FIELD_GET(0xfff, tmp), 11);
	}

	dc_re /= ADDC_T_AVG;
	dc_im /= ADDC_T_AVG;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S%d,dc_re = 0x%x,dc_im =0x%x\n", path, dc_re, dc_im);
}
510 
/* Run ADC DC offset calibration (ADDCK) on both paths. For each path:
 * configure the analog parameters for the measurement, log the DC offset
 * before, trigger the calibration, poll for completion (flagging a
 * timeout in dack->addck_timeout), log the DC offset after, and restore
 * the analog configuration.
 */
static void _addck(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	/* S0 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x0);
	/* NOTE(review): this writes the PATH1 sample-delay register inside
	 * the S0 section - confirm whether PATH0 was intended.
	 */
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x30, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(1), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	/* pulse the trigger, then start the calibration */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x0);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, R_ADDCKR0, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
		dack->addck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	/* restore S0 analog configuration */
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(1), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);

	/* S1 */
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(1), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	/* pulse the trigger, then start the calibration */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x0);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, R_ADDCKR1, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
		dack->addck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	/* restore S1 analog configuration */
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(1), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}
587 
/* Measure the residual DAC-to-ADC loopback DC offset on one path:
 * enable the DADC check configuration, reuse _check_addc() for the
 * measurement, then disable the configuration again.
 */
static void _check_dadc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_check_dadc_en_defs_a_tbl,
				 &rtw8852b_check_dadc_en_defs_b_tbl);

	_check_addc(rtwdev, path);

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_check_dadc_dis_defs_a_tbl,
				 &rtw8852b_check_dadc_dis_defs_b_tbl);
}
600 
/* Poll helper for S0 DAC calibration: part1 selects the MSBK stage,
 * otherwise the DADCK stage. Done only when BOTH per-channel OK flags of
 * the selected stage are set.
 */
static bool _dack_s0_check_done(struct rtw89_dev *rtwdev, bool part1)
{
	bool done;

	if (part1)
		done = rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) != 0 &&
		       rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) != 0;
	else
		done = rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) != 0 &&
		       rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) != 0;

	return done;
}
615 
/* Run the S0 (path A) DAC calibration: replay the stage tables, polling
 * for completion after the MSBK and DADCK stages (timeouts are recorded
 * in the dack info), then save the results and leave debug mode.
 */
static void _dack_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_1_defs_tbl);

	/* stage 1: MSBK */
	ret = read_poll_timeout_atomic(_dack_s0_check_done, done, done, 1, 10000,
				       false, rtwdev, true);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK timeout\n");
		dack->msbk_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_2_defs_tbl);

	/* stage 2: DADCK */
	ret = read_poll_timeout_atomic(_dack_s0_check_done, done, done, 1, 10000,
				       false, rtwdev, false);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADCK timeout\n");
		dack->dadck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_3_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");

	_dack_backup_s0(rtwdev);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
}
649 
/* Poll helper for S1 DAC calibration: part1 selects the MSBK stage,
 * otherwise the DADCK stage. Done only when BOTH per-channel OK flags of
 * the selected stage are set.
 *
 * Fix: the "== 0" checks were joined with '&&', which reported done as
 * soon as EITHER OK flag asserted. The S0 counterpart
 * (_dack_s0_check_done) requires both flags; use '||' here for the same
 * all-channels-ready semantics.
 */
static bool _dack_s1_check_done(struct rtw89_dev *rtwdev, bool part1)
{
	if (part1) {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0)
			return false;
	} else {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK_S1P2_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK_S1P3_OK) == 0)
			return false;
	}

	return true;
}
664 
/* Run the S1 (path B) DAC calibration: replay the stage tables, polling
 * for completion after the MSBK and DADCK stages (timeouts are recorded
 * in the dack info), verify via the DADC loopback check, then save the
 * results and leave debug mode.
 */
static void _dack_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_1_defs_tbl);

	/* stage 1: MSBK */
	ret = read_poll_timeout_atomic(_dack_s1_check_done, done, done, 1, 10000,
				       false, rtwdev, true);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK timeout\n");
		dack->msbk_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_2_defs_tbl);

	/* stage 2: DADCK */
	ret = read_poll_timeout_atomic(_dack_s1_check_done, done, done, 1, 10000,
				       false, rtwdev, false);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DADCK timeout\n");
		dack->dadck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_3_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");

	_check_dadc(rtwdev, RF_PATH_B);
	_dack_backup_s1(rtwdev);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}
699 
/* Run DAC calibration on both paths, S0 then S1. */
static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
	_dack_s1(rtwdev);
}
705 
_dack_dump(struct rtw89_dev * rtwdev)706 static void _dack_dump(struct rtw89_dev *rtwdev)
707 {
708 	struct rtw89_dack_info *dack = &rtwdev->dack;
709 	u8 i;
710 	u8 t;
711 
712 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
713 		    "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
714 		    dack->addck_d[0][0], dack->addck_d[0][1]);
715 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
716 		    "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
717 		    dack->addck_d[1][0], dack->addck_d[1][1]);
718 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
719 		    "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
720 		    dack->dadck_d[0][0], dack->dadck_d[0][1]);
721 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
722 		    "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
723 		    dack->dadck_d[1][0], dack->dadck_d[1][1]);
724 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
725 		    "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
726 		    dack->biask_d[0][0], dack->biask_d[0][1]);
727 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
728 		    "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
729 		    dack->biask_d[1][0], dack->biask_d[1][1]);
730 
731 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
732 	for (i = 0; i < 0x10; i++) {
733 		t = dack->msbk_d[0][0][i];
734 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
735 	}
736 
737 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
738 	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
739 		t = dack->msbk_d[0][1][i];
740 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
741 	}
742 
743 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
744 	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
745 		t = dack->msbk_d[1][0][i];
746 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
747 	}
748 
749 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
750 	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
751 		t = dack->msbk_d[1][1][i];
752 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
753 	}
754 }
755 
/* Full DAC calibration entry point: AFE init, digital-die RCK, ADDCK
 * (run, save, reload), then DACK, preserving the RF mode registers and
 * BB-control of both paths around the sequence.
 *
 * NOTE(review): the 'force' parameter is not used in this body - confirm
 * whether it is intentional (kept for a common callback signature).
 */
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 rf0_0, rf1_0;

	dack->dack_done = false;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK 0x1\n");
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");

	/* save RF mode of both paths; restored at the end */
	rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
	rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
	_afe_init(rtwdev);
	_drck(rtwdev);

	/* take direct RF control and force the calibration RF mode */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
	_addck(rtwdev);
	_addck_backup(rtwdev);
	_addck_reload(rtwdev);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
	_dack(rtwdev);
	_dack_dump(rtwdev);
	dack->dack_done = true;

	/* restore RF mode and return control to the BB */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
	dack->dack_cnt++;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}
791 
/* Prepare one RF path for RXK: force RF mode 0xc, set the band-specific
 * RXK select bit, and mirror the current channel config (RR_CFGCH) into
 * RR_RSV4. Does nothing for bands other than 2G/5G.
 */
static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
		tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
		rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
		tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
		rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
		break;
	default:
		break;
	}
}
814 
/* Build and fire one NCTL one-shot IQK command for the given path and
 * calibration type, then wait for completion via _iqk_check_cal().
 * Bit (4 + path) of the command selects the path; for TXK/RXK the command
 * also encodes the path bandwidth in bits [11:8]. Returns true on failure
 * (timeout or HW fail flag), false on success or unknown ktype.
 */
static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path, u8 ktype)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 iqk_cmd;
	bool fail;

	switch (ktype) {
	case ID_FLOK_COARSE:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x108 | (1 << (4 + path));
		break;
	case ID_FLOK_FINE:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x208 | (1 << (4 + path));
		break;
	case ID_FLOK_VBUFFER:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x308 | (1 << (4 + path));
		break;
	case ID_TXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
		/* bandwidth-dependent command: (0x8 + bw) in bits [11:8] */
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_RXAGC:
		iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_RXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		/* bandwidth-dependent command: (0xb + bw) in bits [11:8] */
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_NBTXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x011);
		iqk_cmd = 0x408 | (1 << (4 + path));
		break;
	case ID_NBRXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
		iqk_cmd = 0x608 | (1 << (4 + path));
		break;
	default:
		return false;
	}

	/* +1 presumably sets the trigger bit of the command word - confirm */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
	udelay(1);
	fail = _iqk_check_cal(rtwdev, path);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	return fail;
}
869 
/* Wideband RX IQK: run an RXK one-shot for each of the
 * RTW8852B_RXK_GROUP_NR gain/attenuation groups on @path, recording
 * per-group failure bits in R_IQKINF.
 *
 * On any group failure the RX CFIR result is invalidated (hardware CFIR
 * bypassed, default NB coefficient stored); otherwise the wideband CFIR
 * result is kept enabled.
 *
 * Returns true if at least one group failed.
 */
static bool _rxk_group_sel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			   u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool fail;
	u8 gp;

	for (gp = 0; gp < RTW8852B_RXK_GROUP_NR; gp++) {
		/* per-band RX gain and attenuation for this group */
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
				       _g_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G,
				       _g_idxattc2[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G,
				       _g_idxattc1[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
				       _a_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT,
				       _a_idxattc2[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2,
				       _a_idxattc1[gp]);
			break;
		default:
			break;
		}

		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SET, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP_V1, gp);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
		/* record this group's pass/fail bit for debug readback */
		rtw89_phy_write32_mask(rtwdev, R_IQKINF,
				       BIT(16 + gp + path * 4), fail);
		kfail |= fail;
	}
	/* NOTE(review): SEL5G is cleared regardless of band — presumably
	 * intentional (also clears the shared RXK path); confirm with vendor.
	 */
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);

	if (kfail) {
		iqk_info->nb_rxcfir[path] = 0x40000002;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_RXCFIR, 0x0);
		iqk_info->is_wb_rxiqk[path] = false;
	} else {
		iqk_info->nb_rxcfir[path] = 0x40000000;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_RXCFIR, 0x5);
		iqk_info->is_wb_rxiqk[path] = true;
	}

	return kfail;
}
927 
/* Narrowband RX IQK: run a single NBRXK one-shot on @path using the
 * fixed gain group 0x3 and store the resulting RX CFIR coefficient
 * (ORed with 0x2 to mark it valid) or a default value on failure.
 *
 * Returns true on failure.
 */
static bool _iqk_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		       u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	const u8 gp = 0x3;
	bool kfail = false;
	bool fail;

	/* per-band RX gain and attenuation for the fixed group */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
			       _g_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G,
			       _g_idxattc2[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G,
			       _g_idxattc1[gp]);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
			       _a_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT,
			       _a_idxattc2[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2,
			       _a_idxattc1[gp]);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
	udelay(1);

	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
	/* record pass/fail bit for debug readback */
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
	kfail |= fail;
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);

	if (!kfail)
		iqk_info->nb_rxcfir[path] =
			 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD) | 0x2;
	else
		iqk_info->nb_rxcfir[path] = 0x40000002;

	return kfail;
}
976 
/* Configure the ADC/RX clock chain on both paths before RX IQK.
 *
 * The original code duplicated the full 14-write sequence for the 80 MHz
 * and the 20/40 MHz cases, with only two values differing; the sequence
 * is now written once, with the RXCK divider value and the ADC clock
 * select chosen up front. The register write order is unchanged.
 */
static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool is_80m = iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80;
	u32 rxck_val = is_80m ? 0x2 : 0x1;	/* RX clock divider select */
	u32 adc_val = is_80m ? 0x1 : 0x0;	/* ADC clock select */

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_VAL, rxck_val);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_VAL, rxck_val);
	rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, adc_val);
}
1015 
/* Wideband TX IQK: run a TXK one-shot for each TX gain group on @path,
 * recording per-group failure bits in R_IQKINF.
 *
 * NOTE(review): the loop bound reuses RTW8852B_RXK_GROUP_NR (4) for the
 * TX groups — presumably the TX group count is the same; confirm against
 * the _g/_a table sizes if they ever change.
 *
 * On any failure the TX CFIR result is invalidated; otherwise the
 * wideband result is kept. Returns true if at least one group failed.
 */
static bool _txk_group_sel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool fail;
	u8 gp;

	for (gp = 0x0; gp < RTW8852B_RXK_GROUP_NR; gp++) {
		/* per-band TX gain (power range, track range, BB gain, ITQT) */
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
				       _g_power_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
				       _g_track_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
				       _g_gain_bb[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, _g_itqt[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
				       _a_power_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
				       _a_track_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
				       _a_gain_bb[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, _a_itqt[gp]);
			break;
		default:
			break;
		}

		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SET, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_G2, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP, gp);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
		/* record this group's pass/fail bit for debug readback */
		rtw89_phy_write32_mask(rtwdev, R_IQKINF,
				       BIT(8 + gp + path * 4), fail);
		kfail |= fail;
	}

	if (kfail) {
		iqk_info->nb_txcfir[path] = 0x40000002;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_TXCFIR, 0x0);
		iqk_info->is_wb_txiqk[path] = false;
	} else {
		iqk_info->nb_txcfir[path] = 0x40000000;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_TXCFIR, 0x5);
		iqk_info->is_wb_txiqk[path] = true;
	}

	return kfail;
}
1078 
/* Narrowband TX IQK: run a single NBTXK one-shot on @path using the
 * fixed TX gain group 0x2 and store the resulting TX CFIR coefficient
 * (ORed with 0x2 to mark it valid) or a default value on failure.
 *
 * Returns true on failure.
 */
static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail;
	u8 gp = 0x2;

	/* per-band TX gain (power range, track range, BB gain, ITQT) */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
			       _g_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
			       _g_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
			       _g_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _g_itqt[gp]);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
			       _a_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
			       _a_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
			       _a_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _a_itqt[gp]);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	kfail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);

	if (!kfail)
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8),
					      MASKDWORD) | 0x2;
	else
		iqk_info->nb_txcfir[path] = 0x40000002;

	return kfail;
}
1126 
/* Program the LOK bias @ibias into the RF LUT for @path (LUT address 0
 * for 2G, 1 for 5G) and enable the TX vbuffer DAC, then dump RF 0x7c
 * for debugging.
 */
static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 lut_addr;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ibias = %x\n", path, ibias);

	lut_addr = iqk_info->iqk_band[path] == RTW89_BAND_2G ? 0x0 : 0x1;

	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x2);
	rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, lut_addr);
	rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ibias);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_TXVBUF, RR_TXVBUF_DACEN, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x7c = %x\n", path,
		    rtw89_read_rf(rtwdev, path, RR_TXVBUF, RFREG_MASK));
}
1146 
_lok_finetune_check(struct rtw89_dev * rtwdev,u8 path)1147 static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
1148 {
1149 	struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
1150 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1151 	u8 ch = rfk_mcc->table_idx;
1152 	bool is_fail1, is_fail2;
1153 	u32 vbuff_i;
1154 	u32 vbuff_q;
1155 	u32 core_i;
1156 	u32 core_q;
1157 	u32 tmp;
1158 
1159 	tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
1160 	core_i = FIELD_GET(RR_TXMO_COI, tmp);
1161 	core_q = FIELD_GET(RR_TXMO_COQ, tmp);
1162 
1163 	if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
1164 		is_fail1 = true;
1165 	else
1166 		is_fail1 = false;
1167 
1168 	iqk_info->lok_idac[ch][path] = tmp;
1169 
1170 	tmp = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK);
1171 	vbuff_i = FIELD_GET(RR_LOKVB_COI, tmp);
1172 	vbuff_q = FIELD_GET(RR_LOKVB_COQ, tmp);
1173 
1174 	if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d)
1175 		is_fail2 = true;
1176 	else
1177 		is_fail2 = false;
1178 
1179 	iqk_info->lok_vbuf[ch][path] = tmp;
1180 
1181 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1182 		    "[IQK]S%x, lok_idac[%x][%x] = 0x%x\n", path, ch, path,
1183 		    iqk_info->lok_idac[ch][path]);
1184 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1185 		    "[IQK]S%x, lok_vbuf[%x][%x] = 0x%x\n", path, ch, path,
1186 		    iqk_info->lok_vbuf[ch][path]);
1187 
1188 	return is_fail1 | is_fail2;
1189 }
1190 
/* Run the full LO leakage calibration (LOK) sequence on @path:
 * coarse -> vbuffer -> fine -> vbuffer, alternating the TX gain (TG)
 * between 0x0 and 0x12 and the KIP IQP value between 0x9 and 0x24
 * around each one-shot. Coarse/fine failures are latched into
 * lok_cor_fail/lok_fin_fail; the final verdict comes from
 * _lok_finetune_check() (true = fail).
 *
 * NOTE(review): several of the band switches below program identical
 * values for 2G and 5G — presumably kept split to mirror the vendor
 * calibration flow; do not collapse without hardware confirmation.
 */
static bool _iqk_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);

	/* band-specific track range; power range is 0 on both bands */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x4);
		break;
	default:
		break;
	}

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	default:
		break;
	}

	/* coarse LOK */
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x9);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
	iqk_info->lok_cor_fail[0][path] = tmp;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	default:
		break;
	}

	/* vbuffer calibration after coarse */
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x24);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	default:
		break;
	}

	/* fine LOK */
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x9);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
	iqk_info->lok_fin_fail[0][path] = tmp;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	default:
		break;
	}

	/* final vbuffer calibration; its one-shot result is not latched */
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x24);
	_iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);

	return _lok_finetune_check(rtwdev, path);
}
1272 
/* Prepare the RF front end on @path for TX IQK/LOK: disable the
 * external LNA switch, set TX attenuation/bias for the active band,
 * select the LOK LUT entry, and put the RF in IQK mode (0x403e).
 */
static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RR_XALNA2_SW2, 0x00);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x00);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		udelay(1);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x00);
		rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x80);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		udelay(1);
		break;
	default:
		break;
	}
}
1302 
/* Configure the DAC/TX clock chain (both paths) before TX IQK/LOK.
 * The udelay(1) calls give the analog blocks time to settle between
 * the power-mode transitions; @path is currently unused here because
 * the writes are not path-indexed.
 */
static void _iqk_txclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
}
1315 
/* Publish the per-path IQK outcome: mirror the LOK/TXK/RXK fail flags
 * into the R_IQKINF debug register, back up the raw CFIR/result
 * registers into iqk_info, and update the run/fail counters in
 * R_IQKINF2. @phy_idx is currently unused.
 */
static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp;
	bool flag;

	/* per-path fail flags, shifted into this path's nibble */
	flag = iqk_info->lok_cor_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag);
	flag = iqk_info->lok_fin_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag);
	flag = iqk_info->iqk_tx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag);
	flag = iqk_info->iqk_rx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag);

	/* snapshot calibration results for later restore/debug */
	tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
	iqk_info->bp_iqkenable[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_txkresult[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_rxkresult[path] = tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT, iqk_info->iqk_times);

	/* bump the failure counter if any fail bit is set for this path */
	tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4));
	if (tmp)
		iqk_info->iqk_fail_cnt++;
	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4),
			       iqk_info->iqk_fail_cnt);
}
1346 
/* Full per-path IQK flow: TX clock setup, up to three LOK attempts with
 * increasing bias, then TX IQK and RX IQK (narrowband or wideband
 * depending on is_nbiqk), and finally result bookkeeping.
 */
static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool lok_fail = false;
	u8 ibias = 0x1;
	u8 attempt;

	_iqk_txclk_setting(rtwdev, path);

	/* LOK: retry with a larger bias each round until it passes */
	for (attempt = 0; attempt < 3; attempt++) {
		_lok_res_table(rtwdev, path, ibias);
		ibias++;
		_iqk_txk_setting(rtwdev, path);
		lok_fail = _iqk_lok(rtwdev, phy_idx, path);
		if (!lok_fail)
			break;
	}

	if (lok_fail)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] LOK (%d) fail\n", path);

	/* TXK */
	iqk_info->iqk_tx_fail[0][path] = iqk_info->is_nbiqk ?
					 _iqk_nbtxk(rtwdev, phy_idx, path) :
					 _txk_group_sel(rtwdev, phy_idx, path);

	/* RX */
	_iqk_rxclk_setting(rtwdev, path);
	_iqk_rxk_setting(rtwdev, path);
	iqk_info->iqk_rx_fail[0][path] = iqk_info->is_nbiqk ?
					 _iqk_nbrxk(rtwdev, phy_idx, path) :
					 _rxk_group_sel(rtwdev, phy_idx, path);

	_iqk_info_iqk(rtwdev, phy_idx, path);
}
1385 
/* Capture the current channel context (band/bandwidth/channel) into
 * iqk_info for @path, detect the SYN1-to-2 state from register 0x35c,
 * and publish the channel info plus IQK version into the R_IQKINF /
 * R_IQKCH debug registers.
 */
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
			     enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 idx = rfk_mcc->table_idx;
	u32 reg_rf18;
	u32 reg_35c;

	reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
	reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN);

	iqk_info->iqk_band[path] = chan->band_type;
	iqk_info->iqk_bw[path] = chan->band_width;
	iqk_info->iqk_ch[path] = chan->channel;
	iqk_info->iqk_mcc_ch[idx][path] = chan->channel;
	iqk_info->iqk_table_idx[path] = idx;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x, idx = %x\n",
		    path, reg_rf18, idx);
	/* NOTE(review): reg_rf18 is logged twice — presumably a leftover
	 * copy-paste in the vendor flow; harmless but redundant.
	 */
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x\n",
		    path, reg_rf18);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n",
		    iqk_info->iqk_times, idx);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_mcc_ch[%x][%x] = 0x%x\n",
		    idx, path, iqk_info->iqk_mcc_ch[idx][path]);

	if (reg_35c == 0x01)
		iqk_info->syn1to2 = 0x1;
	else
		iqk_info->syn1to2 = 0x0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, iqk_info->syn1to2= 0x%x\n", path,
		    iqk_info->syn1to2);

	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852B_IQK_VER);
	/* 2GHz/5GHz/6GHz = 0/1/2 */
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16),
			       iqk_info->iqk_band[path]);
	/* 20/40/80 = 0/1/2 */
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16),
			       iqk_info->iqk_bw[path]);
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16),
			       iqk_info->iqk_ch[path]);
}
1433 
/* Entry point for a single-path IQK run; currently a thin wrapper
 * around _iqk_by_path(), kept to mirror the vendor calibration flow.
 */
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	_iqk_by_path(rtwdev, phy_idx, path);
}
1438 
/* Write the final TX/RX CFIR coefficients back to hardware, issue the
 * KIP-restore one-shot, then return the NCTL/KIP blocks and the RF path
 * to normal operation (RF mode 0x3 = RX, RST released, BBDC re-enabled).
 */
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail;

	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_txcfir[path]);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_rxcfir[path]);
	/* 0xe19 + (path << 4): KIP restore one-shot command for this path */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
			       0x00000e19 + (path << 4));
	fail = _iqk_check_cal(rtwdev, path);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s result =%x\n", __func__, fail);

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS, B_IQK_RES_K, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K1, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K2, 0x0);
	/* NOTE(review): RR_LUTWE LOK is cleared twice — presumably a
	 * vendor-flow artifact; left as-is since RF write timing may matter.
	 */
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0x3);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
}
1466 
/* Restore the AFE/BB registers after IQK by replaying the non-DBCC
 * restore table. Single-path runs (RF_A or RF_B alone) skip the
 * restore, matching the vendor flow; @path is unused.
 */
static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	const struct rtw89_reg3_def *def = rtw8852b_restore_nondbcc_path01;
	int n = ARRAY_SIZE(rtw8852b_restore_nondbcc_path01);
	u8 kpath;
	int i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);

	kpath = _kpath(rtwdev, phy_idx);
	if (kpath == RF_A || kpath == RF_B)
		return;

	for (i = 0; i < n; i++, def++)
		rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}
1492 
/* Prime the KIP block for IQK on @path: select the channel-table index
 * for the coefficient/LUT registers, hold the RF in reset with BBDC
 * disabled, and enable the NCTL report path. Dumps the LUT and
 * coefficient-select registers for debugging.
 */
static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
	u8 idx = rfk_mcc->table_idx;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] idx = %x\n", idx);

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK](1)S%x, 0x8%x54 = 0x%x\n", path, 1 << path,
		    rtw89_phy_read32_mask(rtwdev, R_CFIR_LUT + (path << 8), MASKDWORD));
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK](1)S%x, 0x8%x04 = 0x%x\n", path, 1 << path,
		    rtw89_phy_read32_mask(rtwdev, R_COEF_SEL + (path << 8), MASKDWORD));
}
1513 
/* Prepare the MAC/BB for IQK by replaying the non-DBCC setup table.
 * Single-path runs (RF_A or RF_B alone) skip the setup, matching the
 * vendor flow; @path is unused.
 */
static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	const struct rtw89_reg3_def *def = rtw8852b_set_nondbcc_path01;
	int n = ARRAY_SIZE(rtw8852b_set_nondbcc_path01);
	u8 kpath;
	int i;

	kpath = _kpath(rtwdev, phy_idx);
	if (kpath == RF_A || kpath == RF_B)
		return;

	for (i = 0; i < n; i++, def++)
		rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}
1537 
_iqk_init(struct rtw89_dev * rtwdev)1538 static void _iqk_init(struct rtw89_dev *rtwdev)
1539 {
1540 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1541 	u8 idx, path;
1542 
1543 	rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
1544 	if (iqk_info->is_iqk_init)
1545 		return;
1546 
1547 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1548 	iqk_info->is_iqk_init = true;
1549 	iqk_info->is_nbiqk = false;
1550 	iqk_info->iqk_fft_en = false;
1551 	iqk_info->iqk_sram_en = false;
1552 	iqk_info->iqk_cfir_en = false;
1553 	iqk_info->iqk_xym_en = false;
1554 	iqk_info->iqk_times = 0x0;
1555 
1556 	for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
1557 		iqk_info->iqk_channel[idx] = 0x0;
1558 		for (path = 0; path < RTW8852B_IQK_SS; path++) {
1559 			iqk_info->lok_cor_fail[idx][path] = false;
1560 			iqk_info->lok_fin_fail[idx][path] = false;
1561 			iqk_info->iqk_tx_fail[idx][path] = false;
1562 			iqk_info->iqk_rx_fail[idx][path] = false;
1563 			iqk_info->iqk_mcc_ch[idx][path] = 0x0;
1564 			iqk_info->iqk_table_idx[path] = 0x0;
1565 		}
1566 	}
1567 }
1568 
/* For every path selected in the @kpath bitmask, poll RF_MOD until the
 * path leaves TX mode (mode != 2), checking every 2 us with a 5 ms
 * timeout. The result is only logged; a timeout is not treated as
 * fatal here.
 */
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
	u32 rf_mode;
	u8 path;
	int ret;

	for (path = 0; path < RF_PATH_MAX; path++) {
		if (!(kpath & BIT(path)))
			continue;

		ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode,
					       rf_mode != 2, 2, 5000, false,
					       rtwdev, path, RR_MOD, RR_MOD_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", path, ret);
	}
}
1586 
/* When pausing TX ahead of a calibration, wait for every path on
 * @band_idx to drain out of TX mode; un-pausing needs no wait.
 */
static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx,
			   bool is_pause)
{
	if (is_pause)
		_wait_rx_mode(rtwdev, _kpath(rtwdev, band_idx));
}
1595 
/* Run the complete IQK sequence for one RF path: notify BT coex, fetch
 * channel info, back up BB/RF registers, set up MAC/BB and the KIP
 * block, execute the calibration, then restore everything in reverse
 * order and notify coex again. @force is currently unused.
 */
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
		   enum rtw89_phy_idx phy_idx, u8 path,
		   enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852B_IQK_SS][BACKUP_RF_REGS_NR];
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]==========IQK start!!!!!==========\n");
	iqk_info->iqk_times++;
	iqk_info->version = RTW8852B_IQK_VER;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
	_iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);

	/* backup -> setup -> calibrate -> restore, strictly in this order */
	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
	_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0])
;
	_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
1627 
/* Dispatch IQK to the paths covered by @phy_idx: path A, path B, or
 * both in A-then-B order. Unknown kpath values do nothing.
 */
static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
		 enum rtw89_chanctx_idx chanctx_idx)
{
	u8 kpath = _kpath(rtwdev, phy_idx);

	if (kpath == RF_A || kpath == RF_AB)
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);

	if (kpath == RF_B || kpath == RF_AB)
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
}
1648 
/* Save the @path-offset copies of the KIP registers listed in @reg
 * into @reg_bkup so _dpk_reload_kip() can restore them after DPK.
 */
static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[],
			  u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path)
{
	u8 idx;

	for (idx = 0; idx < RTW8852B_DPK_KIP_REG_NUM; idx++) {
		u32 addr = reg[idx] + (path << 8);

		reg_bkup[path][idx] = rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
			    addr, reg_bkup[path][idx]);
	}
}
1661 
/* Restore the per-path KIP registers previously saved by _dpk_bkup_kip().
 * Per-path register stride is 0x100 (path << 8).
 */
static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[],
			    const u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path)
{
	u32 addr;
	u8 idx;

	for (idx = 0; idx < RTW8852B_DPK_KIP_REG_NUM; idx++) {
		addr = reg[idx] + (path << 8);
		rtw89_phy_write32_mask(rtwdev, addr, MASKDWORD, reg_bkup[path][idx]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
			    addr, reg_bkup[path][idx]);
	}
}
1674 
_dpk_order_convert(struct rtw89_dev * rtwdev)1675 static u8 _dpk_order_convert(struct rtw89_dev *rtwdev)
1676 {
1677 	u8 order;
1678 	u8 val;
1679 
1680 	order = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP);
1681 	val = 0x3 >> order;
1682 
1683 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val);
1684 
1685 	return val;
1686 }
1687 
/* Enable or disable DPD compensation for the current DPK slot of @path.
 * The enable bit is only set when DPK is globally enabled, not forced
 * @off, and the last calibration for this slot succeeded (path_ok).
 */
static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 val, kidx = dpk->cur_idx[path];

	val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;

	/* Pack the converted MDPD order above the enable bit into byte 3
	 * of the per-path/per-slot DPD control register.
	 */
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       MASKBYTE3, _dpk_order_convert(rtwdev) << 1 | val);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
		    kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
}
1701 
_dpk_one_shot(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,enum rtw8852b_dpk_id id)1702 static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
1703 			  enum rtw89_rf_path path, enum rtw8852b_dpk_id id)
1704 {
1705 	u16 dpk_cmd;
1706 	u32 val;
1707 	int ret;
1708 
1709 	dpk_cmd = (id << 8) | (0x19 + (path << 4));
1710 	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
1711 
1712 	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
1713 				       1, 20000, false,
1714 				       rtwdev, 0xbff8, MASKBYTE0);
1715 	if (ret)
1716 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 20ms!!!!\n");
1717 
1718 	udelay(1);
1719 
1720 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00030000);
1721 
1722 	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
1723 				       1, 2000, false,
1724 				       rtwdev, 0x80fc, MASKLWORD);
1725 	if (ret)
1726 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 20ms!!!!\n");
1727 
1728 	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);
1729 
1730 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1731 		    "[DPK] one-shot for %s = 0x%x\n",
1732 		    id == 0x06 ? "LBK_RXIQK" :
1733 		    id == 0x10 ? "SYNC" :
1734 		    id == 0x11 ? "MDPK_IDL" :
1735 		    id == 0x12 ? "MDPK_MPA" :
1736 		    id == 0x13 ? "GAIN_LOSS" :
1737 		    id == 0x14 ? "PWR_CAL" :
1738 		    id == 0x15 ? "DPK_RXAGC" :
1739 		    id == 0x16 ? "KIP_PRESET" :
1740 		    id == 0x17 ? "KIP_RESTORE" : "DPK_TXAGC",
1741 		    dpk_cmd);
1742 }
1743 
/* Re-run RX DC offset calibration before DPK uses the RX chain.
 * RR_EN_TIA_IDA is set to 0x3 first — presumably enabling both TIA and
 * IDA stages for the calibration; confirm against the RF spec.
 */
static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
	_set_rx_dck(rtwdev, phy, path);
}
1750 
/* Record the current band/channel/bandwidth into the DPK backup slot for
 * @path, so _dpk_reload_check() can later match a previous calibration.
 */
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	u8 kidx = dpk->cur_idx[path];

	dpk->bp[path][kidx].band = chan->band_type;
	dpk->bp[path][kidx].ch = chan->channel;
	dpk->bp[path][kidx].bw = chan->band_width;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
		    path, dpk->cur_idx[path], phy,
		    rtwdev->is_tssi_mode[path] ? "on" : "off",
		    rtwdev->dbcc_en ? "on" : "off",
		    dpk->bp[path][kidx].band == 0 ? "2G" :
		    dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
		    dpk->bp[path][kidx].ch,
		    dpk->bp[path][kidx].bw == 0 ? "20M" :
		    dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
}
1774 
/* Apply the BB/AFE register table needed for DPK; for 80 MHz channels two
 * extra bandwidth-extension bits are set on top of the table.
 * @path and @kpath are only used for the debug trace here.
 */
static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath,
				enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_defs_tbl);

	if (chan->band_width == RTW89_CHANNEL_WIDTH_80) {
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, B_PATH1_BW_SEL_EX, 0x1);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
}
1792 
/* Undo _dpk_bb_afe_setting(): replay the restore table and clear the
 * 80 MHz bandwidth-extension bits when applicable.
 * @path and @kpath are only used for the debug trace here.
 */
static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath,
				enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_restore_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);

	if (chan->band_width == RTW89_CHANNEL_WIDTH_80) {
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x0);
		/* drop stray double space before B_PATH1_BW_SEL_EX */
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, B_PATH1_BW_SEL_EX, 0x0);
	}
}
1810 
/* Pause (@is_pause true) or resume TSSI tracking on @path while DPK runs;
 * the per-path register stride is 0x2000 (path << 13).
 */
static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
			    enum rtw89_rf_path path, bool is_pause)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
			       B_P0_TSSI_TRK_EN, is_pause);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
		    is_pause ? "pause" : "resume");
}
1820 
/* Restore the KIP block to its default state after DPK; chips newer than
 * CAV additionally need the B_DPD_COM_OF bit set.
 */
static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path)
{
	rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_kip_defs_tbl);

	if (rtwdev->hal.cv > CHIP_CAV)
		rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), B_DPD_COM_OF, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
}
1831 
/* Run a loopback RX IQ calibration for DPK. The RF/PHY write sequence is
 * order-sensitive; keep it as-is. The TX attenuation is picked from the
 * current RXBB gain, and RR_MOD is switched to mode 0xd for the loopback
 * and back to 0x5 afterwards.
 */
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path)
{
	u8 cur_rxbb;
	u32 tmp;

	cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);

	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0);

	/* save the current channel config in RR_RSV4 before retuning */
	tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);

	/* TX attenuation scaled to the current RXBB gain */
	if (cur_rxbb >= 0x11)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13);
	else if (cur_rxbb <= 0xa)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00);
	else
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05);

	rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014);
	/* allow the RXK PLL to settle before triggering the one-shot */
	udelay(70);

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025);

	_dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
		    rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));

	/* tear down the loopback setup and return to normal RX mode */
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5);
}
1875 
/* Sample the RF thermal sensor and store the reading alongside the DPK
 * result so later thermal tracking can compensate against it.
 */
static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	/* 1-0-1 toggle of RR_TM_TRI — presumably retriggers the thermal
	 * measurement; confirm against the RF spec.
	 */
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x0);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);

	/* give the sensor time to produce a stable value */
	udelay(200);

	dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, path, RR_TM, RR_TM_VAL);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
		    dpk->bp[path][kidx].ther_dpk);
}
1891 
/* Configure the RF front end for DPK measurement; 2 GHz and 5/6 GHz use
 * different attenuation/LNA settings. @gain is currently unused here.
 */
static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
			    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
	} else {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
		rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC);
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0);
		rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800);
	}

	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
	/* TX BB filter follows the stored bandwidth (bw enum + 1) */
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ARF 0x0/0x11/0x1a = 0x%x/ 0x%x/ 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK));
}
1922 
/* Bypass (@is_bypass true) or re-enable the RX CFIR compensation filter
 * for @path, tracing the resulting RXIQC register value either way.
 */
static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path path, bool is_bypass)
{
	u32 addr = R_RXIQC + (path << 8);

	if (!is_bypass) {
		rtw89_phy_write32_clr(rtwdev, addr, B_RXIQC_BYPASS2);
		rtw89_phy_write32_clr(rtwdev, addr, B_RXIQC_BYPASS);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path,
			    rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
		return;
	}

	rtw89_phy_write32_mask(rtwdev, addr, B_RXIQC_BYPASS2, 0x1);
	rtw89_phy_write32_mask(rtwdev, addr, B_RXIQC_BYPASS, 0x1);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path,
		    rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
}
1944 
/* Select the tone-pattern generator mode matching the calibrated
 * bandwidth: 80M clears B_TPG_MOD_F, 40M writes 0x2, 20M writes 0x1.
 */
static
void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
		rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F);
	else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
	else
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
}
1961 
/* Point the DPD engine at the LUT slot for (@kidx, @gain):
 * base 0x80, 0x20 per kidx, 0x10 per gain step.
 */
static void _dpk_table_select(struct rtw89_dev *rtwdev,
			      enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	u8 sel = 0x80 + kidx * 0x20 + gain * 0x10;

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, sel);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
		    gain, sel);
}
1973 
_dpk_sync_check(struct rtw89_dev * rtwdev,enum rtw89_rf_path path,u8 kidx)1974 static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
1975 {
1976 #define DPK_SYNC_TH_DC_I 200
1977 #define DPK_SYNC_TH_DC_Q 200
1978 #define DPK_SYNC_TH_CORR 170
1979 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
1980 	u16 dc_i, dc_q;
1981 	u8 corr_val, corr_idx;
1982 
1983 	rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
1984 
1985 	corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
1986 	corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);
1987 
1988 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1989 		    "[DPK] S%d Corr_idx / Corr_val = %d / %d\n",
1990 		    path, corr_idx, corr_val);
1991 
1992 	dpk->corr_idx[path][kidx] = corr_idx;
1993 	dpk->corr_val[path][kidx] = corr_val;
1994 
1995 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
1996 
1997 	dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
1998 	dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
1999 
2000 	dc_i = abs(sign_extend32(dc_i, 11));
2001 	dc_q = abs(sign_extend32(dc_q, 11));
2002 
2003 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
2004 		    path, dc_i, dc_q);
2005 
2006 	dpk->dc_i[path][kidx] = dc_i;
2007 	dpk->dc_q[path][kidx] = dc_q;
2008 
2009 	if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
2010 	    corr_val < DPK_SYNC_TH_CORR)
2011 		return true;
2012 	else
2013 		return false;
2014 }
2015 
/* Trigger the SYNC one-shot stage and return true when it failed
 * (per _dpk_sync_check() thresholds).
 */
static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, SYNC);

	return _dpk_sync_check(rtwdev, path, kidx);
}
2023 
/* Read the digital gain report (report page 0 of R_KIP_RPT1). */
static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
{
	u16 dgain;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);

	dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain);

	return dgain;
}
2036 
_dpk_dgain_mapping(struct rtw89_dev * rtwdev,u16 dgain)2037 static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
2038 {
2039 	static const u16 bnd[15] = {
2040 		0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556,
2041 		0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220
2042 	};
2043 	s8 offset;
2044 
2045 	if (dgain >= bnd[0])
2046 		offset = 0x6;
2047 	else if (bnd[0] > dgain && dgain >= bnd[1])
2048 		offset = 0x6;
2049 	else if (bnd[1] > dgain && dgain >= bnd[2])
2050 		offset = 0x5;
2051 	else if (bnd[2] > dgain && dgain >= bnd[3])
2052 		offset = 0x4;
2053 	else if (bnd[3] > dgain && dgain >= bnd[4])
2054 		offset = 0x3;
2055 	else if (bnd[4] > dgain && dgain >= bnd[5])
2056 		offset = 0x2;
2057 	else if (bnd[5] > dgain && dgain >= bnd[6])
2058 		offset = 0x1;
2059 	else if (bnd[6] > dgain && dgain >= bnd[7])
2060 		offset = 0x0;
2061 	else if (bnd[7] > dgain && dgain >= bnd[8])
2062 		offset = 0xff;
2063 	else if (bnd[8] > dgain && dgain >= bnd[9])
2064 		offset = 0xfe;
2065 	else if (bnd[9] > dgain && dgain >= bnd[10])
2066 		offset = 0xfd;
2067 	else if (bnd[10] > dgain && dgain >= bnd[11])
2068 		offset = 0xfc;
2069 	else if (bnd[11] > dgain && dgain >= bnd[12])
2070 		offset = 0xfb;
2071 	else if (bnd[12] > dgain && dgain >= bnd[13])
2072 		offset = 0xfa;
2073 	else if (bnd[13] > dgain && dgain >= bnd[14])
2074 		offset = 0xf9;
2075 	else if (bnd[14] > dgain)
2076 		offset = 0xf8;
2077 	else
2078 		offset = 0x0;
2079 
2080 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain offset = %d\n", offset);
2081 
2082 	return offset;
2083 }
2084 
/* Select report page 6 and read back the gain-loss index. */
static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);

	return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
}
2092 
/* Run the GAIN_LOSS one-shot stage against the gain-1 LUT slot. */
static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, u8 kidx)
{
	_dpk_table_select(rtwdev, path, kidx, 1);
	_dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
}
2099 
/* Prepare the KIP engine: pick the TPG mode for the slot's bandwidth and
 * fire the KIP_PRESET one-shot stage.
 */
static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path, u8 kidx)
{
	_dpk_tpg_sel(rtwdev, path, kidx);
	_dpk_one_shot(rtwdev, phy, path, KIP_PRESET);
}
2106 
/* Power up the KIP block and enable its clocks for @path. */
static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev,
				enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n");
}
2116 
/* Program @txagc into the RF TXAGC register and latch it into the KIP
 * engine via the DPK_TXAGC one-shot, with RFC trigger-mode framed around
 * the one-shot.
 */
static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path, u8 txagc)
{
	rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, txagc);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_TXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc);
}
2127 
/* Mirror the current RF mode register into the KIP and run the DPK_RXAGC
 * one-shot so the engine picks up the RX gain; then select report page 8
 * and trace the resulting RXBB value.
 */
static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path)
{
	u32 tmp;

	tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, tmp);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_RXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL_V1, 0x8);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] set RXBB = 0x%x (RF0x0[9:5] = 0x%x)\n",
		    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB_V1),
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB));
}
2145 
/* Lower the current TXAGC by @gain_offset (negative offsets raise it),
 * clamped to [DPK_TXAGC_LOWER, DPK_TXAGC_UPPER], and program the result.
 * Returns the TXAGC actually applied. The u8/s8 operands are promoted to
 * int for the comparisons, so negative intermediate values behave as
 * expected.
 */
static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, s8 gain_offset)
{
	u8 txagc;

	txagc = rtw89_read_rf(rtwdev, path, RR_TXAGC, RFREG_MASK);

	if (txagc - gain_offset < DPK_TXAGC_LOWER)
		txagc = DPK_TXAGC_LOWER;
	else if (txagc - gain_offset > DPK_TXAGC_UPPER)
		txagc = DPK_TXAGC_UPPER;
	else
		txagc = txagc - gain_offset;

	_dpk_kip_set_txagc(rtwdev, phy, path, txagc);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
		    gain_offset, txagc);
	return txagc;
}
2166 
/* Read the PA-scan report. With @is_check set, sample entries 0 and 0x1f
 * and return true when the entry-0 power exceeds 1.6x the entry-0x1f
 * power (12-bit signed I/Q magnitudes). Without @is_check, just dump all
 * 32 entries; the vals then stay 0, so the function returns true
 * (0 >= 0) — callers in this file only use the return value with
 * @is_check == true.
 */
static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
{
	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);

	if (is_check) {
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val1_i = abs(sign_extend32(val1_i, 11));
		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val1_q = abs(sign_extend32(val1_q, 11));

		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val2_i = abs(sign_extend32(val2_i, 11));
		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val2_q = abs(sign_extend32(val2_q, 11));

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
			    phy_div(val1_i * val1_i + val1_q * val1_q,
				    val2_i * val2_i + val2_q * val2_q));
	} else {
		for (i = 0; i < 32; i++) {
			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
		}
	}

	/* power(entry 0) >= 1.6 * power(entry 0x1f) */
	if (val1_i * val1_i + val1_q * val1_q >=
	    (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
		return true;

	return false;
}
2207 
/* DPK AGC state machine: alternate SYNC/dgain readback, RXBB gain
 * adjustment and gain-loss measurement until a usable TXAGC is found for
 * @path/@kidx. Returns the final TXAGC, or 0xff when SYNC failed.
 */
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
		   bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
	u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
	u8 goout = 0, agc_cnt = 0, limited_rxbb = 0;
	u16 dgain = 0;
	s8 offset;
	int limit = 200; /* hard iteration cap as a safety net */

	tmp_txagc = init_txagc;

	do {
		switch (step) {
		case DPK_AGC_STEP_SYNC_DGAIN:
			/* SYNC failure aborts the whole AGC with 0xff */
			if (_dpk_sync(rtwdev, phy, path, kidx)) {
				tmp_txagc = 0xff;
				goout = 1;
				break;
			}

			dgain = _dpk_dgain_read(rtwdev);

			/* skip RXBB adjustment once it has hit a limit */
			if (loss_only == 1 || limited_rxbb == 1)
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			else
				step = DPK_AGC_STEP_GAIN_ADJ;
			break;

		case DPK_AGC_STEP_GAIN_ADJ:
			tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD,
						 RFREG_MASKRXBB);
			offset = _dpk_dgain_mapping(rtwdev, dgain);

			/* clamp RXBB to [0, 0x1f]; hitting a bound stops
			 * further adjustment rounds
			 */
			if (tmp_rxbb + offset > 0x1f) {
				tmp_rxbb = 0x1f;
				limited_rxbb = 1;
			} else if (tmp_rxbb + offset < 0) {
				tmp_rxbb = 0;
				limited_rxbb = 1;
			} else {
				tmp_rxbb = tmp_rxbb + offset;
			}

			rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB,
				       tmp_rxbb);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb);
			if (offset || agc_cnt == 0) {
				if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
					_dpk_bypass_rxcfir(rtwdev, path, true);
				else
					_dpk_lbk_rxiqk(rtwdev, phy, path);
			}
			/* dgain way out of range: resync before measuring */
			if (dgain > 1922 || dgain < 342)
				step = DPK_AGC_STEP_SYNC_DGAIN;
			else
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;

			agc_cnt++;
			break;

		case DPK_AGC_STEP_GAIN_LOSS_IDX:
			_dpk_gainloss(rtwdev, phy, path, kidx);
			tmp_gl_idx = _dpk_gainloss_read(rtwdev);

			if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
			    tmp_gl_idx >= 7)
				step = DPK_AGC_STEP_GL_GT_CRITERION;
			else if (tmp_gl_idx == 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else
				step = DPK_AGC_STEP_SET_TX_GAIN;
			break;

		case DPK_AGC_STEP_GL_GT_CRITERION:
			/* too much gain loss: back TXAGC off by 3 unless
			 * already at the lower bound (DPK_TXAGC_LOWER)
			 */
			if (tmp_txagc == 0x2e) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@lower bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0x3);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_GL_LT_CRITERION:
			/* too little gain loss: raise TXAGC by 2 (offset
			 * 0xfe == -2) unless at the upper bound
			 */
			if (tmp_txagc == 0x3f) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@upper bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0xfe);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;
		case DPK_AGC_STEP_SET_TX_GAIN:
			tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_gl_idx);
			goout = 1;
			agc_cnt++;
			break;

		default:
			goout = 1;
			break;
		}
	} while (!goout && agc_cnt < 6 && limit-- > 0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc,
		    tmp_rxbb);

	return tmp_txagc;
}
2326 
/* Program the MDPD order for the IDL stage. Order 0 additionally enables
 * the manual-sync mode with a PN setting of 0x3; orders 1 and 2 share the
 * exact same register writes, so they are grouped into one case.
 */
static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
{
	switch (order) {
	case 0:
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
		break;
	case 1:
	case 2:
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
		rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Wrong MDPD order!!(0x%x)\n", order);
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set MDPD order to 0x%x for IDL\n", order);
}
2354 
/* Run the MDPK IDL stage: 5G channels narrower than 80 MHz use MDPD
 * order 2, everything else order 0. @gain is currently unused here.
 */
static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80 &&
	    dpk->bp[path][kidx].band == RTW89_BAND_5G)
		_dpk_set_mdpd_para(rtwdev, 0x2);
	else
		_dpk_set_mdpd_para(rtwdev, 0x0);

	_dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
}
2368 
/* Commit the DPK result for @path/@kidx: store txagc/pwsf/gs in the
 * backup slot and program them into the per-path coefficient registers,
 * then pulse the coefficient-load strobe.
 */
static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	const u16 pwsf = 0x78;
	u8 gs = dpk->dpk_gs[phy];

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
			       B_COEF_SEL_MDPD, kidx);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc,
		    pwsf, gs);

	/* 6-bit txagc field position depends on gain and kidx */
	dpk->bp[path][kidx].txagc_dpk = txagc;
	rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
			       0x3F << ((gain << 3) + (kidx << 4)), txagc);

	/* 9-bit pwsf field position depends on gain */
	dpk->bp[path][kidx].pwsf = pwsf;
	rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
			       0x1FF << (gain << 4), pwsf);

	/* pulse the coefficient-load strobe (1 then 0) */
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);

	dpk->bp[path][kidx].gs = gs;
	if (dpk->dpk_gs[phy] == 0x7f)
		rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
				       MASKDWORD, 0x007f7f7f);
	else
		rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
				       MASKDWORD, 0x005b5b5b);

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       B_DPD_ORDER_V1, _dpk_order_convert(rtwdev));
	rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, 0x0);
}
2407 
/* Check whether a stored DPK result matches the current band/channel.
 * On a match, select that coefficient slot and make it the current index;
 * returns true when a previous calibration could be reused. The loop
 * deliberately scans all slots, so the last matching slot wins.
 */
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			      enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	bool is_reload = false;
	u8 idx, cur_band, cur_ch;

	cur_band = chan->band_type;
	cur_ch = chan->channel;

	for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
		if (cur_band != dpk->bp[path][idx].band ||
		    cur_ch != dpk->bp[path][idx].ch)
			continue;

		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
				       B_COEF_SEL_MDPD, idx);
		dpk->cur_idx[path] = idx;
		is_reload = true;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] reload S%d[%d] success\n", path, idx);
	}

	return is_reload;
}
2434 
_dpk_main(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,u8 gain,enum rtw89_chanctx_idx chanctx_idx)2435 static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2436 		      enum rtw89_rf_path path, u8 gain,
2437 		      enum rtw89_chanctx_idx chanctx_idx)
2438 {
2439 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2440 	u8 txagc = 0x38, kidx = dpk->cur_idx[path];
2441 	bool is_fail = false;
2442 
2443 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2444 		    "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
2445 
2446 	_rfk_rf_direct_cntrl(rtwdev, path, false);
2447 	_rfk_drf_direct_cntrl(rtwdev, path, false);
2448 
2449 	_dpk_kip_pwr_clk_on(rtwdev, path);
2450 	_dpk_kip_set_txagc(rtwdev, phy, path, txagc);
2451 	_dpk_rf_setting(rtwdev, gain, path, kidx);
2452 	_dpk_rx_dck(rtwdev, phy, path);
2453 
2454 	_dpk_kip_preset(rtwdev, phy, path, kidx);
2455 	_dpk_kip_set_rxagc(rtwdev, phy, path);
2456 	_dpk_table_select(rtwdev, path, kidx, gain);
2457 
2458 	txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
2459 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust txagc = 0x%x\n", txagc);
2460 
2461 	if (txagc == 0xff) {
2462 		is_fail = true;
2463 	} else {
2464 		_dpk_get_thermal(rtwdev, kidx, path);
2465 
2466 		_dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
2467 
2468 		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
2469 
2470 		_dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc);
2471 	}
2472 
2473 	if (!is_fail)
2474 		dpk->bp[path][kidx].path_ok = true;
2475 	else
2476 		dpk->bp[path][kidx].path_ok = false;
2477 
2478 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
2479 		    is_fail ? "Check" : "Success");
2480 
2481 	return is_fail;
2482 }
2483 
/* Run the full DPK calibration flow for all paths of @phy.
 *
 * Backs up BB/RF/KIP registers, calibrates each path via _dpk_main(), then
 * restores everything.  TSSI tracking is paused around the calibration for
 * any path currently in TSSI mode.
 *
 * NOTE(review): @force is not referenced in this body — presumably consumed
 * by a future/other variant of the flow; confirm before removing.
 */
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
			    enum rtw89_phy_idx phy, u8 kpath,
			    enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120};
	u32 kip_bkup[RTW8852B_DPK_RF_PATH][RTW8852B_DPK_KIP_REG_NUM] = {};
	u32 backup_rf_val[RTW8852B_DPK_RF_PATH][BACKUP_RF_REGS_NR];
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	bool is_fail = true, reloaded[RTW8852B_DPK_RF_PATH] = {};
	u8 path;

	/* With reload enabled, first try to reuse a previous calibration for
	 * the current channel; otherwise flip to the other backup slot (when
	 * one was ever used) so the old result is kept.
	 */
	if (dpk->is_dpk_reload_en) {
		for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
			reloaded[path] = _dpk_reload_check(rtwdev, phy, path,
							   chanctx_idx);
			if (!reloaded[path] && dpk->bp[path][0].ch)
				dpk->cur_idx[path] = !dpk->cur_idx[path];
			else
				_dpk_onoff(rtwdev, path, false);
		}
	} else {
		for (path = 0; path < RTW8852B_DPK_RF_PATH; path++)
			dpk->cur_idx[path] = 0;
	}

	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);

	/* Per-path backup and preparation before the shared AFE setup. */
	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		_dpk_information(rtwdev, phy, path, chanctx_idx);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, true);
	}

	/* NOTE(review): path equals RTW8852B_DPK_RF_PATH here (loop exit
	 * value) — presumably the helper keys off @kpath instead; confirm.
	 */
	_dpk_bb_afe_setting(rtwdev, phy, path, kpath, chanctx_idx);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		is_fail = _dpk_main(rtwdev, phy, path, 1, chanctx_idx);
		_dpk_onoff(rtwdev, path, is_fail);
	}

	_dpk_bb_afe_restore(rtwdev, phy, path, kpath, chanctx_idx);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);

	/* Undo the per-path backups and resume TSSI where it was paused. */
	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		_dpk_kip_restore(rtwdev, path);
		_dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, false);
	}
}
2538 
_dpk_bypass_check(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_chanctx_idx chanctx_idx)2539 static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2540 			      enum rtw89_chanctx_idx chanctx_idx)
2541 {
2542 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
2543 	struct rtw89_fem_info *fem = &rtwdev->fem;
2544 
2545 	if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
2546 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2547 			    "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
2548 		return true;
2549 	} else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
2550 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2551 			    "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
2552 		return true;
2553 	} else if (fem->epa_6g && chan->band_type == RTW89_BAND_6G) {
2554 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2555 			    "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
2556 		return true;
2557 	}
2558 
2559 	return false;
2560 }
2561 
_dpk_force_bypass(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy)2562 static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2563 {
2564 	u8 path, kpath;
2565 
2566 	kpath = _kpath(rtwdev, phy);
2567 
2568 	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
2569 		if (kpath & BIT(path))
2570 			_dpk_onoff(rtwdev, path, true);
2571 	}
2572 }
2573 
/* Entry point for DPK: either force-bypass (external PA present) or run
 * the calibration flow on both paths.
 */
static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force,
		 enum rtw89_chanctx_idx chanctx_idx)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
		    RTW8852B_DPK_VER, rtwdev->hal.cv,
		    RTW8852B_RF_REL_VERSION);

	if (_dpk_bypass_check(rtwdev, phy, chanctx_idx)) {
		_dpk_force_bypass(rtwdev, phy);
		return;
	}

	_dpk_cal_select(rtwdev, force, phy, RF_AB, chanctx_idx);
}
2587 
/* Periodic DPK thermal tracking.
 *
 * For each path, compare the current averaged thermal reading against the
 * value recorded at calibration time (ther_dpk) and derive new power
 * scaling factors (pwsf) which are written into the R_DPD_BND registers,
 * unless tracking is disabled via R_DPK_TRK or the TX AGC reads back as 0.
 */
static void _dpk_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst;
	s8 delta_ther[2] = {};
	u8 trk_idx, txagc_rf;
	u8 path, kidx;
	u16 pwsf[2];
	u8 cur_ther;
	u32 tmp;

	for (path = 0; path < RF_PATH_NUM_8852B; path++) {
		kidx = dpk->cur_idx[path];

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
			    path, kidx, dpk->bp[path][kidx].ch);

		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] thermal now = %d\n", cur_ther);

		/* Only track when this slot was calibrated (ch != 0) and a
		 * thermal reading is available.
		 */
		if (dpk->bp[path][kidx].ch && cur_ther)
			delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;

		/* Band-dependent scaling of the thermal delta: x1.5 on 2G,
		 * x2.5 otherwise.
		 */
		if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
			delta_ther[path] = delta_ther[path] * 3 / 2;
		else
			delta_ther[path] = delta_ther[path] * 5 / 2;

		txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						 0x0000003f);

		if (rtwdev->is_tssi_mode[path]) {
			trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
				    txagc_rf, trk_idx);

			txagc_bb =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE2);
			txagc_bb_tp =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13),
						      B_TXAGC_TP);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
				    txagc_bb_tp, txagc_bb);

			txagc_ofst =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE3);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
				    txagc_ofst, delta_ther[path]);
			/* When HW applies the txagc offset itself, ignore the
			 * software offset.
			 */
			tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
						    B_DPD_COM_OF);
			if (tmp == 0x1) {
				txagc_ofst = 0;
				rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
					    "[DPK_TRK] HW txagc offset mode\n");
			}

			if (txagc_rf && cur_ther)
				ini_diff = txagc_ofst + (delta_ther[path]);

			/* B_P0_TXDPD == 0: fold the BB txagc difference into
			 * the pwsf as well.
			 */
			tmp = rtw89_phy_read32_mask(rtwdev,
						    R_P0_TXDPD + (path << 13),
						    B_P0_TXDPD);
			if (tmp == 0x0) {
				pwsf[0] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
			} else {
				pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff;
			}

		} else {
			/* Non-TSSI mode: apply the thermal delta directly,
			 * wrapped to 9 bits.
			 */
			pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
			pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
		}

		tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS);
		if (!tmp && txagc_rf) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
				    pwsf[0], pwsf[1]);

			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_0, pwsf[0]);
			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_1, pwsf[1]);
		}
	}
}
2691 
_set_dpd_backoff(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy)2692 static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2693 {
2694 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2695 	u8 tx_scale, ofdm_bkof, path, kpath;
2696 
2697 	kpath = _kpath(rtwdev, phy);
2698 
2699 	ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM);
2700 	tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA);
2701 
2702 	if (ofdm_bkof + tx_scale >= 44) {
2703 		/* move dpd backoff to bb, and set dpd backoff to 0 */
2704 		dpk->dpk_gs[phy] = 0x7f;
2705 		for (path = 0; path < RF_PATH_NUM_8852B; path++) {
2706 			if (!(kpath & BIT(path)))
2707 				continue;
2708 
2709 			rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8),
2710 					       B_DPD_CFG, 0x7f7f7f);
2711 			rtw89_debug(rtwdev, RTW89_DBG_RFK,
2712 				    "[RFK] Set S%d DPD backoff to 0dB\n", path);
2713 		}
2714 	} else {
2715 		dpk->dpk_gs[phy] = 0x5b;
2716 	}
2717 }
2718 
_tssi_rf_setting(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)2719 static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2720 			     enum rtw89_rf_path path, const struct rtw89_chan *chan)
2721 {
2722 	enum rtw89_band band = chan->band_type;
2723 
2724 	if (band == RTW89_BAND_2G)
2725 		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
2726 	else
2727 		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
2728 }
2729 
_tssi_set_sys(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)2730 static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2731 			  enum rtw89_rf_path path, const struct rtw89_chan *chan)
2732 {
2733 	enum rtw89_band band = chan->band_type;
2734 
2735 	rtw89_rfk_parser(rtwdev, &rtw8852b_tssi_sys_defs_tbl);
2736 
2737 	if (path == RF_PATH_A)
2738 		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2739 					 &rtw8852b_tssi_sys_a_defs_2g_tbl,
2740 					 &rtw8852b_tssi_sys_a_defs_5g_tbl);
2741 	else
2742 		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2743 					 &rtw8852b_tssi_sys_b_defs_2g_tbl,
2744 					 &rtw8852b_tssi_sys_b_defs_5g_tbl);
2745 }
2746 
_tssi_ini_txpwr_ctrl_bb(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2747 static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev,
2748 				    enum rtw89_phy_idx phy,
2749 				    enum rtw89_rf_path path)
2750 {
2751 	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2752 				 &rtw8852b_tssi_init_txpwr_defs_a_tbl,
2753 				 &rtw8852b_tssi_init_txpwr_defs_b_tbl);
2754 }
2755 
_tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2756 static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
2757 					  enum rtw89_phy_idx phy,
2758 					  enum rtw89_rf_path path)
2759 {
2760 	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2761 				 &rtw8852b_tssi_init_txpwr_he_tb_defs_a_tbl,
2762 				 &rtw8852b_tssi_init_txpwr_he_tb_defs_b_tbl);
2763 }
2764 
_tssi_set_dck(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2765 static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2766 			  enum rtw89_rf_path path)
2767 {
2768 	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2769 				 &rtw8852b_tssi_dck_defs_a_tbl,
2770 				 &rtw8852b_tssi_dck_defs_b_tbl);
2771 }
2772 
/* Program the per-path 64-entry thermal-offset table used by TSSI tracking.
 *
 * The swing-index deltas for the current subband are taken from
 * rtw89_8852b_trk_cfg; entries 0..31 hold negated "down" deltas and
 * entries 63..32 hold "up" deltas.  When no thermal reading is available
 * (0xff) the table is zeroed and the thermal meter defaults to 32.
 */
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
/* Pack four consecutive s8 entries of @ptr (starting at @idx) into one
 * little-endian u32 register word.
 */
#define RTW8852B_TSSI_GET_VAL(ptr, idx)			\
({							\
	s8 *__ptr = (ptr);				\
	u8 __idx = (idx), __i, __v;			\
	u32 __val = 0;					\
	for (__i = 0; __i < 4; __i++) {			\
		__v = (__ptr[__idx + __i]);		\
		__val |= (__v << (8 * __i));		\
	}						\
	__val;						\
})
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	u8 subband = chan->subband_type;
	const s8 *thm_up_a = NULL;
	const s8 *thm_down_a = NULL;
	const s8 *thm_up_b = NULL;
	const s8 *thm_down_b = NULL;
	u8 thermal = 0xff;
	s8 thm_ofst[64] = {0};
	u32 tmp = 0;
	u8 i, j;

	/* Select the subband-specific swing-index delta tables. */
	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_2ga_p;
		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_2ga_n;
		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_2gb_p;
		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_2gb_n;
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[0];
		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[0];
		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[0];
		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[1];
		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[1];
		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[1];
		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[2];
		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[2];
		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[2];
		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[2];
		break;
	}

	if (path == RF_PATH_A) {
		thermal = tssi_info->thermal[RF_PATH_A];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			/* No stored thermal reading: mid-scale meter, zero table. */
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    R_P0_TSSI_BASE + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
					       thermal);

			/* Lower half: negated "down" deltas, clamped at the
			 * last table entry.
			 */
			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_a[i++] :
					      -thm_down_a[DELTA_SWINGIDX_SIZE - 1];

			/* Upper half (filled backwards): "up" deltas. */
			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_a[i++] :
					      thm_up_a[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852B_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, tmp);
			}
		}
		/* Pulse the "table ready" strobe. */
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);

	} else {
		thermal = tssi_info->thermal[RF_PATH_B];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_b[i++] :
					      -thm_down_b[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_b[i++] :
					      thm_up_b[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852B_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, tmp);
			}
		}
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
	}
#undef RTW8852B_TSSI_GET_VAL
}
2929 
_tssi_set_dac_gain_tbl(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2930 static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2931 				   enum rtw89_rf_path path)
2932 {
2933 	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2934 				 &rtw8852b_tssi_dac_gain_defs_a_tbl,
2935 				 &rtw8852b_tssi_dac_gain_defs_b_tbl);
2936 }
2937 
_tssi_slope_cal_org(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)2938 static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2939 				enum rtw89_rf_path path, const struct rtw89_chan *chan)
2940 {
2941 	enum rtw89_band band = chan->band_type;
2942 
2943 	if (path == RF_PATH_A)
2944 		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2945 					 &rtw8852b_tssi_slope_a_defs_2g_tbl,
2946 					 &rtw8852b_tssi_slope_a_defs_5g_tbl);
2947 	else
2948 		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2949 					 &rtw8852b_tssi_slope_b_defs_2g_tbl,
2950 					 &rtw8852b_tssi_slope_b_defs_5g_tbl);
2951 }
2952 
/* Load the default TSSI alignment table for this path/band/channel.
 *
 * @all selects the full table, otherwise the partial one.  Channels that
 * fall in none of the known ranges leave @tbl NULL and nothing is loaded.
 */
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path, bool all,
				    const struct rtw89_chan *chan)
{
	enum rtw89_band band = chan->band_type;
	const struct rtw89_rfk_tbl *tbl = NULL;
	u8 ch = chan->channel;

	if (path == RF_PATH_A) {
		if (band == RTW89_BAND_2G) {
			if (all)
				tbl = &rtw8852b_tssi_align_a_2g_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_a_2g_part_defs_tbl;
		} else if (ch >= 36 && ch <= 64) {
			if (all)
				tbl = &rtw8852b_tssi_align_a_5g1_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_a_5g1_part_defs_tbl;
		} else if (ch >= 100 && ch <= 144) {
			if (all)
				tbl = &rtw8852b_tssi_align_a_5g2_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_a_5g2_part_defs_tbl;
		} else if (ch >= 149 && ch <= 177) {
			if (all)
				tbl = &rtw8852b_tssi_align_a_5g3_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_a_5g3_part_defs_tbl;
		}
	} else {
		/* Use the band test here for consistency with the RF_PATH_A
		 * branch (the previous ch >= 1 && ch <= 14 check covered the
		 * same 2.4 GHz channels).
		 */
		if (band == RTW89_BAND_2G) {
			if (all)
				tbl = &rtw8852b_tssi_align_b_2g_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_b_2g_part_defs_tbl;
		} else if (ch >= 36 && ch <= 64) {
			if (all)
				tbl = &rtw8852b_tssi_align_b_5g1_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_b_5g1_part_defs_tbl;
		} else if (ch >= 100 && ch <= 144) {
			if (all)
				tbl = &rtw8852b_tssi_align_b_5g2_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_b_5g2_part_defs_tbl;
		} else if (ch >= 149 && ch <= 177) {
			if (all)
				tbl = &rtw8852b_tssi_align_b_5g3_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_b_5g3_part_defs_tbl;
		}
	}

	if (tbl)
		rtw89_rfk_parser(rtwdev, tbl);
}
3010 
_tssi_set_tssi_slope(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)3011 static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3012 				 enum rtw89_rf_path path)
3013 {
3014 	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3015 				 &rtw8852b_tssi_slope_defs_a_tbl,
3016 				 &rtw8852b_tssi_slope_defs_b_tbl);
3017 }
3018 
_tssi_set_tssi_track(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)3019 static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3020 				 enum rtw89_rf_path path)
3021 {
3022 	if (path == RF_PATH_A)
3023 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSIC, B_P0_TSSIC_BYPASS, 0x0);
3024 	else
3025 		rtw89_phy_write32_mask(rtwdev, R_P1_TSSIC, B_P1_TSSIC_BYPASS, 0x0);
3026 }
3027 
_tssi_set_txagc_offset_mv_avg(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)3028 static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
3029 					  enum rtw89_phy_idx phy,
3030 					  enum rtw89_rf_path path)
3031 {
3032 	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "======>%s   path=%d\n", __func__,
3033 		    path);
3034 
3035 	if (path == RF_PATH_A)
3036 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_MIX, 0x010);
3037 	else
3038 		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_RFCTM_DEL, 0x010);
3039 }
3040 
/* Enable TSSI on both RF paths: start tracking, clear the moving-average
 * state, toggle the TSSI enable bit, enable RF-side tracking and pulse the
 * offset-enable bit.  Marks is_tssi_mode[] true per path on completion.
 */
static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 i;

	for (i = 0; i < RF_PATH_NUM_8852B; i++) {
		_tssi_set_tssi_track(rtwdev, phy, i);
		_tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);

		if (i == RF_PATH_A) {
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
					       B_P0_TSSI_MV_CLR, 0x0);
			/* Toggle TSSI enable: off then on. */
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
					       B_P0_TSSI_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
					       B_P0_TSSI_EN, 0x1);
			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
				       RR_TXGA_V1_TRK_EN, 0x1);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_RFC, 0x3);

			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT, 0xc0);
			/* Toggle the offset-enable strobe. */
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT_EN, 0x1);

			rtwdev->is_tssi_mode[RF_PATH_A] = true;
		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
					       B_P1_TSSI_MV_CLR, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
					       B_P1_TSSI_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
					       B_P1_TSSI_EN, 0x1);
			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
				       RR_TXGA_V1_TRK_EN, 0x1);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_RFC, 0x3);

			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT, 0xc0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT_EN, 0x1);

			rtwdev->is_tssi_mode[RF_PATH_B] = true;
		}
	}
}
3092 
/* Disable TSSI on both paths: clear the enable bits, set the RFC field to
 * its idle value and latch the moving-average clear, then drop the
 * is_tssi_mode[] flags.
 */
static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_RFC, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_CLR, 0x1);

	rtwdev->is_tssi_mode[RF_PATH_A] = false;
	rtwdev->is_tssi_mode[RF_PATH_B] = false;
}
3105 
_tssi_get_cck_group(struct rtw89_dev * rtwdev,u8 ch)3106 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
3107 {
3108 	switch (ch) {
3109 	case 1 ... 2:
3110 		return 0;
3111 	case 3 ... 5:
3112 		return 1;
3113 	case 6 ... 8:
3114 		return 2;
3115 	case 9 ... 11:
3116 		return 3;
3117 	case 12 ... 13:
3118 		return 4;
3119 	case 14:
3120 		return 5;
3121 	}
3122 
3123 	return 0;
3124 }
3125 
/* Some OFDM channels sit on the boundary between two calibration groups.
 * Such channels are tagged with TSSI_EXTRA_GROUP_BIT; their DE value is
 * derived from the two neighbouring groups IDX1 and IDX2 (= IDX1 + 1).
 */
#define TSSI_EXTRA_GROUP_BIT (BIT(31))
#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
3131 
/* Map a channel to its OFDM TSSI calibration group.
 *
 * 2.4 GHz channels map to groups 0..4; 5 GHz channels map to groups 5..18.
 * Channels lying between two 5 GHz groups return TSSI_EXTRA_GROUP(n),
 * meaning "between group n and n+1".  Unknown channels return group 0.
 */
static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}
3199 
_tssi_get_trim_group(struct rtw89_dev * rtwdev,u8 ch)3200 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
3201 {
3202 	switch (ch) {
3203 	case 1 ... 8:
3204 		return 0;
3205 	case 9 ... 14:
3206 		return 1;
3207 	case 36 ... 48:
3208 		return 2;
3209 	case 52 ... 64:
3210 		return 3;
3211 	case 100 ... 112:
3212 		return 4;
3213 	case 116 ... 128:
3214 		return 5;
3215 	case 132 ... 144:
3216 		return 6;
3217 	case 149 ... 177:
3218 		return 7;
3219 	}
3220 
3221 	return 0;
3222 }
3223 
_tssi_get_ofdm_de(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)3224 static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3225 			    enum rtw89_rf_path path, const struct rtw89_chan *chan)
3226 {
3227 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3228 	u8 ch = chan->channel;
3229 	u32 gidx, gidx_1st, gidx_2nd;
3230 	s8 de_1st;
3231 	s8 de_2nd;
3232 	s8 val;
3233 
3234 	gidx = _tssi_get_ofdm_group(rtwdev, ch);
3235 
3236 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3237 		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx);
3238 
3239 	if (IS_TSSI_EXTRA_GROUP(gidx)) {
3240 		gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
3241 		gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
3242 		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
3243 		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
3244 		val = (de_1st + de_2nd) / 2;
3245 
3246 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3247 			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
3248 			    path, val, de_1st, de_2nd);
3249 	} else {
3250 		val = tssi_info->tssi_mcs[path][gidx];
3251 
3252 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3253 			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
3254 	}
3255 
3256 	return val;
3257 }
3258 
_tssi_get_ofdm_trim_de(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)3259 static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3260 				 enum rtw89_rf_path path, const struct rtw89_chan *chan)
3261 {
3262 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3263 	u8 ch = chan->channel;
3264 	u32 tgidx, tgidx_1st, tgidx_2nd;
3265 	s8 tde_1st;
3266 	s8 tde_2nd;
3267 	s8 val;
3268 
3269 	tgidx = _tssi_get_trim_group(rtwdev, ch);
3270 
3271 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3272 		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
3273 		    path, tgidx);
3274 
3275 	if (IS_TSSI_EXTRA_GROUP(tgidx)) {
3276 		tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
3277 		tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
3278 		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
3279 		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
3280 		val = (tde_1st + tde_2nd) / 2;
3281 
3282 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3283 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
3284 			    path, val, tde_1st, tde_2nd);
3285 	} else {
3286 		val = tssi_info->tssi_trim[path][tgidx];
3287 
3288 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3289 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
3290 			    path, val);
3291 	}
3292 
3293 	return val;
3294 }
3295 
/* Write the efuse-derived TSSI DE values (CCK and OFDM/MCS, plus the trim
 * offset) into the DE registers of every path for the current channel.
 */
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				  const struct rtw89_chan *chan)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	u8 gidx;
	s8 ofdm_de;
	s8 trim_de;
	s32 val;
	u32 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
		/* CCK DE = per-group efuse value + trim offset. */
		gidx = _tssi_get_cck_group(rtwdev, ch);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
		val = tssi_info->tssi_cck[i][gidx] + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_cck_long[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
						  _TSSI_DE_MASK));

		/* OFDM/MCS DE = group DE + trim offset, applied to every
		 * bandwidth variant of the DE register.
		 */
		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
		val = ofdm_de + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
			    i, ofdm_de, trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_mcs_20m[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
						  _TSSI_DE_MASK));
	}
}
3350 
/* Dump the TSSI PA-K / alignment result registers of @path for debugging.
 * Path B registers sit at a fixed 0x2000 (path << 13) offset from path A.
 * Read-only; has no side effects on hardware state.
 */
static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n"
		    "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n",
		    R_TSSI_PA_K1 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K1 + (path << 13), MASKDWORD),
		    R_TSSI_PA_K2 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K2 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM1 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM3 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD),
		    R_TSSI_PA_K5 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K5 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM2 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM4 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD),
		    R_TSSI_PA_K8 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K8 + (path << 13), MASKDWORD));
}
3373 
/* Re-apply previously computed TSSI alignment values for @path.
 *
 * Maps the current channel to one of the TSSI_ALIMK_* sub-bands (out-of-range
 * channels fall back to 2G) and, if an alignment run was already completed
 * for that path/band (tssi_info->alignment_done), writes the four cached
 * ALIM register values back into hardware. Always dumps the resulting
 * registers afterwards.
 */
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy, enum rtw89_rf_path path,
				const struct rtw89_chan *chan)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 channel = chan->channel;
	u8 band;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s   phy=%d   path=%d\n", __func__, phy, path);

	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	if (tssi_info->alignment_done[path][band]) {
		/* Cached order matches _tssi_alimentk(): ALIM1, ALIM3, ALIM2, ALIM4 */
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][0]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][1]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][2]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][3]);
	}

	_tssi_alimentk_dump_result(rtwdev, path);
}
3409 
/* Start or stop hardware (PMAC) packet TX used for TSSI measurement.
 *
 * @cnt:     number of packets, @period: packet period, @pwr_dbm: TX power.
 * When @enable is true, the PLCP, TX path, RX path and TX power are
 * configured before triggering PMAC TX; when false, only the PMAC TX
 * stop is issued (the BB configuration is left untouched).
 */
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
			u8 enable, const struct rtw89_chan *chan)
{
	enum rtw89_rf_path_bit rx_path;

	if (path == RF_PATH_A)
		rx_path = RF_A;
	else if (path == RF_PATH_B)
		rx_path = RF_B;
	else if (path == RF_PATH_AB)
		rx_path = RF_AB;
	else
		rx_path = RF_ABCD; /* don't change path, but still set others */

	if (enable) {
		rtw8852bx_bb_set_plcp_tx(rtwdev);
		rtw8852bx_bb_cfg_tx_path(rtwdev, path);
		rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
		rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
	}

	/* 20 MHz bandwidth is always used for the measurement TX */
	rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy, chan);
}
3434 
/* Snapshot @reg_num BB registers (full 32-bit dwords) into @reg_backup so
 * they can later be restored by _tssi_reload_bb_registers().
 */
static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_idx phy, const u32 reg[],
				      u32 reg_backup[], u32 reg_num)
{
	u32 cur;
	u32 idx;

	for (idx = 0; idx < reg_num; idx++) {
		cur = rtw89_phy_read32_mask(rtwdev, reg[idx], MASKDWORD);
		reg_backup[idx] = cur;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI] Backup BB 0x%x = 0x%x\n", reg[idx], cur);
	}
}
3449 
/* Write @reg_num previously saved dword values from @reg_backup back into
 * the BB registers listed in @reg — the inverse of
 * _tssi_backup_bb_registers().
 */
static void _tssi_reload_bb_registers(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_idx phy, const u32 reg[],
				      u32 reg_backup[], u32 reg_num)
{
	u32 idx;

	for (idx = 0; idx < reg_num; idx++) {
		rtw89_phy_write32_mask(rtwdev, reg[idx], MASKDWORD,
				       reg_backup[idx]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI] Reload BB 0x%x = 0x%x\n", reg[idx],
			    reg_backup[idx]);
	}
}
3465 
/* Map a channel number to a flat per-channel index used for the TSSI
 * alignment backup tables: 2G channels occupy 0..13, then the 5G
 * sub-bands follow in 2-channel steps. Unknown channels map to index 0.
 */
static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
{
	if (channel >= 1 && channel <= 14)
		return channel - 1;
	if (channel >= 36 && channel <= 64)
		return (channel - 36) / 2 + 14;
	if (channel >= 100 && channel <= 144)
		return (channel - 100) / 2 + 15 + 14;
	if (channel >= 149 && channel <= 177)
		return (channel - 149) / 2 + 38 + 14;

	return 0;
}
3483 
/* Collect one TSSI codeword (CW) report per measurement round.
 *
 * For each of the RTW8852B_TSSI_PATH_NR rounds: re-arm the TSSI trigger,
 * start a hardware TX burst at power[j], then poll (up to 100 * 30us) for
 * the CW-report-ready bit and read the report into tssi_cw_rpt[j]. The TX
 * counter deltas are logged purely for debugging.
 *
 * Returns true when every report was obtained, false if any poll timed out
 * (TX is stopped before returning in either case).
 */
static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, const s16 *power,
				u32 *tssi_cw_rpt, const struct rtw89_chan *chan)
{
	u32 tx_counter, tx_counter_tmp;
	const int retry = 100;
	u32 tmp;
	int j, k;

	for (j = 0; j < RTW8852B_TSSI_PATH_NR; j++) {
		/* Toggle TSSI enable to re-arm the report logic */
		rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x1);

		tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);

		tmp = rtw89_phy_read32_mask(rtwdev, _tssi_trigger[path], MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] 0x%x = 0x%08x   path=%d\n",
			    _tssi_trigger[path], tmp, path);

		/* First round TX on the path under test, later rounds on ABCD
		 * (which leaves the configured TX path unchanged).
		 */
		if (j == 0)
			_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true, chan);
		else
			_tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true,
				    chan);

		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
		tx_counter_tmp -= tx_counter;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] First HWTXcounter=%d path=%d\n",
			    tx_counter_tmp, path);

		/* Poll for the CW report ready flag */
		for (k = 0; k < retry; k++) {
			tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
						    B_TSSI_CWRPT_RDY);
			if (tmp)
				break;

			udelay(30);

			tx_counter_tmp =
				rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
			tx_counter_tmp -= tx_counter;

			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[TSSI PA K] Flow k = %d HWTXcounter=%d path=%d\n",
				    k, tx_counter_tmp, path);
		}

		if (k >= retry) {
			/* Timed out: stop TX and report failure */
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
				    k, path);

			_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
			return false;
		}

		tssi_cw_rpt[j] =
			rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], B_TSSI_CWRPT);

		_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);

		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
		tx_counter_tmp -= tx_counter;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] Final HWTXcounter=%d path=%d\n",
			    tx_counter_tmp, path);
	}

	return true;
}
3558 
/* TSSI alignment calibration (PA-K) for one RF path.
 *
 * Flow:
 *  1. If a result for this exact channel was cached earlier
 *     (check_backup_aligmk), reload it and return immediately.
 *  2. Otherwise back up the affected BB registers, set TSSI averaging,
 *     transmit at two known powers and read the TSSI codeword reports.
 *  3. From the reports and the sign-extended per-path CW defaults,
 *     compute three alignment offsets and program them into the ALIM
 *     registers.
 *  4. Cache the resulting register values per sub-band and per channel,
 *     then restore the BB state. The elapsed time is accumulated in
 *     tssi_info->tssi_alimk_time for diagnostics.
 */
static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
	static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
				      0x78e4, 0x49c0, 0x0d18, 0x0d80};
	static const s16 power_2g[4] = {48, 20, 4, 4};
	static const s16 power_5g[4] = {48, 20, 4, 4};
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
	u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0};
	u8 channel = chan->channel;
	u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
	struct rtw8852bx_bb_tssi_bak tssi_bak;
	s32 aliment_diff, tssi_cw_default;
	u32 bb_reg_backup[8] = {0};
	ktime_t start_time;
	const s16 *power;
	s64 this_time;
	u8 band;
	bool ok;
	u32 tmp;
	u8 j;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======> %s   channel=%d   path=%d\n", __func__, channel,
		    path);

	/* Fast path: reuse the per-channel cached result if available */
	if (tssi_info->check_backup_aligmk[path][ch_idx]) {
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
				       tssi_info->alignment_backup_by_ch[path][ch_idx][0]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
				       tssi_info->alignment_backup_by_ch[path][ch_idx][1]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
				       tssi_info->alignment_backup_by_ch[path][ch_idx][2]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
				       tssi_info->alignment_backup_by_ch[path][ch_idx][3]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "======> %s   Reload TSSI Alignment !!!\n", __func__);
		_tssi_alimentk_dump_result(rtwdev, path);
		return;
	}

	start_time = ktime_get();

	/* NOTE: both tables currently hold identical values */
	if (chan->band_type == RTW89_BAND_2G)
		power = power_2g;
	else
		power = power_5g;

	/* Map channel to sub-band; out of range falls back to 2G */
	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	rtw8852bx_bb_backup_tssi(rtwdev, phy, &tssi_bak);
	_tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));

	/* Configure TSSI averaging for the measurement */
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);

	ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt, chan);
	if (!ok)
		goto out;

	for (j = 0; j < RTW8852B_TSSI_PATH_NR; j++) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] power[%d]=%d  tssi_cw_rpt[%d]=%d\n", j,
			    power[j], j, tssi_cw_rpt[j]);
	}

	/* CW defaults are 9-bit signed values in hardware */
	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][1],
				    _tssi_cw_default_mask[1]);
	tssi_cw_default = sign_extend32(tmp, 8);
	/* Offset 1 from the two measured reports and the power delta
	 * (2 codewords per dB step)
	 */
	tssi_alim_offset_1 = tssi_cw_rpt[0] - ((power[0] - power[1]) * 2) -
			     tssi_cw_rpt[1] + tssi_cw_default;
	aliment_diff = tssi_alim_offset_1 - tssi_cw_default;

	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][2],
				    _tssi_cw_default_mask[2]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_2 = tssi_cw_default + aliment_diff;

	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][3],
				    _tssi_cw_default_mask[3]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_3 = tssi_cw_default + aliment_diff;

	if (path == RF_PATH_A) {
		/* B_P1_* field masks equal the P0 layout, so they are reused
		 * here for the path-A value composition
		 */
		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);

		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM1, tmp);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2, B_P0_TSSI_ALIM2, tmp);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] tssi_alim_offset = 0x%x   0x%x   0x%x   0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3, B_P0_TSSI_ALIM31),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM11),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM12),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM13));
	} else {
		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);

		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM1, tmp);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM2, B_P1_TSSI_ALIM2, tmp);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] tssi_alim_offset = 0x%x   0x%x   0x%x   0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM3, B_P1_TSSI_ALIM31),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM11),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM12),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM13));
	}

	/* Cache final register values per sub-band ... */
	tssi_info->alignment_done[path][band] = true;
	tssi_info->alignment_value[path][band][0] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][1] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][2] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][3] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);

	/* ... and per channel, for the fast reload path above */
	tssi_info->check_backup_aligmk[path][ch_idx] = true;
	tssi_info->alignment_backup_by_ch[path][ch_idx][0] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][1] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][2] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][3] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][0], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM1 + (path << 13),
		    tssi_info->alignment_value[path][band][0]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][1], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM3 + (path << 13),
		    tssi_info->alignment_value[path][band][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][2], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM2 + (path << 13),
		    tssi_info->alignment_value[path][band][2]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][3], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM4 + (path << 13),
		    tssi_info->alignment_value[path][band][3]);

out:
	/* Restore the BB state saved before the measurement */
	_tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));
	rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak);
	rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0);

	this_time = ktime_us_delta(ktime_get(), start_time);
	tssi_info->tssi_alimk_time += this_time;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] %s processing time = %lld us (acc = %llu us)\n",
		    __func__, this_time, tssi_info->tssi_alimk_time);
}
3734 
/* One-time DPK initialization: configure the DPD backoff on PHY 0. */
void rtw8852b_dpk_init(struct rtw89_dev *rtwdev)
{
	_set_dpd_backoff(rtwdev, RTW89_PHY_0);
}
3739 
rtw8852b_rck(struct rtw89_dev * rtwdev)3740 void rtw8852b_rck(struct rtw89_dev *rtwdev)
3741 {
3742 	u8 path;
3743 
3744 	for (path = 0; path < RF_PATH_NUM_8852B; path++)
3745 		_rck(rtwdev, path);
3746 }
3747 
/* DAC calibration entry point. The BT-coex firmware is notified before and
 * after the calibration so it can arbitrate the shared antenna.
 */
void rtw8852b_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
	_dac_cal(rtwdev, false);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
3756 
/* IQ calibration entry point: notify BT-coex, pause scheduled TX and wait
 * for RX idle before running IQK, then resume TX and notify completion.
 */
void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		  enum rtw89_chanctx_idx chanctx_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_iqk_init(rtwdev);
	_iqk(rtwdev, phy_idx, false, chanctx_idx);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
3773 
/* RX DC offset calibration entry point, bracketed by BT-coex notifications
 * and a scheduled-TX pause like the other RFK entry points.
 */
void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		     enum rtw89_chanctx_idx chanctx_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_rx_dck(rtwdev, phy_idx);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
3789 
/* Digital pre-distortion calibration entry point. Enables DPK and disables
 * DPK reload before the run; BT-coex notification and TX pause bracket the
 * calibration as in the other RFK entry points.
 */
void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		  enum rtw89_chanctx_idx chanctx_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	rtwdev->dpk.is_dpk_enable = true;
	rtwdev->dpk.is_dpk_reload_en = false;
	_dpk(rtwdev, phy_idx, false, chanctx_idx);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}
3807 
/* Periodic DPK tracking hook; delegates to the internal tracker. */
void rtw8852b_dpk_track(struct rtw89_dev *rtwdev)
{
	_dpk_track(rtwdev);
}
3812 
/* Full TSSI (TX power tracking) initialization for @phy.
 *
 * Disables TSSI, then per RF path programs the RF/BB TSSI configuration
 * (system, power-control BB, DCK, thermal table, DAC gain, slope, default
 * alignment). When @hwtx_en is true, the alignment calibration
 * (_tssi_alimentk) is additionally run with scheduled TX paused.
 * Finally TSSI is re-enabled and the efuse DE values are applied.
 */
void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, chanctx_idx);
	u32 tx_en;
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
		_tssi_rf_setting(rtwdev, phy, i, chan);
		_tssi_set_sys(rtwdev, phy, i, chan);
		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
		_tssi_set_dck(rtwdev, phy, i);
		_tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
		_tssi_set_dac_gain_tbl(rtwdev, phy, i);
		_tssi_slope_cal_org(rtwdev, phy, i, chan);
		_tssi_alignment_default(rtwdev, phy, i, true, chan);
		_tssi_set_tssi_slope(rtwdev, phy, i);

		/* Alignment calibration transmits packets, so scheduled TX
		 * must be stopped and the TMAC paused around it.
		 */
		rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
		_tmac_tx_pause(rtwdev, phy, true);
		if (hwtx_en)
			_tssi_alimentk(rtwdev, phy, i, chan);
		_tmac_tx_pause(rtwdev, phy, false);
		rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy, chan);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
3851 
/* Lightweight TSSI re-configuration used during channel switches/scans.
 *
 * Unlike rtw8852b_tssi(), no calibration is run: per path only the RF/BB
 * settings and thermal table are reprogrammed, and either the previously
 * computed alignment values (if available for this sub-band) or the
 * defaults are applied.
 */
void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			const struct rtw89_chan *chan)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 channel = chan->channel;
	u8 band;
	u32 i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s   phy=%d  channel=%d\n", __func__, phy, channel);

	/* Map channel to sub-band; out of range falls back to 2G */
	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RTW8852B_TSSI_PATH_NR; i++) {
		_tssi_rf_setting(rtwdev, phy, i, chan);
		_tssi_set_sys(rtwdev, phy, i, chan);
		_tssi_set_tmeter_tbl(rtwdev, phy, i, chan);

		if (tssi_info->alignment_done[i][band])
			_tssi_alimentk_done(rtwdev, phy, i, chan);
		else
			_tssi_alignment_default(rtwdev, phy, i, true, chan);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy, chan);
}
3890 
/* Scan-time TXAGC handling.
 *
 * On enable (scan start): run the full TSSI flow once if TSSI mode is not
 * yet active on either path, otherwise do nothing.
 * On disable (scan end): reset the TSSI tracking offset to 0xc0, pulse the
 * offset-enable bits to latch it, and reload any cached alignment values
 * for both paths.
 */
static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
					enum rtw89_phy_idx phy, bool enable,
					enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	u8 channel = chan->channel;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s   ch=%d\n",
		    __func__, channel);

	if (enable) {
		if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
			rtw8852b_tssi(rtwdev, phy, true, chanctx_idx);
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));

	/* Restore the default offset and toggle the enable bit to apply it */
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,  0xc0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);

	_tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
	_tssi_alimentk_done(rtwdev, phy, RF_PATH_B, chan);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======> %s   SCAN_END\n", __func__);
}
3932 
/* Scan notification hook: forward scan start/end straight into the
 * default-TXAGC handler (enable on start, restore on end).
 */
void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
			       enum rtw89_phy_idx phy_idx,
			       enum rtw89_chanctx_idx chanctx_idx)
{
	rtw8852b_tssi_default_txagc(rtwdev, phy_idx, scan_start, chanctx_idx);
}
3942 
/* Program the bandwidth field of RF register 0x18 on @path.
 *
 * @dav selects the DAV copy (RR_CFGCH) or the DDV copy (RR_CFGCH_V1) of
 * the register. 5/10 MHz widths are mapped onto the 20 MHz RF setting.
 */
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			enum rtw89_bandwidth bw, bool dav)
{
	u32 rf_reg18;
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
	if (rf_reg18 == INV_RF_DATA) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]Invalid RF_0x18 for Path-%d\n", path);
		return;
	}
	rf_reg18 &= ~RR_CFGCH_BW;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_5:
	case RTW89_CHANNEL_WIDTH_10:
	case RTW89_CHANNEL_WIDTH_20:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set CH\n");
	}

	/* NOTE(review): due to operator precedence this evaluates as
	 * rf_reg18 &= (~(...) & RFREG_MASK). The same construct is used in
	 * _ch_setting(), so it appears intentional — confirm against vendor
	 * reference if touched.
	 */
	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;
	rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n",
		    bw, path, reg18_addr,
		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
}
3984 
/* Apply the bandwidth setting to both RF paths, writing the DAV copy of
 * RF 0x18 first and then the DDV copy on each path.
 */
static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		     enum rtw89_bandwidth bw)
{
	_bw_setting(rtwdev, RF_PATH_A, bw, true);
	_bw_setting(rtwdev, RF_PATH_B, bw, true);
	_bw_setting(rtwdev, RF_PATH_A, bw, false);
	_bw_setting(rtwdev, RF_PATH_B, bw, false);
}
3993 
/* Write @val to RF 0x18 (channel config) on path A and wait for the
 * synthesizer's LPF-busy flag to clear (polled up to 1 ms). The LDO
 * register is temporarily switched during the write and restored after.
 *
 * Returns true if the poll timed out, false on success.
 */
static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val)
{
	u32 bak;
	u32 tmp;
	int ret;

	bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val);

	ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000,
				       false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n");

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak);

	return !!ret;
}
4013 
/* Verify synthesizer lock (RR_SYNFB lock bit) and attempt recovery in
 * three escalating steps, each only taken if the lock bit is still clear:
 *  1. reset the SYN MMD block,
 *  2. rewrite RF 0x18 via _set_s0_arfc18() with the lock trigger raised,
 *  3. power-cycle the synthesizer, then rewrite RF 0x18 again.
 */
static void _lck_check(struct rtw89_dev *rtwdev)
{
	u32 tmp;

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n");

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0);
	}

	udelay(10);

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n");

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, tmp);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
	}

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n");

		/* Read-then-write-back refreshes the POW/SX registers */
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp);

		/* Power-cycle the synthesizer through the LUT mode */
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0);

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, tmp);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n",
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK),
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK));
	}
}
4061 
/* Program RF 0x18 (channel config) on S0; when the write completes
 * without timing out, run the synthesizer lock-check fallback sequence.
 */
static void _set_ch(struct rtw89_dev *rtwdev, u32 val)
{
	if (!_set_s0_arfc18(rtwdev, val))
		_lck_check(rtwdev);
}
4070 
/* Build and program the RF 0x18 channel-config word for one path.
 * @dav selects which copy of the register to write: the DAV copy
 * (RR_CFGCH) or the alternate copy (RR_CFGCH_V1). For S0's DAV copy the
 * write goes through _set_ch() so a failed synthesizer lock is recovered.
 */
static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			u8 central_ch, bool dav)
{
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
	bool is_2g_ch = central_ch <= 14;
	u32 rf_reg18;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);

	/* start from the current register value, clear band/channel and
	 * control bits, then fill in the new channel number
	 */
	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
	rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH |
		      RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH);
	rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);

	/* channels above 14 are treated as 5G; 2G leaves both band fields 0 */
	if (!is_2g_ch)
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) |
			    FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);

	/* clear control bits again (POW_LCK/TRX_AH/BCN were already cleared
	 * above) and confine the word to the 20-bit RF register width; note
	 * the & binds inside the &=, i.e. rf_reg18 &= (~(...) & RFREG_MASK)
	 */
	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;

	/* only S0's DAV write needs the lock-check path */
	if (path == RF_PATH_A && dav)
		_set_ch(rtwdev, rf_reg18);
	else
		rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);

	/* toggle the lock-state bin bit to latch the new setting */
	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0);
	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n",
		    central_ch, path, reg18_addr,
		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
}
4106 
/* Apply the channel setting to both RF paths: first the DAV copy of
 * RF 0x18 on each path, then the non-DAV copy on each path. The order
 * (A-dav, B-dav, A, B) matches the original unrolled sequence.
 */
static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch)
{
	static const bool dav_order[] = { true, false };
	unsigned int i;
	u8 path;

	for (i = 0; i < ARRAY_SIZE(dav_order); i++)
		for (path = RF_PATH_A; path <= RF_PATH_B; path++)
			_ch_setting(rtwdev, path, central_ch, dav_order[i]);
}
4114 
_set_rxbb_bw(struct rtw89_dev * rtwdev,enum rtw89_bandwidth bw,enum rtw89_rf_path path)4115 static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
4116 			 enum rtw89_rf_path path)
4117 {
4118 	rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
4119 	rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12);
4120 
4121 	if (bw == RTW89_CHANNEL_WIDTH_20)
4122 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b);
4123 	else if (bw == RTW89_CHANNEL_WIDTH_40)
4124 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13);
4125 	else if (bw == RTW89_CHANNEL_WIDTH_80)
4126 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb);
4127 	else
4128 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3);
4129 
4130 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n", path,
4131 		    rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB));
4132 
4133 	rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
4134 }
4135 
_rxbb_bw(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_bandwidth bw)4136 static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4137 		     enum rtw89_bandwidth bw)
4138 {
4139 	u8 kpath, path;
4140 
4141 	kpath = _kpath(rtwdev, phy);
4142 
4143 	for (path = 0; path < RF_PATH_NUM_8852B; path++) {
4144 		if (!(kpath & BIT(path)))
4145 			continue;
4146 
4147 		_set_rxbb_bw(rtwdev, bw, path);
4148 	}
4149 }
4150 
/* Sequence a full RF channel/bandwidth change: program the channel on
 * both paths, then the bandwidth, then the RX baseband filter.
 * NOTE(review): @band is currently unused here; it is kept to match the
 * caller's interface.
 */
static void rtw8852b_ctrl_bw_ch(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy, u8 central_ch,
				enum rtw89_band band, enum rtw89_bandwidth bw)
{
	_ctrl_ch(rtwdev, central_ch);
	_ctrl_bw(rtwdev, phy, bw);
	_rxbb_bw(rtwdev, phy, bw);
}
4159 
/* Entry point for the RF part of a channel switch: forwards the chan
 * descriptor's channel/band/bandwidth to the internal sequencer.
 */
void rtw8852b_set_channel_rf(struct rtw89_dev *rtwdev,
			     const struct rtw89_chan *chan,
			     enum rtw89_phy_idx phy_idx)
{
	rtw8852b_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type,
			    chan->band_width);
}
4167 
rtw8852b_mcc_get_ch_info(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx)4168 void rtw8852b_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
4169 {
4170 	const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, 0);
4171 	struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
4172 	struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V0] = {};
4173 	u8 idx;
4174 
4175 	for (idx = 0; idx < ARRAY_SIZE(desc); idx++) {
4176 		struct rtw89_rfk_chan_desc *p = &desc[idx];
4177 
4178 		p->ch = rfk_mcc->ch[idx];
4179 
4180 		p->has_band = true;
4181 		p->band = rfk_mcc->band[idx];
4182 	}
4183 
4184 	idx = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan);
4185 
4186 	rfk_mcc->ch[idx] = chan->channel;
4187 	rfk_mcc->band[idx] = chan->band_type;
4188 	rfk_mcc->table_idx = idx;
4189 }
4190 
/* Channel-context state callback: DPK (digital pre-distortion) must not
 * stay active while multi-channel concurrency (MCC) is running.
 */
void rtw8852b_rfk_chanctx_cb(struct rtw89_dev *rtwdev,
			     enum rtw89_chanctx_state state)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 path;

	switch (state) {
	case RTW89_CHANCTX_STATE_MCC_START:
		/* entering MCC: turn DPK off on every path */
		dpk->is_dpk_enable = false;
		for (path = 0; path < RTW8852B_DPK_RF_PATH; path++)
			_dpk_onoff(rtwdev, path, false);
		break;
	case RTW89_CHANCTX_STATE_MCC_STOP:
		/* leaving MCC: re-enable DPK. Paths are first forced off,
		 * then a full DPK calibration redoes them — presumably the
		 * off-toggle resets per-path state before recalibration.
		 */
		dpk->is_dpk_enable = true;
		for (path = 0; path < RTW8852B_DPK_RF_PATH; path++)
			_dpk_onoff(rtwdev, path, false);
		rtw8852b_dpk(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
		break;
	default:
		break;
	}
}
4213