// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2022 Realtek Corporation
 */

#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "phy.h"
#include "reg.h"
#include "rtw8852c.h"
#include "rtw8852c_rfk.h"
#include "rtw8852c_rfk_table.h"
#include "rtw8852c_table.h"

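/* RX ADC clock/filter parameters programmed by rtw8852c_rxck_force() */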
struct rxck_def {
        u32 ctl;
        u32 en;
        u32 bw0;
        u32 bw1;
        u32 mul;
        u32 lp;
};

#define _TSSI_DE_MASK GENMASK(21, 12)
static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852C] = {0x5858, 0x7858};
static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852C] = {0x5860, 0x7860};
static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852C] = {0x5838, 0x7838};
static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852C] = {0x5840, 0x7840};
static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852C] = {0x5848, 0x7848};
static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852C] = {0x5850, 0x7850};
static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852C] = {0x5828, 0x7828};
static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852C] = {0x5830, 0x7830};

static const u32 rtw8852c_backup_bb_regs[] = {
        0x8120, 0xc0d4, 0xc0d8, 0xc0e8, 0x8220, 0xc1d4, 0xc1d8, 0xc1e8
};

static const u32 rtw8852c_backup_rf_regs[] = {
        0xdf, 0x5f, 0x8f, 0x97, 0xa3, 0x5, 0x10005
};

#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852c_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852c_backup_rf_regs)

#define RXK_GROUP_NR 4
static const u32 _rxk_a6_idxrxgain[RXK_GROUP_NR] = {0x190, 0x196, 0x290, 0x316};
static const u32 _rxk_a6_idxattc2[RXK_GROUP_NR] = {0x00, 0x00, 0x00, 0x00};
static const u32 _rxk_a_idxrxgain[RXK_GROUP_NR] = {0x190, 0x198, 0x310, 0x318};
static const u32 _rxk_a_idxattc2[RXK_GROUP_NR] = {0x00, 0x00, 0x00, 0x00};
static const u32 _rxk_g_idxrxgain[RXK_GROUP_NR] = {0x252, 0x26c, 0x350, 0x360};
static const u32 _rxk_g_idxattc2[RXK_GROUP_NR] = {0x00, 0x07, 0x00, 0x03};

#define TXK_GROUP_NR 3
static const u32 _txk_a6_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
static const u32 _txk_a6_track_range[TXK_GROUP_NR] = {0x6, 0x7, 0x7};
static const u32 _txk_a6_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e};
static const u32 _txk_a6_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
static const u32 _txk_a_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
static const u32 _txk_a_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x7};
static const u32 _txk_a_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e};
static const u32 _txk_a_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
static const u32 _txk_g_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
static const u32 _txk_g_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x6};
static const u32 _txk_g_gain_bb[TXK_GROUP_NR] = {0x0e, 0x0a, 0x0e};
static const u32 _txk_g_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};

static const u32 dpk_par_regs[RTW89_DPK_RF_PATH][4] = {
        {0x8190, 0x8194, 0x8198, 0x81a4},
        {0x81a8, 0x81c4, 0x81c8, 0x81e8},
};

static const u8 _dck_addr_bs[RF_PATH_NUM_8852C] = {0x0, 0x10};
static const u8 _dck_addr[RF_PATH_NUM_8852C] = {0xc, 0x1c};

static const struct rxck_def _ck480M = {0x8, 0x2, 0x3, 0xf, 0x0, 0x9};
static const struct rxck_def _ck960M = {0x8, 0x2, 0x2, 0x8, 0x0, 0x9};
static const struct rxck_def _ck1920M = {0x8, 0x0, 0x2, 0x4, 0x6, 0x9};

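/* Map a PHY index to the RF paths to calibrate: both paths unless DBCC
 * is enabled, in which case PHY0 owns path A and PHY1 owns path B.
 */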
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n",
                    rtwdev->dbcc_en, phy_idx);

        if (!rtwdev->dbcc_en)
                return RF_AB;

        if (phy_idx == RTW89_PHY_0)
                return RF_A;
        else
                return RF_B;
}

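/* Back up and restore the BB/RF registers listed above around an RFK,
 * since the calibration flow rewrites them.
 */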
static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
        u32 i;

        for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
                backup_bb_reg_val[i] =
                        rtw89_phy_read32_mask(rtwdev, rtw8852c_backup_bb_regs[i],
                                              MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]backup bb reg : %x, value =%x\n",
                            rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]);
        }
}

static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
                               u8 rf_path)
{
        u32 i;

        for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
                backup_rf_reg_val[i] =
                        rtw89_read_rf(rtwdev, rf_path,
                                      rtw8852c_backup_rf_regs[i], RFREG_MASK);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path,
                            rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]);
        }
}

static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
        u32 i;

        for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
                rtw89_phy_write32_mask(rtwdev, rtw8852c_backup_bb_regs[i],
                                       MASKDWORD, backup_bb_reg_val[i]);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]restore bb reg : %x, value =%x\n",
                            rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]);
        }
}

static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
                                u8 rf_path)
{
        u32 i;

        for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
                rtw89_write_rf(rtwdev, rf_path, rtw8852c_backup_rf_regs[i],
                               RFREG_MASK, backup_rf_reg_val[i]);

                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path,
                            rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]);
        }
}

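/* Poll RR_MOD on each selected path until it has left TX mode (0x2) */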
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
        u8 path;
        u32 rf_mode;
        int ret;

        for (path = 0; path < RF_PATH_MAX; path++) {
                if (!(kpath & BIT(path)))
                        continue;

                ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
                                               2, 5000, false, rtwdev, path, 0x00,
                                               RR_MOD_MASK);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
                            path, ret);
        }
}

static void _dack_dump(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u8 i;
        u8 t;

        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->addck_d[0][0], dack->addck_d[0][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->addck_d[1][0], dack->addck_d[1][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->dadck_d[0][0], dack->dadck_d[0][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->dadck_d[1][0], dack->dadck_d[1][1]);

        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
                    dack->biask_d[0][0], dack->biask_d[0][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
                    dack->biask_d[1][0], dack->biask_d[1][1]);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[0][0][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[0][1][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[1][0][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[1][1][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
}

static void _addck_backup(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;

        rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
        dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0,
                                                    B_ADDCKR0_A0);
        dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0,
                                                    B_ADDCKR0_A1);

        rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
        dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1,
                                                    B_ADDCKR1_A0);
        dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1,
                                                    B_ADDCKR1_A1);
}

static void _addck_reload(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;

        rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1,
                               dack->addck_d[0][0]);
        rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0,
                               dack->addck_d[0][1]);
        rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3);
        rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1,
                               dack->addck_d[1][0]);
        rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0,
                               dack->addck_d[1][1]);
        rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3);
}

static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u8 i;

        rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
                dack->msbk_d[0][0][i] = rtw89_phy_read32_mask(rtwdev,
                                                              R_DACK_S0P2,
                                                              B_DACK_S0M0);
                rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
                dack->msbk_d[0][1][i] = rtw89_phy_read32_mask(rtwdev,
                                                              R_DACK_S0P3,
                                                              B_DACK_S0M1);
        }
        dack->biask_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00,
                                                    B_DACK_BIAS00);
        dack->biask_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01,
                                                    B_DACK_BIAS01);
        dack->dadck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00,
                                                    B_DACK_DADCK00);
        dack->dadck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01,
                                                    B_DACK_DADCK01);
}

static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u8 i;

        rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
                dack->msbk_d[1][0][i] = rtw89_phy_read32_mask(rtwdev,
                                                              R_DACK10S,
                                                              B_DACK10S);
                rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
                dack->msbk_d[1][1][i] = rtw89_phy_read32_mask(rtwdev,
                                                              R_DACK11S,
                                                              B_DACK11S);
        }
        dack->biask_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10,
                                                    B_DACK_BIAS10);
        dack->biask_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11,
                                                    B_DACK_BIAS11);
        dack->dadck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10,
                                                    B_DACK_DADCK10);
        dack->dadck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11,
                                                    B_DACK_DADCK11);
}

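/* Reload saved DACK results: the 16 MSBK codes are packed four bytes per
 * 32-bit word at 0xc200/0xc204/0xc208/0xc20c (+offset), and the bias and
 * DADCK codes share the word at 0xc210.
 */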
static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
                                 enum rtw89_rf_path path, u8 index)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 idx_offset, path_offset;
        u32 val32, offset, addr;
        u8 i;

        idx_offset = (index == 0 ? 0 : 0x14);
        path_offset = (path == RF_PATH_A ? 0 : 0x28);
        offset = idx_offset + path_offset;

        rtw89_rfk_parser(rtwdev, &rtw8852c_dack_reload_defs_tbl);

        /* msbk_d: 15/14/13/12 */
        val32 = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                val32 |= dack->msbk_d[path][index][i + 12] << (i * 8);
        addr = 0xc200 + offset;
        rtw89_phy_write32(rtwdev, addr, val32);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
                    rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));

        /* msbk_d: 11/10/9/8 */
        val32 = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                val32 |= dack->msbk_d[path][index][i + 8] << (i * 8);
        addr = 0xc204 + offset;
        rtw89_phy_write32(rtwdev, addr, val32);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
                    rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));

        /* msbk_d: 7/6/5/4 */
        val32 = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                val32 |= dack->msbk_d[path][index][i + 4] << (i * 8);
        addr = 0xc208 + offset;
        rtw89_phy_write32(rtwdev, addr, val32);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
                    rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));

        /* msbk_d: 3/2/1/0 */
        val32 = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                val32 |= dack->msbk_d[path][index][i] << (i * 8);
        addr = 0xc20c + offset;
        rtw89_phy_write32(rtwdev, addr, val32);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
                    rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));

        /* dadck_d/biask_d */
        val32 = (dack->biask_d[path][index] << 22) |
                (dack->dadck_d[path][index] << 14);
        addr = 0xc210 + offset;
        rtw89_phy_write32(rtwdev, addr, val32);
        rtw89_phy_write32_set(rtwdev, addr, BIT(0));
}

static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
        u8 i;

        for (i = 0; i < 2; i++)
                _dack_reload_by_path(rtwdev, path, i);
}

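/* Trigger ADC DC offset calibration (ADDCK) on both paths and poll for
 * completion; a timeout is recorded but not treated as fatal.
 */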
static void _addck(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 val;
        int ret;

        /* S0 */
        rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x0);
        fsleep(1);
        rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
                                       1, 10000, false, rtwdev, 0xc0fc, BIT(0));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
                dack->addck_timeout[0] = true;
        }

        rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x0);

        /* S1 */
        rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x0);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
                                       1, 10000, false, rtwdev, 0xc1fc, BIT(0));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
                dack->addck_timeout[1] = true;
        }
        rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x0);
}

static void _dack_reset(struct rtw89_dev *rtwdev, u8 path)
{
        rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
                                 &rtw8852c_dack_reset_defs_a_tbl,
                                 &rtw8852c_dack_reset_defs_b_tbl);
}

enum adc_ck {
        ADC_NA = 0,
        ADC_480M = 1,
        ADC_960M = 2,
        ADC_1920M = 3,
};

enum dac_ck {
        DAC_40M = 0,
        DAC_80M = 1,
        DAC_120M = 2,
        DAC_160M = 3,
        DAC_240M = 4,
        DAC_320M = 5,
        DAC_480M = 6,
        DAC_960M = 7,
};

enum rf_mode {
        RF_SHUT_DOWN = 0x0,
        RF_STANDBY = 0x1,
        RF_TX = 0x2,
        RF_RX = 0x3,
        RF_TXIQK = 0x4,
        RF_DPK = 0x5,
        RF_RXK1 = 0x6,
        RF_RXK2 = 0x7,
};

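/* Force the TX DAC clock of @path to a fixed rate, or release the
 * override when @force is false.
 */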
static void rtw8852c_txck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
                                enum dac_ck ck)
{
        rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0);

        if (!force)
                return;

        rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck);
        rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1);
}

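/* Force the RX ADC clock of @path and program the matching filter/clock
 * parameters from the rxck_def tables above.
 */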
static void rtw8852c_rxck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
                                enum adc_ck ck)
{
        const struct rxck_def *def;

        rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);

        if (!force)
                return;

        rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
        rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);

        switch (ck) {
        case ADC_480M:
                def = &_ck480M;
                break;
        case ADC_960M:
                def = &_ck960M;
                break;
        case ADC_1920M:
        default:
                def = &_ck1920M;
                break;
        }

        rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_CTL, def->ctl);
        rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_EN, def->en);
        rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, def->bw0);
        rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, def->bw1);
        rtw89_phy_write32_mask(rtwdev, R_DRCK | (path << 8), B_DRCK_MUL, def->mul);
        rtw89_phy_write32_mask(rtwdev, R_ADCMOD | (path << 8), B_ADCMOD_LP, def->lp);
}

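/* DACK is done only when all four per-path status bits report OK */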
static bool _check_dack_done(struct rtw89_dev *rtwdev, bool s0)
{
        if (s0) {
                if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
                    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 ||
                    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
                    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
                        return false;
        } else {
                if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 ||
                    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 ||
                    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 ||
                    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0)
                        return false;
        }

        return true;
}

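/* Per-path DACK: force a 160M DAC clock, run the one-shot calibration,
 * then back up and reload the results.
 */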
static void _dack_s0(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        bool done;
        int ret;

        rtw8852c_txck_force(rtwdev, RF_PATH_A, true, DAC_160M);
        rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s0_tbl);

        _dack_reset(rtwdev, RF_PATH_A);

        rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1);
        ret = read_poll_timeout_atomic(_check_dack_done, done, done,
                                       1, 10000, false, rtwdev, true);
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n");
                dack->msbk_timeout[0] = true;
        }
        rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0);
        rtw8852c_txck_force(rtwdev, RF_PATH_A, false, DAC_960M);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");

        _dack_backup_s0(rtwdev);
        _dack_reload(rtwdev, RF_PATH_A);
        rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
}

static void _dack_s1(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        bool done;
        int ret;

        rtw8852c_txck_force(rtwdev, RF_PATH_B, true, DAC_160M);
        rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s1_tbl);

        _dack_reset(rtwdev, RF_PATH_B);

        rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1);
        ret = read_poll_timeout_atomic(_check_dack_done, done, done,
                                       1, 10000, false, rtwdev, false);
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n");
                dack->msbk_timeout[1] = true;
        }
        rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0);
        rtw8852c_txck_force(rtwdev, RF_PATH_B, false, DAC_960M);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");

        _dack_backup_s1(rtwdev);
        _dack_reload(rtwdev, RF_PATH_B);
        rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}

static void _dack(struct rtw89_dev *rtwdev)
{
        _dack_s0(rtwdev);
        _dack_s1(rtwdev);
}

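/* DRCK: run the clock calibration one-shot and latch the result code
 * from R_DRCK_RES back into B_DRCK_VAL.
 */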
static void _drck(struct rtw89_dev *rtwdev)
{
        u32 val;
        int ret;

        rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1);
        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
                                       1, 10000, false, rtwdev, 0xc0c8, BIT(3));
        if (ret)
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");

        rtw89_rfk_parser(rtwdev, &rtw8852c_drck_defs_tbl);

        val = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, B_DRCK_RES);
        rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0);
        rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, val);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n",
                    rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
}

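/* Full DAC calibration: DRCK first, then ADDCK (results backed up and
 * reloaded), then per-path DACK, notifying BTC around each one-shot.
 */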
static void _dac_cal(struct rtw89_dev *rtwdev, bool force,
                     enum rtw89_chanctx_idx chanctx_idx)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 rf0_0, rf1_0;
        u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, chanctx_idx);

        dack->dack_done = false;
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
        rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
        rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
        _drck(rtwdev);

        rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
        _addck(rtwdev);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);

        _addck_backup(rtwdev);
        _addck_reload(rtwdev);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
        _dack(rtwdev);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);

        _dack_dump(rtwdev);
        dack->dack_done = true;
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
        dack->dack_cnt++;
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}

#define RTW8852C_NCTL_VER 0xd
#define RTW8852C_IQK_VER 0x2a
#define RTW8852C_IQK_SS 2
#define RTW8852C_IQK_THR_REK 8
#define RTW8852C_IQK_CFIR_GROUP_NR 4

enum rtw8852c_iqk_type {
        ID_TXAGC,
        ID_G_FLOK_COARSE,
        ID_A_FLOK_COARSE,
        ID_G_FLOK_FINE,
        ID_A_FLOK_FINE,
        ID_FLOK_VBUFFER,
        ID_TXK,
        ID_RXAGC,
        ID_RXK,
        ID_NBTXK,
        ID_NBRXK,
};

static void rtw8852c_disable_rxagc(struct rtw89_dev *rtwdev, u8 path, u8 en_rxagc)
{
        if (path == RF_PATH_A)
                rtw89_phy_write32_mask(rtwdev, R_P0_AGC_CTL, B_P0_AGC_EN, en_rxagc);
        else
                rtw89_phy_write32_mask(rtwdev, R_P1_AGC_CTL, B_P1_AGC_EN, en_rxagc);
}

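/* Prepare RX IQK: pick the ADC clock matching the channel bandwidth and
 * toggle the per-path ADC FIFO around the change.
 */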
static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

        if (path == RF_PATH_A)
                rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101);
        else
                rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0202);

        switch (iqk_info->iqk_bw[path]) {
        case RTW89_CHANNEL_WIDTH_20:
        case RTW89_CHANNEL_WIDTH_40:
                rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
                rtw8852c_rxck_force(rtwdev, path, true, ADC_480M);
                rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
                rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
                break;
        case RTW89_CHANNEL_WIDTH_80:
                rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
                rtw8852c_rxck_force(rtwdev, path, true, ADC_960M);
                rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x1);
                rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
                rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
                break;
        case RTW89_CHANNEL_WIDTH_160:
                rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
                rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
                rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x2);
                rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
                rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
                break;
        default:
                break;
        }

        rtw89_rfk_parser(rtwdev, &rtw8852c_iqk_rxk_cfg_defs_tbl);

        if (path == RF_PATH_A)
                rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x1101);
        else
                rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x2202);
}

static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
{
        u32 tmp;
        u32 val;
        int ret;

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
                                       1, 8200, false, rtwdev, 0xbff8, MASKBYTE0);
        if (ret)
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n");

        rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
        tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp);

        return false;
}

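/* Kick one NCTL one-shot command for the given calibration type and wait
 * for it to complete; the return value reports failure.
 */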
static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
                          enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u32 addr_rfc_ctl = R_UPD_CLK + (path << 13);
        u32 iqk_cmd;
        bool fail;

        switch (ktype) {
        case ID_TXAGC:
                iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
                break;
        case ID_A_FLOK_COARSE:
                rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
                iqk_cmd = 0x008 | (1 << (4 + path));
                break;
        case ID_G_FLOK_COARSE:
                rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
                iqk_cmd = 0x108 | (1 << (4 + path));
                break;
        case ID_A_FLOK_FINE:
                rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
                iqk_cmd = 0x508 | (1 << (4 + path));
                break;
        case ID_G_FLOK_FINE:
                rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
                iqk_cmd = 0x208 | (1 << (4 + path));
                break;
        case ID_FLOK_VBUFFER:
                rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
                iqk_cmd = 0x308 | (1 << (4 + path));
                break;
        case ID_TXK:
                rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
                iqk_cmd = 0x008 | (1 << (4 + path)) | ((0x8 + iqk_info->iqk_bw[path]) << 8);
                break;
        case ID_RXAGC:
                iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
                break;
        case ID_RXK:
                rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
                iqk_cmd = 0x008 | (1 << (4 + path)) | ((0xc + iqk_info->iqk_bw[path]) << 8);
                break;
        case ID_NBTXK:
                rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
                iqk_cmd = 0x408 | (1 << (4 + path));
                break;
        case ID_NBRXK:
                rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
                iqk_cmd = 0x608 | (1 << (4 + path));
                break;
        default:
                return false;
        }

        rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
        fsleep(15);
        fail = _iqk_check_cal(rtwdev, path, ktype);
        rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);

        return fail;
}

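/* Wideband RX IQK: run a one-shot RXK for each RX gain/attenuation group
 * of the current band.
 */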
static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
                           enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        bool fail;
        u32 tmp;
        u32 bkrf0;
        u8 gp;

        bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW);
        if (path == RF_PATH_B) {
                rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3);
                tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD);
                rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp);
                tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX);
                rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp);
        }

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
        default:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8);
                break;
        case RTW89_BAND_6G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9);
                break;
        }

        fsleep(10);

        for (gp = 0; gp < RXK_GROUP_NR; gp++) {
                switch (iqk_info->iqk_band[path]) {
                case RTW89_BAND_2G:
                default:
                        rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
                                       _rxk_g_idxrxgain[gp]);
                        rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF,
                                       _rxk_g_idxattc2[gp]);
                        break;
                case RTW89_BAND_5G:
                        rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
                                       _rxk_a_idxrxgain[gp]);
                        rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT,
                                       _rxk_a_idxattc2[gp]);
                        break;
                case RTW89_BAND_6G:
                        rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
                                       _rxk_a6_idxrxgain[gp]);
                        rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT,
                                       _rxk_a6_idxattc2[gp]);
                        break;
                }
                rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
                                       B_CFIR_LUT_SEL, 0x1);
                rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
                                       B_CFIR_LUT_SET, 0x0);
                rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
                                       B_CFIR_LUT_GP_V1, gp);
                fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
        }

        if (path == RF_PATH_B)
                rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0);
        rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0);

        if (fail) {
                iqk_info->nb_rxcfir[path] = 0x40000002;
                iqk_info->is_wb_rxiqk[path] = false;
        } else {
                iqk_info->nb_rxcfir[path] = 0x40000000;
                iqk_info->is_wb_rxiqk[path] = true;
        }

        return false;
}

static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
                       enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        bool fail;
        u32 tmp;
        u32 bkrf0;
        u8 gp = 0x2;

        bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW);
        if (path == RF_PATH_B) {
                rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3);
                tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD);
                rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp);
                tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX);
                rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp);
        }

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
        default:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8);
                break;
        case RTW89_BAND_6G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9);
                break;
        }

        fsleep(10);

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
        default:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_g_idxrxgain[gp]);
                rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF, _rxk_g_idxattc2[gp]);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a_idxrxgain[gp]);
                rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a_idxattc2[gp]);
                break;
        case RTW89_BAND_6G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a6_idxrxgain[gp]);
                rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a6_idxattc2[gp]);
                break;
        }

        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp);
        fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);

        if (path == RF_PATH_B)
                rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0);

        rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0);

        if (fail)
                iqk_info->nb_rxcfir[path] =
                        rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
                                              MASKDWORD) | 0x2;
        else
                iqk_info->nb_rxcfir[path] = 0x40000002;

        iqk_info->is_wb_rxiqk[path] = false;
        return fail;
}

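/* Wideband TX IQK: run a one-shot TXK for each power/track/gain group of
 * the current band.
 */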
static bool _txk_group_sel(struct rtw89_dev *rtwdev,
                           enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        bool fail;
        u8 gp;

        for (gp = 0; gp < TXK_GROUP_NR; gp++) {
                switch (iqk_info->iqk_band[path]) {
                case RTW89_BAND_2G:
                        rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
                                       _txk_g_power_range[gp]);
                        rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
                                       _txk_g_track_range[gp]);
                        rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
                                       _txk_g_gain_bb[gp]);
                        rtw89_phy_write32_mask(rtwdev,
                                               R_KIP_IQP + (path << 8),
                                               MASKDWORD, _txk_g_itqt[gp]);
                        break;
                case RTW89_BAND_5G:
                        rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
                                       _txk_a_power_range[gp]);
                        rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
                                       _txk_a_track_range[gp]);
                        rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
                                       _txk_a_gain_bb[gp]);
                        rtw89_phy_write32_mask(rtwdev,
                                               R_KIP_IQP + (path << 8),
                                               MASKDWORD, _txk_a_itqt[gp]);
                        break;
                case RTW89_BAND_6G:
                        rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
                                       _txk_a6_power_range[gp]);
                        rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
                                       _txk_a6_track_range[gp]);
                        rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
                                       _txk_a6_gain_bb[gp]);
                        rtw89_phy_write32_mask(rtwdev,
                                               R_KIP_IQP + (path << 8),
                                               MASKDWORD, _txk_a6_itqt[gp]);
                        break;
                default:
                        break;
                }
                rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
                                       B_CFIR_LUT_SEL, 0x1);
                rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
                                       B_CFIR_LUT_SET, 0x1);
                rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
                                       B_CFIR_LUT_G2, 0x0);
                rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
                                       B_CFIR_LUT_GP, gp + 1);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b);
                rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
                fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
        }

        if (fail) {
                iqk_info->nb_txcfir[path] = 0x40000002;
                iqk_info->is_wb_txiqk[path] = false;
        } else {
                iqk_info->nb_txcfir[path] = 0x40000000;
                iqk_info->is_wb_txiqk[path] = true;
        }

        return fail;
}

static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
                       enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        bool fail;
        u8 gp = 0x2;

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_g_power_range[gp]);
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_g_track_range[gp]);
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_g_gain_bb[gp]);
                rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                       MASKDWORD, _txk_g_itqt[gp]);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a_power_range[gp]);
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a_track_range[gp]);
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a_gain_bb[gp]);
                rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                       MASKDWORD, _txk_a_itqt[gp]);
                break;
        case RTW89_BAND_6G:
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a6_power_range[gp]);
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a6_track_range[gp]);
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a6_gain_bb[gp]);
                rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                       MASKDWORD, _txk_a6_itqt[gp]);
                break;
        default:
                break;
        }

        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp + 1);
        rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b);
        rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
        fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);

        if (!fail)
                iqk_info->nb_txcfir[path] =
                        rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8),
                                              MASKDWORD) | 0x2;
        else
                iqk_info->nb_txcfir[path] = 0x40000002;

        iqk_info->is_wb_txiqk[path] = false;

        return fail;
}

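/* Check the LOK result: the calibrated core and vbuffer I/Q DAC codes
 * must lie inside their valid ranges, otherwise the LOK failed.
 */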
static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
{
        struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u8 idx = rfk_mcc->table_idx;
        bool is_fail1, is_fail2;
        u32 val;
        u32 core_i;
        u32 core_q;
        u32 vbuff_i;
        u32 vbuff_q;

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        val = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
        core_i = FIELD_GET(RR_TXMO_COI, val);
        core_q = FIELD_GET(RR_TXMO_COQ, val);

        if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
                is_fail1 = true;
        else
                is_fail1 = false;

        iqk_info->lok_idac[idx][path] = val;

        val = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK);
        vbuff_i = FIELD_GET(RR_LOKVB_COI, val);
        vbuff_q = FIELD_GET(RR_LOKVB_COQ, val);

        if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d)
                is_fail2 = true;
        else
                is_fail2 = false;

        iqk_info->lok_vbuf[idx][path] = val;

        return is_fail1 || is_fail2;
}

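/* LOK flow: coarse FLOK, vbuffer, fine FLOK, then vbuffer again at large
 * RF gain, ending with a range check of the resulting codes.
 */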
static bool _iqk_lok(struct rtw89_dev *rtwdev,
                     enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u8 tmp_id = 0x0;
        bool fail = false;
        bool tmp = false;

        /* Step 0: Init RF gain & tone idx = 8.25 MHz */
        rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, IQK_DF4_TXT_8_25MHZ);

        /* Step 1 START: _lok_coarse_fine_wi_swap */
        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
                rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                       B_KIP_IQP_IQSW, 0x9);
                tmp_id = ID_G_FLOK_COARSE;
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
                rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                       B_KIP_IQP_IQSW, 0x9);
                tmp_id = ID_A_FLOK_COARSE;
                break;
        case RTW89_BAND_6G:
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
                rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                       B_KIP_IQP_IQSW, 0x9);
                tmp_id = ID_A_FLOK_COARSE;
                break;
        default:
                break;
        }
        tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id);
        iqk_info->lok_cor_fail[0][path] = tmp;

        /* Step 2 */
        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
                rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                       B_KIP_IQP_IQSW, 0x1b);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
                rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                       B_KIP_IQP_IQSW, 0x1b);
                break;
        case RTW89_BAND_6G:
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
                rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                       B_KIP_IQP_IQSW, 0x1b);
                break;
        default:
                break;
        }
        tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);

        /* Step 3 */
        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
                rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                       B_KIP_IQP_IQSW, 0x9);
                tmp_id = ID_G_FLOK_FINE;
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
                rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                       B_KIP_IQP_IQSW, 0x9);
                tmp_id = ID_A_FLOK_FINE;
                break;
        case RTW89_BAND_6G:
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
                rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                       B_KIP_IQP_IQSW, 0x9);
                tmp_id = ID_A_FLOK_FINE;
                break;
        default:
                break;
        }
        tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id);
        iqk_info->lok_fin_fail[0][path] = tmp;

        /* Step 4 large rf gain */
        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
        default:
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
                rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                       B_KIP_IQP_IQSW, 0x1b);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
                rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                       B_KIP_IQP_IQSW, 0x1b);
                break;
        case RTW89_BAND_6G:
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
                rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                       B_KIP_IQP_IQSW, 0x1b);
                break;
        }
        tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);
        fail = _lok_finetune_check(rtwdev, path);

        return fail;
}

static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
        default:
                rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
                rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
                rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
                rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
                rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
                rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
                rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
                               0x403e0 | iqk_info->syn1to2);
                fsleep(10);
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0);
                rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1);
                rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
                rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
                rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
                rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
                               0x403e0 | iqk_info->syn1to2);
                fsleep(10);
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
                break;
        case RTW89_BAND_6G:
                rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0);
                rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1);
                rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
                rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
                rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
                rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
                               0x403e0 | iqk_info->syn1to2);
                fsleep(10);
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
                rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
                break;
        }
}

static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
                          u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u32 tmp;
        bool flag;

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %lu\n", path,
                    ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]));
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail = %d\n", path,
                    iqk_info->lok_cor_fail[0][path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail = %d\n", path,
                    iqk_info->lok_fin_fail[0][path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path,
                    iqk_info->iqk_tx_fail[0][path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail = %d\n", path,
                    iqk_info->iqk_rx_fail[0][path]);

        flag = iqk_info->lok_cor_fail[0][path];
        rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag);
        flag = iqk_info->lok_fin_fail[0][path];
        rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag);
        flag = iqk_info->iqk_tx_fail[0][path];
        rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag);
        flag = iqk_info->iqk_rx_fail[0][path];
        rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag);

        tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
        iqk_info->bp_iqkenable[path] = tmp;
        tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
        iqk_info->bp_txkresult[path] = tmp;
        tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
        iqk_info->bp_rxkresult[path] = tmp;

        rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT,
                               iqk_info->iqk_times);

        tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4));
        if (tmp != 0x0)
                iqk_info->iqk_fail_cnt++;
        rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4),
                               iqk_info->iqk_fail_cnt);
}

static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

        _iqk_txk_setting(rtwdev, path);
        iqk_info->lok_fail[path] = _iqk_lok(rtwdev, phy_idx, path);

        if (iqk_info->is_nbiqk)
                iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
        else
                iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);

        _iqk_rxk_setting(rtwdev, path);
        if (iqk_info->is_nbiqk)
                iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
        else
                iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);

        _iqk_info_iqk(rtwdev, phy_idx, path);
}

static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
                             enum rtw89_phy_idx phy, u8 path,
                             enum rtw89_chanctx_idx chanctx_idx)
{
        const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

        iqk_info->iqk_band[path] = chan->band_type;
        iqk_info->iqk_bw[path] = chan->band_width;
        iqk_info->iqk_ch[path] = chan->channel;

        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
                    iqk_info->iqk_band[path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n",
                    path, iqk_info->iqk_bw[path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n",
                    path, iqk_info->iqk_ch[path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy,
                    rtwdev->dbcc_en ? "on" : "off",
                    iqk_info->iqk_band[path] == 0 ? "2G" :
                    iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
                    iqk_info->iqk_ch[path],
                    iqk_info->iqk_bw[path] == 0 ? "20M" :
                    iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");
        if (!rtwdev->dbcc_en)
                iqk_info->syn1to2 = 0x1;
        else
                iqk_info->syn1to2 = 0x3;

        rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852C_IQK_VER);
        rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16),
                               iqk_info->iqk_band[path]);
        rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16),
                               iqk_info->iqk_bw[path]);
        rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16),
                               iqk_info->iqk_ch[path]);

        rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_NCTLV, RTW8852C_NCTL_VER);
}

static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
                           u8 path)
{
        _iqk_by_path(rtwdev, phy_idx, path);
}

static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        bool fail;

        rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
                               iqk_info->nb_txcfir[path]);
        rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
                               iqk_info->nb_rxcfir[path]);
        rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
                               0x00001219 + (path << 4));
        fsleep(200);
        fail = _iqk_check_cal(rtwdev, path, 0x12);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] restore fail = %x\n", fail);

        rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
        rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
        rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);

        rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
        rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
        rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
}

static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
                               enum rtw89_phy_idx phy_idx, u8 path)
{
        rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
                                 &rtw8852c_iqk_afebb_restore_defs_a_tbl,
                                 &rtw8852c_iqk_afebb_restore_defs_b_tbl);

        rtw8852c_disable_rxagc(rtwdev, path, 0x1);
}

static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
        struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
        u8 idx = rfk_mcc->table_idx;

        rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx);
        rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
        rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
        rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
}

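/* Configure MAC/BB/AFE for IQK: stop the ADC FIFO, force 960M DAC and
 * 1920M ADC clocks, and bring up the analog blocks used by calibration.
 */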
static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
                               enum rtw89_phy_idx phy_idx, u8 path)
{
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__);

        /* 01_BB_AFE_for DPK_S0_20210820 */
        rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);

        /* disable rxagc */
        rtw8852c_disable_rxagc(rtwdev, path, 0x0);
        rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), MASKDWORD, 0xf801fffd);
        rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_DIS, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DAC_VAL, 0x1);

        rtw8852c_txck_force(rtwdev, path, true, DAC_960M);
        rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_GDIS, 0x1);

        rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
        rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_ACK_VAL, 0x2);

        rtw89_phy_write32_mask(rtwdev, R_P0_NRBW | (path << 13), B_P0_NRBW_DBG, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1);
}

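/* RCK: trigger the RF RC calibration, poll for completion and write the
 * resulting code back into RR_RCKC.
 */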
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
        u32 rf_reg5, rck_val = 0;
        u32 val;
        int ret;

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

        rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

        rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
        rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n",
                    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

        /* RCK trigger */
        rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);

        ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20,
                                       false, rtwdev, path, 0x1c, BIT(3));
        if (ret)
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n");

        rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
        rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);

        rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[RCK] RF 0x1b / 0x1c = 0x%x / 0x%x\n",
                    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK),
                    rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK));
}

_iqk_init(struct rtw89_dev * rtwdev)1490 static void _iqk_init(struct rtw89_dev *rtwdev)
1491 {
1492 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1493 u8 ch, path;
1494
1495 rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD);
1496 if (iqk_info->is_iqk_init)
1497 return;
1498
1499 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1500 iqk_info->is_iqk_init = true;
1501 iqk_info->is_nbiqk = false;
1502 iqk_info->iqk_fft_en = false;
1503 iqk_info->iqk_sram_en = false;
1504 iqk_info->iqk_cfir_en = false;
1505 iqk_info->iqk_xym_en = false;
1506 iqk_info->iqk_times = 0x0;
1507
1508 for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) {
1509 iqk_info->iqk_channel[ch] = 0x0;
1510 for (path = 0; path < RTW8852C_IQK_SS; path++) {
1511 iqk_info->lok_cor_fail[ch][path] = false;
1512 iqk_info->lok_fin_fail[ch][path] = false;
1513 iqk_info->iqk_tx_fail[ch][path] = false;
1514 iqk_info->iqk_rx_fail[ch][path] = false;
1515 iqk_info->iqk_mcc_ch[ch][path] = 0x0;
1516 iqk_info->iqk_table_idx[path] = 0x0;
1517 }
1518 }
1519 }
1520
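/* Run the full IQK sequence for one RF path: notify BT coex, snapshot the
 * BB/RF registers, apply the MAC/BB/AFE setup and KIP preset, perform the
 * one-shot IQK, then restore everything in reverse order.
 */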
1521 static void _doiqk(struct rtw89_dev *rtwdev, bool force,
1522 enum rtw89_phy_idx phy_idx, u8 path,
1523 enum rtw89_chanctx_idx chanctx_idx)
1524 {
1525 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1526 u32 backup_bb_val[BACKUP_BB_REGS_NR];
1527 u32 backup_rf_val[RTW8852C_IQK_SS][BACKUP_RF_REGS_NR];
1528 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
1529
1530 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
1531
1532 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1533 "[IQK]==========IQK start!!!!!==========\n");
1534 iqk_info->iqk_times++;
1535 iqk_info->version = RTW8852C_IQK_VER;
1536
1537 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
1538 _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
1539 _rfk_backup_bb_reg(rtwdev, backup_bb_val);
1540 _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
1541 _iqk_macbb_setting(rtwdev, phy_idx, path);
1542 _iqk_preset(rtwdev, path);
1543 _iqk_start_iqk(rtwdev, phy_idx, path);
1544 _iqk_restore(rtwdev, path);
1545 _iqk_afebb_restore(rtwdev, phy_idx, path);
1546 _rfk_restore_bb_reg(rtwdev, backup_bb_val);
1547 _rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path);
1548 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
1549 }
1550
1551 static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
1552 enum rtw89_chanctx_idx chanctx_idx)
1553 {
1554 switch (_kpath(rtwdev, phy_idx)) {
1555 case RF_A:
1556 _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
1557 break;
1558 case RF_B:
1559 _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
1560 break;
1561 case RF_AB:
1562 _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
1563 _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
1564 break;
1565 default:
1566 break;
1567 }
1568 }
1569
1570 static void _rx_dck_value_rewrite(struct rtw89_dev *rtwdev, u8 path, u8 addr,
1571 u8 val_i, u8 val_q)
1572 {
1573 u32 ofst_val;
1574
1575 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1576 "[RX_DCK] rewrite val_i = 0x%x, val_q = 0x%x\n", val_i, val_q);
1577
1578 /* val_i and val_q are 7 bits, and target is 6 bits. */
1579 ofst_val = u32_encode_bits(val_q >> 1, RR_LUTWD0_MB) |
1580 u32_encode_bits(val_i >> 1, RR_LUTWD0_LB);
1581
1582 rtw89_write_rf(rtwdev, path, RR_LUTPLL, RR_CAL_RW, 0x1);
1583 rtw89_write_rf(rtwdev, path, RR_RFC, RR_WCAL, 0x1);
1584 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x1);
1585 rtw89_write_rf(rtwdev, path, RR_LUTWA, MASKBYTE0, addr);
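/* RR_LUTWD0 is written twice, presumably so the LUT write latches reliably */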
1586 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ofst_val);
1587 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ofst_val);
1588 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
1589 rtw89_write_rf(rtwdev, path, RR_RFC, RR_WCAL, 0x0);
1590 rtw89_write_rf(rtwdev, path, RR_LUTPLL, RR_CAL_RW, 0x0);
1591
1592 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] Final val_i = 0x%x, val_q = 0x%x\n",
1593 u32_get_bits(ofst_val, RR_LUTWD0_LB) << 1,
1594 u32_get_bits(ofst_val, RR_LUTWD0_MB) << 1);
1595 }
1596
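/* Compare the DC offsets captured at calibration time (the "_bs" baseline
 * addresses) with the currently applied ones, for both even and odd gain
 * entries; any I or Q delta above 10 codes requests a re-calibration.
 */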
1597 static bool _rx_dck_rek_check(struct rtw89_dev *rtwdev, u8 path)
1598 {
1599 u8 i_even_bs, q_even_bs;
1600 u8 i_odd_bs, q_odd_bs;
1601 u8 i_even, q_even;
1602 u8 i_odd, q_odd;
1603 const u8 th = 10;
1604 u8 i;
1605
1606 for (i = 0; i < RF_PATH_NUM_8852C; i++) {
1607 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i]);
1608 i_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1609 q_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1610 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1611 "[RX_DCK] Gain[0x%x] i_even_bs/ q_even_bs = 0x%x/ 0x%x\n",
1612 _dck_addr_bs[i], i_even_bs, q_even_bs);
1613
1614 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i]);
1615 i_even = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1616 q_even = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1617 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1618 "[RX_DCK] Gain[0x%x] i_even/ q_even = 0x%x/ 0x%x\n",
1619 _dck_addr[i], i_even, q_even);
1620
1621 if (abs(i_even_bs - i_even) > th || abs(q_even_bs - q_even) > th)
1622 return true;
1623
1624 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i] + 1);
1625 i_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1626 q_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1627 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1628 "[RX_DCK] Gain[0x%x] i_odd_bs/ q_odd_bs = 0x%x/ 0x%x\n",
1629 _dck_addr_bs[i] + 1, i_odd_bs, q_odd_bs);
1630
1631 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i] + 1);
1632 i_odd = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1633 q_odd = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1634 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1635 "[RX_DCK] Gain[0x%x] i_odd/ q_odd = 0x%x/ 0x%x\n",
1636 _dck_addr[i] + 1, i_odd, q_odd);
1637
1638 if (abs(i_odd_bs - i_odd) > th || abs(q_odd_bs - q_odd) > th)
1639 return true;
1640 }
1641
1642 return false;
1643 }
1644
1645 static void _rx_dck_fix_if_need(struct rtw89_dev *rtwdev, u8 path, u8 addr,
1646 u8 val_i_bs, u8 val_q_bs, u8 val_i, u8 val_q)
1647 {
1648 const u8 th = 10;
1649
1650 if (abs(val_i_bs - val_i) <= th && abs(val_q_bs - val_q) <= th) {
1651 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] offset check PASS!!\n");
1652 return;
1653 }
1654
1655 if (abs(val_i_bs - val_i) > th) {
1656 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1657 "[RX_DCK] val_i over TH (0x%x / 0x%x)\n", val_i_bs, val_i);
1658 val_i = val_i_bs;
1659 }
1660
1661 if (abs(val_q_bs - val_q) > th) {
1662 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1663 "[RX_DCK] val_q over TH (0x%x / 0x%x)\n", val_q_bs, val_q);
1664 val_q = val_q_bs;
1665 }
1666
1667 _rx_dck_value_rewrite(rtwdev, path, addr, val_i, val_q);
1668 }
1669
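/* Lightweight alternative to a full re-calibration: copy the baseline I/Q
 * offsets back over any entry whose delta exceeds the threshold.
 */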
1670 static void _rx_dck_recover(struct rtw89_dev *rtwdev, u8 path)
1671 {
1672 u8 i_even_bs, q_even_bs;
1673 u8 i_odd_bs, q_odd_bs;
1674 u8 i_even, q_even;
1675 u8 i_odd, q_odd;
1676 u8 i;
1677
1678 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] ===> recovery\n");
1679
1680 for (i = 0; i < RF_PATH_NUM_8852C; i++) {
1681 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i]);
1682 i_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1683 q_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1684
1685 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i] + 1);
1686 i_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1687 q_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1688
1689 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1690 "[RX_DCK] Gain[0x%x] i_even_bs/ q_even_bs = 0x%x/ 0x%x\n",
1691 _dck_addr_bs[i], i_even_bs, q_even_bs);
1692
1693 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i]);
1694 i_even = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1695 q_even = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1696
1697 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1698 "[RX_DCK] Gain[0x%x] i_even/ q_even = 0x%x/ 0x%x\n",
1699 _dck_addr[i], i_even, q_even);
1700 _rx_dck_fix_if_need(rtwdev, path, _dck_addr[i],
1701 i_even_bs, q_even_bs, i_even, q_even);
1702
1703 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1704 "[RX_DCK] Gain[0x%x] i_odd_bs/ q_odd_bs = 0x%x/ 0x%x\n",
1705 _dck_addr_bs[i] + 1, i_odd_bs, q_odd_bs);
1706
1707 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i] + 1);
1708 i_odd = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1709 q_odd = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1710
1711 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1712 "[RX_DCK] Gain[0x%x] i_odd/ q_odd = 0x%x/ 0x%x\n",
1713 _dck_addr[i] + 1, i_odd, q_odd);
1714 _rx_dck_fix_if_need(rtwdev, path, _dck_addr[i] + 1,
1715 i_odd_bs, q_odd_bs, i_odd, q_odd);
1716 }
1717 }
1718
1719 static void _rx_dck_toggle(struct rtw89_dev *rtwdev, u8 path)
1720 {
1721 int ret;
1722 u32 val;
1723
1724 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
1725 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
1726
1727 ret = read_poll_timeout_atomic(rtw89_read_rf, val, val,
1728 2, 2000, false, rtwdev, path,
1729 RR_DCK1, RR_DCK1_DONE);
1730 if (ret)
1731 rtw89_warn(rtwdev, "[RX_DCK] S%d RXDCK timeout\n", path);
1732 else
1733 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] S%d RXDCK finish\n", path);
1734
1735 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
1736 }
1737
1738 static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
1739 bool is_afe)
1740 {
1741 u8 res;
1742
1743 rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
1744
1745 _rx_dck_toggle(rtwdev, path);
1746 if (rtw89_read_rf(rtwdev, path, RR_DCKC, RR_DCKC_CHK) == 0)
1747 return;
1748 res = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_DONE);
1749 if (res > 1) {
1750 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, res);
1751 _rx_dck_toggle(rtwdev, path);
1752 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, 0x1);
1753 }
1754 }
1755
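/* RX DCK is run on a surrogate channel away from the operating one. In 5G,
 * channels 100-144 map up by 32 (or by 33 once the result would pass 144,
 * landing on the 149-177 raster, which is offset by one), and 149-177 map
 * back down by 33; 6G shifts by +/-32; 2G keeps the current channel.
 */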
1756 static
1757 u8 _rx_dck_channel_calc(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan)
1758 {
1759 u8 target_ch = 0;
1760
1761 if (chan->band_type == RTW89_BAND_5G) {
1762 if (chan->channel >= 36 && chan->channel <= 64) {
1763 target_ch = 100;
1764 } else if (chan->channel >= 100 && chan->channel <= 144) {
1765 target_ch = chan->channel + 32;
1766 if (target_ch > 144)
1767 target_ch = chan->channel + 33;
1768 } else if (chan->channel >= 149 && chan->channel <= 177) {
1769 target_ch = chan->channel - 33;
1770 }
1771 } else if (chan->band_type == RTW89_BAND_6G) {
1772 if (chan->channel >= 1 && chan->channel <= 125)
1773 target_ch = chan->channel + 32;
1774 else
1775 target_ch = chan->channel - 32;
1776 } else {
1777 target_ch = chan->channel;
1778 }
1779
1780 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1781 "[RX_DCK] cur_ch / target_ch = %d / %d\n",
1782 chan->channel, target_ch);
1783
1784 return target_ch;
1785 }
1786
1787 #define RTW8852C_RF_REL_VERSION 34
1788 #define RTW8852C_DPK_VER 0xf
1789 #define RTW8852C_DPK_TH_AVG_NUM 4
1790 #define RTW8852C_DPK_RF_PATH 2
1791 #define RTW8852C_DPK_KIP_REG_NUM 7
1792 #define RTW8852C_DPK_RXSRAM_DBG 0
1793
1794 enum rtw8852c_dpk_id {
1795 LBK_RXIQK = 0x06,
1796 SYNC = 0x10,
1797 MDPK_IDL = 0x11,
1798 MDPK_MPA = 0x12,
1799 GAIN_LOSS = 0x13,
1800 GAIN_CAL = 0x14,
1801 DPK_RXAGC = 0x15,
1802 KIP_PRESET = 0x16,
1803 KIP_RESTORE = 0x17,
1804 DPK_TXAGC = 0x19,
1805 D_KIP_PRESET = 0x28,
1806 D_TXAGC = 0x29,
1807 D_RXAGC = 0x2a,
1808 D_SYNC = 0x2b,
1809 D_GAIN_LOSS = 0x2c,
1810 D_MDPK_IDL = 0x2d,
1811 D_GAIN_NORM = 0x2f,
1812 D_KIP_THERMAL = 0x30,
1813 D_KIP_RESTORE = 0x31
1814 };
1815
1816 #define DPK_TXAGC_LOWER 0x2e
1817 #define DPK_TXAGC_UPPER 0x3f
1818 #define DPK_TXAGC_INVAL 0xff
1819
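/* State machine for the DPK AGC loop: after SYNC/digital-gain readout, a
 * gain-loss measurement decides whether TX power must step down
 * (GL_GT_CRITERION, -3dB), step up (GL_LT_CRITERION, +2dB), or whether the
 * RX bias can simply absorb the residual (SET_TX_GAIN).
 */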
1820 enum dpk_agc_step {
1821 DPK_AGC_STEP_SYNC_DGAIN,
1822 DPK_AGC_STEP_GAIN_LOSS_IDX,
1823 DPK_AGC_STEP_GL_GT_CRITERION,
1824 DPK_AGC_STEP_GL_LT_CRITERION,
1825 DPK_AGC_STEP_SET_TX_GAIN,
1826 };
1827
1828 enum dpk_pas_result {
1829 DPK_PAS_NOR,
1830 DPK_PAS_GT,
1831 DPK_PAS_LT,
1832 };
1833
1834 static void _rf_direct_cntrl(struct rtw89_dev *rtwdev,
1835 enum rtw89_rf_path path, bool is_bybb)
1836 {
1837 if (is_bybb)
1838 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
1839 else
1840 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1841 }
1842
1843 static void _dpk_onoff(struct rtw89_dev *rtwdev,
1844 enum rtw89_rf_path path, bool off);
1845
1846 static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[],
1847 u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path)
1848 {
1849 u8 i;
1850
1851 for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) {
1852 reg_bkup[path][i] =
1853 rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD);
1854
1855 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
1856 reg[i] + (path << 8), reg_bkup[path][i]);
1857 }
1858 }
1859
1860 static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[],
1861 u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path)
1862 {
1863 u8 i;
1864
1865 for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) {
1866 rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8),
1867 MASKDWORD, reg_bkup[path][i]);
1868 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
1869 reg[i] + (path << 8), reg_bkup[path][i]);
1870 }
1871 }
1872
1873 static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
1874 enum rtw89_rf_path path, enum rtw8852c_dpk_id id)
1875 {
1876 u16 dpk_cmd;
1877 u32 val;
1878 int ret;
1879
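/* one-shot command: ID in bits 15:8, path select in the low byte
 * (0x19 for path A, 0x2b for path B), e.g. D_KIP_PRESET on path B = 0x282b
 */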
1880 dpk_cmd = (u16)((id << 8) | (0x19 + path * 0x12));
1881
1882 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
1883
1884 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
1885 10, 20000, false, rtwdev, 0xbff8, MASKBYTE0);
1886 udelay(10);
1887 rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
1888
1889 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1890 "[DPK] one-shot for %s = 0x%x (ret=%d)\n",
1891 id == 0x06 ? "LBK_RXIQK" :
1892 id == 0x10 ? "SYNC" :
1893 id == 0x11 ? "MDPK_IDL" :
1894 id == 0x12 ? "MDPK_MPA" :
1895 id == 0x13 ? "GAIN_LOSS" : "PWR_CAL",
1896 dpk_cmd, ret);
1897
1898 if (ret) {
1899 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1900 "[DPK] one-shot over 20ms!!!!\n");
1901 return 1;
1902 }
1903
1904 return 0;
1905 }
1906
1907 static void _dpk_information(struct rtw89_dev *rtwdev,
1908 enum rtw89_phy_idx phy,
1909 enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
1910 {
1911 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
1912 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
1913
1914 u8 kidx = dpk->cur_idx[path];
1915
1916 dpk->bp[path][kidx].band = chan->band_type;
1917 dpk->bp[path][kidx].ch = chan->channel;
1918 dpk->bp[path][kidx].bw = chan->band_width;
1919
1920 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1921 "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
1922 path, dpk->cur_idx[path], phy,
1923 rtwdev->is_tssi_mode[path] ? "on" : "off",
1924 rtwdev->dbcc_en ? "on" : "off",
1925 dpk->bp[path][kidx].band == 0 ? "2G" :
1926 dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
1927 dpk->bp[path][kidx].ch,
1928 dpk->bp[path][kidx].bw == 0 ? "20M" :
1929 dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
1930 }
1931
1932 static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
1933 enum rtw89_phy_idx phy,
1934 enum rtw89_rf_path path, u8 kpath)
1935 {
1936 /*1. Keep ADC_fifo reset*/
1937 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
1938 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
1939 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
1940 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
1941
1942 /*2. BB for IQK DBG mode*/
1943 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0xd801dffd);
1944
1945 /*3.Set DAC clk*/
1946 rtw8852c_txck_force(rtwdev, path, true, DAC_960M);
1947
1948 /*4. Set ADC clk*/
1949 rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
1950 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
1951 B_P0_NRBW_DBG, 0x1);
1952 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f);
1953 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13);
1954 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001);
1955 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041);
1956
1957 /*5. ADDA fifo rst*/
1958 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1);
1959 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1);
1960
1961 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE setting\n", path);
1962 }
1963
1964 static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, u8 path)
1965 {
1966 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
1967 B_P0_NRBW_DBG, 0x0);
1968 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
1969 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
1970 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
1971 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
1972 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0x00000000);
1973 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), B_P0_TXCK_ALL, 0x00);
1974 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x0);
1975 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x0);
1976
1977 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE restore\n", path);
1978 }
1979
1980 static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
1981 enum rtw89_rf_path path, bool is_pause)
1982 {
1983 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
1984 B_P0_TSSI_TRK_EN, is_pause);
1985
1986 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
1987 is_pause ? "pause" : "resume");
1988 }
1989
1990 static void _dpk_kip_control_rfc(struct rtw89_dev *rtwdev, u8 path, bool ctrl_by_kip)
1991 {
1992 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_IQK_RFC_ON, ctrl_by_kip);
1993 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RFC is controlled by %s\n",
1994 ctrl_by_kip ? "KIP" : "BB");
1995 }
1996
1997 static void _dpk_txpwr_bb_force(struct rtw89_dev *rtwdev, u8 path, bool force)
1998 {
1999 rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_ON, force);
2000 rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H + (path << 13), B_TXPWRB_RDY, force);
2001
2002 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d txpwr_bb_force %s\n",
2003 path, force ? "on" : "off");
2004 }
2005
2006 static void _dpk_kip_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2007 enum rtw89_rf_path path)
2008 {
2009 _dpk_one_shot(rtwdev, phy, path, D_KIP_RESTORE);
2010 _dpk_kip_control_rfc(rtwdev, path, false);
2011 _dpk_txpwr_bb_force(rtwdev, path, false);
2012 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
2013 }
2014
2015 static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
2016 enum rtw89_phy_idx phy,
2017 enum rtw89_rf_path path)
2018 {
2019 #define RX_TONE_IDX 0x00250025 /* Q.2 9.25MHz */
2020 u8 cur_rxbb;
2021 u32 rf_11, reg_81cc;
2022
2023 rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1);
2024 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
2025
2026 _dpk_kip_control_rfc(rtwdev, path, false);
2027
2028 cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
2029 rf_11 = rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK);
2030 reg_81cc = rtw89_phy_read32_mask(rtwdev, R_KIP_IQP + (path << 8),
2031 B_KIP_IQP_SW);
2032
2033 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
2034 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x3);
2035 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0xd);
2036 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, 0x1f);
2037
2038 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x12);
2039 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, 0x3);
2040
2041 _dpk_kip_control_rfc(rtwdev, path, true);
2042
2043 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, MASKDWORD, RX_TONE_IDX);
2044
2045 _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
2046
2047 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
2048 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD));
2049
2050 _dpk_kip_control_rfc(rtwdev, path, false);
2051
2052 rtw89_write_rf(rtwdev, path, RR_TXIG, RFREG_MASK, rf_11);
2053 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, cur_rxbb);
2054 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, reg_81cc);
2055
2056 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
2057 rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
2058 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
2059
2060 _dpk_kip_control_rfc(rtwdev, path, true);
2061 }
2062
2063 static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
2064 enum rtw89_rf_path path, u8 kidx)
2065 {
2066 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2067
2068 if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
2069 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
2070 0x50121 | BIT(rtwdev->dbcc_en));
2071 rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
2072 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x2);
2073 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4);
2074 rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
2075 rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
2076
2077 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2078 "[DPK] RF 0x0/0x83/0x9e/0x1a/0xdf/0x1001a = 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x\n",
2079 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
2080 rtw89_read_rf(rtwdev, path, RR_RXBB, RFREG_MASK),
2081 rtw89_read_rf(rtwdev, path, RR_TIA, RFREG_MASK),
2082 rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK),
2083 rtw89_read_rf(rtwdev, path, RR_LUTDBG, RFREG_MASK),
2084 rtw89_read_rf(rtwdev, path, 0x1001a, RFREG_MASK));
2085 } else {
2086 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
2087 0x50101 | BIT(rtwdev->dbcc_en));
2088 rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
2089
2090 if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161)
2091 rtw89_write_rf(rtwdev, path, RR_IQGEN, RR_IQGEN_BIAS, 0x8);
2092
2093 rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
2094 rtw89_write_rf(rtwdev, path, RR_TXAC, RR_TXAC_IQG, 0x8);
2095
2096 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_ATT, 0x0);
2097 rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT2, 0x3);
2098 rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
2099 rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
2100
2101 if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160)
2102 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_EBW, 0x0);
2103 }
2104 }
2105
2106 static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
2107 {
2108 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2109
2110 if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160) {
2111 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x3);
2112 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0x0180ff30);
2113 } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) {
2114 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0);
2115 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xffe0fa00);
2116 } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) {
2117 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
2118 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xff4009e0);
2119 } else {
2120 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
2121 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xf9f007d0);
2122 }
2123 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
2124 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160 ? "160M" :
2125 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
2126 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
2127 }
2128
2129 static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
2130 {
2131 #define DPK_SYNC_TH_DC_I 200
2132 #define DPK_SYNC_TH_DC_Q 200
2133 #define DPK_SYNC_TH_CORR 170
2134 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2135 u16 dc_i, dc_q;
2136 u8 corr_val, corr_idx, rxbb;
2137 u8 rxbb_ov;
2138
2139 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
2140
2141 corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
2142 corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);
2143
2144 dpk->corr_idx[path][kidx] = corr_idx;
2145 dpk->corr_val[path][kidx] = corr_val;
2146
2147 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
2148
2149 dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
2150 dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
2151
2152 dc_i = abs(sign_extend32(dc_i, 11));
2153 dc_q = abs(sign_extend32(dc_q, 11));
2154
2155 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2156 "[DPK] S%d Corr_idx/ Corr_val /DC I/Q, = %d / %d / %d / %d\n",
2157 path, corr_idx, corr_val, dc_i, dc_q);
2158
2159 dpk->dc_i[path][kidx] = dc_i;
2160 dpk->dc_q[path][kidx] = dc_q;
2161
2162 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x8);
2163 rxbb = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB);
2164
2165 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x31);
2166 rxbb_ov = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXOV);
2167
2168 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2169 "[DPK] S%d RXBB/ RXAGC_done /RXBB_ovlmt = %d / %d / %d\n",
2170 path, rxbb,
2171 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DONE),
2172 rxbb_ov);
2173
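/* sync is considered failed when the residual DC exceeds 200 codes on
 * either rail or the correlation drops below 170
 */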
2174 if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
2175 corr_val < DPK_SYNC_TH_CORR)
2176 return true;
2177 else
2178 return false;
2179 }
2180
2181 static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
2182 {
2183 u16 dgain = 0x0;
2184
2185 rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
2186
2187 dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
2188
2189 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain, dgain);
2190
2191 return dgain;
2192 }
2193
2194 static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
2195 {
2196 u8 result;
2197
2198 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
2199 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
2200
2201 result = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
2202
2203 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp GL = %d\n", result);
2204
2205 return result;
2206 }
2207
2208 static void _dpk_kset_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
2209 {
2210 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2211
2212 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0x10);
2213 dpk->cur_k_set =
2214 rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 0xE0000000) - 1;
2215 }
2216
2217 static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2218 enum rtw89_rf_path path, u8 dbm, bool set_from_bb)
2219 {
2220 if (set_from_bb) {
2221 dbm = clamp_t(u8, dbm, 7, 24);
2222 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set S%d txagc to %ddBm\n", path, dbm);
2223 rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_VAL, dbm << 2);
2224 }
2225 _dpk_one_shot(rtwdev, phy, path, D_TXAGC);
2226 _dpk_kset_query(rtwdev, path);
2227 }
2228
2229 static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2230 enum rtw89_rf_path path, u8 kidx)
2231 {
2232 _dpk_one_shot(rtwdev, phy, path, D_GAIN_LOSS);
2233 _dpk_kip_set_txagc(rtwdev, phy, path, 0xff, false);
2234
2235 rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A1, 0x0);
2236 rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A0, 0x0);
2237
2238 return _dpk_gainloss_read(rtwdev);
2239 }
2240
2241 static enum dpk_pas_result _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
2242 {
2243 u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
2244 u32 val1_sqrt_sum, val2_sqrt_sum;
2245 u8 i;
2246
2247 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
2248 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
2249 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);
2250
2251 if (is_check) {
2252 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
2253 val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
2254 val1_i = abs(sign_extend32(val1_i, 11));
2255 val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
2256 val1_q = abs(sign_extend32(val1_q, 11));
2257
2258 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
2259 val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
2260 val2_i = abs(sign_extend32(val2_i, 11));
2261 val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
2262 val2_q = abs(sign_extend32(val2_q, 11));
2263
2264 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
2265 phy_div(val1_i * val1_i + val1_q * val1_q,
2266 val2_i * val2_i + val2_q * val2_q));
2267 } else {
2268 for (i = 0; i < 32; i++) {
2269 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
2270 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
2271 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
2272 }
2273 }
2274
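/* compare the first (0x00) and last (0x1f) PAS sample powers: the PA
 * response is flagged GT when the ratio reaches 1.6, LT when the last
 * sample is the larger one, and NOR in between
 */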
2275 val1_sqrt_sum = val1_i * val1_i + val1_q * val1_q;
2276 val2_sqrt_sum = val2_i * val2_i + val2_q * val2_q;
2277
2278 if (val1_sqrt_sum < val2_sqrt_sum)
2279 return DPK_PAS_LT;
2280 else if (val1_sqrt_sum >= val2_sqrt_sum * 8 / 5)
2281 return DPK_PAS_GT;
2282 else
2283 return DPK_PAS_NOR;
2284 }
2285
2286 static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2287 enum rtw89_rf_path path, u8 kidx)
2288 {
2289 _dpk_kip_control_rfc(rtwdev, path, false);
2290 rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD,
2291 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
2292 _dpk_kip_control_rfc(rtwdev, path, true);
2293
2294 _dpk_one_shot(rtwdev, phy, path, D_RXAGC);
2295
2296 return _dpk_sync_check(rtwdev, path, kidx);
2297 }
2298
2299 static void _dpk_read_rxsram(struct rtw89_dev *rtwdev)
2300 {
2301 u32 addr;
2302
2303 rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_pre_defs_tbl);
2304
2305 for (addr = 0; addr < 0x200; addr++) {
2306 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 | addr);
2307
2308 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RXSRAM[%03d] = 0x%07x\n", addr,
2309 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
2310 }
2311
2312 rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_post_defs_tbl);
2313 }
2314
2315 static void _dpk_bypass_rxiqc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
2316 {
2317 rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1);
2318 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000002);
2319
2320 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Bypass RXIQC\n");
2321 }
2322
2323 static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2324 enum rtw89_rf_path path, u8 kidx, u8 init_xdbm, u8 loss_only)
2325 {
2326 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2327 u8 step = DPK_AGC_STEP_SYNC_DGAIN;
2328 u8 tmp_dbm = init_xdbm, tmp_gl_idx = 0;
2329 u8 tmp_rxbb;
2330 u8 goout = 0, agc_cnt = 0;
2331 enum dpk_pas_result pas;
2332 u16 dgain = 0;
2333 bool is_fail = false;
2334 int limit = 200;
2335
2336 do {
2337 switch (step) {
2338 case DPK_AGC_STEP_SYNC_DGAIN:
2339 is_fail = _dpk_kip_set_rxagc(rtwdev, phy, path, kidx);
2340
2341 if (RTW8852C_DPK_RXSRAM_DBG)
2342 _dpk_read_rxsram(rtwdev);
2343
2344 if (is_fail) {
2345 goout = 1;
2346 break;
2347 }
2348
2349 dgain = _dpk_dgain_read(rtwdev);
2350
2351 if (dgain > 0x5fc || dgain < 0x556) {
2352 _dpk_one_shot(rtwdev, phy, path, D_SYNC);
2353 _dpk_dgain_read(rtwdev);
2354 }
2355
2356 if (agc_cnt == 0) {
2357 if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
2358 _dpk_bypass_rxiqc(rtwdev, path);
2359 else
2360 _dpk_lbk_rxiqk(rtwdev, phy, path);
2361 }
2362 step = DPK_AGC_STEP_GAIN_LOSS_IDX;
2363 break;
2364
2365 case DPK_AGC_STEP_GAIN_LOSS_IDX:
2366 tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx);
2367 pas = _dpk_pas_read(rtwdev, true);
2368
2369 if (pas == DPK_PAS_LT && tmp_gl_idx > 0)
2370 step = DPK_AGC_STEP_GL_LT_CRITERION;
2371 else if (pas == DPK_PAS_GT && tmp_gl_idx == 0)
2372 step = DPK_AGC_STEP_GL_GT_CRITERION;
2373 else if (tmp_gl_idx >= 7)
2374 step = DPK_AGC_STEP_GL_GT_CRITERION;
2375 else if (tmp_gl_idx == 0)
2376 step = DPK_AGC_STEP_GL_LT_CRITERION;
2377 else
2378 step = DPK_AGC_STEP_SET_TX_GAIN;
2379 break;
2380
2381 case DPK_AGC_STEP_GL_GT_CRITERION:
2382 if (tmp_dbm <= 7) {
2383 goout = 1;
2384 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@lower bound!!\n");
2385 } else {
2386 tmp_dbm = max_t(u8, tmp_dbm - 3, 7);
2387 _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true);
2388 }
2389 step = DPK_AGC_STEP_SYNC_DGAIN;
2390 agc_cnt++;
2391 break;
2392
2393 case DPK_AGC_STEP_GL_LT_CRITERION:
2394 if (tmp_dbm >= 24) {
2395 goout = 1;
2396 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@upper bound!!\n");
2397 } else {
2398 tmp_dbm = min_t(u8, tmp_dbm + 2, 24);
2399 _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true);
2400 }
2401 step = DPK_AGC_STEP_SYNC_DGAIN;
2402 agc_cnt++;
2403 break;
2404
2405 case DPK_AGC_STEP_SET_TX_GAIN:
2406 _dpk_kip_control_rfc(rtwdev, path, false);
2407 tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
2408 if (tmp_rxbb + tmp_gl_idx > 0x1f)
2409 tmp_rxbb = 0x1f;
2410 else
2411 tmp_rxbb = tmp_rxbb + tmp_gl_idx;
2412
2413 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb);
2414 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust RXBB (%+d) = 0x%x\n",
2415 tmp_gl_idx, tmp_rxbb);
2416 _dpk_kip_control_rfc(rtwdev, path, true);
2417 goout = 1;
2418 break;
2419 default:
2420 goout = 1;
2421 break;
2422 }
2423 } while (!goout && agc_cnt < 6 && --limit > 0);
2424
2425 if (limit <= 0)
2426 rtw89_warn(rtwdev, "[DPK] exceed loop limit\n");
2427
2428 return is_fail;
2429 }
2430
2431 static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
2432 {
2433 static const struct rtw89_rfk_tbl *order_tbls[] = {
2434 &rtw8852c_dpk_mdpd_order0_defs_tbl,
2435 &rtw8852c_dpk_mdpd_order1_defs_tbl,
2436 &rtw8852c_dpk_mdpd_order2_defs_tbl,
2437 &rtw8852c_dpk_mdpd_order3_defs_tbl,
2438 };
2439
2440 if (order >= ARRAY_SIZE(order_tbls)) {
2441 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Wrong MDPD order!!(0x%x)\n", order);
2442 return;
2443 }
2444
2445 rtw89_rfk_parser(rtwdev, order_tbls[order]);
2446
2447 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n",
2448 order == 0x0 ? "(5,3,1)" :
2449 order == 0x1 ? "(5,3,0)" :
2450 order == 0x2 ? "(5,0,0)" : "(7,3,1)");
2451 }
2452
2453 static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2454 enum rtw89_rf_path path, u8 kidx)
2455 {
2456 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2457 u8 cnt;
2458 u8 ov_flag;
2459 u32 dpk_sync;
2460
2461 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_MA, 0x1);
2462
2463 if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T2) == 0x1)
2464 _dpk_set_mdpd_para(rtwdev, 0x2);
2465 else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T1) == 0x1)
2466 _dpk_set_mdpd_para(rtwdev, 0x1);
2467 else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T0) == 0x1)
2468 _dpk_set_mdpd_para(rtwdev, 0x0);
2469 else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_5 ||
2470 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_10 ||
2471 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_20)
2472 _dpk_set_mdpd_para(rtwdev, 0x2);
2473 else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ||
2474 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
2475 _dpk_set_mdpd_para(rtwdev, 0x1);
2476 else
2477 _dpk_set_mdpd_para(rtwdev, 0x0);
2478
2479 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL, 0x0);
2480 fsleep(1000);
2481
2482 _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
2483 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
2484 dpk_sync = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
2485 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] dpk_sync = 0x%x\n", dpk_sync);
2486
2487 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf);
2488 ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
2489 for (cnt = 0; cnt < 5 && ov_flag == 0x1; cnt++) {
2490 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ReK due to MDPK ov!!!\n");
2491 _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
2492 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf);
2493 ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
2494 }
2495
2496 if (ov_flag) {
2497 _dpk_set_mdpd_para(rtwdev, 0x2);
2498 _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
2499 }
2500 }
2501
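/* Scan the backup slots for a band/channel match; on a hit, point the KIP
 * coefficient selector at the stored slot so the previous DPK result is
 * reused without re-running the calibration.
 */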
2502 static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2503 enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
2504 {
2505 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
2506 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2507 bool is_reload = false;
2508 u8 idx, cur_band, cur_ch;
2509
2510 cur_band = chan->band_type;
2511 cur_ch = chan->channel;
2512
2513 for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
2514 if (cur_band != dpk->bp[path][idx].band ||
2515 cur_ch != dpk->bp[path][idx].ch)
2516 continue;
2517
2518 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
2519 B_COEF_SEL_MDPD, idx);
2520 dpk->cur_idx[path] = idx;
2521 is_reload = true;
2522 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2523 "[DPK] reload S%d[%d] success\n", path, idx);
2524 }
2525
2526 return is_reload;
2527 }
2528
2529 static void _dpk_kip_pwr_clk_onoff(struct rtw89_dev *rtwdev, bool turn_on)
2530 {
2531 rtw89_rfk_parser(rtwdev, turn_on ? &rtw8852c_dpk_kip_pwr_clk_on_defs_tbl :
2532 &rtw8852c_dpk_kip_pwr_clk_off_defs_tbl);
2533 }
2534
2535 static void _dpk_kip_preset_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2536 enum rtw89_rf_path path, u8 kidx)
2537 {
2538 rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD,
2539 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
2540
2541 if (rtwdev->hal.cv == CHIP_CAV)
2542 rtw89_phy_write32_mask(rtwdev,
2543 R_DPD_CH0A + (path << 8) + (kidx << 2),
2544 B_DPD_SEL, 0x01);
2545 else
2546 rtw89_phy_write32_mask(rtwdev,
2547 R_DPD_CH0A + (path << 8) + (kidx << 2),
2548 B_DPD_SEL, 0x0c);
2549
2550 _dpk_kip_control_rfc(rtwdev, path, true);
2551 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx);
2552
2553 _dpk_one_shot(rtwdev, phy, path, D_KIP_PRESET);
2554 }
2555
2556 static void _dpk_para_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
2557 {
2558 #define _DPK_PARA_TXAGC GENMASK(15, 10)
2559 #define _DPK_PARA_THER GENMASK(31, 26)
2560 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2561 u32 para;
2562
2563 para = rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
2564 MASKDWORD);
2565
2566 dpk->bp[path][kidx].txagc_dpk = FIELD_GET(_DPK_PARA_TXAGC, para);
2567 dpk->bp[path][kidx].ther_dpk = FIELD_GET(_DPK_PARA_THER, para);
2568
2569 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal/ txagc_RF (K%d) = 0x%x/ 0x%x\n",
2570 dpk->cur_k_set, dpk->bp[path][kidx].ther_dpk, dpk->bp[path][kidx].txagc_dpk);
2571 }
2572
2573 static void _dpk_gain_normalize_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2574 enum rtw89_rf_path path, u8 kidx, bool is_execute)
2575 {
2576 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2577
2578 if (is_execute) {
2579 rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_AG, 0x200);
2580 rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_EN, 0x3);
2581
2582 _dpk_one_shot(rtwdev, phy, path, D_GAIN_NORM);
2583 } else {
2584 rtw89_phy_write32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
2585 0x0000007F, 0x5b);
2586 }
2587 dpk->bp[path][kidx].gs =
2588 rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
2589 0x0000007F);
2590 }
2591
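/* Translate the IDL order readout (B_LDL_NORM_OP) into the encoding that
 * the B_DPD_ORDER field expects; 0xff flags an unexpected value.
 */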
2592 static u8 _dpk_order_convert(struct rtw89_dev *rtwdev)
2593 {
2594 u32 val32 = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP);
2595 u8 val;
2596
2597 switch (val32) {
2598 case 0:
2599 val = 0x6;
2600 break;
2601 case 1:
2602 val = 0x2;
2603 break;
2604 case 2:
2605 val = 0x0;
2606 break;
2607 case 3:
2608 val = 0x7;
2609 break;
2610 default:
2611 val = 0xff;
2612 break;
2613 }
2614
2615 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val);
2616
2617 return val;
2618 }
2619
2620 static void _dpk_on(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2621 enum rtw89_rf_path path, u8 kidx)
2622 {
2623 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2624
2625 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
2626 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);
2627 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
2628 B_DPD_ORDER, _dpk_order_convert(rtwdev));
2629
2630 dpk->bp[path][kidx].mdpd_en = BIT(dpk->cur_k_set);
2631 dpk->bp[path][kidx].path_ok = true;
2632
2633 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] path_ok = 0x%x\n",
2634 path, kidx, dpk->bp[path][kidx].mdpd_en);
2635
2636 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
2637 B_DPD_MEN, dpk->bp[path][kidx].mdpd_en);
2638
2639 _dpk_gain_normalize_8852c(rtwdev, phy, path, kidx, false);
2640 }
2641
2642 static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2643 enum rtw89_rf_path path, u8 gain)
2644 {
2645 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2646 u8 kidx = dpk->cur_idx[path];
2647 u8 init_xdbm = 15;
2648 bool is_fail;
2649
2650 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2651 "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
2652 _dpk_kip_control_rfc(rtwdev, path, false);
2653 _rf_direct_cntrl(rtwdev, path, false);
2654 rtw89_write_rf(rtwdev, path, RR_BBDC, RFREG_MASK, 0x03ffd);
2655 _dpk_rf_setting(rtwdev, gain, path, kidx);
2656 _set_rx_dck(rtwdev, phy, path, false);
2657 _dpk_kip_pwr_clk_onoff(rtwdev, true);
2658 _dpk_kip_preset_8852c(rtwdev, phy, path, kidx);
2659 _dpk_txpwr_bb_force(rtwdev, path, true);
2660 _dpk_kip_set_txagc(rtwdev, phy, path, init_xdbm, true);
2661 _dpk_tpg_sel(rtwdev, path, kidx);
2662
2663 is_fail = _dpk_agc(rtwdev, phy, path, kidx, init_xdbm, false);
2664 if (is_fail)
2665 goto _error;
2666
2667 _dpk_idl_mpa(rtwdev, phy, path, kidx);
2668 _dpk_para_query(rtwdev, path, kidx);
2669 _dpk_on(rtwdev, phy, path, kidx);
2670
2671 _error:
2672 _dpk_kip_control_rfc(rtwdev, path, false);
2673 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX);
2674 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d]_K%d %s\n", path, kidx,
2675 dpk->cur_k_set, is_fail ? "need Check" : "is Success");
2676
2677 return is_fail;
2678 }
2679
2680 static void _dpk_init(struct rtw89_dev *rtwdev, u8 path)
2681 {
2682 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2683 u8 kidx = dpk->cur_idx[path];
2684
2685 dpk->bp[path][kidx].path_ok = false;
2686 }
2687
2688 static void _dpk_drf_direct_cntrl(struct rtw89_dev *rtwdev, u8 path, bool is_bybb)
2689 {
2690 if (is_bybb)
2691 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
2692 else
2693 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
2694 }
2695
2696 static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
2697 enum rtw89_phy_idx phy, u8 kpath,
2698 enum rtw89_chanctx_idx chanctx_idx)
2699 {
2700 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2701 static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0c4, 0xc0e8, 0xc0d4, 0xc0d8};
2702 u32 backup_rf_val[RTW8852C_DPK_RF_PATH][BACKUP_RF_REGS_NR];
2703 u32 kip_bkup[RTW8852C_DPK_RF_PATH][RTW8852C_DPK_KIP_REG_NUM] = {};
2704 u8 path;
2705 bool is_fail = true, reloaded[RTW8852C_DPK_RF_PATH] = {false};
2706
2707 static_assert(ARRAY_SIZE(kip_reg) == RTW8852C_DPK_KIP_REG_NUM);
2708
2709 if (dpk->is_dpk_reload_en) {
2710 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
2711 if (!(kpath & BIT(path)))
2712 continue;
2713
2714 reloaded[path] = _dpk_reload_check(rtwdev, phy, path,
2715 chanctx_idx);
2716 if (!reloaded[path] && dpk->bp[path][0].ch != 0)
2717 dpk->cur_idx[path] = !dpk->cur_idx[path];
2718 else
2719 _dpk_onoff(rtwdev, path, false);
2720 }
2721 } else {
2722 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
2723 dpk->cur_idx[path] = 0;
2724 }
2725
2726 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
2727 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2728 "[DPK] ========= S%d[%d] DPK Init =========\n",
2729 path, dpk->cur_idx[path]);
2730 _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
2731 _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
2732 _dpk_information(rtwdev, phy, path, chanctx_idx);
2733 _dpk_init(rtwdev, path);
2734 if (rtwdev->is_tssi_mode[path])
2735 _dpk_tssi_pause(rtwdev, path, true);
2736 }
2737
2738 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
2739 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2740 "[DPK] ========= S%d[%d] DPK Start =========\n",
2741 path, dpk->cur_idx[path]);
2742 rtw8852c_disable_rxagc(rtwdev, path, 0x0);
2743 _dpk_drf_direct_cntrl(rtwdev, path, false);
2744 _dpk_bb_afe_setting(rtwdev, phy, path, kpath);
2745 is_fail = _dpk_main(rtwdev, phy, path, 1);
2746 _dpk_onoff(rtwdev, path, is_fail);
2747 }
2748
2749 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
2750 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2751 "[DPK] ========= S%d[%d] DPK Restore =========\n",
2752 path, dpk->cur_idx[path]);
2753 _dpk_kip_restore(rtwdev, phy, path);
2754 _dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
2755 _rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path);
2756 _dpk_bb_afe_restore(rtwdev, path);
2757 rtw8852c_disable_rxagc(rtwdev, path, 0x1);
2758 if (rtwdev->is_tssi_mode[path])
2759 _dpk_tssi_pause(rtwdev, path, false);
2760 }
2761
2762 _dpk_kip_pwr_clk_onoff(rtwdev, false);
2763 }
2764
2765 static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2766 enum rtw89_chanctx_idx chanctx_idx)
2767 {
2768 struct rtw89_fem_info *fem = &rtwdev->fem;
2769 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
2770 u8 band = chan->band_type;
2771
2772 if (rtwdev->hal.cv == CHIP_CAV && band != RTW89_BAND_2G) {
2773 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to CAV & not 2G!!\n");
2774 return true;
2775 } else if (fem->epa_2g && band == RTW89_BAND_2G) {
2776 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
2777 return true;
2778 } else if (fem->epa_5g && band == RTW89_BAND_5G) {
2779 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
2780 return true;
2781 } else if (fem->epa_6g && band == RTW89_BAND_6G) {
2782 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
2783 return true;
2784 }
2785
2786 return false;
2787 }
2788
2789 static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2790 {
2791 u8 path, kpath;
2792
2793 kpath = _kpath(rtwdev, phy);
2794
2795 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
2796 if (kpath & BIT(path))
2797 _dpk_onoff(rtwdev, path, true);
2798 }
2799 }
2800
2801 static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force,
2802 enum rtw89_chanctx_idx chanctx_idx)
2803 {
2804 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2805 "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
2806 RTW8852C_DPK_VER, rtwdev->hal.cv,
2807 RTW8852C_RF_REL_VERSION);
2808
2809 if (_dpk_bypass_check(rtwdev, phy, chanctx_idx))
2810 _dpk_force_bypass(rtwdev, phy);
2811 else
2812 _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy), chanctx_idx);
2813
2814 if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_DCKC, RR_DCKC_CHK) == 0x1)
2815 rtw8852c_rx_dck(rtwdev, phy, false);
2816 }
2817
2818 static void _dpk_onoff(struct rtw89_dev *rtwdev,
2819 enum rtw89_rf_path path, bool off)
2820 {
2821 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2822 u8 val, kidx = dpk->cur_idx[path];
2823
2824 val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok ?
2825 dpk->bp[path][kidx].mdpd_en : 0;
2826
2827 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
2828 B_DPD_MEN, val);
2829
2830 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
2831 kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
2832 }
2833
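/* DPK thermal tracking: compare the thermal value recorded at calibration
 * with the current EWMA-averaged reading and, on CAV silicon, shift the
 * power scaling factor around its 0x78 midpoint by half the thermal delta.
 */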
2834 static void _dpk_track(struct rtw89_dev *rtwdev)
2835 {
2836 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2837 u8 path, kidx;
2838 u8 txagc_rf = 0;
2839 s8 txagc_bb = 0, txagc_bb_tp = 0, txagc_ofst = 0;
2840 u8 cur_ther;
2841 s8 delta_ther = 0;
2842 s16 pwsf_tssi_ofst;
2843
2844 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
2845 kidx = dpk->cur_idx[path];
2846 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2847 "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
2848 path, kidx, dpk->bp[path][kidx].ch);
2849
2850 txagc_rf =
2851 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 0x0000003f);
2852 txagc_bb =
2853 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), MASKBYTE2);
2854 txagc_bb_tp =
2855 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BTP + (path << 13), B_TXAGC_BTP);
2856
2857 /* report from KIP */
2858 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xf);
2859 cur_ther =
2860 rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TH);
2861 txagc_ofst =
2862 rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_OF);
2863 pwsf_tssi_ofst =
2864 rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TSSI);
2865 pwsf_tssi_ofst = sign_extend32(pwsf_tssi_ofst, 12);
2866
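/* the KIP thermal readout above is immediately overwritten; the EWMA
 * average below is what the compensation actually uses
 */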
2867 cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
2868
2869 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2870 "[DPK_TRK] thermal now = %d\n", cur_ther);
2871
2872 if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0)
2873 delta_ther = dpk->bp[path][kidx].ther_dpk - cur_ther;
2874
2875 delta_ther = delta_ther * 1 / 2;
2876
2877 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2878 "[DPK_TRK] extra delta_ther = %d (0x%x / 0x%x@k)\n",
2879 delta_ther, cur_ther, dpk->bp[path][kidx].ther_dpk);
2880 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2881 "[DPK_TRK] delta_txagc = %d (0x%x / 0x%x@k)\n",
2882 txagc_rf - dpk->bp[path][kidx].txagc_dpk, txagc_rf,
2883 dpk->bp[path][kidx].txagc_dpk);
2884 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2885 "[DPK_TRK] txagc_offset / pwsf_tssi_ofst = 0x%x / %+d\n",
2886 txagc_ofst, pwsf_tssi_ofst);
2887 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2888 "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
2889 txagc_bb_tp, txagc_bb);
2890
2891 if (rtw89_phy_read32_mask(rtwdev, R_DPK_WR, B_DPK_WR_ST) == 0x0 &&
2892 txagc_rf != 0 && rtwdev->hal.cv == CHIP_CAV) {
2893 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2894 "[DPK_TRK] New pwsf = 0x%x\n", 0x78 - delta_ther);
2895
2896 rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
2897 0x07FC0000, 0x78 - delta_ther);
2898 }
2899 }
2900 }
2901
2902 static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2903 enum rtw89_rf_path path, const struct rtw89_chan *chan)
2904 {
2905 enum rtw89_bandwidth bw = chan->band_width;
2906 enum rtw89_band band = chan->band_type;
2907 u32 clk = 0x0;
2908
2909 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_sys_defs_tbl);
2910
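/* TSSI ADC clock select: 0x0 for 20/40MHz, 0x1 for 80MHz,
 * 0x2 for 160MHz and 80+80
 */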
2911 switch (bw) {
2912 case RTW89_CHANNEL_WIDTH_80:
2913 clk = 0x1;
2914 break;
2915 case RTW89_CHANNEL_WIDTH_80_80:
2916 case RTW89_CHANNEL_WIDTH_160:
2917 clk = 0x2;
2918 break;
2919 default:
2920 break;
2921 }
2922
2923 if (path == RF_PATH_A) {
2924 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ADC_CLK,
2925 B_P0_TSSI_ADC_CLK, clk);
2926 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2927 &rtw8852c_tssi_sys_defs_2g_a_tbl,
2928 &rtw8852c_tssi_sys_defs_5g_a_tbl);
2929 } else {
2930 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ADC_CLK,
2931 B_P1_TSSI_ADC_CLK, clk);
2932 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2933 &rtw8852c_tssi_sys_defs_2g_b_tbl,
2934 &rtw8852c_tssi_sys_defs_5g_b_tbl);
2935 }
2936 }
2937
2938 static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2939 enum rtw89_rf_path path)
2940 {
2941 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2942 &rtw8852c_tssi_txpwr_ctrl_bb_defs_a_tbl,
2943 &rtw8852c_tssi_txpwr_ctrl_bb_defs_b_tbl);
2944 }
2945
2946 static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
2947 enum rtw89_phy_idx phy,
2948 enum rtw89_rf_path path)
2949 {
2950 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2951 &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl,
2952 &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl);
2953 }
2954
2955 static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2956 enum rtw89_rf_path path, const struct rtw89_chan *chan)
2957 {
2958 enum rtw89_band band = chan->band_type;
2959
2960 if (path == RF_PATH_A) {
2961 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_a_tbl);
2962 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2963 &rtw8852c_tssi_dck_defs_2g_a_tbl,
2964 &rtw8852c_tssi_dck_defs_5g_a_tbl);
2965 } else {
2966 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_b_tbl);
2967 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2968 &rtw8852c_tssi_dck_defs_2g_b_tbl,
2969 &rtw8852c_tssi_dck_defs_5g_b_tbl);
2970 }
2971 }
2972
2973 static void _tssi_set_bbgain_split(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2974 enum rtw89_rf_path path)
2975 {
2976 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2977 &rtw8852c_tssi_set_bbgain_split_a_tbl,
2978 &rtw8852c_tssi_set_bbgain_split_b_tbl);
2979 }
2980
2981 static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2982 enum rtw89_rf_path path, const struct rtw89_chan *chan)
2983 {
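/* Pack four consecutive s8 thermal offsets into one u32, least-significant
 * byte first, so each 32-bit register write below covers four table entries.
 */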
2984 #define RTW8852C_TSSI_GET_VAL(ptr, idx) \
2985 ({ \
2986 s8 *__ptr = (ptr); \
2987 u8 __idx = (idx), __i, __v; \
2988 u32 __val = 0; \
2989 for (__i = 0; __i < 4; __i++) { \
2990 __v = (__ptr[__idx + __i]); \
2991 __val |= (__v << (8 * __i)); \
2992 } \
2993 __val; \
2994 })
2995 struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
2996 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
2997 u8 ch = chan->channel;
2998 u8 subband = chan->subband_type;
2999 const s8 *thm_up_a = NULL;
3000 const s8 *thm_down_a = NULL;
3001 const s8 *thm_up_b = NULL;
3002 const s8 *thm_down_b = NULL;
3003 u8 thermal = 0xff;
3004 s8 thm_ofst[64] = {0};
3005 u32 tmp = 0;
3006 u8 i, j;
3007
3008 switch (subband) {
3009 default:
3010 case RTW89_CH_2G:
3011 thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0] :
3012 rtw89_8852c_trk_cfg.delta_swingidx_2ga_p;
3013 thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0] :
3014 rtw89_8852c_trk_cfg.delta_swingidx_2ga_n;
3015 thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0] :
3016 rtw89_8852c_trk_cfg.delta_swingidx_2gb_p;
3017 thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0] :
3018 rtw89_8852c_trk_cfg.delta_swingidx_2gb_n;
3019 break;
3020 case RTW89_CH_5G_BAND_1:
3021 thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0] :
3022 rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0];
3023 thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0] :
3024 rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0];
3025 thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0] :
3026 rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0];
3027 thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0] :
3028 rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0];
3029 break;
3030 case RTW89_CH_5G_BAND_3:
3031 thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1] :
3032 rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];
3033 thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1] :
3034 rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1];
3035 thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1] :
3036 rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1];
3037 thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1] :
3038 rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1];
3039 break;
3040 case RTW89_CH_5G_BAND_4:
3041 thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2] :
3042 rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2];
3043 thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2] :
3044 rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2];
3045 thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2] :
3046 rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2];
3047 thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2] :
3048 rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2];
3049 break;
3050 case RTW89_CH_6G_BAND_IDX0:
3051 case RTW89_CH_6G_BAND_IDX1:
3052 thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0] :
3053 rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0];
3054 thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0] :
3055 rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0];
3056 thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0] :
3057 rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0];
3058 thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0] :
3059 rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0];
3060 break;
3061 case RTW89_CH_6G_BAND_IDX2:
3062 case RTW89_CH_6G_BAND_IDX3:
3063 thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1] :
3064 rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1];
3065 thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1] :
3066 rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1];
3067 thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1] :
3068 rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1];
3069 thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1] :
3070 rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1];
3071 break;
3072 case RTW89_CH_6G_BAND_IDX4:
3073 case RTW89_CH_6G_BAND_IDX5:
3074 thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2] :
3075 rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2];
3076 thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2] :
3077 rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2];
3078 thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2] :
3079 rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2];
3080 thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2] :
3081 rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2];
3082 break;
3083 case RTW89_CH_6G_BAND_IDX6:
3084 case RTW89_CH_6G_BAND_IDX7:
3085 thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3] :
3086 rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3];
3087 thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3] :
3088 rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3];
3089 thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3] :
3090 rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3];
3091 thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3] :
3092 rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3];
3093 break;
3094 }
3095
3096 if (path == RF_PATH_A) {
3097 thermal = tssi_info->thermal[RF_PATH_A];
3098
3099 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3100 "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);
3101
3102 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
3103 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);
3104
3105 if (thermal == 0xff) {
3106 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
3107 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);
3108
3109 for (i = 0; i < 64; i += 4) {
3110 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);
3111
3112 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3113 "[TSSI] write 0x%x val=0x%08x\n",
3114 0x5c00 + i, 0x0);
3115 }
3116
3117 } else {
3118 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
3119 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
3120 thermal);
3121
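/* Build the 64-entry thermal offset table: entries 0..31 hold the
 * negated down-swing deltas, entries 63..32 hold the up-swing deltas
 * (starting from delta index 1); once the DELTA_SWINGIDX_SIZE source
 * table runs out, its last delta is repeated.
 */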
3122 i = 0;
3123 for (j = 0; j < 32; j++)
3124 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
3125 -thm_down_a[i++] :
3126 -thm_down_a[DELTA_SWINGIDX_SIZE - 1];
3127
3128 i = 1;
3129 for (j = 63; j >= 32; j--)
3130 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
3131 thm_up_a[i++] :
3132 thm_up_a[DELTA_SWINGIDX_SIZE - 1];
3133
3134 for (i = 0; i < 64; i += 4) {
3135 tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i);
3136 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);
3137
3138 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3139 "[TSSI] write 0x%x val=0x%08x\n",
3140 0x5c00 + i, tmp);
3141 }
3142 }
3143 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
3144 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
3145
3146 } else {
3147 thermal = tssi_info->thermal[RF_PATH_B];
3148
3149 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3150 "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);
3151
3152 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
3153 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);
3154
3155 if (thermal == 0xff) {
3156 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
3157 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);
3158
3159 for (i = 0; i < 64; i += 4) {
3160 rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);
3161
3162 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3163 "[TSSI] write 0x%x val=0x%08x\n",
3164 0x7c00 + i, 0x0);
3165 }
3166
3167 } else {
3168 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
3169 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
3170 thermal);
3171
3172 i = 0;
3173 for (j = 0; j < 32; j++)
3174 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
3175 -thm_down_b[i++] :
3176 -thm_down_b[DELTA_SWINGIDX_SIZE - 1];
3177
3178 i = 1;
3179 for (j = 63; j >= 32; j--)
3180 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
3181 thm_up_b[i++] :
3182 thm_up_b[DELTA_SWINGIDX_SIZE - 1];
3183
3184 for (i = 0; i < 64; i += 4) {
3185 tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i);
3186 rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);
3187
3188 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3189 "[TSSI] write 0x%x val=0x%08x\n",
3190 0x7c00 + i, tmp);
3191 }
3192 }
3193 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
3194 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
3195 }
3196 #undef RTW8852C_TSSI_GET_VAL
3197 }
3198
3199 static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3200 enum rtw89_rf_path path, const struct rtw89_chan *chan)
3201 {
3202 enum rtw89_band band = chan->band_type;
3203
3204 if (path == RF_PATH_A) {
3205 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
3206 &rtw8852c_tssi_slope_cal_org_defs_2g_a_tbl,
3207 &rtw8852c_tssi_slope_cal_org_defs_5g_a_tbl);
3208 } else {
3209 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
3210 &rtw8852c_tssi_slope_cal_org_defs_2g_b_tbl,
3211 &rtw8852c_tssi_slope_cal_org_defs_5g_b_tbl);
3212 }
3213 }
3214
3215 static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3216 enum rtw89_rf_path path,
3217 const struct rtw89_chan *chan)
3218 {
3219 enum rtw89_band band = chan->band_type;
3220 const struct rtw89_rfk_tbl *tbl;
3221
3222 if (path == RF_PATH_A) {
3223 if (band == RTW89_BAND_2G)
3224 tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_a_tbl;
3225 else if (band == RTW89_BAND_6G)
3226 tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_a_tbl;
3227 else
3228 tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_a_tbl;
3229 } else {
3230 if (band == RTW89_BAND_2G)
3231 tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_b_tbl;
3232 else if (band == RTW89_BAND_6G)
3233 tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_b_tbl;
3234 else
3235 tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_b_tbl;
3236 }
3237
3238 rtw89_rfk_parser(rtwdev, tbl);
3239 }
3240
3241 static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3242 enum rtw89_rf_path path)
3243 {
3244 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3245 &rtw8852c_tssi_slope_defs_a_tbl,
3246 &rtw8852c_tssi_slope_defs_b_tbl);
3247 }
3248
3249 static void _tssi_run_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3250 enum rtw89_rf_path path)
3251 {
3252 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3253 &rtw8852c_tssi_run_slope_defs_a_tbl,
3254 &rtw8852c_tssi_run_slope_defs_b_tbl);
3255 }
3256
3257 static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3258 enum rtw89_rf_path path)
3259 {
3260 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3261 &rtw8852c_tssi_track_defs_a_tbl,
3262 &rtw8852c_tssi_track_defs_b_tbl);
3263 }
3264
3265 static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
3266 enum rtw89_phy_idx phy,
3267 enum rtw89_rf_path path)
3268 {
3269 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3270 &rtw8852c_tssi_txagc_ofst_mv_avg_defs_a_tbl,
3271 &rtw8852c_tssi_txagc_ofst_mv_avg_defs_b_tbl);
3272 }
3273
3274 static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3275 {
3276 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3277 u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
3278
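/* Under DBCC each PHY drives a single RF path (PHY0 -> A, PHY1 -> B);
 * otherwise both paths are walked.
 */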
3279 if (rtwdev->dbcc_en) {
3280 if (phy == RTW89_PHY_0) {
3281 path = RF_PATH_A;
3282 path_max = RF_PATH_B;
3283 } else if (phy == RTW89_PHY_1) {
3284 path = RF_PATH_B;
3285 path_max = RF_PATH_NUM_8852C;
3286 }
3287 }
3288
3289 for (i = path; i < path_max; i++) {
3290 _tssi_set_track(rtwdev, phy, i);
3291 _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);
3292
3293 rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A,
3294 &rtw8852c_tssi_enable_defs_a_tbl,
3295 &rtw8852c_tssi_enable_defs_b_tbl);
3296
3297 tssi_info->base_thermal[i] =
3298 ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]);
3299 rtwdev->is_tssi_mode[i] = true;
3300 }
3301 }
3302
3303 static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3304 {
3305 u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
3306
3307 if (rtwdev->dbcc_en) {
3308 if (phy == RTW89_PHY_0) {
3309 path = RF_PATH_A;
3310 path_max = RF_PATH_B;
3311 } else if (phy == RTW89_PHY_1) {
3312 path = RF_PATH_B;
3313 path_max = RF_PATH_NUM_8852C;
3314 }
3315 }
3316
3317 for (i = path; i < path_max; i++) {
3318 if (i == RF_PATH_A) {
3319 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_a_tbl);
3320 rtwdev->is_tssi_mode[RF_PATH_A] = false;
3321 } else if (i == RF_PATH_B) {
3322 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_b_tbl);
3323 rtwdev->is_tssi_mode[RF_PATH_B] = false;
3324 }
3325 }
3326 }
3327
3328 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
3329 {
3330 switch (ch) {
3331 case 1 ... 2:
3332 return 0;
3333 case 3 ... 5:
3334 return 1;
3335 case 6 ... 8:
3336 return 2;
3337 case 9 ... 11:
3338 return 3;
3339 case 12 ... 13:
3340 return 4;
3341 case 14:
3342 return 5;
3343 }
3344
3345 return 0;
3346 }
3347
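/* Channels that sit between two calibration groups are flagged with bit 31;
 * their DE is the average of the two neighbouring groups. For example,
 * channel 41 maps to TSSI_EXTRA_GROUP(5), so its DE is
 * (tssi_mcs[path][5] + tssi_mcs[path][6]) / 2.
 */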
3348 #define TSSI_EXTRA_GROUP_BIT (BIT(31))
3349 #define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
3350 #define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
3351 #define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
3352 #define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
3353
3354 static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
3355 {
3356 switch (ch) {
3357 case 1 ... 2:
3358 return 0;
3359 case 3 ... 5:
3360 return 1;
3361 case 6 ... 8:
3362 return 2;
3363 case 9 ... 11:
3364 return 3;
3365 case 12 ... 14:
3366 return 4;
3367 case 36 ... 40:
3368 return 5;
3369 case 41 ... 43:
3370 return TSSI_EXTRA_GROUP(5);
3371 case 44 ... 48:
3372 return 6;
3373 case 49 ... 51:
3374 return TSSI_EXTRA_GROUP(6);
3375 case 52 ... 56:
3376 return 7;
3377 case 57 ... 59:
3378 return TSSI_EXTRA_GROUP(7);
3379 case 60 ... 64:
3380 return 8;
3381 case 100 ... 104:
3382 return 9;
3383 case 105 ... 107:
3384 return TSSI_EXTRA_GROUP(9);
3385 case 108 ... 112:
3386 return 10;
3387 case 113 ... 115:
3388 return TSSI_EXTRA_GROUP(10);
3389 case 116 ... 120:
3390 return 11;
3391 case 121 ... 123:
3392 return TSSI_EXTRA_GROUP(11);
3393 case 124 ... 128:
3394 return 12;
3395 case 129 ... 131:
3396 return TSSI_EXTRA_GROUP(12);
3397 case 132 ... 136:
3398 return 13;
3399 case 137 ... 139:
3400 return TSSI_EXTRA_GROUP(13);
3401 case 140 ... 144:
3402 return 14;
3403 case 149 ... 153:
3404 return 15;
3405 case 154 ... 156:
3406 return TSSI_EXTRA_GROUP(15);
3407 case 157 ... 161:
3408 return 16;
3409 case 162 ... 164:
3410 return TSSI_EXTRA_GROUP(16);
3411 case 165 ... 169:
3412 return 17;
3413 case 170 ... 172:
3414 return TSSI_EXTRA_GROUP(17);
3415 case 173 ... 177:
3416 return 18;
3417 }
3418
3419 return 0;
3420 }
3421
3422 static u32 _tssi_get_6g_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
3423 {
3424 switch (ch) {
3425 case 1 ... 5:
3426 return 0;
3427 case 6 ... 8:
3428 return TSSI_EXTRA_GROUP(0);
3429 case 9 ... 13:
3430 return 1;
3431 case 14 ... 16:
3432 return TSSI_EXTRA_GROUP(1);
3433 case 17 ... 21:
3434 return 2;
3435 case 22 ... 24:
3436 return TSSI_EXTRA_GROUP(2);
3437 case 25 ... 29:
3438 return 3;
3439 case 33 ... 37:
3440 return 4;
3441 case 38 ... 40:
3442 return TSSI_EXTRA_GROUP(4);
3443 case 41 ... 45:
3444 return 5;
3445 case 46 ... 48:
3446 return TSSI_EXTRA_GROUP(5);
3447 case 49 ... 53:
3448 return 6;
3449 case 54 ... 56:
3450 return TSSI_EXTRA_GROUP(6);
3451 case 57 ... 61:
3452 return 7;
3453 case 65 ... 69:
3454 return 8;
3455 case 70 ... 72:
3456 return TSSI_EXTRA_GROUP(8);
3457 case 73 ... 77:
3458 return 9;
3459 case 78 ... 80:
3460 return TSSI_EXTRA_GROUP(9);
3461 case 81 ... 85:
3462 return 10;
3463 case 86 ... 88:
3464 return TSSI_EXTRA_GROUP(10);
3465 case 89 ... 93:
3466 return 11;
3467 case 97 ... 101:
3468 return 12;
3469 case 102 ... 104:
3470 return TSSI_EXTRA_GROUP(12);
3471 case 105 ... 109:
3472 return 13;
3473 case 110 ... 112:
3474 return TSSI_EXTRA_GROUP(13);
3475 case 113 ... 117:
3476 return 14;
3477 case 118 ... 120:
3478 return TSSI_EXTRA_GROUP(14);
3479 case 121 ... 125:
3480 return 15;
3481 case 129 ... 133:
3482 return 16;
3483 case 134 ... 136:
3484 return TSSI_EXTRA_GROUP(16);
3485 case 137 ... 141:
3486 return 17;
3487 case 142 ... 144:
3488 return TSSI_EXTRA_GROUP(17);
3489 case 145 ... 149:
3490 return 18;
3491 case 150 ... 152:
3492 return TSSI_EXTRA_GROUP(18);
3493 case 153 ... 157:
3494 return 19;
3495 case 161 ... 165:
3496 return 20;
3497 case 166 ... 168:
3498 return TSSI_EXTRA_GROUP(20);
3499 case 169 ... 173:
3500 return 21;
3501 case 174 ... 176:
3502 return TSSI_EXTRA_GROUP(21);
3503 case 177 ... 181:
3504 return 22;
3505 case 182 ... 184:
3506 return TSSI_EXTRA_GROUP(22);
3507 case 185 ... 189:
3508 return 23;
3509 case 193 ... 197:
3510 return 24;
3511 case 198 ... 200:
3512 return TSSI_EXTRA_GROUP(24);
3513 case 201 ... 205:
3514 return 25;
3515 case 206 ... 208:
3516 return TSSI_EXTRA_GROUP(25);
3517 case 209 ... 213:
3518 return 26;
3519 case 214 ... 216:
3520 return TSSI_EXTRA_GROUP(26);
3521 case 217 ... 221:
3522 return 27;
3523 case 225 ... 229:
3524 return 28;
3525 case 230 ... 232:
3526 return TSSI_EXTRA_GROUP(28);
3527 case 233 ... 237:
3528 return 29;
3529 case 238 ... 240:
3530 return TSSI_EXTRA_GROUP(29);
3531 case 241 ... 245:
3532 return 30;
3533 case 246 ... 248:
3534 return TSSI_EXTRA_GROUP(30);
3535 case 249 ... 253:
3536 return 31;
3537 }
3538
3539 return 0;
3540 }
3541
3542 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
3543 {
3544 switch (ch) {
3545 case 1 ... 8:
3546 return 0;
3547 case 9 ... 14:
3548 return 1;
3549 case 36 ... 48:
3550 return 2;
3551 case 49 ... 51:
3552 return TSSI_EXTRA_GROUP(2);
3553 case 52 ... 64:
3554 return 3;
3555 case 100 ... 112:
3556 return 4;
3557 case 113 ... 115:
3558 return TSSI_EXTRA_GROUP(4);
3559 case 116 ... 128:
3560 return 5;
3561 case 132 ... 144:
3562 return 6;
3563 case 149 ... 177:
3564 return 7;
3565 }
3566
3567 return 0;
3568 }
3569
3570 static u32 _tssi_get_6g_trim_group(struct rtw89_dev *rtwdev, u8 ch)
3571 {
3572 switch (ch) {
3573 case 1 ... 13:
3574 return 0;
3575 case 14 ... 16:
3576 return TSSI_EXTRA_GROUP(0);
3577 case 17 ... 29:
3578 return 1;
3579 case 33 ... 45:
3580 return 2;
3581 case 46 ... 48:
3582 return TSSI_EXTRA_GROUP(2);
3583 case 49 ... 61:
3584 return 3;
3585 case 65 ... 77:
3586 return 4;
3587 case 78 ... 80:
3588 return TSSI_EXTRA_GROUP(4);
3589 case 81 ... 93:
3590 return 5;
3591 case 97 ... 109:
3592 return 6;
3593 case 110 ... 112:
3594 return TSSI_EXTRA_GROUP(6);
3595 case 113 ... 125:
3596 return 7;
3597 case 129 ... 141:
3598 return 8;
3599 case 142 ... 144:
3600 return TSSI_EXTRA_GROUP(8);
3601 case 145 ... 157:
3602 return 9;
3603 case 161 ... 173:
3604 return 10;
3605 case 174 ... 176:
3606 return TSSI_EXTRA_GROUP(10);
3607 case 177 ... 189:
3608 return 11;
3609 case 193 ... 205:
3610 return 12;
3611 case 206 ... 208:
3612 return TSSI_EXTRA_GROUP(12);
3613 case 209 ... 221:
3614 return 13;
3615 case 225 ... 237:
3616 return 14;
3617 case 238 ... 240:
3618 return TSSI_EXTRA_GROUP(14);
3619 case 241 ... 253:
3620 return 15;
3621 }
3622
3623 return 0;
3624 }
3625
3626 static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3627 enum rtw89_rf_path path, const struct rtw89_chan *chan)
3628 {
3629 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3630 enum rtw89_band band = chan->band_type;
3631 u8 ch = chan->channel;
3632 u32 gidx, gidx_1st, gidx_2nd;
3633 s8 de_1st;
3634 s8 de_2nd;
3635 s8 val;
3636
3637 if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) {
3638 gidx = _tssi_get_ofdm_group(rtwdev, ch);
3639
3640 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3641 "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
3642 path, gidx);
3643
3644 if (IS_TSSI_EXTRA_GROUP(gidx)) {
3645 gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
3646 gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
3647 de_1st = tssi_info->tssi_mcs[path][gidx_1st];
3648 de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
3649 val = (de_1st + de_2nd) / 2;
3650
3651 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3652 "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
3653 path, val, de_1st, de_2nd);
3654 } else {
3655 val = tssi_info->tssi_mcs[path][gidx];
3656
3657 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3658 "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
3659 }
3660 } else {
3661 gidx = _tssi_get_6g_ofdm_group(rtwdev, ch);
3662
3663 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3664 "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
3665 path, gidx);
3666
3667 if (IS_TSSI_EXTRA_GROUP(gidx)) {
3668 gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
3669 gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
3670 de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
3671 de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
3672 val = (de_1st + de_2nd) / 2;
3673
3674 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3675 "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
3676 path, val, de_1st, de_2nd);
3677 } else {
3678 val = tssi_info->tssi_6g_mcs[path][gidx];
3679
3680 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3681 "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
3682 }
3683 }
3684
3685 return val;
3686 }
3687
3688 static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
3689 enum rtw89_phy_idx phy,
3690 enum rtw89_rf_path path, const struct rtw89_chan *chan)
3691 {
3692 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3693 enum rtw89_band band = chan->band_type;
3694 u8 ch = chan->channel;
3695 u32 tgidx, tgidx_1st, tgidx_2nd;
3696 s8 tde_1st = 0;
3697 s8 tde_2nd = 0;
3698 s8 val;
3699
3700 if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) {
3701 tgidx = _tssi_get_trim_group(rtwdev, ch);
3702
3703 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3704 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
3705 path, tgidx);
3706
3707 if (IS_TSSI_EXTRA_GROUP(tgidx)) {
3708 tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
3709 tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
3710 tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
3711 tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
3712 val = (tde_1st + tde_2nd) / 2;
3713
3714 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3715 "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
3716 path, val, tde_1st, tde_2nd);
3717 } else {
3718 val = tssi_info->tssi_trim[path][tgidx];
3719
3720 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3721 "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
3722 path, val);
3723 }
3724 } else {
3725 tgidx = _tssi_get_6g_trim_group(rtwdev, ch);
3726
3727 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3728 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
3729 path, tgidx);
3730
3731 if (IS_TSSI_EXTRA_GROUP(tgidx)) {
3732 tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
3733 tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
3734 tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
3735 tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
3736 val = (tde_1st + tde_2nd) / 2;
3737
3738 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3739 "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
3740 path, val, tde_1st, tde_2nd);
3741 } else {
3742 val = tssi_info->tssi_trim_6g[path][tgidx];
3743
3744 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3745 "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
3746 path, val);
3747 }
3748 }
3749
3750 return val;
3751 }
3752
3753 static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
3754 enum rtw89_phy_idx phy, const struct rtw89_chan *chan)
3755 {
3756 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3757 u8 ch = chan->channel;
3758 u8 gidx;
3759 s8 ofdm_de;
3760 s8 trim_de;
3761 s32 val;
3762 u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
3763
3764 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
3765 phy, ch);
3766
3767 if (rtwdev->dbcc_en) {
3768 if (phy == RTW89_PHY_0) {
3769 path = RF_PATH_A;
3770 path_max = RF_PATH_B;
3771 } else if (phy == RTW89_PHY_1) {
3772 path = RF_PATH_B;
3773 path_max = RF_PATH_NUM_8852C;
3774 }
3775 }
3776
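/* For each active path, program the TSSI DE registers with the efuse
 * calibration value plus the per-channel trim offset, first for CCK and
 * then for all OFDM/MCS bandwidths.
 */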
3777 for (i = path; i < path_max; i++) {
3778 gidx = _tssi_get_cck_group(rtwdev, ch);
3779 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
3780 val = tssi_info->tssi_cck[i][gidx] + trim_de;
3781
3782 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3783 "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
3784 i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);
3785
3786 rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
3787 rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);
3788
3789 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3790 "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
3791 _tssi_de_cck_long[i],
3792 rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
3793 _TSSI_DE_MASK));
3794
3795 ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
3796 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
3797 val = ofdm_de + trim_de;
3798
3799 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3800 "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
3801 i, ofdm_de, trim_de);
3802
3803 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
3804 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
3805 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
3806 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val);
3807 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
3808 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);
3809
3810 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3811 "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
3812 _tssi_de_mcs_20m[i],
3813 rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
3814 _TSSI_DE_MASK));
3815 }
3816 }
3817
3818 static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
3819 enum rtw89_rf_path path, const struct rtw89_chan *chan)
3820 {
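/* Per-path TSSI tracking (0x5818/0x7818) and enable (0x5820/0x7820)
 * registers; BIT(30) and BIT(31) below act as stop bits, cleared on
 * enable and set on disable.
 */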
3821 static const u32 tssi_trk[2] = {0x5818, 0x7818};
3822 static const u32 tssi_en[2] = {0x5820, 0x7820};
3823
3824 if (en) {
3825 rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0);
3826 rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x0);
3827 if (rtwdev->dbcc_en && path == RF_PATH_B)
3828 _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1, chan);
3829 else
3830 _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0, chan);
3831 } else {
3832 rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1);
3833 rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x1);
3834 }
3835 }
3836
3837 void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx,
3838 const struct rtw89_chan *chan)
3839 {
3840 if (!rtwdev->dbcc_en) {
3841 rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A, chan);
3842 rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B, chan);
3843 } else {
3844 if (phy_idx == RTW89_PHY_0)
3845 rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A, chan);
3846 else
3847 rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B, chan);
3848 }
3849 }
3850
3851 static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
3852 enum rtw89_bandwidth bw, bool is_dav)
3853 {
3854 u32 rf_reg18;
3855 u32 reg_reg18_addr;
3856
3857 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
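/* RF register 0x18 exists twice per path: RR_CFGCH and, apparently a
 * second image of it, RR_CFGCH_V1; is_dav selects which one to program.
 */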
3858 if (is_dav)
3859 reg_reg18_addr = RR_CFGCH;
3860 else
3861 reg_reg18_addr = RR_CFGCH_V1;
3862
3863 rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK);
3864 rf_reg18 &= ~RR_CFGCH_BW;
3865
3866 switch (bw) {
3867 case RTW89_CHANNEL_WIDTH_5:
3868 case RTW89_CHANNEL_WIDTH_10:
3869 case RTW89_CHANNEL_WIDTH_20:
3870 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
3871 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3);
3872 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf);
3873 break;
3874 case RTW89_CHANNEL_WIDTH_40:
3875 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
3876 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3);
3877 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf);
3878 break;
3879 case RTW89_CHANNEL_WIDTH_80:
3880 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
3881 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x2);
3882 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xd);
3883 break;
3884 case RTW89_CHANNEL_WIDTH_160:
3885 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_160M);
3886 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1);
3887 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb);
3888 break;
3889 default:
3890 break;
3891 }
3892
3893 rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18);
3894 }
3895
3896 static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3897 enum rtw89_bandwidth bw)
3898 {
3899 bool is_dav;
3900 u8 kpath, path;
3901 u32 tmp = 0;
3902
3903 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
3904 kpath = _kpath(rtwdev, phy);
3905
3906 for (path = 0; path < 2; path++) {
3907 if (!(kpath & BIT(path)))
3908 continue;
3909
3910 is_dav = true;
3911 _bw_setting(rtwdev, path, bw, is_dav);
3912 is_dav = false;
3913 _bw_setting(rtwdev, path, bw, is_dav);
3914 if (rtwdev->dbcc_en)
3915 continue;
3916
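/* On CHIP_CAV silicon (and skipped under DBCC): release the path B
 * reset, copy path A's channel word into path B with APK mode 0x3,
 * then restore the reset bit.
 */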
3917 if (path == RF_PATH_B && rtwdev->hal.cv == CHIP_CAV) {
3918 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
3919 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
3920 rtw89_write_rf(rtwdev, RF_PATH_B, RR_APK, RR_APK_MOD, 0x3);
3921 rtw89_write_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK, tmp);
3922 fsleep(100);
3923 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
3924 }
3925 }
3926 }
3927
3928 static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
3929 u8 central_ch, enum rtw89_band band, bool is_dav)
3930 {
3931 u32 rf_reg18;
3932 u32 reg_reg18_addr;
3933
3934 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
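/* 0x18 and 0x10018 correspond to RR_CFGCH/RR_CFGCH_V1 as used in
 * _bw_setting(); presumably the same register pair.
 */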
3935 if (is_dav)
3936 reg_reg18_addr = 0x18;
3937 else
3938 reg_reg18_addr = 0x10018;
3939
3940 rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK);
3941 rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BAND0 | RR_CFGCH_CH);
3942 rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);
3943
3944 switch (band) {
3945 case RTW89_BAND_2G:
3946 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_2G);
3947 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_2G);
3948 break;
3949 case RTW89_BAND_5G:
3950 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G);
3951 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);
3952 break;
3953 case RTW89_BAND_6G:
3954 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_6G);
3955 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_6G);
3956 break;
3957 default:
3958 break;
3959 }
3960 rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18);
3961 fsleep(100);
3962 }
3963
3964 static void _ctrl_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3965 u8 central_ch, enum rtw89_band band)
3966 {
3967 u8 kpath, path;
3968
3969 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
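/* Reject channels the chip cannot tune: the gaps in the 2G/5G channel
 * plan, and out-of-range (or channel 2) entries in the 6G plan.
 */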
3970 if (band != RTW89_BAND_6G) {
3971 if ((central_ch > 14 && central_ch < 36) ||
3972 (central_ch > 64 && central_ch < 100) ||
3973 (central_ch > 144 && central_ch < 149) || central_ch > 177)
3974 return;
3975 } else {
3976 if (central_ch > 253 || central_ch == 2)
3977 return;
3978 }
3979
3980 kpath = _kpath(rtwdev, phy);
3981
3982 for (path = 0; path < 2; path++) {
3983 if (kpath & BIT(path)) {
3984 _ch_setting(rtwdev, path, central_ch, band, true);
3985 _ch_setting(rtwdev, path, central_ch, band, false);
3986 }
3987 }
3988 }
3989
3990 static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3991 enum rtw89_bandwidth bw)
3992 {
3993 u8 kpath;
3994 u8 path;
3995 u32 val;
3996
3997 kpath = _kpath(rtwdev, phy);
3998 for (path = 0; path < 2; path++) {
3999 if (!(kpath & BIT(path)))
4000 continue;
4001
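/* Program the RX BB filter bandwidth through the RF LUT: open the LUT
 * write enable, select entry 0xa, write the bandwidth code, then close
 * the write enable again.
 */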
4002 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
4003 rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0xa);
4004 switch (bw) {
4005 case RTW89_CHANNEL_WIDTH_20:
4006 val = 0x1b;
4007 break;
4008 case RTW89_CHANNEL_WIDTH_40:
4009 val = 0x13;
4010 break;
4011 case RTW89_CHANNEL_WIDTH_80:
4012 val = 0xb;
4013 break;
4014 case RTW89_CHANNEL_WIDTH_160:
4015 default:
4016 val = 0x3;
4017 break;
4018 }
4019 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, val);
4020 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
4021 }
4022 }
4023
4024 static void _lck_keep_thermal(struct rtw89_dev *rtwdev)
4025 {
4026 struct rtw89_lck_info *lck = &rtwdev->lck;
4027 int path;
4028
4029 for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
4030 lck->thermal[path] =
4031 ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
4032 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
4033 "[LCK] path=%d thermal=0x%x", path, lck->thermal[path]);
4034 }
4035 }
4036
4037 static void _lck(struct rtw89_dev *rtwdev)
4038 {
4039 u32 tmp18[2];
4040 int path = rtwdev->dbcc_en ? 2 : 1;
4041 int i;
4042
4043 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "[LCK] DO LCK\n");
4044
4045 tmp18[0] = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
4046 tmp18[1] = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
4047
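/* Retrigger LO lock on each path: raise the LCK trigger, rewrite the
 * saved channel word so the LO re-locks, then drop the trigger and
 * record a fresh thermal baseline.
 */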
4048 for (i = 0; i < path; i++) {
4049 rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
4050 rtw89_write_rf(rtwdev, i, RR_CFGCH, RFREG_MASK, tmp18[i]);
4051 rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
4052 }
4053
4054 _lck_keep_thermal(rtwdev);
4055 }
4056
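/* Thermal drift (in thermal-meter units) that retriggers LCK. */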
4057 #define RTW8852C_LCK_TH 8
4058
4059 void rtw8852c_lck_track(struct rtw89_dev *rtwdev)
4060 {
4061 struct rtw89_lck_info *lck = &rtwdev->lck;
4062 u8 cur_thermal;
4063 int delta;
4064 int path;
4065
4066 for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
4067 cur_thermal =
4068 ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
4069 delta = abs((int)cur_thermal - lck->thermal[path]);
4070
4071 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
4072 "[LCK] path=%d current thermal=0x%x delta=0x%x\n",
4073 path, cur_thermal, delta);
4074
4075 if (delta >= RTW8852C_LCK_TH) {
4076 _lck(rtwdev);
4077 return;
4078 }
4079 }
4080 }
4081
4082 void rtw8852c_lck_init(struct rtw89_dev *rtwdev)
4083 {
4084 _lck_keep_thermal(rtwdev);
4085 }
4086
4087 static
4088 void rtw8852c_ctrl_bw_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4089 u8 central_ch, enum rtw89_band band,
4090 enum rtw89_bandwidth bw)
4091 {
4092 _ctrl_ch(rtwdev, phy, central_ch, band);
4093 _ctrl_bw(rtwdev, phy, bw);
4094 _rxbb_bw(rtwdev, phy, bw);
4095 }
4096
4097 void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
4098 const struct rtw89_chan *chan,
4099 enum rtw89_phy_idx phy_idx)
4100 {
4101 rtw8852c_ctrl_bw_ch(rtwdev, phy_idx, chan->channel,
4102 chan->band_type,
4103 chan->band_width);
4104 }
4105
4106 void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
4107 {
4108 struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
4109 struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V0] = {};
4110 const struct rtw89_chan *chan;
4111 enum rtw89_entity_mode mode;
4112 u8 chan_idx;
4113 u8 idx;
4114
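/* While preparing MCC, calibrate against channel context 1 (the
 * upcoming channel); otherwise use context 0. The descriptor lookup
 * below then selects an RFK table slot for that band/channel pair.
 */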
4115 mode = rtw89_get_entity_mode(rtwdev);
4116 switch (mode) {
4117 case RTW89_ENTITY_MODE_MCC_PREPARE:
4118 chan_idx = RTW89_CHANCTX_1;
4119 break;
4120 default:
4121 chan_idx = RTW89_CHANCTX_0;
4122 break;
4123 }
4124
4125 chan = rtw89_chan_get(rtwdev, chan_idx);
4126
4127 for (idx = 0; idx < ARRAY_SIZE(desc); idx++) {
4128 struct rtw89_rfk_chan_desc *p = &desc[idx];
4129
4130 p->ch = rfk_mcc->ch[idx];
4131
4132 p->has_band = true;
4133 p->band = rfk_mcc->band[idx];
4134 }
4135
4136 idx = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan);
4137
4138 rfk_mcc->ch[idx] = chan->channel;
4139 rfk_mcc->band[idx] = chan->band_type;
4140 rfk_mcc->table_idx = idx;
4141 }
4142
4143 void rtw8852c_rck(struct rtw89_dev *rtwdev)
4144 {
4145 u8 path;
4146
4147 for (path = 0; path < 2; path++)
4148 _rck(rtwdev, path);
4149 }
4150
4151 void rtw8852c_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
4152 {
4153 u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
4154
4155 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
4156 _dac_cal(rtwdev, false, chanctx_idx);
4157 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
4158 }
4159
4160 void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
4161 enum rtw89_chanctx_idx chanctx_idx)
4162 {
4163 u32 tx_en;
4164 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
4165
4166 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
4167 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
4168 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
4169
4170 _iqk_init(rtwdev);
4171 _iqk(rtwdev, phy_idx, false, chanctx_idx);
4172
4173 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
4174 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
4175 }
4176
4177 #define RXDCK_VER_8852C 0xe
4178
4179 static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4180 bool is_afe, u8 retry_limit)
4181 {
4182 struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
4183 u8 path, kpath;
4184 u32 rf_reg5;
4185 bool is_fail;
4186 u8 rek_cnt;
4187
4188 kpath = _kpath(rtwdev, phy);
4189 rtw89_debug(rtwdev, RTW89_DBG_RFK,
4190 "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n",
4191 RXDCK_VER_8852C, rtwdev->hal.cv);
4192
4193 for (path = 0; path < 2; path++) {
4194 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
4195 if (!(kpath & BIT(path)))
4196 continue;
4197
4198 if (rtwdev->is_tssi_mode[path])
4199 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
4200 B_P0_TSSI_TRK_EN, 0x1);
4201 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
4202 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
4203 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_LO_SEL, rtwdev->dbcc_en);
4204
4205 for (rek_cnt = 0; rek_cnt < retry_limit; rek_cnt++) {
4206 _set_rx_dck(rtwdev, phy, path, is_afe);
4207
4208 /* To reduce the IO of _rx_dck_rek_check(), the last try is
4209  * always treated as a failure, going straight to the recovery
4210  * procedure.
 */
4211 if (rek_cnt == retry_limit - 1) {
4212 _rx_dck_recover(rtwdev, path);
4213 break;
4214 }
4215
4216 is_fail = _rx_dck_rek_check(rtwdev, path);
4217 if (!is_fail)
4218 break;
4219 }
4220
4221 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] rek_cnt[%d]=%d\n",
4222 path, rek_cnt);
4223
4224 rx_dck->thermal[path] = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
4225 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
4226
4227 if (rtwdev->is_tssi_mode[path])
4228 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
4229 B_P0_TSSI_TRK_EN, 0x0);
4230 }
4231 }
4232
4233 void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
4234 {
4235 _rx_dck(rtwdev, phy, is_afe, 1);
4236 }
4237
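/* Thermal drift (in thermal-meter units) that retriggers RX DCK. */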
4238 #define RTW8852C_RX_DCK_TH 12
4239
4240 void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
4241 {
4242 enum rtw89_chanctx_idx chanctx_idx = RTW89_CHANCTX_0;
4243 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
4244 struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
4245 enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
4246 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
4247 u8 dck_channel;
4248 u8 cur_thermal;
4249 u32 tx_en;
4250 int delta;
4251 int path;
4252
4253 if (chan->band_type == RTW89_BAND_2G)
4254 return;
4255
4256 if (rtwdev->scanning)
4257 return;
4258
4259 for (path = 0; path < RF_PATH_NUM_8852C; path++) {
4260 cur_thermal =
4261 ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
4262 delta = abs((int)cur_thermal - rx_dck->thermal[path]);
4263
4264 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
4265 "[RX_DCK] path=%d current thermal=0x%x delta=0x%x\n",
4266 path, cur_thermal, delta);
4267
4268 if (delta >= RTW8852C_RX_DCK_TH)
4269 goto trigger_rx_dck;
4270 }
4271
4272 return;
4273
4274 trigger_rx_dck:
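/* Retune to the channel computed by _rx_dck_channel_calc(), run RX DCK
 * with a larger retry budget (20), then restore the original channel.
 */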
4275 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
4276 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
4277
4278 for (path = 0; path < RF_PATH_NUM_8852C; path++) {
4279 dck_channel = _rx_dck_channel_calc(rtwdev, chan);
4280 _ctrl_ch(rtwdev, RTW89_PHY_0, dck_channel, chan->band_type);
4281 }
4282
4283 _rx_dck(rtwdev, RTW89_PHY_0, false, 20);
4284
4285 for (path = 0; path < RF_PATH_NUM_8852C; path++)
4286 _ctrl_ch(rtwdev, RTW89_PHY_0, chan->channel, chan->band_type);
4287
4288 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
4289 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
4290 }
4291
4292 void rtw8852c_dpk_init(struct rtw89_dev *rtwdev)
4293 {
4294 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
4295
4296 dpk->is_dpk_enable = true;
4297 dpk->is_dpk_reload_en = false;
4298 }
4299
4300 void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
4301 enum rtw89_chanctx_idx chanctx_idx)
4302 {
4303 u32 tx_en;
4304 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
4305
4306 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
4307 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
4308 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
4309
4310 _dpk(rtwdev, phy_idx, false, chanctx_idx);
4311
4312 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
4313 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
4314 }
4315
4316 void rtw8852c_dpk_track(struct rtw89_dev *rtwdev)
4317 {
4318 _dpk_track(rtwdev);
4319 }
4320
4321 void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4322 enum rtw89_chanctx_idx chanctx_idx)
4323 {
4324 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
4325 u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
4326
4327 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
4328
4329 if (rtwdev->dbcc_en) {
4330 if (phy == RTW89_PHY_0) {
4331 path = RF_PATH_A;
4332 path_max = RF_PATH_B;
4333 } else if (phy == RTW89_PHY_1) {
4334 path = RF_PATH_B;
4335 path_max = RF_PATH_NUM_8852C;
4336 }
4337 }
4338
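/* Full TSSI bring-up: disable TSSI, reprogram every per-path block
 * (system, BB TX power control, DCK, BB gain split, thermal table,
 * slope), then re-enable it and load the DE values from efuse.
 */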
4339 _tssi_disable(rtwdev, phy);
4340
4341 for (i = path; i < path_max; i++) {
4342 _tssi_set_sys(rtwdev, phy, i, chan);
4343 _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
4344 _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
4345 _tssi_set_dck(rtwdev, phy, i, chan);
4346 _tssi_set_bbgain_split(rtwdev, phy, i);
4347 _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
4348 _tssi_slope_cal_org(rtwdev, phy, i, chan);
4349 _tssi_set_aligk_default(rtwdev, phy, i, chan);
4350 _tssi_set_slope(rtwdev, phy, i);
4351 _tssi_run_slope(rtwdev, phy, i);
4352 }
4353
4354 _tssi_enable(rtwdev, phy);
4355 _tssi_set_efuse_to_de(rtwdev, phy, chan);
4356 }
4357
4358 void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4359 const struct rtw89_chan *chan)
4360 {
4361 u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
4362
4363 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
4364 __func__, phy);
4365
4366 if (!rtwdev->is_tssi_mode[RF_PATH_A])
4367 return;
4368 if (!rtwdev->is_tssi_mode[RF_PATH_B])
4369 return;
4370
4371 if (rtwdev->dbcc_en) {
4372 if (phy == RTW89_PHY_0) {
4373 path = RF_PATH_A;
4374 path_max = RF_PATH_B;
4375 } else if (phy == RTW89_PHY_1) {
4376 path = RF_PATH_B;
4377 path_max = RF_PATH_NUM_8852C;
4378 }
4379 }
4380
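/* Trimmed-down TSSI reconfiguration for scan: compared with
 * rtw8852c_tssi(), the BB TX power control init, gain split and slope
 * engine setup are skipped.
 */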
4381 _tssi_disable(rtwdev, phy);
4382
4383 for (i = path; i < path_max; i++) {
4384 _tssi_set_sys(rtwdev, phy, i, chan);
4385 _tssi_set_dck(rtwdev, phy, i, chan);
4386 _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
4387 _tssi_slope_cal_org(rtwdev, phy, i, chan);
4388 _tssi_set_aligk_default(rtwdev, phy, i, chan);
4389 }
4390
4391 _tssi_enable(rtwdev, phy);
4392 _tssi_set_efuse_to_de(rtwdev, phy, chan);
4393 }
4394
4395 static void rtw8852c_tssi_default_txagc(struct rtw89_dev *rtwdev,
4396 enum rtw89_phy_idx phy, bool enable)
4397 {
4398 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
4399 u8 i;
4400
4401 if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
4402 return;
4403
4404 if (enable) {
4405 /* SCAN_START */
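/* Save a default TX AGC offset per path: ignore 0xc000/0x0 readbacks
 * and poll up to six times for a non-zero value.
 */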
4406 if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 &&
4407 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) {
4408 for (i = 0; i < 6; i++) {
4409 tssi_info->default_txagc_offset[RF_PATH_A] =
4410 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
4411 B_TXAGC_BB);
4412 if (tssi_info->default_txagc_offset[RF_PATH_A])
4413 break;
4414 }
4415 }
4416
4417 if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 &&
4418 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) {
4419 for (i = 0; i < 6; i++) {
4420 tssi_info->default_txagc_offset[RF_PATH_B] =
4421 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
4422 B_TXAGC_BB_S1);
4423 if (tssi_info->default_txagc_offset[RF_PATH_B])
4424 break;
4425 }
4426 }
4427 } else {
4428 /* SCAN_END */
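/* Restore the saved per-path offsets and toggle the offset-enable
 * bits, presumably to latch the restored values.
 */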
4429 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT,
4430 tssi_info->default_txagc_offset[RF_PATH_A]);
4431 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,
4432 tssi_info->default_txagc_offset[RF_PATH_B]);
4433
4434 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
4435 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
4436
4437 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
4438 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
4439 }
4440 }
4441
4442 void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev,
4443 bool scan_start, enum rtw89_phy_idx phy_idx)
4444 {
4445 if (scan_start)
4446 rtw8852c_tssi_default_txagc(rtwdev, phy_idx, true);
4447 else
4448 rtw8852c_tssi_default_txagc(rtwdev, phy_idx, false);
4449 }
4450
4451 void rtw8852c_rfk_chanctx_cb(struct rtw89_dev *rtwdev,
4452 enum rtw89_chanctx_state state)
4453 {
4454 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
4455 u8 path;
4456
4457 switch (state) {
4458 case RTW89_CHANCTX_STATE_MCC_START:
4459 dpk->is_dpk_enable = false;
4460 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
4461 _dpk_onoff(rtwdev, path, false);
4462 break;
4463 case RTW89_CHANCTX_STATE_MCC_STOP:
4464 dpk->is_dpk_enable = true;
4465 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
4466 _dpk_onoff(rtwdev, path, false);
4467 rtw8852c_dpk(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
4468 break;
4469 default:
4470 break;
4471 }
4472 }
4473