xref: /linux/drivers/net/wireless/realtek/rtw89/phy.c (revision d30c1683aaecb93d2ab95685dc4300a33d3cea7a)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020  Realtek Corporation
3  */
4 
5 #include "acpi.h"
6 #include "chan.h"
7 #include "coex.h"
8 #include "debug.h"
9 #include "fw.h"
10 #include "mac.h"
11 #include "phy.h"
12 #include "ps.h"
13 #include "reg.h"
14 #include "sar.h"
15 #include "txrx.h"
16 #include "util.h"
17 
18 static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
19 {
20 	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
21 
22 	return phy->phy0_phy1_offset(rtwdev, addr);
23 }
24 
25 static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
26 			     const struct rtw89_ra_report *report)
27 {
28 	u32 bit_rate = report->bit_rate;
29 
30 	/* lower than ofdm, do not aggregate */
31 	if (bit_rate < 550)
32 		return 1;
33 
34 	/* avoid AMSDU for legacy rate */
35 	if (report->might_fallback_legacy)
36 		return 1;
37 
38 	/* lower than 20M vht 2ss mcs8, make it small */
39 	if (bit_rate < 1800)
40 		return 1200;
41 
42 	/* lower than 40M vht 2ss mcs9, make it medium */
43 	if (bit_rate < 4000)
44 		return 2600;
45 
46 	/* not yet 80M vht 2ss mcs8/9, make it twice regular packet size */
47 	if (bit_rate < 7000)
48 		return 3500;
49 
50 	return rtwdev->chip->max_amsdu_limit;
51 }
52 
53 static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap)
54 {
55 	u64 ra_mask = 0;
56 	u8 mcs_cap;
57 	int i, nss;
58 
59 	for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 12) {
60 		mcs_cap = mcs_map & 0x3;
61 		switch (mcs_cap) {
62 		case 2:
63 			ra_mask |= GENMASK_ULL(highest_mcs, 0) << nss;
64 			break;
65 		case 1:
66 			ra_mask |= GENMASK_ULL(highest_mcs - gap, 0) << nss;
67 			break;
68 		case 0:
69 			ra_mask |= GENMASK_ULL(highest_mcs - gap * 2, 0) << nss;
70 			break;
71 		default:
72 			break;
73 		}
74 	}
75 
76 	return ra_mask;
77 }
78 
79 static u64 get_he_ra_mask(struct ieee80211_link_sta *link_sta)
80 {
81 	struct ieee80211_sta_he_cap cap = link_sta->he_cap;
82 	u16 mcs_map;
83 
84 	switch (link_sta->bandwidth) {
85 	case IEEE80211_STA_RX_BW_160:
86 		if (cap.he_cap_elem.phy_cap_info[0] &
87 		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
88 			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80p80);
89 		else
90 			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_160);
91 		break;
92 	default:
93 		mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80);
94 	}
95 
96 	/* MCS11, MCS9, MCS7 */
97 	return get_mcs_ra_mask(mcs_map, 11, 2);
98 }
99 
100 static u64 get_eht_mcs_ra_mask(u8 *max_nss, u8 start_mcs, u8 n_nss)
101 {
102 	u64 nss_mcs_shift;
103 	u64 nss_mcs_val;
104 	u64 mask = 0;
105 	int i, j;
106 	u8 nss;
107 
108 	for (i = 0; i < n_nss; i++) {
109 		nss = u8_get_bits(max_nss[i], IEEE80211_EHT_MCS_NSS_RX);
110 		if (!nss)
111 			continue;
112 
113 		nss_mcs_val = GENMASK_ULL(start_mcs + i * 2, 0);
114 
115 		for (j = 0, nss_mcs_shift = 12; j < nss; j++, nss_mcs_shift += 16)
116 			mask |= nss_mcs_val << nss_mcs_shift;
117 	}
118 
119 	return mask;
120 }
121 
/* Build the EHT RA mask for @link_sta based on its operating bandwidth.
 *
 * A 20MHz-only client (no HE channel width bits set) associated to our AP
 * uses the dedicated only_20mhz MCS/NSS set; other bandwidths use the
 * corresponding per-bandwidth set.
 */
static u64 get_eht_ra_mask(struct rtw89_vif_link *rtwvif_link,
			   struct ieee80211_link_sta *link_sta)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs_nss_20mhz;
	struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
	struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss;
	u8 *he_phy_cap = link_sta->he_cap.he_cap_elem.phy_cap_info;

	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_320:
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._320;
		/* MCS 9, 11, 13 */
		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
	case IEEE80211_STA_RX_BW_160:
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._160;
		/* MCS 9, 11, 13 */
		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
	case IEEE80211_STA_RX_BW_20:
		/* no HE width bits -> genuinely 20MHz-only peer */
		if (vif->type == NL80211_IFTYPE_AP &&
		    !(he_phy_cap[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
			mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz;
			/* MCS 7, 9, 11, 13 */
			return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4);
		}
		fallthrough;
	case IEEE80211_STA_RX_BW_80:
	default:
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._80;
		/* MCS 9, 11, 13 */
		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
	}
}
155 
156 #define RA_FLOOR_TABLE_SIZE	7
157 #define RA_FLOOR_UP_GAP		3
158 static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi,
159 				  u8 ratr_state)
160 {
161 	u8 rssi_lv_t[RA_FLOOR_TABLE_SIZE] = {30, 44, 48, 52, 56, 60, 100};
162 	u8 rssi_lv = 0;
163 	u8 i;
164 
165 	rssi >>= 1;
166 	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
167 		if (i >= ratr_state)
168 			rssi_lv_t[i] += RA_FLOOR_UP_GAP;
169 		if (rssi < rssi_lv_t[i]) {
170 			rssi_lv = i;
171 			break;
172 		}
173 	}
174 	if (rssi_lv == 0)
175 		return 0xffffffffffffffffULL;
176 	else if (rssi_lv == 1)
177 		return 0xfffffffffffffff0ULL;
178 	else if (rssi_lv == 2)
179 		return 0xffffffffffffefe0ULL;
180 	else if (rssi_lv == 3)
181 		return 0xffffffffffffcfc0ULL;
182 	else if (rssi_lv == 4)
183 		return 0xffffffffffff8f80ULL;
184 	else if (rssi_lv >= 5)
185 		return 0xffffffffffff0f00ULL;
186 
187 	return 0xffffffffffffffffULL;
188 }
189 
190 static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
191 {
192 	if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
193 		ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
194 
195 	if (ra_mask == 0)
196 		ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
197 
198 	return ra_mask;
199 }
200 
/* Translate the user-configured cfg80211 bitrate mask into an RA mask.
 *
 * Returns (u64)-1 (all-ones, i.e. "no restriction") when no user mask is
 * active or the band is unknown, so callers can unconditionally AND the
 * result into their mask.
 */
static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev,
				 struct rtw89_sta_link *rtwsta_link,
				 struct ieee80211_link_sta *link_sta,
				 const struct rtw89_chan *chan)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
	enum nl80211_band band;
	u64 cfg_mask;

	if (!rtwsta_link->use_cfg_mask)
		return -1;

	/* legacy (CCK/OFDM) portion depends on the operating band */
	switch (chan->band_type) {
	case RTW89_BAND_2G:
		band = NL80211_BAND_2GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy,
					   RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_5G:
		band = NL80211_BAND_5GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_6G:
		band = NL80211_BAND_6GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_6GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	default:
		rtw89_warn(rtwdev, "unhandled band type %d\n", chan->band_type);
		return -1;
	}

	/* overlay the MCS restriction of the highest capability the peer
	 * advertises; only 1SS and 2SS user masks are honored
	 */
	if (link_sta->eht_cap.has_eht) {
		cfg_mask |= u64_encode_bits(mask->control[band].eht_mcs[0],
					    RA_MASK_EHT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].eht_mcs[1],
					    RA_MASK_EHT_2SS_RATES);
	} else if (link_sta->he_cap.has_he) {
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0],
					    RA_MASK_HE_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1],
					    RA_MASK_HE_2SS_RATES);
	} else if (link_sta->vht_cap.vht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
					    RA_MASK_VHT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
					    RA_MASK_VHT_2SS_RATES);
	} else if (link_sta->ht_cap.ht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
					    RA_MASK_HT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
					    RA_MASK_HT_2SS_RATES);
	}

	return cfg_mask;
}
258 
/* Per-mode high-rate mask tables, indexed by spatial stream (0 = 1SS). */
static const u64
rtw89_ra_mask_ht_rates[4] = {RA_MASK_HT_1SS_RATES, RA_MASK_HT_2SS_RATES,
			     RA_MASK_HT_3SS_RATES, RA_MASK_HT_4SS_RATES};
static const u64
rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES,
			      RA_MASK_VHT_3SS_RATES, RA_MASK_VHT_4SS_RATES};
static const u64
rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
			     RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES};
static const u64
rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES,
			      RA_MASK_EHT_3SS_RATES, RA_MASK_EHT_4SS_RATES};
/* EHT limited to MCS0-11, for configurations without MCS12/13 support */
static const u64
rtw89_ra_mask_eht_mcs0_11[4] = {RA_MASK_EHT_1SS_MCS0_11, RA_MASK_EHT_2SS_MCS0_11,
				RA_MASK_EHT_3SS_MCS0_11, RA_MASK_EHT_4SS_MCS0_11};
274 
/* Choose a fixed GI/LTF combination for rate adaptation.
 *
 * Default is 2x HE-LTF with 0.8us GI; RTL8852C at 160MHz prefers 4x
 * HE-LTF + 0.8us when the peer supports it for SU/MU. A user-configured
 * he_gi/he_ltf pair (via the cfg80211 bitrate mask) overrides the default
 * for HE-capable peers.
 */
static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
				struct rtw89_sta_link *rtwsta_link,
				struct ieee80211_link_sta *link_sta,
				const struct rtw89_chan *chan,
				bool *fix_giltf_en, u8 *fix_giltf)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u8 he_ltf = mask->control[nl_band].he_ltf;
	u8 he_gi = mask->control[nl_band].he_gi;

	*fix_giltf_en = true;

	if (rtwdev->chip->chip_id == RTL8852C &&
	    chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
	    rtw89_sta_link_has_su_mu_4xhe08(link_sta))
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
	else
		*fix_giltf = RTW89_GILTF_2XHE08;

	/* user override requires an active cfg mask and an HE peer */
	if (!(rtwsta_link->use_cfg_mask && link_sta->he_cap.has_he))
		return;

	/* map (he_ltf, he_gi) to the firmware GI/LTF encoding; any pair
	 * not listed keeps the default selected above
	 */
	if (he_ltf == 2 && he_gi == 2) {
		*fix_giltf = RTW89_GILTF_LGI_4XHE32;
	} else if (he_ltf == 2 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
	} else if (he_ltf == 1 && he_gi == 1) {
		*fix_giltf = RTW89_GILTF_2XHE16;
	} else if (he_ltf == 1 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_2XHE08;
	} else if (he_ltf == 0 && he_gi == 1) {
		*fix_giltf = RTW89_GILTF_1XHE16;
	} else if (he_ltf == 0 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_1XHE08;
	}
}
313 
/* Rebuild the rate-adaptation (RA) parameters for one STA link.
 *
 * Combines the peer's HT/VHT/HE/EHT capabilities, the band's legacy rates,
 * an RSSI-based rate floor, and any user-configured bitrate mask into
 * rtwsta_link->ra, ready for rtw89_fw_h2c_ra().
 *
 * @p2p: suppress the user rate-pattern override on P2P interfaces
 * @csi: also fill the CSI (sounding report) rate fields
 */
static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
				    struct rtw89_vif_link *rtwvif_link,
				    struct rtw89_sta_link *rtwsta_link,
				    struct ieee80211_link_sta *link_sta,
				    bool p2p, bool csi)
{
	struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif_link->rate_pattern;
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
	u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi);
	u64 ra_mask = 0;
	u64 ra_mask_bak;
	u8 mode = 0;
	u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
	u8 bw_mode = 0;
	u8 stbc_en = 0;
	u8 ldpc_en = 0;
	u8 fix_giltf = 0;
	u8 i;
	bool sgi = false;
	bool fix_giltf_en = false;

	memset(ra, 0, sizeof(*ra));
	/* Set the ra mask from sta's capability; pick the highest PHY mode
	 * the peer supports and the matching per-NSS mask table
	 */
	if (link_sta->eht_cap.has_eht) {
		mode |= RTW89_RA_MODE_EHT;
		ra_mask |= get_eht_ra_mask(rtwvif_link, link_sta);

		if (rtwdev->hal.no_mcs_12_13)
			high_rate_masks = rtw89_ra_mask_eht_mcs0_11;
		else
			high_rate_masks = rtw89_ra_mask_eht_rates;

		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
				    chan, &fix_giltf_en, &fix_giltf);
	} else if (link_sta->he_cap.has_he) {
		mode |= RTW89_RA_MODE_HE;
		csi_mode = RTW89_RA_RPT_MODE_HE;
		ra_mask |= get_he_ra_mask(link_sta);
		high_rate_masks = rtw89_ra_mask_he_rates;
		if (link_sta->he_cap.he_cap_elem.phy_cap_info[2] &
		    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
			stbc_en = 1;
		if (link_sta->he_cap.he_cap_elem.phy_cap_info[1] &
		    IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
			ldpc_en = 1;
		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
				    chan, &fix_giltf_en, &fix_giltf);
	} else if (link_sta->vht_cap.vht_supported) {
		u16 mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map);

		mode |= RTW89_RA_MODE_VHT;
		csi_mode = RTW89_RA_RPT_MODE_VHT;
		/* MCS9 (non-20MHz), MCS8, MCS7 */
		if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
			ra_mask |= get_mcs_ra_mask(mcs_map, 8, 1);
		else
			ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
		high_rate_masks = rtw89_ra_mask_vht_rates;
		if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
			stbc_en = 1;
		if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
			ldpc_en = 1;
	} else if (link_sta->ht_cap.ht_supported) {
		mode |= RTW89_RA_MODE_HT;
		csi_mode = RTW89_RA_RPT_MODE_HT;
		/* HT RX masks are one byte per stream; streams occupy
		 * 12-bit groups in the rate mask
		 */
		ra_mask |= ((u64)link_sta->ht_cap.mcs.rx_mask[3] << 48) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[2] << 36) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[1] << 24) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[0] << 12);
		high_rate_masks = rtw89_ra_mask_ht_rates;
		if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
			stbc_en = 1;
		if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
			ldpc_en = 1;
	}

	/* add the legacy rates of the operating band */
	switch (chan->band_type) {
	case RTW89_BAND_2G:
		ra_mask |= link_sta->supp_rates[NL80211_BAND_2GHZ];
		if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xf)
			mode |= RTW89_RA_MODE_CCK;
		if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xff0)
			mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_5G:
		/* shift past the four CCK positions */
		ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_5GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_6G:
		ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_6GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	default:
		rtw89_err(rtwdev, "Unknown band type\n");
		break;
	}

	ra_mask_bak = ra_mask;

	/* restrict to the spatial streams the hardware can transmit */
	if (mode >= RTW89_RA_MODE_HT) {
		u64 mask = 0;
		for (i = 0; i < rtwdev->hal.tx_nss; i++)
			mask |= high_rate_masks[i];
		if (mode & RTW89_RA_MODE_OFDM)
			mask |= RA_MASK_SUBOFDM_RATES;
		if (mode & RTW89_RA_MODE_CCK)
			mask |= RA_MASK_SUBCCK_RATES;
		ra_mask &= mask;
	} else if (mode & RTW89_RA_MODE_OFDM) {
		ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES);
	}

	/* apply the RSSI floor, then recover if everything got masked */
	if (mode != RTW89_RA_MODE_CCK)
		ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);

	ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
	ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);

	/* bandwidth mode and short-GI capability at that bandwidth */
	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		bw_mode = RTW89_CHANNEL_WIDTH_160;
		sgi = link_sta->vht_cap.vht_supported &&
		      (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
		break;
	case IEEE80211_STA_RX_BW_80:
		bw_mode = RTW89_CHANNEL_WIDTH_80;
		sgi = link_sta->vht_cap.vht_supported &&
		      (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
		break;
	case IEEE80211_STA_RX_BW_40:
		bw_mode = RTW89_CHANNEL_WIDTH_40;
		sgi = link_sta->ht_cap.ht_supported &&
		      (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
		break;
	default:
		bw_mode = RTW89_CHANNEL_WIDTH_20;
		sgi = link_sta->ht_cap.ht_supported &&
		      (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
		break;
	}

	if (link_sta->he_cap.he_cap_elem.phy_cap_info[3] &
	    IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
		ra->dcm_cap = 1;

	/* a user-fixed rate pattern (non-P2P) overrides everything above */
	if (rate_pattern->enable && !p2p) {
		ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);
		ra_mask &= rate_pattern->ra_mask;
		mode = rate_pattern->ra_mode;
	}

	ra->bw_cap = bw_mode;
	ra->er_cap = rtwsta_link->er_cap;
	ra->mode_ctrl = mode;
	ra->macid = rtwsta_link->mac_id;
	ra->stbc_cap = stbc_en;
	ra->ldpc_cap = ldpc_en;
	ra->ss_num = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
	ra->en_sgi = sgi;
	ra->ra_mask = ra_mask;
	ra->fix_giltf_en = fix_giltf_en;
	ra->fix_giltf = fix_giltf;
	ra->partial_bw_er = link_sta->he_cap.has_he ?
			    !!(link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
			       IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE) : 0;
	ra->band = chan->band_type;

	if (!csi)
		return;

	/* CSI (sounding report) rate configuration */
	ra->fixed_csi_rate_en = false;
	ra->ra_csi_rate_en = true;
	ra->cr_tbl_sel = false;
	ra->band_num = rtwvif_link->phy_idx;
	ra->csi_bw = bw_mode;
	ra->csi_gi_ltf = RTW89_GILTF_LGI_4XHE32;
	ra->csi_mcs_ss_idx = 5;
	ra->csi_mode = csi_mode;
}
496 
497 void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev,
498 				  struct rtw89_sta_link *rtwsta_link,
499 				  u32 changed)
500 {
501 	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
502 	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
503 	struct rtw89_ra_info *ra = &rtwsta_link->ra;
504 	struct ieee80211_link_sta *link_sta;
505 
506 	rcu_read_lock();
507 
508 	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
509 	rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
510 				link_sta, vif->p2p, false);
511 
512 	rcu_read_unlock();
513 
514 	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED)
515 		ra->upd_mask = 1;
516 	if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_NSS_CHANGED))
517 		ra->upd_bw_nss_mask = 1;
518 
519 	rtw89_debug(rtwdev, RTW89_DBG_RA,
520 		    "ra updat: macid = %d, bw = %d, nss = %d, gi = %d %d",
521 		    ra->macid,
522 		    ra->bw_cap,
523 		    ra->ss_num,
524 		    ra->en_sgi,
525 		    ra->giltf);
526 
527 	rtw89_fw_h2c_ra(rtwdev, ra, false);
528 }
529 
/* Recompute and push RA for every active link of @sta. */
void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
			     u32 changed)
{
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_sta_link *rtwsta_link;
	unsigned int link_id;

	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
		rtw89_phy_ra_update_sta_link(rtwdev, rtwsta_link, changed);
}
540 
/* Try to derive a fixed-rate pattern from one section of the bitrate mask.
 *
 * @next: candidate pattern, filled in when exactly one usable rate is found
 * @rate_base: HW rate index of the first rate of this section
 * @ra_mask/@ra_mode: RA mask/mode to program if this section is chosen
 * @rate_ctrl: user-configured bitmap for this section
 * @ctrl_skip: bitmap value meaning "unrestricted" for this section
 * @force: require exactly one bit set (used for MCS sections)
 *
 * Returns true to keep scanning further sections, false to abort pattern
 * selection (ambiguous request, or a pattern was already picked).
 */
static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
				 u16 rate_base, u64 ra_mask, u8 ra_mode,
				 u32 rate_ctrl, u32 ctrl_skip, bool force)
{
	u8 n, c;

	/* section carries no restriction: nothing to pick here */
	if (rate_ctrl == ctrl_skip)
		return true;

	n = hweight32(rate_ctrl);
	if (n == 0)
		return true;

	/* MCS sections must pin exactly one rate */
	if (force && n != 1)
		return false;

	/* only one fixed pattern may be active at a time */
	if (next->enable)
		return false;

	/* pick the highest selected rate in this section */
	c = __fls(rate_ctrl);
	next->rate = rate_base + c;
	next->ra_mode = ra_mode;
	next->ra_mask = ra_mask;
	next->enable = true;

	return true;
}
568 
/* AX-generation chips have no EHT rates; alias the EHT rate bases to
 * RTW89_HW_RATE_INVAL so the per-generation tables below stay well-formed.
 */
enum __rtw89_hw_rate_invalid_bases {
	/* no EHT rate for ax chip */
	RTW89_HW_RATE_EHT_NSS1_MCS0 = RTW89_HW_RATE_INVAL,
	RTW89_HW_RATE_EHT_NSS2_MCS0 = RTW89_HW_RATE_INVAL,
	RTW89_HW_RATE_EHT_NSS3_MCS0 = RTW89_HW_RATE_INVAL,
	RTW89_HW_RATE_EHT_NSS4_MCS0 = RTW89_HW_RATE_INVAL,
};

/* Expand to a per-chip-generation initializer pair: AX chips use the
 * legacy rate encoding, BE chips the V1 encoding.
 */
#define RTW89_HW_RATE_BY_CHIP_GEN(rate) \
	{ \
		[RTW89_CHIP_AX] = RTW89_HW_RATE_ ## rate, \
		[RTW89_CHIP_BE] = RTW89_HW_RATE_V1_ ## rate, \
	}
582 
/* Derive a fixed-rate pattern for one vif link from the user bitrate mask.
 *
 * Scans EHT (BE chips only), HE, VHT, HT and finally legacy sections, most
 * capable first. The first section that pins exactly one rate wins; any
 * ambiguity, or an all-set (unrestricted) mask, disables the pattern.
 */
static
void __rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
				  struct rtw89_vif_link *rtwvif_link,
				  const struct cfg80211_bitrate_mask *mask)
{
	struct ieee80211_supported_band *sband;
	struct rtw89_phy_rate_pattern next_pattern = {0};
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	static const u16 hw_rate_eht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(EHT_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(EHT_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(EHT_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(EHT_NSS4_MCS0),
	};
	static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS4_MCS0),
	};
	static const u16 hw_rate_vht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS4_MCS0),
	};
	static const u16 hw_rate_ht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS8),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS16),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS24),
	};
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
	u8 tx_nss = rtwdev->hal.tx_nss;
	u8 i;

	/* AX chips have no EHT rates to scan */
	if (chip_gen == RTW89_CHIP_AX)
		goto rs_11ax;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_eht[i][chip_gen],
					  RA_MASK_EHT_RATES, RTW89_RA_MODE_EHT,
					  mask->control[nl_band].eht_mcs[i],
					  0, true))
			goto out;

rs_11ax:
	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_he[i][chip_gen],
					  RA_MASK_HE_RATES, RTW89_RA_MODE_HE,
					  mask->control[nl_band].he_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i][chip_gen],
					  RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT,
					  mask->control[nl_band].vht_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i][chip_gen],
					  RA_MASK_HT_RATES, RTW89_RA_MODE_HT,
					  mask->control[nl_band].ht_mcs[i],
					  0, true))
			goto out;

	/* legacy cannot be empty for nl80211_parse_tx_bitrate_mask, and
	 * require at least one basic rate for ieee80211_set_bitrate_mask,
	 * so the decision just depends on if all bitrates are set or not.
	 */
	sband = rtwdev->hw->wiphy->bands[nl_band];
	if (band == RTW89_BAND_2G) {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1,
					  RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES,
					  RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	} else {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6,
					  RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	}

	if (!next_pattern.enable)
		goto out;

	/* guard against the aliased invalid EHT bases on AX chips */
	if (unlikely(next_pattern.rate >= RTW89_HW_RATE_INVAL)) {
		rtw89_debug(rtwdev, RTW89_DBG_RA,
			    "pattern invalid target: chip_gen %d, mode 0x%x\n",
			    chip_gen, next_pattern.ra_mode);
		goto out;
	}

	rtwvif_link->rate_pattern = next_pattern;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "configure pattern: rate 0x%x, mask 0x%llx, mode 0x%x\n",
		    next_pattern.rate,
		    next_pattern.ra_mask,
		    next_pattern.ra_mode);
	return;

out:
	rtwvif_link->rate_pattern.enable = false;
	rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n");
}
696 
/* Apply the user bitrate mask as a rate pattern on every link of @vif. */
void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				const struct cfg80211_bitrate_mask *mask)
{
	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
	struct rtw89_vif_link *rtwvif_link;
	unsigned int link_id;

	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
		__rtw89_phy_rate_pattern_vif(rtwdev, rtwvif_link, mask);
}
708 
709 static void rtw89_phy_ra_update_sta_iter(void *data, struct ieee80211_sta *sta)
710 {
711 	struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
712 
713 	rtw89_phy_ra_update_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
714 }
715 
/* Refresh RA for every known station on this device. */
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_ra_update_sta_iter,
					  rtwdev);
}
722 
723 void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link)
724 {
725 	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
726 	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
727 	struct rtw89_ra_info *ra = &rtwsta_link->ra;
728 	u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi) >> RSSI_FACTOR;
729 	struct ieee80211_link_sta *link_sta;
730 	bool csi;
731 
732 	rcu_read_lock();
733 
734 	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
735 	csi = rtw89_sta_has_beamformer_cap(link_sta);
736 
737 	rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
738 				link_sta, vif->p2p, csi);
739 
740 	rcu_read_unlock();
741 
742 	if (rssi > 40)
743 		ra->init_rate_lv = 1;
744 	else if (rssi > 20)
745 		ra->init_rate_lv = 2;
746 	else if (rssi > 1)
747 		ra->init_rate_lv = 3;
748 	else
749 		ra->init_rate_lv = 0;
750 	ra->upd_all = 1;
751 	rtw89_debug(rtwdev, RTW89_DBG_RA,
752 		    "ra assoc: macid = %d, mode = %d, bw = %d, nss = %d, lv = %d",
753 		    ra->macid,
754 		    ra->mode_ctrl,
755 		    ra->bw_cap,
756 		    ra->ss_num,
757 		    ra->init_rate_lv);
758 	rtw89_debug(rtwdev, RTW89_DBG_RA,
759 		    "ra assoc: dcm = %d, er = %d, ldpc = %d, stbc = %d, gi = %d %d",
760 		    ra->dcm_cap,
761 		    ra->er_cap,
762 		    ra->ldpc_cap,
763 		    ra->stbc_cap,
764 		    ra->en_sgi,
765 		    ra->giltf);
766 
767 	rtw89_fw_h2c_ra(rtwdev, ra, csi);
768 }
769 
/* Compute the TX sub-channel (TXSC) index for transmitting at data
 * bandwidth @dbw inside the configured channel bandwidth.
 *
 * Returns 0 when the data bandwidth fills the channel (no sub-channel),
 * and 0xff for an impossible 40MHz placement inside a 160MHz channel.
 */
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
		      const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsc_idx = 0;
	u8 tmp = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsc_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		/* primary above center -> upper 20MHz half, else lower */
		txsc_idx = pri_ch > central_ch ? 1 : 2;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			/* 20MHz slice index from the channel-number offset */
			if (pri_ch > central_ch)
				txsc_idx = (pri_ch - central_ch) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 1;
		} else {
			/* 40MHz halves of the 80MHz channel */
			txsc_idx = pri_ch > central_ch ? 9 : 10;
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		/* tmp: 20MHz slice index relative to the channel center */
		if (pri_ch > central_ch)
			tmp = (pri_ch - central_ch) >> 1;
		else
			tmp = ((central_ch - pri_ch) >> 1) + 1;

		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			txsc_idx = tmp;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			/* map the 20MHz slice to its containing 40MHz block */
			if (tmp == 1 || tmp == 3)
				txsc_idx = 9;
			else if (tmp == 5 || tmp == 7)
				txsc_idx = 11;
			else if (tmp == 2 || tmp == 4)
				txsc_idx = 10;
			else if (tmp == 6 || tmp == 8)
				txsc_idx = 12;
			else
				return 0xff;	/* no valid placement */
		} else {
			/* 80MHz halves of the 160MHz channel */
			txsc_idx = pri_ch > central_ch ? 13 : 14;
		}
		break;
	case RTW89_CHANNEL_WIDTH_80_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (10 - (pri_ch - central_ch)) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 5;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			txsc_idx = pri_ch > central_ch ? 10 : 12;
		} else {
			txsc_idx = 14;
		}
		break;
	default:
		break;
	}

	return txsc_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsc);
839 
/* Compute the TX sub-band (TXSB) index (BE-generation encoding) for data
 * bandwidth @dbw within the configured channel bandwidth.
 *
 * Unlike TXSC, sub-bands are numbered linearly from the low edge; the
 * (pri_ch - central_ch + K) / N expressions convert the signed channel
 * offset into that linear index.
 */
u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsb_idx = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsb_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 6) / 4;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 14) / 4;
		else if (dbw == RTW89_CHANNEL_WIDTH_40)
			txsb_idx = (pri_ch - central_ch + 12) / 8;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_320:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 30) / 4;
		else if (dbw == RTW89_CHANNEL_WIDTH_40)
			txsb_idx = (pri_ch - central_ch + 28) / 8;
		else if (dbw == RTW89_CHANNEL_WIDTH_80)
			txsb_idx = (pri_ch - central_ch + 24) / 16;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	default:
		break;
	}

	return txsb_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsb);
886 
887 static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
888 {
889 	return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
890 	       !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1);
891 }
892 
893 u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
894 		      u32 addr, u32 mask)
895 {
896 	const struct rtw89_chip_info *chip = rtwdev->chip;
897 	const u32 *base_addr = chip->rf_base_addr;
898 	u32 val, direct_addr;
899 
900 	if (rf_path >= rtwdev->chip->rf_path_num) {
901 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
902 		return INV_RF_DATA;
903 	}
904 
905 	addr &= 0xff;
906 	direct_addr = base_addr[rf_path] + (addr << 2);
907 	mask &= RFREG_MASK;
908 
909 	val = rtw89_phy_read32_mask(rtwdev, direct_addr, mask);
910 
911 	return val;
912 }
913 EXPORT_SYMBOL(rtw89_phy_read_rf);
914 
/* Read an RF register through the SWSI (serial) interface.
 *
 * Waits for the interface to go idle, issues the read request, then polls
 * for the data-done flag before fetching the value. Returns INV_RF_DATA on
 * any timeout.
 */
static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	bool busy;
	bool done;
	u32 val;
	int ret;

	/* wait for any in-flight SWSI transaction to finish */
	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "read rf busy swsi\n");
		return INV_RF_DATA;
	}

	mask &= RFREG_MASK;

	/* issue the read request for (path, addr) */
	val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr);
	rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val);
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1,
				       30, false, rtwdev, R_SWSI_V1,
				       B_SWSI_R_DATA_DONE_V1);
	if (ret) {
		/* suppress noise when the device has been unplugged */
		if (!test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags))
			rtw89_err(rtwdev, "read swsi busy\n");
		return INV_RF_DATA;
	}

	return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask);
}
948 
949 u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
950 			 u32 addr, u32 mask)
951 {
952 	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);
953 
954 	if (rf_path >= rtwdev->chip->rf_path_num) {
955 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
956 		return INV_RF_DATA;
957 	}
958 
959 	if (ad_sel)
960 		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
961 	else
962 		return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask);
963 }
964 EXPORT_SYMBOL(rtw89_phy_read_rf_v1);
965 
/* Read the full (unmasked) value of an RF register via the HWSI interface.
 *
 * Enables polling mode, waits for the engine to go idle, issues the read,
 * polls for completion, then disables polling mode again on all paths.
 * Returns INV_RF_DATA on timeout.
 */
static u32 rtw89_phy_read_full_rf_v2_a(struct rtw89_dev *rtwdev,
				       enum rtw89_rf_path rf_path, u32 addr)
{
	/* per-path result and control register addresses */
	static const u16 r_addr_ofst[2] = {0x2C24, 0x2D24};
	static const u16 addr_ofst[2] = {0x2ADC, 0x2BDC};
	bool busy, done;
	int ret;
	u32 val;

	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_CTL_MASK, 0x1);
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 3800, false,
				       rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_BUSY);
	if (ret) {
		rtw89_warn(rtwdev, "poll HWSI is busy\n");
		return INV_RF_DATA;
	}

	/* latch the address and trigger the read */
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_MASK, addr);
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_RD, 0x1);
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done,
				       1, 3800, false,
				       rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_RDONE);
	if (ret) {
		rtw89_warn(rtwdev, "read HWSI is busy\n");
		val = INV_RF_DATA;
		goto out;
	}

	val = rtw89_phy_read32_mask(rtwdev, r_addr_ofst[rf_path], RFREG_MASK);
out:
	/* always leave polling mode disabled */
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_POLL_MASK, 0);

	return val;
}
1003 
1004 static u32 rtw89_phy_read_rf_v2_a(struct rtw89_dev *rtwdev,
1005 				  enum rtw89_rf_path rf_path, u32 addr, u32 mask)
1006 {
1007 	u32 val;
1008 
1009 	val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
1010 
1011 	return (val & mask) >> __ffs(mask);
1012 }
1013 
1014 u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1015 			 u32 addr, u32 mask)
1016 {
1017 	bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
1018 
1019 	if (rf_path >= rtwdev->chip->rf_path_num) {
1020 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1021 		return INV_RF_DATA;
1022 	}
1023 
1024 	if (ad_sel)
1025 		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
1026 	else
1027 		return rtw89_phy_read_rf_v2_a(rtwdev, rf_path, addr, mask);
1028 }
1029 EXPORT_SYMBOL(rtw89_phy_read_rf_v2);
1030 
1031 bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1032 			u32 addr, u32 mask, u32 data)
1033 {
1034 	const struct rtw89_chip_info *chip = rtwdev->chip;
1035 	const u32 *base_addr = chip->rf_base_addr;
1036 	u32 direct_addr;
1037 
1038 	if (rf_path >= rtwdev->chip->rf_path_num) {
1039 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1040 		return false;
1041 	}
1042 
1043 	addr &= 0xff;
1044 	direct_addr = base_addr[rf_path] + (addr << 2);
1045 	mask &= RFREG_MASK;
1046 
1047 	rtw89_phy_write32_mask(rtwdev, direct_addr, mask, data);
1048 
1049 	/* delay to ensure writing properly */
1050 	udelay(1);
1051 
1052 	return true;
1053 }
1054 EXPORT_SYMBOL(rtw89_phy_write_rf);
1055 
/* Write an RF register through the SWSI (SW serial interface) on the
 * analog access path. Returns false if the SWSI engine is stuck busy,
 * true once the write command word has been issued.
 */
static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path rf_path, u32 addr, u32 mask,
				 u32 data)
{
	u8 bit_shift;
	u32 val;
	bool busy, b_msk_en = false;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "write rf busy swsi\n");
		return false;
	}

	data &= RFREG_MASK;
	mask &= RFREG_MASK;

	/* Partial-field writes: program the HW bit-mask register and shift
	 * the payload into field position; full-register writes skip this.
	 */
	if (mask != RFREG_MASK) {
		b_msk_en = true;
		rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK,
				       mask);
		bit_shift = __ffs(mask);
		data = (data << bit_shift) & RFREG_MASK;
	}

	/* pack mask-enable, path, address and payload into one command word */
	val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) |
	      FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) |
	      FIELD_PREP(B_SWSI_DATA_VAL_V1, data);

	rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val);

	return true;
}
1092 
1093 bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1094 			   u32 addr, u32 mask, u32 data)
1095 {
1096 	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);
1097 
1098 	if (rf_path >= rtwdev->chip->rf_path_num) {
1099 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1100 		return false;
1101 	}
1102 
1103 	if (ad_sel)
1104 		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
1105 	else
1106 		return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data);
1107 }
1108 EXPORT_SYMBOL(rtw89_phy_write_rf_v1);
1109 
/* Write one full RF register value through the HWSI engine on the analog
 * access path. Returns false if the engine never reports idle.
 */
static
bool rtw89_phy_write_full_rf_v2_a(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
				  u32 addr, u32 data)
{
	/* per-path HWSI registers: idle/status, and write command */
	static const u32 addr_is_idle[2] = {0x2C24, 0x2D24};
	static const u32 addr_ofst[2] = {0x2AE0, 0x2BE0};
	bool busy;
	u32 val;
	int ret;

	/* NOTE(review): BIT(29) is treated as the HWSI busy flag here --
	 * confirm against the chip register map.
	 */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 3800, false,
				       rtwdev, addr_is_idle[rf_path], BIT(29));
	if (ret) {
		rtw89_warn(rtwdev, "[%s] HWSI is busy\n", __func__);
		return false;
	}

	/* pack address and payload into a single command word */
	val = u32_encode_bits(addr, B_HWSI_DATA_ADDR) |
	      u32_encode_bits(data, B_HWSI_DATA_VAL);

	rtw89_phy_write32(rtwdev, addr_ofst[rf_path], val);

	return true;
}
1135 
1136 static
1137 bool rtw89_phy_write_rf_a_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1138 			     u32 addr, u32 mask, u32 data)
1139 {
1140 	u32 val;
1141 
1142 	if (mask == RFREG_MASK) {
1143 		val = data;
1144 	} else {
1145 		val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
1146 		val &= ~mask;
1147 		val |= (data << __ffs(mask)) & mask;
1148 	}
1149 
1150 	return rtw89_phy_write_full_rf_v2_a(rtwdev, rf_path, addr, val);
1151 }
1152 
1153 bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1154 			   u32 addr, u32 mask, u32 data)
1155 {
1156 	bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
1157 
1158 	if (rf_path >= rtwdev->chip->rf_path_num) {
1159 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1160 		return INV_RF_DATA;
1161 	}
1162 
1163 	if (ad_sel)
1164 		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
1165 	else
1166 		return rtw89_phy_write_rf_a_v2(rtwdev, rf_path, addr, mask, data);
1167 }
1168 EXPORT_SYMBOL(rtw89_phy_write_rf_v2);
1169 
1170 static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
1171 {
1172 	return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;
1173 }
1174 
1175 static void __rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
1176 				 enum rtw89_phy_idx phy_idx)
1177 {
1178 	const struct rtw89_chip_info *chip = rtwdev->chip;
1179 
1180 	chip->ops->bb_reset(rtwdev, phy_idx);
1181 }
1182 
1183 static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev)
1184 {
1185 	__rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
1186 	if (rtwdev->dbcc_en)
1187 		__rtw89_phy_bb_reset(rtwdev, RTW89_PHY_1);
1188 }
1189 
1190 static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
1191 				    const struct rtw89_reg2_def *reg,
1192 				    enum rtw89_rf_path rf_path,
1193 				    void *extra_data)
1194 {
1195 	u32 addr;
1196 
1197 	if (reg->addr == 0xfe) {
1198 		mdelay(50);
1199 	} else if (reg->addr == 0xfd) {
1200 		mdelay(5);
1201 	} else if (reg->addr == 0xfc) {
1202 		mdelay(1);
1203 	} else if (reg->addr == 0xfb) {
1204 		udelay(50);
1205 	} else if (reg->addr == 0xfa) {
1206 		udelay(5);
1207 	} else if (reg->addr == 0xf9) {
1208 		udelay(1);
1209 	} else if (reg->data == BYPASS_CR_DATA) {
1210 		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Bypass CR 0x%x\n", reg->addr);
1211 	} else {
1212 		addr = reg->addr;
1213 
1214 		if ((uintptr_t)extra_data == RTW89_PHY_1)
1215 			addr += rtw89_phy0_phy1_offset(rtwdev, reg->addr);
1216 
1217 		rtw89_phy_write32(rtwdev, addr, reg->data);
1218 	}
1219 }
1220 
/* Decoder for bb gain table entries: the 32-bit "address" word of such an
 * entry is packed metadata, not a register offset. The fields below are
 * consumed by the rtw89_phy_cfg_bb_* parsers.
 */
union rtw89_phy_bb_gain_arg {
	u32 addr;
	struct {
		union {
			u8 type;	/* entry sub-type within a cfg_type */
			struct {
				u8 rxsc_start:4;	/* first RX subchannel index */
				u8 bw:4;		/* channel bandwidth */
			};
		};
		u8 path;	/* RF path */
		u8 gain_band;	/* gain band index */
		u8 cfg_type;	/* selects the parser in config_bb_gain_ax */
	};
} __packed;
1236 
1237 static void
1238 rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
1239 			    union rtw89_phy_bb_gain_arg arg, u32 data)
1240 {
1241 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1242 	u8 type = arg.type;
1243 	u8 path = arg.path;
1244 	u8 gband = arg.gain_band;
1245 	int i;
1246 
1247 	switch (type) {
1248 	case 0:
1249 		for (i = 0; i < 4; i++, data >>= 8)
1250 			gain->lna_gain[gband][path][i] = data & 0xff;
1251 		break;
1252 	case 1:
1253 		for (i = 4; i < 7; i++, data >>= 8)
1254 			gain->lna_gain[gband][path][i] = data & 0xff;
1255 		break;
1256 	case 2:
1257 		for (i = 0; i < 2; i++, data >>= 8)
1258 			gain->tia_gain[gband][path][i] = data & 0xff;
1259 		break;
1260 	default:
1261 		rtw89_warn(rtwdev,
1262 			   "bb gain error {0x%x:0x%x} with unknown type: %d\n",
1263 			   arg.addr, data, type);
1264 		break;
1265 	}
1266 }
1267 
/* First array index for each RX-subchannel group inside the rpl_ofst_*
 * tables; FULL is the whole-bandwidth entry at slot 0.
 */
enum rtw89_phy_bb_rxsc_start_idx {
	RTW89_BB_RXSC_START_IDX_FULL = 0,
	RTW89_BB_RXSC_START_IDX_20 = 1,
	RTW89_BB_RXSC_START_IDX_20_1 = 5,
	RTW89_BB_RXSC_START_IDX_40 = 9,
	RTW89_BB_RXSC_START_IDX_80 = 13,
};
1275 
1276 static void
1277 rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
1278 			  union rtw89_phy_bb_gain_arg arg, u32 data)
1279 {
1280 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1281 	u8 rxsc_start = arg.rxsc_start;
1282 	u8 bw = arg.bw;
1283 	u8 path = arg.path;
1284 	u8 gband = arg.gain_band;
1285 	u8 rxsc;
1286 	s8 ofst;
1287 	int i;
1288 
1289 	switch (bw) {
1290 	case RTW89_CHANNEL_WIDTH_20:
1291 		gain->rpl_ofst_20[gband][path] = (s8)data;
1292 		break;
1293 	case RTW89_CHANNEL_WIDTH_40:
1294 		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
1295 			gain->rpl_ofst_40[gband][path][0] = (s8)data;
1296 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
1297 			for (i = 0; i < 2; i++, data >>= 8) {
1298 				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
1299 				ofst = (s8)(data & 0xff);
1300 				gain->rpl_ofst_40[gband][path][rxsc] = ofst;
1301 			}
1302 		}
1303 		break;
1304 	case RTW89_CHANNEL_WIDTH_80:
1305 		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
1306 			gain->rpl_ofst_80[gband][path][0] = (s8)data;
1307 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
1308 			for (i = 0; i < 4; i++, data >>= 8) {
1309 				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
1310 				ofst = (s8)(data & 0xff);
1311 				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
1312 			}
1313 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
1314 			for (i = 0; i < 2; i++, data >>= 8) {
1315 				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
1316 				ofst = (s8)(data & 0xff);
1317 				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
1318 			}
1319 		}
1320 		break;
1321 	case RTW89_CHANNEL_WIDTH_160:
1322 		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
1323 			gain->rpl_ofst_160[gband][path][0] = (s8)data;
1324 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
1325 			for (i = 0; i < 4; i++, data >>= 8) {
1326 				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
1327 				ofst = (s8)(data & 0xff);
1328 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1329 			}
1330 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20_1) {
1331 			for (i = 0; i < 4; i++, data >>= 8) {
1332 				rxsc = RTW89_BB_RXSC_START_IDX_20_1 + i;
1333 				ofst = (s8)(data & 0xff);
1334 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1335 			}
1336 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
1337 			for (i = 0; i < 4; i++, data >>= 8) {
1338 				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
1339 				ofst = (s8)(data & 0xff);
1340 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1341 			}
1342 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_80) {
1343 			for (i = 0; i < 2; i++, data >>= 8) {
1344 				rxsc = RTW89_BB_RXSC_START_IDX_80 + i;
1345 				ofst = (s8)(data & 0xff);
1346 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1347 			}
1348 		}
1349 		break;
1350 	default:
1351 		rtw89_warn(rtwdev,
1352 			   "bb rpl ofst {0x%x:0x%x} with unknown bw: %d\n",
1353 			   arg.addr, data, bw);
1354 		break;
1355 	}
1356 }
1357 
1358 static void
1359 rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
1360 			     union rtw89_phy_bb_gain_arg arg, u32 data)
1361 {
1362 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1363 	u8 type = arg.type;
1364 	u8 path = arg.path;
1365 	u8 gband = arg.gain_band;
1366 	int i;
1367 
1368 	switch (type) {
1369 	case 0:
1370 		for (i = 0; i < 4; i++, data >>= 8)
1371 			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
1372 		break;
1373 	case 1:
1374 		for (i = 4; i < 7; i++, data >>= 8)
1375 			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
1376 		break;
1377 	default:
1378 		rtw89_warn(rtwdev,
1379 			   "bb gain bypass {0x%x:0x%x} with unknown type: %d\n",
1380 			   arg.addr, data, type);
1381 		break;
1382 	}
1383 }
1384 
1385 static void
1386 rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
1387 			    union rtw89_phy_bb_gain_arg arg, u32 data)
1388 {
1389 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1390 	u8 type = arg.type;
1391 	u8 path = arg.path;
1392 	u8 gband = arg.gain_band;
1393 	int i;
1394 
1395 	switch (type) {
1396 	case 0:
1397 		for (i = 0; i < 4; i++, data >>= 8)
1398 			gain->lna_op1db[gband][path][i] = data & 0xff;
1399 		break;
1400 	case 1:
1401 		for (i = 4; i < 7; i++, data >>= 8)
1402 			gain->lna_op1db[gband][path][i] = data & 0xff;
1403 		break;
1404 	case 2:
1405 		for (i = 0; i < 4; i++, data >>= 8)
1406 			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
1407 		break;
1408 	case 3:
1409 		for (i = 4; i < 8; i++, data >>= 8)
1410 			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
1411 		break;
1412 	default:
1413 		rtw89_warn(rtwdev,
1414 			   "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
1415 			   arg.addr, data, type);
1416 		break;
1417 	}
1418 }
1419 
/* Dispatch one AX bb gain table entry to the parser that matches its
 * cfg_type. The entry's address word is packed metadata; see
 * union rtw89_phy_bb_gain_arg.
 */
static void rtw89_phy_config_bb_gain_ax(struct rtw89_dev *rtwdev,
					const struct rtw89_reg2_def *reg,
					enum rtw89_rf_path rf_path,
					void *extra_data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	/* silently drop entries for bands/paths this chip does not have */
	if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR)
		return;

	if (arg.path >= chip->rf_path_num)
		return;

	/* delay markers (0xf9..0xfe) are never expected in gain tables */
	if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
		rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
		return;
	}

	switch (arg.cfg_type) {
	case 0:
		rtw89_phy_cfg_bb_gain_error(rtwdev, arg, reg->data);
		break;
	case 1:
		rtw89_phy_cfg_bb_rpl_ofst(rtwdev, arg, reg->data);
		break;
	case 2:
		rtw89_phy_cfg_bb_gain_bypass(rtwdev, arg, reg->data);
		break;
	case 3:
		rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data);
		break;
	case 4:
		/* This cfg_type is only used by rfe_type >= 50 with eFEM */
		if (efuse->rfe_type < 50)
			break;
		/* rfe_type >= 50 has no cfg_type-4 parser here, so it falls
		 * into the unknown-type warning below
		 */
		fallthrough;
	default:
		rtw89_warn(rtwdev,
			   "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
			   arg.addr, reg->data, arg.cfg_type);
		break;
	}
}
1465 
1466 static void
1467 rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev,
1468 			     const struct rtw89_reg2_def *reg,
1469 			     enum rtw89_rf_path rf_path,
1470 			     struct rtw89_fw_h2c_rf_reg_info *info)
1471 {
1472 	u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE;
1473 	u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;
1474 
1475 	if (page >= RTW89_H2C_RF_PAGE_NUM) {
1476 		rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d",
1477 			   rf_path, info->curr_idx);
1478 		return;
1479 	}
1480 
1481 	info->rtw89_phy_config_rf_h2c[page][idx] =
1482 		cpu_to_le32((reg->addr << 20) | reg->data);
1483 	info->curr_idx++;
1484 }
1485 
1486 static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev,
1487 				      struct rtw89_fw_h2c_rf_reg_info *info)
1488 {
1489 	u16 remain = info->curr_idx;
1490 	u16 len = 0;
1491 	u8 i;
1492 	int ret = 0;
1493 
1494 	if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) {
1495 		rtw89_warn(rtwdev,
1496 			   "rf reg h2c total len %d larger than %d\n",
1497 			   remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE);
1498 		ret = -EINVAL;
1499 		goto out;
1500 	}
1501 
1502 	for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) {
1503 		len = remain > RTW89_H2C_RF_PAGE_SIZE ? RTW89_H2C_RF_PAGE_SIZE : remain;
1504 		ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i);
1505 		if (ret)
1506 			goto out;
1507 	}
1508 out:
1509 	info->curr_idx = 0;
1510 
1511 	return ret;
1512 }
1513 
1514 static void rtw89_phy_config_rf_reg_noio(struct rtw89_dev *rtwdev,
1515 					 const struct rtw89_reg2_def *reg,
1516 					 enum rtw89_rf_path rf_path,
1517 					 void *extra_data)
1518 {
1519 	u32 addr = reg->addr;
1520 
1521 	if (addr == 0xfe || addr == 0xfd || addr == 0xfc || addr == 0xfb ||
1522 	    addr == 0xfa || addr == 0xf9)
1523 		return;
1524 
1525 	if (rtw89_chip_rf_v1(rtwdev) && addr < 0x100)
1526 		return;
1527 
1528 	rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
1529 				     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
1530 }
1531 
1532 static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
1533 				    const struct rtw89_reg2_def *reg,
1534 				    enum rtw89_rf_path rf_path,
1535 				    void *extra_data)
1536 {
1537 	if (reg->addr == 0xfe) {
1538 		mdelay(50);
1539 	} else if (reg->addr == 0xfd) {
1540 		mdelay(5);
1541 	} else if (reg->addr == 0xfc) {
1542 		mdelay(1);
1543 	} else if (reg->addr == 0xfb) {
1544 		udelay(50);
1545 	} else if (reg->addr == 0xfa) {
1546 		udelay(5);
1547 	} else if (reg->addr == 0xf9) {
1548 		udelay(1);
1549 	} else {
1550 		rtw89_write_rf(rtwdev, rf_path, reg->addr, 0xfffff, reg->data);
1551 		rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
1552 					     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
1553 	}
1554 }
1555 
1556 void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
1557 				const struct rtw89_reg2_def *reg,
1558 				enum rtw89_rf_path rf_path,
1559 				void *extra_data)
1560 {
1561 	rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data);
1562 
1563 	if (reg->addr < 0x100)
1564 		return;
1565 
1566 	rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
1567 				     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
1568 }
1569 EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1);
1570 
/* Select the headline (conditional header) entry of a PHY table that best
 * matches this device's RFE type and chip CV. Candidates are tried in
 * order: exact RFE+CV match; RFE with don't-care CV; same RFE with the
 * highest CV present; don't-care RFE with the highest CV present.
 * On success, returns 0 with *headline_size and *headline_idx filled in;
 * returns -EINVAL when no candidate fits.
 */
static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev,
				  const struct rtw89_phy_table *table,
				  u32 *headline_size, u32 *headline_idx,
				  u8 rfe, u8 cv)
{
	const struct rtw89_reg2_def *reg;
	u32 headline;
	u32 compare, target;
	u8 rfe_para, cv_para;
	u8 cv_max = 0;
	bool case_matched = false;
	u32 i;

	/* headline entries form a contiguous prefix of the table */
	for (i = 0; i < table->n_regs; i++) {
		reg = &table->regs[i];
		headline = get_phy_headline(reg->addr);
		if (headline != PHY_HEADLINE_VALID)
			break;
	}
	*headline_size = i;
	if (*headline_size == 0)
		return 0;

	/* case 1: RFE match, CV match */
	compare = get_phy_compare(rfe, cv);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 2: RFE match, CV don't care */
	compare = get_phy_compare(rfe, PHY_COND_DONT_CARE);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 3: RFE match, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == rfe) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	/* case 4: RFE don't care, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == PHY_COND_DONT_CARE) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	return -EINVAL;
}
1652 
/* Walk a conditional PHY parameter table and apply, via @config, only the
 * entries whose branch matches the headline selected for this device
 * (see rtw89_phy_sel_headline()). Entries encode an IF/ELIF/ELSE/END
 * branch structure in their address field.
 */
static void rtw89_phy_init_reg(struct rtw89_dev *rtwdev,
			       const struct rtw89_phy_table *table,
			       void (*config)(struct rtw89_dev *rtwdev,
					      const struct rtw89_reg2_def *reg,
					      enum rtw89_rf_path rf_path,
					      void *data),
			       void *extra_data)
{
	const struct rtw89_reg2_def *reg;
	enum rtw89_rf_path rf_path = table->rf_path;
	u8 rfe = rtwdev->efuse.rfe_type;
	u8 cv = rtwdev->hal.cv;
	u32 i;
	u32 headline_size = 0, headline_idx = 0;
	u32 target = 0, cfg_target;
	u8 cond;
	bool is_matched = true;		/* entries before any branch always apply */
	bool target_found = false;	/* a branch of the current chain matched */
	int ret;

	ret = rtw89_phy_sel_headline(rtwdev, table, &headline_size,
				     &headline_idx, rfe, cv);
	if (ret) {
		rtw89_err(rtwdev, "invalid PHY package: %d/%d\n", rfe, cv);
		return;
	}

	cfg_target = get_phy_target(table->regs[headline_idx].addr);
	for (i = headline_size; i < table->n_regs; i++) {
		reg = &table->regs[i];
		cond = get_phy_cond(reg->addr);
		switch (cond) {
		case PHY_COND_BRANCH_IF:
		case PHY_COND_BRANCH_ELIF:
			/* remember the branch target; CHECK decides matching */
			target = get_phy_target(reg->addr);
			break;
		case PHY_COND_BRANCH_ELSE:
			is_matched = false;
			if (!target_found) {
				rtw89_warn(rtwdev, "failed to load CR %x/%x\n",
					   reg->addr, reg->data);
				return;
			}
			break;
		case PHY_COND_BRANCH_END:
			is_matched = true;
			target_found = false;
			break;
		case PHY_COND_CHECK:
			/* only the first matching branch of a chain applies */
			if (target_found) {
				is_matched = false;
				break;
			}

			if (target == cfg_target) {
				is_matched = true;
				target_found = true;
			} else {
				is_matched = false;
				target_found = false;
			}
			break;
		default:
			/* plain entry: apply when inside a taken branch */
			if (is_matched)
				config(rtwdev, reg, rf_path, extra_data);
			break;
		}
	}
}
1722 
1723 void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
1724 {
1725 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1726 	const struct rtw89_chip_info *chip = rtwdev->chip;
1727 	const struct rtw89_phy_table *bb_table;
1728 	const struct rtw89_phy_table *bb_gain_table;
1729 
1730 	bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table;
1731 	rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
1732 	if (rtwdev->dbcc_en)
1733 		rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg,
1734 				   (void *)RTW89_PHY_1);
1735 
1736 	rtw89_chip_init_txpwr_unit(rtwdev);
1737 
1738 	bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table;
1739 	if (bb_gain_table)
1740 		rtw89_phy_init_reg(rtwdev, bb_gain_table,
1741 				   chip->phy_def->config_bb_gain, NULL);
1742 
1743 	rtw89_phy_bb_reset(rtwdev);
1744 }
1745 
1746 void rtw89_phy_init_bb_afe(struct rtw89_dev *rtwdev)
1747 {
1748 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1749 	const struct rtw89_fw_element_hdr *afe_elm = elm_info->afe;
1750 	const struct rtw89_phy_afe_info *info;
1751 	u32 action, cat, class;
1752 	u32 addr, mask, val;
1753 	u32 poll, rpt;
1754 	u32 n, i;
1755 
1756 	if (!afe_elm)
1757 		return;
1758 
1759 	n = le32_to_cpu(afe_elm->size) / sizeof(*info);
1760 
1761 	for (i = 0; i < n; i++) {
1762 		info = &afe_elm->u.afe.infos[i];
1763 
1764 		class = le32_to_cpu(info->class);
1765 		switch (class) {
1766 		case RTW89_FW_AFE_CLASS_P0:
1767 		case RTW89_FW_AFE_CLASS_P1:
1768 		case RTW89_FW_AFE_CLASS_CMN:
1769 			/* Currently support two paths */
1770 			break;
1771 		case RTW89_FW_AFE_CLASS_P2:
1772 		case RTW89_FW_AFE_CLASS_P3:
1773 		case RTW89_FW_AFE_CLASS_P4:
1774 		default:
1775 			rtw89_warn(rtwdev, "unexpected AFE class %u\n", class);
1776 			continue;
1777 		}
1778 
1779 		addr = le32_to_cpu(info->addr);
1780 		mask = le32_to_cpu(info->mask);
1781 		val = le32_to_cpu(info->val);
1782 		cat = le32_to_cpu(info->cat);
1783 		action = le32_to_cpu(info->action);
1784 
1785 		switch (action) {
1786 		case RTW89_FW_AFE_ACTION_WRITE:
1787 			switch (cat) {
1788 			case RTW89_FW_AFE_CAT_MAC:
1789 			case RTW89_FW_AFE_CAT_MAC1:
1790 				rtw89_write32_mask(rtwdev, addr, mask, val);
1791 				break;
1792 			case RTW89_FW_AFE_CAT_AFEDIG:
1793 			case RTW89_FW_AFE_CAT_AFEDIG1:
1794 				rtw89_write32_mask(rtwdev, addr, mask, val);
1795 				break;
1796 			case RTW89_FW_AFE_CAT_BB:
1797 				rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);
1798 				break;
1799 			case RTW89_FW_AFE_CAT_BB1:
1800 				rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
1801 				break;
1802 			default:
1803 				rtw89_warn(rtwdev,
1804 					   "unexpected AFE writing action %u\n", action);
1805 				break;
1806 			}
1807 			break;
1808 		case RTW89_FW_AFE_ACTION_POLL:
1809 			for (poll = 0; poll <= 10; poll++) {
1810 				/*
1811 				 * For CAT_BB, AFE reads register with mcu_offset 0,
1812 				 * so both CAT_MAC and CAT_BB use the same method.
1813 				 */
1814 				rpt = rtw89_read32_mask(rtwdev, addr, mask);
1815 				if (rpt == val)
1816 					goto poll_done;
1817 
1818 				fsleep(1);
1819 			}
1820 			rtw89_warn(rtwdev, "failed to poll AFE cat=%u addr=0x%x mask=0x%x\n",
1821 				   cat, addr, mask);
1822 poll_done:
1823 			break;
1824 		case RTW89_FW_AFE_ACTION_DELAY:
1825 			fsleep(addr);
1826 			break;
1827 		}
1828 	}
1829 }
1830 
/* One poll step for the NCTL block: write 0x4 to reg 0x8080 and return
 * the readback. Used as the poll op by read_poll_timeout() in
 * rtw89_phy_preinit_rf_nctl_ax(), which waits for the readback to be 0x4.
 */
static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32(rtwdev, 0x8080, 0x4);
	udelay(1);
	return rtw89_phy_read32(rtwdev, 0x8080);
}
1837 
1838 void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
1839 {
1840 	void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
1841 		       enum rtw89_rf_path rf_path, void *data);
1842 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1843 	const struct rtw89_chip_info *chip = rtwdev->chip;
1844 	const struct rtw89_phy_table *rf_table;
1845 	struct rtw89_fw_h2c_rf_reg_info *rf_reg_info;
1846 	u8 path;
1847 
1848 	rf_reg_info = kzalloc(sizeof(*rf_reg_info), GFP_KERNEL);
1849 	if (!rf_reg_info)
1850 		return;
1851 
1852 	for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
1853 		rf_table = elm_info->rf_radio[path] ?
1854 			   elm_info->rf_radio[path] : chip->rf_table[path];
1855 		rf_reg_info->rf_path = rf_table->rf_path;
1856 		if (noio)
1857 			config = rtw89_phy_config_rf_reg_noio;
1858 		else
1859 			config = rf_table->config ? rf_table->config :
1860 				 rtw89_phy_config_rf_reg;
1861 		rtw89_phy_init_reg(rtwdev, rf_table, config, (void *)rf_reg_info);
1862 		if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info))
1863 			rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
1864 				   rf_reg_info->rf_path);
1865 	}
1866 	kfree(rf_reg_info);
1867 }
1868 
/* Prepare the RF NCTL block on AX chips: ungate IQK/DPK clocks, release
 * the per-path resets, then poll reg 0x8080 until the block answers.
 */
static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 val;
	int ret;

	/* IQK/DPK clock & reset */
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x3);
	rtw89_phy_write32_set(rtwdev, R_GNT_BT_WGT_EN, 0x1);
	rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000);
	/* NOTE(review): 8851B skips the path-1 reset -- presumably it has
	 * no second path; confirm against the chip spec.
	 */
	if (chip->chip_id != RTL8851B)
		rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
	if (chip->chip_id == RTL8852B || chip->chip_id == RTL8852BT)
		rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2);

	/* check 0x8080 */
	rtw89_phy_write32(rtwdev, R_NCTL_CFG, 0x8);

	ret = read_poll_timeout(rtw89_phy_nctl_poll, val, val == 0x4, 10,
				1000, false, rtwdev);
	if (ret)
		rtw89_err(rtwdev, "failed to poll nctl block\n");
}
1892 
1893 static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
1894 {
1895 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1896 	const struct rtw89_chip_info *chip = rtwdev->chip;
1897 	const struct rtw89_phy_table *nctl_table;
1898 
1899 	rtw89_phy_preinit_rf_nctl(rtwdev);
1900 
1901 	nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table;
1902 	rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL);
1903 
1904 	if (chip->nctl_post_table)
1905 		rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
1906 }
1907 
1908 static u32 rtw89_phy0_phy1_offset_ax(struct rtw89_dev *rtwdev, u32 addr)
1909 {
1910 	u32 phy_page = addr >> 8;
1911 	u32 ofst = 0;
1912 
1913 	switch (phy_page) {
1914 	case 0x6:
1915 	case 0x7:
1916 	case 0x8:
1917 	case 0x9:
1918 	case 0xa:
1919 	case 0xb:
1920 	case 0xc:
1921 	case 0xd:
1922 	case 0x19:
1923 	case 0x1a:
1924 	case 0x1b:
1925 		ofst = 0x2000;
1926 		break;
1927 	default:
1928 		/* warning case */
1929 		ofst = 0;
1930 		break;
1931 	}
1932 
1933 	if (phy_page >= 0x40 && phy_page <= 0x4f)
1934 		ofst = 0x2000;
1935 
1936 	return ofst;
1937 }
1938 
1939 void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
1940 			   u32 data, enum rtw89_phy_idx phy_idx)
1941 {
1942 	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
1943 		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
1944 	rtw89_phy_write32_mask(rtwdev, addr, mask, data);
1945 }
1946 EXPORT_SYMBOL(rtw89_phy_write32_idx);
1947 
1948 void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
1949 			       enum rtw89_phy_idx phy_idx)
1950 {
1951 	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
1952 		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
1953 	rtw89_phy_write32_set(rtwdev, addr, bits);
1954 }
1955 EXPORT_SYMBOL(rtw89_phy_write32_idx_set);
1956 
1957 void rtw89_phy_write32_idx_clr(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
1958 			       enum rtw89_phy_idx phy_idx)
1959 {
1960 	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
1961 		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
1962 	rtw89_phy_write32_clr(rtwdev, addr, bits);
1963 }
1964 EXPORT_SYMBOL(rtw89_phy_write32_idx_clr);
1965 
1966 u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
1967 			 enum rtw89_phy_idx phy_idx)
1968 {
1969 	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
1970 		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
1971 	return rtw89_phy_read32_mask(rtwdev, addr, mask);
1972 }
1973 EXPORT_SYMBOL(rtw89_phy_read32_idx);
1974 
1975 void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
1976 			    u32 val)
1977 {
1978 	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);
1979 
1980 	if (!rtwdev->dbcc_en)
1981 		return;
1982 
1983 	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
1984 }
1985 EXPORT_SYMBOL(rtw89_phy_set_phy_regs);
1986 
1987 void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
1988 			      const struct rtw89_phy_reg3_tbl *tbl)
1989 {
1990 	const struct rtw89_reg3_def *reg3;
1991 	int i;
1992 
1993 	for (i = 0; i < tbl->size; i++) {
1994 		reg3 = &tbl->reg3[i];
1995 		rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
1996 	}
1997 }
1998 EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);
1999 
2000 static u8 rtw89_phy_ant_gain_domain_to_regd(struct rtw89_dev *rtwdev, u8 ant_gain_regd)
2001 {
2002 	switch (ant_gain_regd) {
2003 	case RTW89_ANT_GAIN_ETSI:
2004 		return RTW89_ETSI;
2005 	default:
2006 		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
2007 			    "unknown antenna gain domain: %d\n",
2008 			    ant_gain_regd);
2009 		return RTW89_REGD_NUM;
2010 	}
2011 }
2012 
/* antenna gain in unit of 0.25 dbm */
/* Valid range of raw ACPI antenna-gain values per band; out-of-range values
 * are clamped in rtw89_phy_ant_gain_init() before the reference value below
 * is subtracted to form the stored per-subband offset.
 */
#define RTW89_ANT_GAIN_2GHZ_MIN -8
#define RTW89_ANT_GAIN_2GHZ_MAX 14
#define RTW89_ANT_GAIN_5GHZ_MIN -8
#define RTW89_ANT_GAIN_5GHZ_MAX 20
#define RTW89_ANT_GAIN_6GHZ_MIN -8
#define RTW89_ANT_GAIN_6GHZ_MAX 20

/* per-band reference gain the clamped ACPI value is subtracted from */
#define RTW89_ANT_GAIN_REF_2GHZ 14
#define RTW89_ANT_GAIN_REF_5GHZ 20
#define RTW89_ANT_GAIN_REF_6GHZ 20
2024 
/* Initialize dynamic antenna gain (DAG) state from the ACPI RTAG object:
 * record which regulatory domains enable DAG, and convert the raw per-chain,
 * per-subband gain table into offsets relative to each band's reference gain.
 * Silently leaves DAG disabled if the chip lacks support or ACPI has no
 * (usable) RTAG data.
 */
void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_acpi_rtag_result res = {};
	u32 domain;
	int ret;
	u8 i, j;
	u8 regd;
	u8 val;

	if (!chip->support_ant_gain)
		return;

	/* fetch the RTAG table; failure just means no DAG on this platform */
	ret = rtw89_acpi_evaluate_rtag(rtwdev, &res);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "acpi: cannot eval rtag: %d\n", ret);
		return;
	}

	/* only revision 0 of the RTAG layout is understood */
	if (res.revision != 0) {
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "unknown rtag revision: %d\n", res.revision);
		return;
	}

	domain = get_unaligned_le32(&res.domain);

	/* each set bit enables DAG for one antenna-gain domain; translate to
	 * the driver's regd space and remember it, skipping unknown domains
	 */
	for (i = 0; i < RTW89_ANT_GAIN_DOMAIN_NUM; i++) {
		if (!(domain & BIT(i)))
			continue;

		regd = rtw89_phy_ant_gain_domain_to_regd(rtwdev, i);
		if (regd >= RTW89_REGD_NUM)
			continue;
		ant_gain->regd_enabled |= BIT(regd);
	}

	/* convert raw gains to offsets: clamp into the band's valid range,
	 * then subtract from the band's reference gain (0.25 dB units)
	 */
	for (i = 0; i < RTW89_ANT_GAIN_CHAIN_NUM; i++) {
		for (j = 0; j < RTW89_ANT_GAIN_SUBBAND_NR; j++) {
			val = res.ant_gain_table[i][j];
			switch (j) {
			default:
			case RTW89_ANT_GAIN_2GHZ_SUBBAND:
				val = RTW89_ANT_GAIN_REF_2GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_2GHZ_MIN,
					      RTW89_ANT_GAIN_2GHZ_MAX);
				break;
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_1:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_2:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_2E:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4:
				val = RTW89_ANT_GAIN_REF_5GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_5GHZ_MIN,
					      RTW89_ANT_GAIN_5GHZ_MAX);
				break;
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_6:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_8:
				val = RTW89_ANT_GAIN_REF_6GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_6GHZ_MIN,
					      RTW89_ANT_GAIN_6GHZ_MAX);
			}
			ant_gain->offset[i][j] = val;
		}
	}
}
2099 
/* Map a channel center frequency (MHz) to its antenna-gain subband.
 * Frequencies outside the listed ranges fall back to the 2 GHz subband
 * (after a debug message) via the deliberate fallthrough below.
 */
static
enum rtw89_ant_gain_subband rtw89_phy_ant_gain_get_subband(struct rtw89_dev *rtwdev,
							   u32 center_freq)
{
	switch (center_freq) {
	default:
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "center freq: %u to antenna gain subband is unhandled\n",
			    center_freq);
		fallthrough;
	case 2412 ... 2484:
		return RTW89_ANT_GAIN_2GHZ_SUBBAND;
	case 5180 ... 5240:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_1;
	case 5250 ... 5320:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_2;
	case 5500 ... 5720:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_2E;
	case 5745 ... 5885:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4;
	case 5955 ... 6155:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L;
	case 6175 ... 6415:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H;
	case 6435 ... 6515:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_6;
	case 6535 ... 6695:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L;
	case 6715 ... 6855:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H;

	/* freq 6875 (ch 185, 20MHz) spans RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H
	 * and RTW89_ANT_GAIN_6GHZ_SUBBAND_8, so directly describe it with
	 * struct rtw89_6ghz_span.
	 */

	case 6895 ... 7115:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_8;
	}
}
2140 
2141 static s8 rtw89_phy_ant_gain_query(struct rtw89_dev *rtwdev,
2142 				   enum rtw89_rf_path path, u32 center_freq)
2143 {
2144 	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
2145 	enum rtw89_ant_gain_subband subband_l, subband_h;
2146 	const struct rtw89_6ghz_span *span;
2147 
2148 	span = rtw89_get_6ghz_span(rtwdev, center_freq);
2149 
2150 	if (span && RTW89_ANT_GAIN_SPAN_VALID(span)) {
2151 		subband_l = span->ant_gain_subband_low;
2152 		subband_h = span->ant_gain_subband_high;
2153 	} else {
2154 		subband_l = rtw89_phy_ant_gain_get_subband(rtwdev, center_freq);
2155 		subband_h = subband_l;
2156 	}
2157 
2158 	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
2159 		    "center_freq %u: antenna gain subband {%u, %u}\n",
2160 		    center_freq, subband_l, subband_h);
2161 
2162 	return min(ant_gain->offset[path][subband_l],
2163 		   ant_gain->offset[path][subband_h]);
2164 }
2165 
2166 static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u32 center_freq)
2167 {
2168 	s8 offset_patha, offset_pathb;
2169 
2170 	offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq);
2171 	offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq);
2172 
2173 	if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw))
2174 		return min(offset_patha, offset_pathb);
2175 
2176 	return max(offset_patha, offset_pathb);
2177 }
2178 
2179 static bool rtw89_can_apply_ant_gain(struct rtw89_dev *rtwdev, u8 band)
2180 {
2181 	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
2182 	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
2183 	const struct rtw89_chip_info *chip = rtwdev->chip;
2184 	u8 regd = rtw89_regd_get(rtwdev, band);
2185 
2186 	if (!chip->support_ant_gain)
2187 		return false;
2188 
2189 	if (ant_gain->block_country || !(ant_gain->regd_enabled & BIT(regd)))
2190 		return false;
2191 
2192 	if (!rfe_parms->has_da)
2193 		return false;
2194 
2195 	return true;
2196 }
2197 
2198 s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
2199 				  const struct rtw89_chan *chan)
2200 {
2201 	s8 offset_patha, offset_pathb;
2202 
2203 	if (!rtw89_can_apply_ant_gain(rtwdev, chan->band_type))
2204 		return 0;
2205 
2206 	if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw))
2207 		return 0;
2208 
2209 	offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
2210 	offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);
2211 
2212 	return rtw89_phy_txpwr_rf_to_bb(rtwdev, offset_patha - offset_pathb);
2213 }
2214 EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset);
2215 
2216 int rtw89_print_ant_gain(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
2217 			 const struct rtw89_chan *chan)
2218 {
2219 	char *p = buf, *end = buf + bufsz;
2220 	s8 offset_patha, offset_pathb;
2221 
2222 	if (!rtw89_can_apply_ant_gain(rtwdev, chan->band_type)) {
2223 		p += scnprintf(p, end - p, "no DAG is applied\n");
2224 		goto out;
2225 	}
2226 
2227 	offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
2228 	offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);
2229 
2230 	p += scnprintf(p, end - p, "ChainA offset: %d dBm\n", offset_patha);
2231 	p += scnprintf(p, end - p, "ChainB offset: %d dBm\n", offset_pathb);
2232 
2233 out:
2234 	return p - buf;
2235 }
2236 
/* number of rate indices per rate section on AX-generation chips */
static const u8 rtw89_rs_idx_num_ax[] = {
	[RTW89_RS_CCK] = RTW89_RATE_CCK_NUM,
	[RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM,
	[RTW89_RS_MCS] = RTW89_RATE_MCS_NUM_AX,
	[RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_NUM,
	[RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM_AX,
};

/* number of spatial streams per rate section on AX-generation chips */
static const u8 rtw89_rs_nss_num_ax[] = {
	[RTW89_RS_CCK] = 1,
	[RTW89_RS_OFDM] = 1,
	[RTW89_RS_MCS] = RTW89_NSS_NUM,
	[RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_NUM,
	[RTW89_RS_OFFSET] = 1,
};
2252 
2253 s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev,
2254 			   struct rtw89_txpwr_byrate *head,
2255 			   const struct rtw89_rate_desc *desc)
2256 {
2257 	switch (desc->rs) {
2258 	case RTW89_RS_CCK:
2259 		return &head->cck[desc->idx];
2260 	case RTW89_RS_OFDM:
2261 		return &head->ofdm[desc->idx];
2262 	case RTW89_RS_MCS:
2263 		return &head->mcs[desc->ofdma][desc->nss][desc->idx];
2264 	case RTW89_RS_HEDCM:
2265 		return &head->hedcm[desc->ofdma][desc->nss][desc->idx];
2266 	case RTW89_RS_OFFSET:
2267 		return &head->offset[desc->idx];
2268 	default:
2269 		rtw89_warn(rtwdev, "unrecognized byr rs: %d\n", desc->rs);
2270 		return &head->trap;
2271 	}
2272 }
2273 
2274 void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
2275 				 const struct rtw89_txpwr_table *tbl)
2276 {
2277 	const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data;
2278 	const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size;
2279 	struct rtw89_txpwr_byrate *byr_head;
2280 	struct rtw89_rate_desc desc = {};
2281 	s8 *byr;
2282 	u32 data;
2283 	u8 i;
2284 
2285 	for (; cfg < end; cfg++) {
2286 		byr_head = &rtwdev->byr[cfg->band][0];
2287 		desc.rs = cfg->rs;
2288 		desc.nss = cfg->nss;
2289 		data = cfg->data;
2290 
2291 		for (i = 0; i < cfg->len; i++, data >>= 8) {
2292 			desc.idx = cfg->shf + i;
2293 			byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
2294 			*byr = data & 0xff;
2295 		}
2296 	}
2297 }
2298 EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
2299 
2300 static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm)
2301 {
2302 	const u8 tssi_deviation_point = 0;
2303 	const u8 tssi_max_deviation = 2;
2304 
2305 	if (dbm <= tssi_deviation_point)
2306 		dbm -= tssi_max_deviation;
2307 
2308 	return dbm;
2309 }
2310 
2311 static s8 rtw89_phy_get_tpe_constraint(struct rtw89_dev *rtwdev, u8 band)
2312 {
2313 	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
2314 	const struct rtw89_reg_6ghz_tpe *tpe = &regulatory->reg_6ghz_tpe;
2315 	s8 cstr = S8_MAX;
2316 
2317 	if (band == RTW89_BAND_6G && tpe->valid)
2318 		cstr = rtw89_phy_txpwr_dbm_without_tolerance(tpe->constraint);
2319 
2320 	return rtw89_phy_txpwr_dbm_to_mac(rtwdev, cstr);
2321 }
2322 
2323 s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
2324 			       const struct rtw89_rate_desc *rate_desc)
2325 {
2326 	struct rtw89_txpwr_byrate *byr_head;
2327 	s8 *byr;
2328 
2329 	if (rate_desc->rs == RTW89_RS_CCK)
2330 		band = RTW89_BAND_2G;
2331 
2332 	byr_head = &rtwdev->byr[band][bw];
2333 	byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, rate_desc);
2334 
2335 	return rtw89_phy_txpwr_rf_to_mac(rtwdev, *byr);
2336 }
2337 
2338 static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
2339 {
2340 	switch (channel_6g) {
2341 	case 1 ... 29:
2342 		return (channel_6g - 1) / 2;
2343 	case 33 ... 61:
2344 		return (channel_6g - 3) / 2;
2345 	case 65 ... 93:
2346 		return (channel_6g - 5) / 2;
2347 	case 97 ... 125:
2348 		return (channel_6g - 7) / 2;
2349 	case 129 ... 157:
2350 		return (channel_6g - 9) / 2;
2351 	case 161 ... 189:
2352 		return (channel_6g - 11) / 2;
2353 	case 193 ... 221:
2354 		return (channel_6g - 13) / 2;
2355 	case 225 ... 253:
2356 		return (channel_6g - 15) / 2;
2357 	default:
2358 		rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g);
2359 		return 0;
2360 	}
2361 }
2362 
2363 static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
2364 {
2365 	if (band == RTW89_BAND_6G)
2366 		return rtw89_channel_6g_to_idx(rtwdev, channel);
2367 
2368 	switch (channel) {
2369 	case 1 ... 14:
2370 		return channel - 1;
2371 	case 36 ... 64:
2372 		return (channel - 36) / 2;
2373 	case 100 ... 144:
2374 		return ((channel - 100) / 2) + 15;
2375 	case 149 ... 177:
2376 		return ((channel - 149) / 2) + 38;
2377 	default:
2378 		rtw89_warn(rtwdev, "unknown channel: %d\n", channel);
2379 		return 0;
2380 	}
2381 }
2382 
2383 static bool rtw89_phy_validate_txpwr_limit_bw(struct rtw89_dev *rtwdev,
2384 					      u8 band, u8 bw)
2385 {
2386 	switch (band) {
2387 	case RTW89_BAND_2G:
2388 		return bw < RTW89_2G_BW_NUM;
2389 	case RTW89_BAND_5G:
2390 		return bw < RTW89_5G_BW_NUM;
2391 	case RTW89_BAND_6G:
2392 		return bw < RTW89_6G_BW_NUM;
2393 	default:
2394 		return false;
2395 	}
2396 }
2397 
/* Look up the regulatory tx power limit for one (band, bw, ntx, rate
 * section, beamforming, channel) combination, then cap it by the dynamic
 * antenna gain (DAG) limit, SAR, and the 6 GHz TPE constraint. A zero table
 * entry means "no per-regd limit"; the worldwide (RTW89_WW) entry is used
 * as fallback. Returns the limit in MAC units, or 0 on invalid input.
 */
s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
			      u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_da_2ghz = &rfe_parms->rule_da_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_da_5ghz = &rfe_parms->rule_da_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_da_6ghz = &rfe_parms->rule_da_6ghz;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	bool has_ant_gain = rtw89_can_apply_ant_gain(rtwdev, band);
	u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	s8 lmt = 0, da_lmt = S8_MAX, sar, offset = 0;
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	struct rtw89_sar_parm sar_parm = {
		.center_freq = freq,
		.ntx = ntx,
	};
	s8 cstr;

	if (!rtw89_phy_validate_txpwr_limit_bw(rtwdev, band, bw)) {
		rtw89_warn(rtwdev, "invalid band %u bandwidth %u\n", band, bw);
		return 0;
	}

	/* per-band lookup: DAG limit (if enabled) plus the regulatory limit,
	 * falling back to the worldwide entry when the regd entry is zero
	 */
	switch (band) {
	case RTW89_BAND_2G:
		if (has_ant_gain)
			da_lmt = (*rule_da_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];

		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		if (has_ant_gain)
			da_lmt = (*rule_da_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];

		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		if (has_ant_gain)
			da_lmt = (*rule_da_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];

		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][RTW89_WW]
				       [RTW89_REG_6GHZ_POWER_DFLT]
				       [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	/* zero DAG entry means "no DAG limit"; only apply the antenna gain
	 * offset when a real DAG limit exists
	 */
	da_lmt = da_lmt ?: S8_MAX;
	if (da_lmt != S8_MAX)
		offset = rtw89_phy_ant_gain_offset(rtwdev, freq);

	lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, min(lmt + offset, da_lmt));
	sar = rtw89_query_sar(rtwdev, &sar_parm);
	cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);

	return min3(lmt, sar, cstr);
}
EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
2476 
/* Fill @ptr[0..RTW89_BF_NUM-1] with the tx power limits for the
 * non-beamforming and beamforming cases of one rate section / bandwidth /
 * channel combination.
 */
#define __fill_txpwr_limit_nonbf_bf(ptr, band, bw, ntx, rs, ch)		\
	do {								\
		u8 __i;							\
		for (__i = 0; __i < RTW89_BF_NUM; __i++)		\
			ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev,	\
							      band,	\
							      bw, ntx,	\
							      rs, __i,	\
							      (ch));	\
	} while (0)
2487 
/* Fill tx power limits for a 20 MHz channel: the single 20 MHz section plus
 * the CCK 40 MHz entry the hardware still expects.
 */
static void rtw89_phy_fill_txpwr_limit_20m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch)
{
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch);
}
2502 
/* Fill tx power limits for a 40 MHz channel: the two 20 MHz sub-sections at
 * ch +/- 2, the 40 MHz section at ch, CCK entries, and OFDM on the primary
 * channel.
 */
static void rtw89_phy_fill_txpwr_limit_40m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_CCK, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch);
}
2523 
/* Fill tx power limits for an 80 MHz channel: four 20 MHz sub-sections, two
 * 40 MHz halves, the 80 MHz section, OFDM on the primary channel, and the
 * 0.5-spaced 40 MHz entry as the minimum of the two 40 MHz halves.
 */
static void rtw89_phy_fill_txpwr_limit_80m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	s8 val_0p5_n[RTW89_BF_NUM];
	s8 val_0p5_p[RTW89_BF_NUM];
	u8 i;

	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch);

	/* the 0.5-offset 40 MHz entry must respect both 40 MHz halves */
	__fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
}
2564 
/* Fill tx power limits for a 160 MHz channel: eight 20 MHz sub-sections,
 * four 40 MHz sections, two 80 MHz halves, the 160 MHz section, plus the
 * 0.5- and 2.5-offset 40 MHz entries as minima over their neighbours.
 */
static void rtw89_phy_fill_txpwr_limit_160m_ax(struct rtw89_dev *rtwdev,
					       struct rtw89_txpwr_limit_ax *lmt,
					       u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	s8 val_0p5_n[RTW89_BF_NUM];
	s8 val_0p5_p[RTW89_BF_NUM];
	s8 val_2p5_n[RTW89_BF_NUM];
	s8 val_2p5_p[RTW89_BF_NUM];
	u8 i;

	/* fill ofdm section */
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);

	/* fill mcs 20m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 14);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 10);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 10);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 14);

	/* fill mcs 40m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 12);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 12);

	/* fill mcs 80m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch - 8);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch + 8);

	/* fill mcs 160m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, band,
				    RTW89_CHANNEL_WIDTH_160,
				    ntx, RTW89_RS_MCS, ch);

	/* fill mcs 40m 0p5 section */
	__fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);

	/* fill mcs 40m 2p5 section */
	__fill_txpwr_limit_nonbf_bf(val_2p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 8);
	__fill_txpwr_limit_nonbf_bf(val_2p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 8);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_2p5[i] = min_t(s8, val_2p5_n[i], val_2p5_p[i]);
}
2650 
2651 static
2652 void rtw89_phy_fill_txpwr_limit_ax(struct rtw89_dev *rtwdev,
2653 				   const struct rtw89_chan *chan,
2654 				   struct rtw89_txpwr_limit_ax *lmt,
2655 				   u8 ntx)
2656 {
2657 	u8 band = chan->band_type;
2658 	u8 pri_ch = chan->primary_channel;
2659 	u8 ch = chan->channel;
2660 	u8 bw = chan->band_width;
2661 
2662 	memset(lmt, 0, sizeof(*lmt));
2663 
2664 	switch (bw) {
2665 	case RTW89_CHANNEL_WIDTH_20:
2666 		rtw89_phy_fill_txpwr_limit_20m_ax(rtwdev, lmt, band, ntx, ch);
2667 		break;
2668 	case RTW89_CHANNEL_WIDTH_40:
2669 		rtw89_phy_fill_txpwr_limit_40m_ax(rtwdev, lmt, band, ntx, ch,
2670 						  pri_ch);
2671 		break;
2672 	case RTW89_CHANNEL_WIDTH_80:
2673 		rtw89_phy_fill_txpwr_limit_80m_ax(rtwdev, lmt, band, ntx, ch,
2674 						  pri_ch);
2675 		break;
2676 	case RTW89_CHANNEL_WIDTH_160:
2677 		rtw89_phy_fill_txpwr_limit_160m_ax(rtwdev, lmt, band, ntx, ch,
2678 						   pri_ch);
2679 		break;
2680 	}
2681 }
2682 
/* RU (resource unit) counterpart of rtw89_phy_read_txpwr_limit(): look up
 * the regulatory RU power limit, apply the dynamic antenna gain limit when
 * present, and cap by SAR and the 6 GHz TPE constraint. Returns MAC units,
 * or 0 for an unknown band.
 */
s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
				 u8 ru, u8 ntx, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_da_2ghz = &rfe_parms->rule_da_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_da_5ghz = &rfe_parms->rule_da_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_da_6ghz = &rfe_parms->rule_da_6ghz;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	bool has_ant_gain = rtw89_can_apply_ant_gain(rtwdev, band);
	u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	s8 lmt_ru = 0, da_lmt_ru = S8_MAX, sar, offset = 0;
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	struct rtw89_sar_parm sar_parm = {
		.center_freq = freq,
		.ntx = ntx,
	};
	s8 cstr;

	/* zero table entries mean "no per-regd limit"; fall back to the
	 * worldwide (RTW89_WW) entry in that case
	 */
	switch (band) {
	case RTW89_BAND_2G:
		if (has_ant_gain)
			da_lmt_ru = (*rule_da_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];

		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		if (has_ant_gain)
			da_lmt_ru = (*rule_da_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];

		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		if (has_ant_gain)
			da_lmt_ru = (*rule_da_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];

		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][RTW89_WW]
					     [RTW89_REG_6GHZ_POWER_DFLT]
					     [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	/* only apply the antenna gain offset when a real DAG limit exists */
	da_lmt_ru = da_lmt_ru ?: S8_MAX;
	if (da_lmt_ru != S8_MAX)
		offset = rtw89_phy_ant_gain_offset(rtwdev, freq);

	lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, min(lmt_ru + offset, da_lmt_ru));
	sar = rtw89_query_sar(rtwdev, &sar_parm);
	cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);

	return min3(lmt_ru, sar, cstr);
}
2755 
2756 static void
2757 rtw89_phy_fill_txpwr_limit_ru_20m_ax(struct rtw89_dev *rtwdev,
2758 				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2759 				     u8 band, u8 ntx, u8 ch)
2760 {
2761 	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2762 							RTW89_RU26,
2763 							ntx, ch);
2764 	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2765 							RTW89_RU52,
2766 							ntx, ch);
2767 	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2768 							 RTW89_RU106,
2769 							 ntx, ch);
2770 }
2771 
2772 static void
2773 rtw89_phy_fill_txpwr_limit_ru_40m_ax(struct rtw89_dev *rtwdev,
2774 				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2775 				     u8 band, u8 ntx, u8 ch)
2776 {
2777 	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2778 							RTW89_RU26,
2779 							ntx, ch - 2);
2780 	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2781 							RTW89_RU26,
2782 							ntx, ch + 2);
2783 	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2784 							RTW89_RU52,
2785 							ntx, ch - 2);
2786 	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2787 							RTW89_RU52,
2788 							ntx, ch + 2);
2789 	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2790 							 RTW89_RU106,
2791 							 ntx, ch - 2);
2792 	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2793 							 RTW89_RU106,
2794 							 ntx, ch + 2);
2795 }
2796 
2797 static void
2798 rtw89_phy_fill_txpwr_limit_ru_80m_ax(struct rtw89_dev *rtwdev,
2799 				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2800 				     u8 band, u8 ntx, u8 ch)
2801 {
2802 	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2803 							RTW89_RU26,
2804 							ntx, ch - 6);
2805 	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2806 							RTW89_RU26,
2807 							ntx, ch - 2);
2808 	lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2809 							RTW89_RU26,
2810 							ntx, ch + 2);
2811 	lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2812 							RTW89_RU26,
2813 							ntx, ch + 6);
2814 	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2815 							RTW89_RU52,
2816 							ntx, ch - 6);
2817 	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2818 							RTW89_RU52,
2819 							ntx, ch - 2);
2820 	lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2821 							RTW89_RU52,
2822 							ntx, ch + 2);
2823 	lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2824 							RTW89_RU52,
2825 							ntx, ch + 6);
2826 	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2827 							 RTW89_RU106,
2828 							 ntx, ch - 6);
2829 	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2830 							 RTW89_RU106,
2831 							 ntx, ch - 2);
2832 	lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2833 							 RTW89_RU106,
2834 							 ntx, ch + 2);
2835 	lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2836 							 RTW89_RU106,
2837 							 ntx, ch + 6);
2838 }
2839 
/* 160 MHz channel: eight 20 MHz RU sections at ch +/- {2,6,10,14} */
static void
rtw89_phy_fill_txpwr_limit_ru_160m_ax(struct rtw89_dev *rtwdev,
				      struct rtw89_txpwr_limit_ru_ax *lmt_ru,
				      u8 band, u8 ntx, u8 ch)
{
	static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
	int i;

	static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM_AX);
	for (i = 0; i < RTW89_RU_SEC_NUM_AX; i++) {
		lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
								RTW89_RU26,
								ntx,
								ch + ofst[i]);
		lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
								RTW89_RU52,
								ntx,
								ch + ofst[i]);
		lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
								 RTW89_RU106,
								 ntx,
								 ch + ofst[i]);
	}
}
2864 
2865 static
2866 void rtw89_phy_fill_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev,
2867 				      const struct rtw89_chan *chan,
2868 				      struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2869 				      u8 ntx)
2870 {
2871 	u8 band = chan->band_type;
2872 	u8 ch = chan->channel;
2873 	u8 bw = chan->band_width;
2874 
2875 	memset(lmt_ru, 0, sizeof(*lmt_ru));
2876 
2877 	switch (bw) {
2878 	case RTW89_CHANNEL_WIDTH_20:
2879 		rtw89_phy_fill_txpwr_limit_ru_20m_ax(rtwdev, lmt_ru, band, ntx,
2880 						     ch);
2881 		break;
2882 	case RTW89_CHANNEL_WIDTH_40:
2883 		rtw89_phy_fill_txpwr_limit_ru_40m_ax(rtwdev, lmt_ru, band, ntx,
2884 						     ch);
2885 		break;
2886 	case RTW89_CHANNEL_WIDTH_80:
2887 		rtw89_phy_fill_txpwr_limit_ru_80m_ax(rtwdev, lmt_ru, band, ntx,
2888 						     ch);
2889 		break;
2890 	case RTW89_CHANNEL_WIDTH_160:
2891 		rtw89_phy_fill_txpwr_limit_ru_160m_ax(rtwdev, lmt_ru, band, ntx,
2892 						      ch);
2893 		break;
2894 	}
2895 }
2896 
/* Program the by-rate tx power registers (AX chips): for each NSS and rate
 * section, pack four per-rate values (one per byte, lowest index in the
 * least significant byte) into each 32-bit register starting at
 * R_AX_PWR_BY_RATE. The register address advances only on every fourth
 * rate index, so each section's index count must be a multiple of 4.
 */
static void rtw89_phy_set_txpwr_byrate_ax(struct rtw89_dev *rtwdev,
					  const struct rtw89_chan *chan,
					  enum rtw89_phy_idx phy_idx)
{
	u8 max_nss_num = rtwdev->chip->rf_path_num;
	static const u8 rs[] = {
		RTW89_RS_CCK,
		RTW89_RS_OFDM,
		RTW89_RS_MCS,
		RTW89_RS_HEDCM,
	};
	struct rtw89_rate_desc cur = {};
	u8 band = chan->band_type;
	u8 ch = chan->channel;
	u32 addr, val;
	s8 v[4] = {};
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr byrate with ch=%d\n", ch);

	/* the packing scheme below requires multiples of 4 rate indices */
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_CCK] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_OFDM] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_MCS] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_HEDCM] % 4);

	addr = R_AX_PWR_BY_RATE;
	for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) {
		for (i = 0; i < ARRAY_SIZE(rs); i++) {
			/* skip sections that don't exist at this NSS */
			if (cur.nss >= rtw89_rs_nss_num_ax[rs[i]])
				continue;

			cur.rs = rs[i];
			for (cur.idx = 0; cur.idx < rtw89_rs_idx_num_ax[rs[i]];
			     cur.idx++) {
				v[cur.idx % 4] =
					rtw89_phy_read_txpwr_byrate(rtwdev,
								    band, 0,
								    &cur);

				/* flush only when four values are buffered */
				if ((cur.idx + 1) % 4)
					continue;

				val = FIELD_PREP(GENMASK(7, 0), v[0]) |
				      FIELD_PREP(GENMASK(15, 8), v[1]) |
				      FIELD_PREP(GENMASK(23, 16), v[2]) |
				      FIELD_PREP(GENMASK(31, 24), v[3]);

				rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr,
							val);
				addr += 4;
			}
		}
	}
}
2952 
2953 static
2954 void rtw89_phy_set_txpwr_offset_ax(struct rtw89_dev *rtwdev,
2955 				   const struct rtw89_chan *chan,
2956 				   enum rtw89_phy_idx phy_idx)
2957 {
2958 	struct rtw89_rate_desc desc = {
2959 		.nss = RTW89_NSS_1,
2960 		.rs = RTW89_RS_OFFSET,
2961 	};
2962 	u8 band = chan->band_type;
2963 	s8 v[RTW89_RATE_OFFSET_NUM_AX] = {};
2964 	u32 val;
2965 
2966 	rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");
2967 
2968 	for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM_AX; desc.idx++)
2969 		v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, 0, &desc);
2970 
2971 	BUILD_BUG_ON(RTW89_RATE_OFFSET_NUM_AX != 5);
2972 	val = FIELD_PREP(GENMASK(3, 0), v[0]) |
2973 	      FIELD_PREP(GENMASK(7, 4), v[1]) |
2974 	      FIELD_PREP(GENMASK(11, 8), v[2]) |
2975 	      FIELD_PREP(GENMASK(15, 12), v[3]) |
2976 	      FIELD_PREP(GENMASK(19, 16), v[4]);
2977 
2978 	rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL,
2979 				     GENMASK(19, 0), val);
2980 }
2981 
2982 static void rtw89_phy_set_txpwr_limit_ax(struct rtw89_dev *rtwdev,
2983 					 const struct rtw89_chan *chan,
2984 					 enum rtw89_phy_idx phy_idx)
2985 {
2986 	u8 max_ntx_num = rtwdev->chip->rf_path_num;
2987 	struct rtw89_txpwr_limit_ax lmt;
2988 	u8 ch = chan->channel;
2989 	u8 bw = chan->band_width;
2990 	const s8 *ptr;
2991 	u32 addr, val;
2992 	u8 i, j;
2993 
2994 	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
2995 		    "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);
2996 
2997 	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ax) !=
2998 		     RTW89_TXPWR_LMT_PAGE_SIZE_AX);
2999 
3000 	addr = R_AX_PWR_LMT;
3001 	for (i = 0; i < max_ntx_num; i++) {
3002 		rtw89_phy_fill_txpwr_limit_ax(rtwdev, chan, &lmt, i);
3003 
3004 		ptr = (s8 *)&lmt;
3005 		for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE_AX;
3006 		     j += 4, addr += 4, ptr += 4) {
3007 			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
3008 			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
3009 			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
3010 			      FIELD_PREP(GENMASK(31, 24), ptr[3]);
3011 
3012 			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
3013 		}
3014 	}
3015 }
3016 
3017 static void rtw89_phy_set_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev,
3018 					    const struct rtw89_chan *chan,
3019 					    enum rtw89_phy_idx phy_idx)
3020 {
3021 	u8 max_ntx_num = rtwdev->chip->rf_path_num;
3022 	struct rtw89_txpwr_limit_ru_ax lmt_ru;
3023 	u8 ch = chan->channel;
3024 	u8 bw = chan->band_width;
3025 	const s8 *ptr;
3026 	u32 addr, val;
3027 	u8 i, j;
3028 
3029 	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
3030 		    "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);
3031 
3032 	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru_ax) !=
3033 		     RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX);
3034 
3035 	addr = R_AX_PWR_RU_LMT;
3036 	for (i = 0; i < max_ntx_num; i++) {
3037 		rtw89_phy_fill_txpwr_limit_ru_ax(rtwdev, chan, &lmt_ru, i);
3038 
3039 		ptr = (s8 *)&lmt_ru;
3040 		for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX;
3041 		     j += 4, addr += 4, ptr += 4) {
3042 			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
3043 			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
3044 			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
3045 			      FIELD_PREP(GENMASK(31, 24), ptr[3]);
3046 
3047 			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
3048 		}
3049 	}
3050 }
3051 
/* Context passed through ieee80211_iterate_stations_atomic() so the RA
 * report iterator can reach both the device and the raw C2H frame.
 */
struct rtw89_phy_iter_ra_data {
	struct rtw89_dev *rtwdev;
	struct sk_buff *c2h;	/* RA report C2H frame being processed */
};
3056 
/* Parse one RA (rate adaptation) report C2H for a single station link:
 * decode mode/rate/bandwidth/guard-interval from the firmware words,
 * translate them into a struct rate_info, and refresh the link's derived
 * A-MSDU limits. Links whose mac_id doesn't match the report are ignored.
 */
static void __rtw89_phy_c2h_ra_rpt_iter(struct rtw89_sta_link *rtwsta_link,
					struct ieee80211_link_sta *link_sta,
					struct rtw89_phy_iter_ra_data *ra_data)
{
	struct rtw89_dev *rtwdev = ra_data->rtwdev;
	const struct rtw89_c2h_ra_rpt *c2h =
		(const struct rtw89_c2h_ra_rpt *)ra_data->c2h->data;
	struct rtw89_ra_report *ra_report = &rtwsta_link->ra_report;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	/* BE-generation chips use the extended (v1) report layout with
	 * extra high bits for rate/bw/mode
	 */
	bool format_v1 = chip->chip_gen == RTW89_CHIP_BE;
	u8 mode, rate, bw, giltf, mac_id;
	u16 legacy_bitrate;
	bool valid;
	u8 mcs = 0;
	u8 t;

	/* the report targets exactly one link; skip all others */
	mac_id = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MACID);
	if (mac_id != rtwsta_link->mac_id)
		return;

	rate = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MCSNSS);
	bw = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW);
	giltf = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_GILTF);
	mode = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL);

	if (format_v1) {
		/* v1 splits the top bit of each field into separate words */
		t = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MCSNSS_B7);
		rate |= u8_encode_bits(t, BIT(7));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW_B2);
		bw |= u8_encode_bits(t, BIT(2));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL_B2);
		mode |= u8_encode_bits(t, BIT(2));
	}

	/* validate legacy rates before touching ra_report, so a bogus
	 * report leaves the previous state intact
	 */
	if (mode == RTW89_RA_RPT_MODE_LEGACY) {
		valid = rtw89_legacy_rate_to_bitrate(rtwdev, rate, &legacy_bitrate);
		if (!valid)
			return;
	}

	memset(&ra_report->txrate, 0, sizeof(ra_report->txrate));

	switch (mode) {
	case RTW89_RA_RPT_MODE_LEGACY:
		ra_report->txrate.legacy = legacy_bitrate;
		break;
	case RTW89_RA_RPT_MODE_HT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS;
		if (RTW89_CHK_FW_FEATURE(OLD_HT_RA_FORMAT, &rtwdev->fw))
			rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate),
						FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate));
		else
			rate = FIELD_GET(RTW89_RA_RATE_MASK_HT_MCS, rate);
		ra_report->txrate.mcs = rate;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		/* HT MCS is per-stream here: keep index within one stream */
		mcs = ra_report->txrate.mcs & 0x07;
		break;
	case RTW89_RA_RPT_MODE_VHT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		mcs = ra_report->txrate.mcs;
		break;
	case RTW89_RA_RPT_MODE_HE:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss  = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_0_8;
		else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6;
		else
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2;
		mcs = ra_report->txrate.mcs;
		break;
	case RTW89_RA_RPT_MODE_EHT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_EHT_MCS;
		ra_report->txrate.mcs = u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1);
		ra_report->txrate.nss = u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1;
		if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_0_8;
		else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_1_6;
		else
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_3_2;
		mcs = ra_report->txrate.mcs;
		break;
	}

	ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw);
	ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
	ra_report->hw_rate = format_v1 ?
			     u16_encode_bits(mode, RTW89_HW_RATE_V1_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_V1_MASK_VAL) :
			     u16_encode_bits(mode, RTW89_HW_RATE_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_MASK_VAL);
	ra_report->might_fallback_legacy = mcs <= 2;
	link_sta->agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
	/* NOTE(review): when get_max_amsdu_len() returns 1 (low/legacy
	 * rates), 1 / 1500 - 1 evaluates to -1 — confirm max_agg_wait's
	 * type/consumers tolerate that value
	 */
	rtwsta_link->max_agg_wait = link_sta->agg.max_rc_amsdu_len / 1500 - 1;
}
3168 
3169 static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
3170 {
3171 	struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data;
3172 	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
3173 	struct rtw89_sta_link *rtwsta_link;
3174 	struct ieee80211_link_sta *link_sta;
3175 	unsigned int link_id;
3176 
3177 	rcu_read_lock();
3178 
3179 	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
3180 		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
3181 		__rtw89_phy_c2h_ra_rpt_iter(rtwsta_link, link_sta, ra_data);
3182 	}
3183 
3184 	rcu_read_unlock();
3185 }
3186 
3187 static void
3188 rtw89_phy_c2h_ra_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
3189 {
3190 	struct rtw89_phy_iter_ra_data ra_data;
3191 
3192 	ra_data.rtwdev = rtwdev;
3193 	ra_data.c2h = c2h;
3194 	ieee80211_iterate_stations_atomic(rtwdev->hw,
3195 					  rtw89_phy_c2h_ra_rpt_iter,
3196 					  &ra_data);
3197 }
3198 
/* Dispatch table for RA-class C2H events, indexed by function id.
 * NULL entries are known but currently unhandled functions.
 */
static
void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev,
					  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_FUNC_STS_RPT] = rtw89_phy_c2h_ra_rpt,
	[RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT] = NULL,
	[RTW89_PHY_C2H_FUNC_TXSTS] = NULL,
};
3206 
/* Intentionally empty: the low-rate-retry DM event is acknowledged but
 * requires no driver action.
 */
static void
rtw89_phy_c2h_lowrt_rty(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
}
3211 
/* Log the firmware scan report (band, operating channel, OFDM/CCK packet
 * detection lower bounds, PHY index); purely informational, no state is
 * changed.
 */
static void
rtw89_phy_c2h_fw_scan_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	const struct rtw89_c2h_fw_scan_rpt *c2h_rpt =
		(const struct rtw89_c2h_fw_scan_rpt *)c2h->data;

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "%s: band: %u, op_chan: %u, PD_low_bd(ofdm, cck): (-%d, %d), phy_idx: %u\n",
		    __func__, c2h_rpt->band, c2h_rpt->center_ch,
		    PD_LOWER_BOUND_BASE - (c2h_rpt->ofdm_pd_idx << 1),
		    c2h_rpt->cck_pd_idx, c2h_rpt->phy_idx);
}
3224 
/* Dispatch table for DM-class C2H events, indexed by function id.
 * NULL entries are known but currently unhandled functions.
 */
static
void (* const rtw89_phy_c2h_dm_handler[])(struct rtw89_dev *rtwdev,
					  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_DM_FUNC_FW_TEST] = NULL,
	[RTW89_PHY_C2H_DM_FUNC_FW_TRIG_TX_RPT] = NULL,
	[RTW89_PHY_C2H_DM_FUNC_SIGB] = NULL,
	[RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY] = rtw89_phy_c2h_lowrt_rty,
	[RTW89_PHY_C2H_DM_FUNC_MCC_DIG] = NULL,
	[RTW89_PHY_C2H_DM_FUNC_FW_SCAN] = rtw89_phy_c2h_fw_scan_rpt,
};
3235 
3236 static
3237 void rtw89_phy_c2h_rfk_tas_pwr(struct rtw89_dev *rtwdev,
3238 			       const struct rtw89_c2h_rf_tas_rpt_log *content)
3239 {
3240 	const enum rtw89_sar_sources src = rtwdev->sar.src;
3241 	struct rtw89_tas_info *tas = &rtwdev->tas;
3242 	u64 linear = 0;
3243 	u32 i, cur_idx;
3244 	s16 txpwr;
3245 
3246 	if (!tas->enable || src == RTW89_SAR_SOURCE_NONE)
3247 		return;
3248 
3249 	cur_idx = le32_to_cpu(content->cur_idx);
3250 	for (i = 0; i < cur_idx; i++) {
3251 		txpwr = le16_to_cpu(content->txpwr_history[i]);
3252 		linear += rtw89_db_quarter_to_linear(txpwr);
3253 
3254 		rtw89_debug(rtwdev, RTW89_DBG_SAR,
3255 			    "tas: index: %u, txpwr: %d\n", i, txpwr);
3256 	}
3257 
3258 	if (cur_idx == 0)
3259 		tas->instant_txpwr = rtw89_db_to_linear(0);
3260 	else
3261 		tas->instant_txpwr = DIV_ROUND_DOWN_ULL(linear, cur_idx);
3262 }
3263 
/* Decode and pretty-print an RFK (RF calibration) report log of the given
 * @func type. Each known type is only printed when @len matches its struct
 * exactly; anything else falls through to the generic "unexpected" message.
 * Purely diagnostic — no driver state is modified except via the TAS_PWR
 * case, which forwards to rtw89_phy_c2h_rfk_tas_pwr().
 */
static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_c2h_rfk_log_func func,
				      void *content, u16 len)
{
	struct rtw89_c2h_rf_txgapk_rpt_log *txgapk;
	struct rtw89_c2h_rf_rxdck_rpt_log *rxdck;
	struct rtw89_c2h_rf_dack_rpt_log *dack;
	struct rtw89_c2h_rf_tssi_rpt_log *tssi;
	struct rtw89_c2h_rf_dpk_rpt_log *dpk;
	struct rtw89_c2h_rf_iqk_rpt_log *iqk;
	int i, j, k;

	switch (func) {
	case RTW89_PHY_C2H_RFK_LOG_FUNC_IQK:
		/* IQ calibration: dump global flags, then per-path details */
		if (len != sizeof(*iqk))
			goto out;

		iqk = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_iqk_init = %x\n", iqk->is_iqk_init);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_reload = %x\n", iqk->is_reload);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_nbiqk = %x\n", iqk->is_nbiqk);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->txiqk_en = %x\n", iqk->txiqk_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->rxiqk_en = %x\n", iqk->rxiqk_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->lok_en = %x\n", iqk->lok_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_xym_en = %x\n", iqk->iqk_xym_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_sram_en = %x\n", iqk->iqk_sram_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_fft_en = %x\n", iqk->iqk_fft_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_fw_iqk = %x\n", iqk->is_fw_iqk);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_iqk_enable = %x\n", iqk->is_iqk_enable);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_cfir_en = %x\n", iqk->iqk_cfir_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->thermal_rek_en = %x\n", iqk->thermal_rek_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->version = %x\n", iqk->version);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->phy = %x\n", iqk->phy);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->fwk_status = %x\n", iqk->fwk_status);

		for (i = 0; i < 2; i++) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[IQK] ======== Path %x  ========\n", i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_band[%d] = %x\n",
				    i, iqk->iqk_band[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_ch[%d] = %x\n",
				    i, iqk->iqk_ch[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_bw[%d] = %x\n",
				    i, iqk->iqk_bw[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->lok_idac[%d] = %x\n",
				    i, le32_to_cpu(iqk->lok_idac[i]));
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->lok_vbuf[%d] = %x\n",
				    i, le32_to_cpu(iqk->lok_vbuf[i]));
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_tx_fail[%d] = %x\n",
				    i, iqk->iqk_tx_fail[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_rx_fail[%d] = %x\n",
				    i, iqk->iqk_rx_fail[i]);
			for (j = 0; j < 4; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->rftxgain[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->rftxgain[i][j]));
			for (j = 0; j < 4; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->tx_xym[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->tx_xym[i][j]));
			for (j = 0; j < 4; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->rfrxgain[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->rfrxgain[i][j]));
			for (j = 0; j < 4; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->rx_xym[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->rx_xym[i][j]));
		}
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
		/* Digital pre-distortion calibration summary */
		if (len != sizeof(*dpk))
			goto out;

		dpk = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "DPK ver:%d idx:%2ph band:%2ph bw:%2ph ch:%2ph path:%2ph\n",
			    dpk->ver, dpk->idx, dpk->band, dpk->bw, dpk->ch, dpk->path_ok);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "DPK txagc:%2ph ther:%2ph gs:%2ph dc_i:%4ph dc_q:%4ph\n",
			    dpk->txagc, dpk->ther, dpk->gs, dpk->dc_i, dpk->dc_q);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "DPK corr_v:%2ph corr_i:%2ph to:%2ph ov:%2ph\n",
			    dpk->corr_val, dpk->corr_idx, dpk->is_timeout, dpk->rxbb_ov);
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK:
		/* DAC calibration: timeouts, per-path offsets and MSBK tables */
		if (len != sizeof(*dack))
			goto out;

		dack = content;

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]FWDACK SUMMARY!!!!!\n");
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]FWDACK ver = 0x%x, FWDACK rpt_ver = 0x%x, driver rpt_ver = 0x%x\n",
			    dack->fwdack_ver, dack->fwdack_info_ver, 0x2);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]timeout code = [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
			    dack->addck_timeout, dack->cdack_timeout, dack->dadck_timeout,
			    dack->adgaink_timeout, dack->msbk_timeout);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]DACK fail = 0x%x\n", dack->dack_fail);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]S0 WBADCK = [0x%x]\n", dack->wbdck_d[0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]S1 WBADCK = [0x%x]\n", dack->wbdck_d[1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]DRCK = [0x%x]\n", dack->rck_d);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK ic = [0x%x, 0x%x]\n",
			    dack->cdack_d[0][0][0], dack->cdack_d[0][0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK qc = [0x%x, 0x%x]\n",
			    dack->cdack_d[0][1][0], dack->cdack_d[0][1][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 CDACK ic = [0x%x, 0x%x]\n",
			    dack->cdack_d[1][0][0], dack->cdack_d[1][0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 CDACK qc = [0x%x, 0x%x]\n",
			    dack->cdack_d[1][1][0], dack->cdack_d[1][1][1]);

		/* high/low byte halves are recombined into 16-bit values */
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK ic = [0x%x, 0x%x]\n",
			    ((u32)dack->addck2_hd[0][0][0] << 8) | dack->addck2_ld[0][0][0],
			    ((u32)dack->addck2_hd[0][0][1] << 8) | dack->addck2_ld[0][0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK qc = [0x%x, 0x%x]\n",
			    ((u32)dack->addck2_hd[0][1][0] << 8) | dack->addck2_ld[0][1][0],
			    ((u32)dack->addck2_hd[0][1][1] << 8) | dack->addck2_ld[0][1][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK ic = [0x%x, 0x%x]\n",
			    ((u32)dack->addck2_hd[1][0][0] << 8) | dack->addck2_ld[1][0][0],
			    ((u32)dack->addck2_hd[1][0][1] << 8) | dack->addck2_ld[1][0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK qc = [0x%x, 0x%x]\n",
			    ((u32)dack->addck2_hd[1][1][0] << 8) | dack->addck2_ld[1][1][0],
			    ((u32)dack->addck2_hd[1][1][1] << 8) | dack->addck2_ld[1][1][1]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_GAINK ic = 0x%x, qc = 0x%x\n",
			    dack->adgaink_d[0][0], dack->adgaink_d[0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_GAINK ic = 0x%x, qc = 0x%x\n",
			    dack->adgaink_d[1][0], dack->adgaink_d[1][1]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
			    dack->dadck_d[0][0], dack->dadck_d[0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
			    dack->dadck_d[1][0], dack->dadck_d[1][1]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 biask iqc = 0x%x\n",
			    ((u32)dack->biask_hd[0][0] << 8) | dack->biask_ld[0][0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 biask iqc = 0x%x\n",
			    ((u32)dack->biask_hd[1][0] << 8) | dack->biask_ld[1][0]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
		for (i = 0; i < 0x10; i++)
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n",
				    dack->msbk_d[0][0][i]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
		for (i = 0; i < 0x10; i++)
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n",
				    dack->msbk_d[0][1][i]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
		for (i = 0; i < 0x10; i++)
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n",
				    dack->msbk_d[1][0][i]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
		for (i = 0; i < 0x10; i++)
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n",
				    dack->msbk_d[1][1][i]);
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK:
		/* RX DC offset calibration one-liner */
		if (len != sizeof(*rxdck))
			goto out;

		rxdck = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "RXDCK ver:%d band:%2ph bw:%2ph ch:%2ph to:%2ph\n",
			    rxdck->ver, rxdck->band, rxdck->bw, rxdck->ch,
			    rxdck->timeout);
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI:
		/* TSSI alignment results per path/subband/entry */
		if (len != sizeof(*tssi))
			goto out;

		tssi = content;
		for (i = 0; i < 2; i++) {
			for (j = 0; j < 2; j++) {
				for (k = 0; k < 4; k++) {
					rtw89_debug(rtwdev, RTW89_DBG_RFK,
						    "[TSSI] alignment_power_cw_h[%d][%d][%d]=%d\n",
						    i, j, k, tssi->alignment_power_cw_h[i][j][k]);
					rtw89_debug(rtwdev, RTW89_DBG_RFK,
						    "[TSSI] alignment_power_cw_l[%d][%d][%d]=%d\n",
						    i, j, k, tssi->alignment_power_cw_l[i][j][k]);
					rtw89_debug(rtwdev, RTW89_DBG_RFK,
						    "[TSSI] alignment_power[%d][%d][%d]=%d\n",
						    i, j, k, tssi->alignment_power[i][j][k]);
					rtw89_debug(rtwdev, RTW89_DBG_RFK,
						    "[TSSI] alignment_power_cw[%d][%d][%d]=%d\n",
						    i, j, k,
						    (tssi->alignment_power_cw_h[i][j][k] << 8) +
						     tssi->alignment_power_cw_l[i][j][k]);
				}

				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[TSSI] tssi_alimk_state[%d][%d]=%d\n",
					    i, j, tssi->tssi_alimk_state[i][j]);
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[TSSI] default_txagc_offset[%d]=%d\n",
					    j, tssi->default_txagc_offset[0][j]);
			}
		}
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
		/* TX gap calibration: register snapshot and per-path tables */
		if (len != sizeof(*txgapk))
			goto out;

		txgapk = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TXGAPK]rpt r0x8010[0]=0x%x, r0x8010[1]=0x%x\n",
			    le32_to_cpu(txgapk->r0x8010[0]),
			    le32_to_cpu(txgapk->r0x8010[1]));
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_id = %d\n",
			    txgapk->chk_id);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_cnt = %d\n",
			    le32_to_cpu(txgapk->chk_cnt));
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt ver = 0x%x\n",
			    txgapk->ver);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt rsv1 = %d\n",
			    txgapk->rsv1);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[0] = %*ph\n",
			    (int)sizeof(txgapk->track_d[0]), txgapk->track_d[0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[0] = %*ph\n",
			    (int)sizeof(txgapk->power_d[0]), txgapk->power_d[0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[1] = %*ph\n",
			    (int)sizeof(txgapk->track_d[1]), txgapk->track_d[1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[1] = %*ph\n",
			    (int)sizeof(txgapk->power_d[1]), txgapk->power_d[1]);
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR:
		/* the only case with a side effect: feed the TAS averager */
		if (len != sizeof(struct rtw89_c2h_rf_tas_rpt_log))
			goto out;

		rtw89_phy_c2h_rfk_tas_pwr(rtwdev, content);

		return;
	default:
		break;
	}

out:
	/* unknown func or length mismatch with the expected report struct */
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "unexpected RFK func %d report log with length %d\n", func, len);
}
3530 
3531 static bool rtw89_phy_c2h_rfk_run_log(struct rtw89_dev *rtwdev,
3532 				      enum rtw89_phy_c2h_rfk_log_func func,
3533 				      void *content, u16 len)
3534 {
3535 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
3536 	const struct rtw89_c2h_rf_run_log *log = content;
3537 	const struct rtw89_fw_element_hdr *elm;
3538 	u32 fmt_idx;
3539 	u16 offset;
3540 
3541 	if (sizeof(*log) != len)
3542 		return false;
3543 
3544 	if (!elm_info->rfk_log_fmt)
3545 		return false;
3546 
3547 	elm = elm_info->rfk_log_fmt->elm[func];
3548 	fmt_idx = le32_to_cpu(log->fmt_idx);
3549 	if (!elm || fmt_idx >= elm->u.rfk_log_fmt.nr)
3550 		return false;
3551 
3552 	offset = le16_to_cpu(elm->u.rfk_log_fmt.offset[fmt_idx]);
3553 	if (offset == 0)
3554 		return false;
3555 
3556 	rtw89_debug(rtwdev, RTW89_DBG_RFK, &elm->u.common.contents[offset],
3557 		    le32_to_cpu(log->arg[0]), le32_to_cpu(log->arg[1]),
3558 		    le32_to_cpu(log->arg[2]), le32_to_cpu(log->arg[3]));
3559 
3560 	return true;
3561 }
3562 
/* Walk the chain of RF log chunks inside one RFK C2H frame. Each chunk is
 * a struct rtw89_c2h_rf_log_hdr followed by log_hdr->len bytes of payload;
 * RUN_LOG chunks are formatted via the firmware string table (falling back
 * to a hex dump), RPT_LOG chunks go to the report decoder, and any other
 * type aborts the walk.
 *
 * NOTE(review): len is decremented by sizeof(*c2h_hdr) without a lower
 * bound check — presumably callers guarantee len covers the C2H header;
 * confirm against the C2H dispatch path.
 */
static void rtw89_phy_c2h_rfk_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
				  u32 len, enum rtw89_phy_c2h_rfk_log_func func,
				  const char *rfk_name)
{
	struct rtw89_c2h_hdr *c2h_hdr = (struct rtw89_c2h_hdr *)c2h->data;
	struct rtw89_c2h_rf_log_hdr *log_hdr;
	void *log_ptr = c2h_hdr;
	u16 content_len;
	u16 chunk_len;
	bool handled;

	/* skip the generic C2H header; chunks start right after it */
	log_ptr += sizeof(*c2h_hdr);
	len -= sizeof(*c2h_hdr);

	while (len > sizeof(*log_hdr)) {
		log_hdr = log_ptr;
		content_len = le16_to_cpu(log_hdr->len);
		chunk_len = content_len + sizeof(*log_hdr);

		/* truncated chunk: stop rather than read past the frame */
		if (chunk_len > len)
			break;

		switch (log_hdr->type) {
		case RTW89_RF_RUN_LOG:
			handled = rtw89_phy_c2h_rfk_run_log(rtwdev, func,
							    log_hdr->content, content_len);
			if (handled)
				break;

			/* no format table entry; dump the raw payload */
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s run: %*ph\n",
				    rfk_name, content_len, log_hdr->content);
			break;
		case RTW89_RF_RPT_LOG:
			rtw89_phy_c2h_rfk_rpt_log(rtwdev, func,
						  log_hdr->content, content_len);
			break;
		default:
			/* unknown chunk type: cannot trust the rest */
			return;
		}

		log_ptr += chunk_len;
		len -= chunk_len;
	}
}
3607 
/* Thin per-calibration wrappers binding each RFK log function id to its
 * human-readable name for rtw89_phy_c2h_rfk_log(); collected in the
 * dispatch table below.
 */
static void
rtw89_phy_c2h_rfk_log_iqk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_IQK, "IQK");
}

static void
rtw89_phy_c2h_rfk_log_dpk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_DPK, "DPK");
}

static void
rtw89_phy_c2h_rfk_log_dack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_DACK, "DACK");
}

static void
rtw89_phy_c2h_rfk_log_rxdck(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK, "RX_DCK");
}

static void
rtw89_phy_c2h_rfk_log_tssi(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI, "TSSI");
}

static void
rtw89_phy_c2h_rfk_log_txgapk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK, "TXGAPK");
}

static void
rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR, "TAS");
}

/* Dispatch table for RFK-log-class C2H events, indexed by function id */
static
void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
					       struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_RFK_LOG_FUNC_IQK] = rtw89_phy_c2h_rfk_log_iqk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_DPK] = rtw89_phy_c2h_rfk_log_dpk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_DACK] = rtw89_phy_c2h_rfk_log_dack,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK] = rtw89_phy_c2h_rfk_log_rxdck,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI] = rtw89_phy_c2h_rfk_log_tssi,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
};
3668 
/* Arm the RFK wait object before kicking off a calibration: mark the state
 * as started, record the start time for the duration log, and reset the
 * completion that the RFK report C2H will signal.
 */
static
void rtw89_phy_rfk_report_prep(struct rtw89_dev *rtwdev)
{
	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;

	wait->state = RTW89_RFK_STATE_START;
	wait->start_time = ktime_get();
	reinit_completion(&wait->completion);
}
3678 
3679 static
3680 int rtw89_phy_rfk_report_wait(struct rtw89_dev *rtwdev, const char *rfk_name,
3681 			      unsigned int ms)
3682 {
3683 	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
3684 	unsigned long time_left;
3685 
3686 	/* Since we can't receive C2H event during SER, use a fixed delay. */
3687 	if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
3688 		fsleep(1000 * ms / 2);
3689 		goto out;
3690 	}
3691 
3692 	time_left = wait_for_completion_timeout(&wait->completion,
3693 						msecs_to_jiffies(ms));
3694 	if (time_left == 0) {
3695 		rtw89_warn(rtwdev, "failed to wait RF %s\n", rfk_name);
3696 		return -ETIMEDOUT;
3697 	} else if (wait->state != RTW89_RFK_STATE_OK) {
3698 		rtw89_warn(rtwdev, "failed to do RF %s result from state %d\n",
3699 			   rfk_name, wait->state);
3700 		return -EFAULT;
3701 	}
3702 
3703 out:
3704 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "RF %s takes %lld ms to complete\n",
3705 		    rfk_name, ktime_ms_delta(ktime_get(), wait->start_time));
3706 
3707 	return 0;
3708 }
3709 
/* C2H handler for the RFK state report: latch the firmware's reported
 * state/version into the wait object and wake any waiter in
 * rtw89_phy_rfk_report_wait().
 */
static void
rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	const struct rtw89_c2h_rfk_report *report =
		(const struct rtw89_c2h_rfk_report *)c2h->data;
	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;

	wait->state = report->state;
	wait->version = report->version;

	complete(&wait->completion);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "RFK report state %d with version %d (%*ph)\n",
		    wait->state, wait->version,
		    (int)(len - sizeof(report->hdr)), &report->state);
}
3727 
/* C2H handler for standalone TAS power reports: unwrap the frame and feed
 * its content to the shared TAS power averager.
 */
static void
rtw89_phy_c2h_rfk_report_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	const struct rtw89_c2h_rf_tas_info *report =
		(const struct rtw89_c2h_rf_tas_info *)c2h->data;

	rtw89_phy_c2h_rfk_tas_pwr(rtwdev, &report->content);
}
3736 
/* Dispatch table for RFK-report-class C2H events, indexed by function id */
static
void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
						  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
	[RTW89_PHY_C2H_RFK_REPORT_FUNC_TAS_PWR] = rtw89_phy_c2h_rfk_report_tas_pwr,
};
3743 
3744 bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
3745 {
3746 	switch (class) {
3747 	case RTW89_PHY_C2H_RFK_LOG:
3748 		switch (func) {
3749 		case RTW89_PHY_C2H_RFK_LOG_FUNC_IQK:
3750 		case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
3751 		case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK:
3752 		case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK:
3753 		case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI:
3754 		case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
3755 			return true;
3756 		default:
3757 			return false;
3758 		}
3759 	case RTW89_PHY_C2H_RFK_REPORT:
3760 		switch (func) {
3761 		case RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE:
3762 			return true;
3763 		default:
3764 			return false;
3765 		}
3766 	default:
3767 		return false;
3768 	}
3769 }
3770 
3771 void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
3772 			  u32 len, u8 class, u8 func)
3773 {
3774 	void (*handler)(struct rtw89_dev *rtwdev,
3775 			struct sk_buff *c2h, u32 len) = NULL;
3776 
3777 	switch (class) {
3778 	case RTW89_PHY_C2H_CLASS_RA:
3779 		if (func < RTW89_PHY_C2H_FUNC_RA_MAX)
3780 			handler = rtw89_phy_c2h_ra_handler[func];
3781 		break;
3782 	case RTW89_PHY_C2H_RFK_LOG:
3783 		if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_log_handler))
3784 			handler = rtw89_phy_c2h_rfk_log_handler[func];
3785 		break;
3786 	case RTW89_PHY_C2H_RFK_REPORT:
3787 		if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_report_handler))
3788 			handler = rtw89_phy_c2h_rfk_report_handler[func];
3789 		break;
3790 	case RTW89_PHY_C2H_CLASS_DM:
3791 		if (func < ARRAY_SIZE(rtw89_phy_c2h_dm_handler))
3792 			handler = rtw89_phy_c2h_dm_handler[func];
3793 		break;
3794 	default:
3795 		break;
3796 	}
3797 	if (!handler) {
3798 		rtw89_info_once(rtwdev, "PHY c2h class %d func %d not support\n",
3799 				class, func);
3800 		return;
3801 	}
3802 	handler(rtwdev, skb, len);
3803 }
3804 
3805 int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
3806 				    enum rtw89_phy_idx phy_idx,
3807 				    unsigned int ms)
3808 {
3809 	int ret;
3810 
3811 	rtw89_phy_rfk_report_prep(rtwdev);
3812 
3813 	ret = rtw89_fw_h2c_rf_pre_ntfy(rtwdev, phy_idx);
3814 	if (ret)
3815 		return ret;
3816 
3817 	return rtw89_phy_rfk_report_wait(rtwdev, "PRE_NTFY", ms);
3818 }
3819 EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait);
3820 
3821 int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
3822 				enum rtw89_phy_idx phy_idx,
3823 				const struct rtw89_chan *chan,
3824 				enum rtw89_tssi_mode tssi_mode,
3825 				unsigned int ms)
3826 {
3827 	int ret;
3828 
3829 	rtw89_phy_rfk_report_prep(rtwdev);
3830 
3831 	ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, chan, tssi_mode);
3832 	if (ret)
3833 		return ret;
3834 
3835 	return rtw89_phy_rfk_report_wait(rtwdev, "TSSI", ms);
3836 }
3837 EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait);
3838 
3839 int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
3840 			       enum rtw89_phy_idx phy_idx,
3841 			       const struct rtw89_chan *chan,
3842 			       unsigned int ms)
3843 {
3844 	int ret;
3845 
3846 	rtw89_phy_rfk_report_prep(rtwdev);
3847 
3848 	ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx, chan);
3849 	if (ret)
3850 		return ret;
3851 
3852 	return rtw89_phy_rfk_report_wait(rtwdev, "IQK", ms);
3853 }
3854 EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait);
3855 
3856 int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
3857 			       enum rtw89_phy_idx phy_idx,
3858 			       const struct rtw89_chan *chan,
3859 			       unsigned int ms)
3860 {
3861 	int ret;
3862 
3863 	rtw89_phy_rfk_report_prep(rtwdev);
3864 
3865 	ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx, chan);
3866 	if (ret)
3867 		return ret;
3868 
3869 	return rtw89_phy_rfk_report_wait(rtwdev, "DPK", ms);
3870 }
3871 EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait);
3872 
3873 int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
3874 				  enum rtw89_phy_idx phy_idx,
3875 				  const struct rtw89_chan *chan,
3876 				  unsigned int ms)
3877 {
3878 	int ret;
3879 
3880 	rtw89_phy_rfk_report_prep(rtwdev);
3881 
3882 	ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx, chan);
3883 	if (ret)
3884 		return ret;
3885 
3886 	return rtw89_phy_rfk_report_wait(rtwdev, "TXGAPK", ms);
3887 }
3888 EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait);
3889 
3890 int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
3891 				enum rtw89_phy_idx phy_idx,
3892 				const struct rtw89_chan *chan,
3893 				unsigned int ms)
3894 {
3895 	int ret;
3896 
3897 	rtw89_phy_rfk_report_prep(rtwdev);
3898 
3899 	ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx, chan);
3900 	if (ret)
3901 		return ret;
3902 
3903 	return rtw89_phy_rfk_report_wait(rtwdev, "DACK", ms);
3904 }
3905 EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait);
3906 
3907 int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
3908 				 enum rtw89_phy_idx phy_idx,
3909 				 const struct rtw89_chan *chan,
3910 				 bool is_chl_k, unsigned int ms)
3911 {
3912 	int ret;
3913 
3914 	rtw89_phy_rfk_report_prep(rtwdev);
3915 
3916 	ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx, chan, is_chl_k);
3917 	if (ret)
3918 		return ret;
3919 
3920 	return rtw89_phy_rfk_report_wait(rtwdev, "RX_DCK", ms);
3921 }
3922 EXPORT_SYMBOL(rtw89_phy_rfk_rxdck_and_wait);
3923 
3924 static u32 phy_tssi_get_cck_group(u8 ch)
3925 {
3926 	switch (ch) {
3927 	case 1 ... 2:
3928 		return 0;
3929 	case 3 ... 5:
3930 		return 1;
3931 	case 6 ... 8:
3932 		return 2;
3933 	case 9 ... 11:
3934 		return 3;
3935 	case 12 ... 13:
3936 		return 4;
3937 	case 14:
3938 		return 5;
3939 	}
3940 
3941 	return 0;
3942 }
3943 
/* "Extra" TSSI groups: a channel that sits between two calibrated groups is
 * tagged with bit 31; its DE is then averaged from group IDX1 and the
 * adjacent group IDX2 (= IDX1 + 1).
 */
#define PHY_TSSI_EXTRA_GROUP_BIT BIT(31)
#define PHY_TSSI_EXTRA_GROUP(idx) (PHY_TSSI_EXTRA_GROUP_BIT | (idx))
#define PHY_IS_TSSI_EXTRA_GROUP(group) ((group) & PHY_TSSI_EXTRA_GROUP_BIT)
#define PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) \
	((group) & ~PHY_TSSI_EXTRA_GROUP_BIT)
#define PHY_TSSI_EXTRA_GET_GROUP_IDX2(group) \
	(PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
3951 
/* Map a 2G/5G channel to its OFDM TSSI DE group. Channels falling between
 * two calibrated groups return PHY_TSSI_EXTRA_GROUP(n), meaning the DE is
 * averaged over groups n and n + 1. Unknown channels map to group 0.
 */
static u32 phy_tssi_get_ofdm_group(u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return PHY_TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return PHY_TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return PHY_TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return PHY_TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return PHY_TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return PHY_TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return PHY_TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return PHY_TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return PHY_TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return PHY_TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return PHY_TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}
4019 
/* Map a 6G channel to its OFDM TSSI DE group (same "extra group" averaging
 * convention as phy_tssi_get_ofdm_group()). Unknown channels map to group 0.
 */
static u32 phy_tssi_get_6g_ofdm_group(u8 ch)
{
	switch (ch) {
	case 1 ... 5:
		return 0;
	case 6 ... 8:
		return PHY_TSSI_EXTRA_GROUP(0);
	case 9 ... 13:
		return 1;
	case 14 ... 16:
		return PHY_TSSI_EXTRA_GROUP(1);
	case 17 ... 21:
		return 2;
	case 22 ... 24:
		return PHY_TSSI_EXTRA_GROUP(2);
	case 25 ... 29:
		return 3;
	case 33 ... 37:
		return 4;
	case 38 ... 40:
		return PHY_TSSI_EXTRA_GROUP(4);
	case 41 ... 45:
		return 5;
	case 46 ... 48:
		return PHY_TSSI_EXTRA_GROUP(5);
	case 49 ... 53:
		return 6;
	case 54 ... 56:
		return PHY_TSSI_EXTRA_GROUP(6);
	case 57 ... 61:
		return 7;
	case 65 ... 69:
		return 8;
	case 70 ... 72:
		return PHY_TSSI_EXTRA_GROUP(8);
	case 73 ... 77:
		return 9;
	case 78 ... 80:
		return PHY_TSSI_EXTRA_GROUP(9);
	case 81 ... 85:
		return 10;
	case 86 ... 88:
		return PHY_TSSI_EXTRA_GROUP(10);
	case 89 ... 93:
		return 11;
	case 97 ... 101:
		return 12;
	case 102 ... 104:
		return PHY_TSSI_EXTRA_GROUP(12);
	case 105 ... 109:
		return 13;
	case 110 ... 112:
		return PHY_TSSI_EXTRA_GROUP(13);
	case 113 ... 117:
		return 14;
	case 118 ... 120:
		return PHY_TSSI_EXTRA_GROUP(14);
	case 121 ... 125:
		return 15;
	case 129 ... 133:
		return 16;
	case 134 ... 136:
		return PHY_TSSI_EXTRA_GROUP(16);
	case 137 ... 141:
		return 17;
	case 142 ... 144:
		return PHY_TSSI_EXTRA_GROUP(17);
	case 145 ... 149:
		return 18;
	case 150 ... 152:
		return PHY_TSSI_EXTRA_GROUP(18);
	case 153 ... 157:
		return 19;
	case 161 ... 165:
		return 20;
	case 166 ... 168:
		return PHY_TSSI_EXTRA_GROUP(20);
	case 169 ... 173:
		return 21;
	case 174 ... 176:
		return PHY_TSSI_EXTRA_GROUP(21);
	case 177 ... 181:
		return 22;
	case 182 ... 184:
		return PHY_TSSI_EXTRA_GROUP(22);
	case 185 ... 189:
		return 23;
	case 193 ... 197:
		return 24;
	case 198 ... 200:
		return PHY_TSSI_EXTRA_GROUP(24);
	case 201 ... 205:
		return 25;
	case 206 ... 208:
		return PHY_TSSI_EXTRA_GROUP(25);
	case 209 ... 213:
		return 26;
	case 214 ... 216:
		return PHY_TSSI_EXTRA_GROUP(26);
	case 217 ... 221:
		return 27;
	case 225 ... 229:
		return 28;
	case 230 ... 232:
		return PHY_TSSI_EXTRA_GROUP(28);
	case 233 ... 237:
		return 29;
	case 238 ... 240:
		return PHY_TSSI_EXTRA_GROUP(29);
	case 241 ... 245:
		return 30;
	case 246 ... 248:
		return PHY_TSSI_EXTRA_GROUP(30);
	case 249 ... 253:
		return 31;
	}

	return 0;
}
4139 
/* Map a 2G/5G channel to its TSSI trim group (coarser than the DE groups);
 * "extra" groups are averaged as in phy_tssi_get_ofdm_group(). Unknown
 * channels map to group 0.
 */
static u32 phy_tssi_get_trim_group(u8 ch)
{
	switch (ch) {
	case 1 ... 8:
		return 0;
	case 9 ... 14:
		return 1;
	case 36 ... 48:
		return 2;
	case 49 ... 51:
		return PHY_TSSI_EXTRA_GROUP(2);
	case 52 ... 64:
		return 3;
	case 100 ... 112:
		return 4;
	case 113 ... 115:
		return PHY_TSSI_EXTRA_GROUP(4);
	case 116 ... 128:
		return 5;
	case 132 ... 144:
		return 6;
	case 149 ... 177:
		return 7;
	}

	return 0;
}
4167 
/* Map a 6G channel to its TSSI trim group; same "extra group" averaging
 * convention. Unknown channels map to group 0.
 */
static u32 phy_tssi_get_6g_trim_group(u8 ch)
{
	switch (ch) {
	case 1 ... 13:
		return 0;
	case 14 ... 16:
		return PHY_TSSI_EXTRA_GROUP(0);
	case 17 ... 29:
		return 1;
	case 33 ... 45:
		return 2;
	case 46 ... 48:
		return PHY_TSSI_EXTRA_GROUP(2);
	case 49 ... 61:
		return 3;
	case 65 ... 77:
		return 4;
	case 78 ... 80:
		return PHY_TSSI_EXTRA_GROUP(4);
	case 81 ... 93:
		return 5;
	case 97 ... 109:
		return 6;
	case 110 ... 112:
		return PHY_TSSI_EXTRA_GROUP(6);
	case 113 ... 125:
		return 7;
	case 129 ... 141:
		return 8;
	case 142 ... 144:
		return PHY_TSSI_EXTRA_GROUP(8);
	case 145 ... 157:
		return 9;
	case 161 ... 173:
		return 10;
	case 174 ... 176:
		return PHY_TSSI_EXTRA_GROUP(10);
	case 177 ... 189:
		return 11;
	case 193 ... 205:
		return 12;
	case 206 ... 208:
		return PHY_TSSI_EXTRA_GROUP(12);
	case 209 ... 221:
		return 13;
	case 225 ... 237:
		return 14;
	case 238 ... 240:
		return PHY_TSSI_EXTRA_GROUP(14);
	case 241 ... 253:
		return 15;
	}

	return 0;
}
4223 
4224 static s8 phy_tssi_get_ofdm_de(struct rtw89_dev *rtwdev,
4225 			       enum rtw89_phy_idx phy,
4226 			       const struct rtw89_chan *chan,
4227 			       enum rtw89_rf_path path)
4228 {
4229 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
4230 	enum rtw89_band band = chan->band_type;
4231 	u8 ch = chan->channel;
4232 	u32 gidx_1st;
4233 	u32 gidx_2nd;
4234 	s8 de_1st;
4235 	s8 de_2nd;
4236 	u32 gidx;
4237 	s8 val;
4238 
4239 	if (band == RTW89_BAND_6G)
4240 		goto calc_6g;
4241 
4242 	gidx = phy_tssi_get_ofdm_group(ch);
4243 
4244 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4245 		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
4246 		    path, gidx);
4247 
4248 	if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
4249 		gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
4250 		gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
4251 		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
4252 		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
4253 		val = (de_1st + de_2nd) / 2;
4254 
4255 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4256 			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
4257 			    path, val, de_1st, de_2nd);
4258 	} else {
4259 		val = tssi_info->tssi_mcs[path][gidx];
4260 
4261 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4262 			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
4263 	}
4264 
4265 	return val;
4266 
4267 calc_6g:
4268 	gidx = phy_tssi_get_6g_ofdm_group(ch);
4269 
4270 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4271 		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
4272 		    path, gidx);
4273 
4274 	if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
4275 		gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
4276 		gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
4277 		de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
4278 		de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
4279 		val = (de_1st + de_2nd) / 2;
4280 
4281 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4282 			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
4283 			    path, val, de_1st, de_2nd);
4284 	} else {
4285 		val = tssi_info->tssi_6g_mcs[path][gidx];
4286 
4287 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4288 			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
4289 	}
4290 
4291 	return val;
4292 }
4293 
4294 static s8 phy_tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
4295 				    enum rtw89_phy_idx phy,
4296 				    const struct rtw89_chan *chan,
4297 				    enum rtw89_rf_path path)
4298 {
4299 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
4300 	enum rtw89_band band = chan->band_type;
4301 	u8 ch = chan->channel;
4302 	u32 tgidx_1st;
4303 	u32 tgidx_2nd;
4304 	s8 tde_1st;
4305 	s8 tde_2nd;
4306 	u32 tgidx;
4307 	s8 val;
4308 
4309 	if (band == RTW89_BAND_6G)
4310 		goto calc_6g;
4311 
4312 	tgidx = phy_tssi_get_trim_group(ch);
4313 
4314 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4315 		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
4316 		    path, tgidx);
4317 
4318 	if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
4319 		tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
4320 		tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
4321 		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
4322 		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
4323 		val = (tde_1st + tde_2nd) / 2;
4324 
4325 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4326 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
4327 			    path, val, tde_1st, tde_2nd);
4328 	} else {
4329 		val = tssi_info->tssi_trim[path][tgidx];
4330 
4331 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4332 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
4333 			    path, val);
4334 	}
4335 
4336 	return val;
4337 
4338 calc_6g:
4339 	tgidx = phy_tssi_get_6g_trim_group(ch);
4340 
4341 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4342 		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
4343 		    path, tgidx);
4344 
4345 	if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
4346 		tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
4347 		tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
4348 		tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
4349 		tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
4350 		val = (tde_1st + tde_2nd) / 2;
4351 
4352 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4353 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
4354 			    path, val, tde_1st, tde_2nd);
4355 	} else {
4356 		val = tssi_info->tssi_trim_6g[path][tgidx];
4357 
4358 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4359 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
4360 			    path, val);
4361 	}
4362 
4363 	return val;
4364 }
4365 
/* Fill the TSSI H2C command with per-path DE values derived from efuse
 * calibration (CCK and OFDM) plus the per-channel trim offset.
 */
void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
					       enum rtw89_phy_idx phy,
					       const struct rtw89_chan *chan,
					       struct rtw89_h2c_rf_tssi *h2c)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	s8 trim_de;
	s8 ofdm_de;
	s8 cck_de;
	u8 gidx;
	s8 val;
	int i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = RF_PATH_A; i <= RF_PATH_B; i++) {
		trim_de = phy_tssi_get_ofdm_trim_de(rtwdev, phy, chan, i);
		h2c->curr_tssi_trim_de[i] = trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d trim_de=0x%x\n", i, trim_de);

		gidx = phy_tssi_get_cck_group(ch);
		cck_de = tssi_info->tssi_cck[i][gidx];
		/* DE + trim truncated to its low 8 bits for the firmware */
		val = u32_get_bits(cck_de + trim_de, 0xff);

		h2c->curr_tssi_cck_de[i] = 0x0;
		h2c->curr_tssi_cck_de_20m[i] = val;
		h2c->curr_tssi_cck_de_40m[i] = val;
		h2c->curr_tssi_efuse_cck_de[i] = cck_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck_de=0x%x\n", i, cck_de);

		ofdm_de = phy_tssi_get_ofdm_de(rtwdev, phy, chan, i);
		val = u32_get_bits(ofdm_de + trim_de, 0xff);

		/* same DE applied across all OFDM bandwidths */
		h2c->curr_tssi_ofdm_de[i] = 0x0;
		h2c->curr_tssi_ofdm_de_20m[i] = val;
		h2c->curr_tssi_ofdm_de_40m[i] = val;
		h2c->curr_tssi_ofdm_de_80m[i] = val;
		h2c->curr_tssi_ofdm_de_160m[i] = val;
		h2c->curr_tssi_ofdm_de_320m[i] = val;
		h2c->curr_tssi_efuse_ofdm_de[i] = ofdm_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d ofdm_de=0x%x\n", i, ofdm_de);
	}
}
4417 
/* Fill the TSSI H2C command with the per-path thermal compensation table
 * (128 offsets indexed by thermal reading), built from the firmware-file
 * delta-swing tables for the current subband.
 */
void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
					      enum rtw89_phy_idx phy,
					      const struct rtw89_chan *chan,
					      struct rtw89_h2c_rf_tssi *h2c)
{
	struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const s8 *thm_up[RF_PATH_B + 1] = {};
	const s8 *thm_down[RF_PATH_B + 1] = {};
	u8 subband = chan->subband_type;
	s8 thm_ofst[128] = {0};
	u8 thermal;
	u8 path;
	u8 i, j;

	/* Pick the positive (_P) and negative (_N) delta-swing tables for
	 * each path according to band/subband.
	 */
	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
		break;
	case RTW89_CH_6G_BAND_IDX0:
	case RTW89_CH_6G_BAND_IDX1:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0];
		break;
	case RTW89_CH_6G_BAND_IDX2:
	case RTW89_CH_6G_BAND_IDX3:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1];
		break;
	case RTW89_CH_6G_BAND_IDX4:
	case RTW89_CH_6G_BAND_IDX5:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2];
		break;
	case RTW89_CH_6G_BAND_IDX6:
	case RTW89_CH_6G_BAND_IDX7:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3];
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] tmeter tbl on subband: %u\n", subband);

	for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
		thermal = tssi_info->thermal[path];
		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "path: %u, pg thermal: 0x%x\n", path, thermal);

		/* 0xff means no PG thermal value in efuse: use a default
		 * midpoint and a zeroed (no compensation) table.
		 */
		if (thermal == 0xff) {
			h2c->pg_thermal[path] = 0x38;
			memset(h2c->ftable[path], 0, sizeof(h2c->ftable[path]));
			continue;
		}

		h2c->pg_thermal[path] = thermal;

		/* Indices 0..63: increasing positive offsets from thm_up,
		 * clamped at the table's last entry.
		 */
		i = 0;
		for (j = 0; j < 64; j++)
			thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
				      thm_up[path][i++] :
				      thm_up[path][DELTA_SWINGIDX_SIZE - 1];

		/* Indices 127..64: negative offsets from thm_down, walking
		 * backwards so index 127 holds the smallest step.
		 */
		i = 1;
		for (j = 127; j >= 64; j--)
			thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
				      -thm_down[path][i++] :
				      -thm_down[path][DELTA_SWINGIDX_SIZE - 1];

		/* Pack each group of 4 offsets in reversed byte order —
		 * NOTE(review): presumably the firmware reads the table as
		 * little-endian 32-bit words; confirm against the h2c spec.
		 */
		for (i = 0; i < 128; i += 4) {
			h2c->ftable[path][i + 0] = thm_ofst[i + 3];
			h2c->ftable[path][i + 1] = thm_ofst[i + 2];
			h2c->ftable[path][i + 2] = thm_ofst[i + 1];
			h2c->ftable[path][i + 3] = thm_ofst[i + 0];

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "thm ofst [%x]: %02x %02x %02x %02x\n",
				    i, thm_ofst[i], thm_ofst[i + 1],
				    thm_ofst[i + 2], thm_ofst[i + 3]);
		}
	}
}
4530 
4531 static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo)
4532 {
4533 	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
4534 	u32 reg_mask;
4535 
4536 	if (sc_xo)
4537 		reg_mask = xtal->sc_xo_mask;
4538 	else
4539 		reg_mask = xtal->sc_xi_mask;
4540 
4541 	return (u8)rtw89_read32_mask(rtwdev, xtal->xcap_reg, reg_mask);
4542 }
4543 
4544 static void rtw89_phy_cfo_set_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo,
4545 				       u8 val)
4546 {
4547 	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
4548 	u32 reg_mask;
4549 
4550 	if (sc_xo)
4551 		reg_mask = xtal->sc_xo_mask;
4552 	else
4553 		reg_mask = xtal->sc_xi_mask;
4554 
4555 	rtw89_write32_mask(rtwdev, xtal->xcap_reg, reg_mask, val);
4556 }
4557 
/* Program @crystal_cap into both SC_XO and SC_XI, read back the values and
 * update the tracking state. No-op unless the value changes or @force.
 */
static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
					  u8 crystal_cap, bool force)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 sc_xi_val, sc_xo_val;

	if (!force && cfo->crystal_cap == crystal_cap)
		return;
	/* 8852A/8851B expose xcap via a MAC/BB register; other chips go
	 * through the XTAL_SI indirect interface.
	 */
	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
		rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
		rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
		sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
		sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
	} else {
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO,
					crystal_cap, XTAL_SC_XO_MASK);
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI,
					crystal_cap, XTAL_SC_XI_MASK);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, &sc_xo_val);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, &sc_xi_val);
	}
	/* Track the read-back value, not the requested one */
	cfo->crystal_cap = sc_xi_val;
	cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap);

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xi=0x%x\n", sc_xi_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xo=0x%x\n", sc_xo_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Get xcap_ofst=%d\n",
		    cfo->x_cap_ofst);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set xcap OK\n");
}
4589 
/* Step the crystal capacitance one unit back toward its default value and
 * disable adjustment; called to gradually undo CFO tracking drift.
 */
static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	u8 cap;

	cfo->def_x_cap = cfo->crystal_cap_default & B_AX_XTAL_SC_MASK;
	cfo->is_adjust = false;
	if (cfo->crystal_cap == cfo->def_x_cap)
		return;
	/* move a single step toward the default, not all the way */
	cap = cfo->crystal_cap;
	cap += (cap > cfo->def_x_cap ? -1 : 1);
	rtw89_phy_cfo_set_crystal_cap(rtwdev, cap, false);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "(0x%x) approach to dflt_val=(0x%x)\n", cfo->crystal_cap,
		    cfo->def_x_cap);
}
4606 
/* Apply digital CFO compensation: combine the current residual CFO with the
 * value already programmed in hardware and write the result back. Only runs
 * on chips that define dcfo_comp and only while a station is associated.
 */
static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
{
	const struct rtw89_reg_def *dcfo_comp = rtwdev->chip->dcfo_comp;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	s32 cfo_avg_312;
	s32 dcfo_comp_val;
	int sign;

	if (!dcfo_comp)
		return;

	if (!is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: is_linked=%d\n",
			    is_linked);
		return;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo);
	if (curr_cfo == 0)
		return;
	dcfo_comp_val = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
	sign = curr_cfo > 0 ? 1 : -1;
	/* NOTE(review): 625 presumably converts kHz CFO into 0.3125 kHz
	 * hardware steps — confirm the unit against the register spec.
	 */
	cfo_avg_312 = curr_cfo / 625 + sign * dcfo_comp_val;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "avg_cfo_312=%d step\n", cfo_avg_312);
	/* 8852A CBV applies the compensation with inverted polarity */
	if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
		cfo_avg_312 = -cfo_avg_312;
	rtw89_phy_set_phy_regs(rtwdev, dcfo_comp->addr, dcfo_comp->mask,
			       cfo_avg_312);
}
4635 
/* One-time digital CFO compensation setup: enable segment-0 compensation,
 * set the averaging weight, and on AX-generation chips choose between
 * hardware-assisted and software compensation for UL power control.
 */
static void rtw89_dcfo_comp_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_cfo_regs *cfo = phy->cfo;

	rtw89_phy_set_phy_regs(rtwdev, cfo->comp_seg0, cfo->valid_0_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, cfo->comp, cfo->weighting_mask, 8);

	if (chip->chip_gen == RTW89_CHIP_AX) {
		if (chip->cfo_hw_comp) {
			rtw89_write32_mask(rtwdev, R_AX_PWR_UL_CTRL2,
					   B_AX_PWR_UL_CFO_MASK, 0x6);
		} else {
			rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1);
			rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2,
					  B_AX_PWR_UL_CFO_MASK);
		}
	}
}
4656 
/* Initialize CFO tracking state from the efuse crystal-cap default, program
 * that default into hardware, and set up DCFO compensation.
 */
static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK;
	cfo->crystal_cap = cfo->crystal_cap_default;
	cfo->def_x_cap = cfo->crystal_cap;
	/* clamp the tunable range to [0x1, 0x7f] around the default */
	cfo->x_cap_ub = min_t(int, cfo->def_x_cap + CFO_BOUND, 0x7f);
	cfo->x_cap_lb = max_t(int, cfo->def_x_cap - CFO_BOUND, 0x1);
	cfo->is_adjust = false;
	cfo->divergence_lock_en = false;
	cfo->x_cap_ofst = 0;
	cfo->lock_cnt = 0;
	cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE;
	cfo->apply_compensation = false;
	cfo->residual_cfo_acc = 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Default xcap=%0x\n",
		    cfo->crystal_cap_default);
	rtw89_phy_cfo_set_crystal_cap(rtwdev, cfo->crystal_cap_default, true);
	rtw89_dcfo_comp_init(rtwdev);
	cfo->cfo_timer_ms = 2000;
	cfo->cfo_trig_by_timer_en = false;
	cfo->phy_cfo_trk_cnt = 0;
	cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
	cfo->cfo_ul_ofdma_acc_mode = RTW89_CFO_UL_OFDMA_ACC_ENABLE;
}
4684 
/* Nudge the crystal capacitance to correct the measured CFO @curr_cfo, with
 * hysteresis so small residual errors do not cause oscillation.
 */
static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
					     s32 curr_cfo)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	int crystal_cap = cfo->crystal_cap;
	s32 cfo_abs = abs(curr_cfo);
	int sign;

	if (curr_cfo == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n");
		return;
	}
	/* hysteresis: start adjusting above ENABLE_TH, stop below STOP_TH */
	if (!cfo->is_adjust) {
		if (cfo_abs > CFO_TRK_ENABLE_TH)
			cfo->is_adjust = true;
	} else {
		if (cfo_abs <= CFO_TRK_STOP_TH)
			cfo->is_adjust = false;
	}
	if (!cfo->is_adjust) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Stop CFO tracking\n");
		return;
	}
	sign = curr_cfo > 0 ? 1 : -1;
	/* NOTE(review): TH_4/TH_3 and TH_2/TH_1 branches apply identical
	 * steps (3 and 1), so pairs look collapsible — confirm thresholds
	 * are monotonic and the duplicated steps are intentional.
	 */
	if (cfo_abs > CFO_TRK_STOP_TH_4)
		crystal_cap += 3 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_3)
		crystal_cap += 3 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_2)
		crystal_cap += 1 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_1)
		crystal_cap += 1 * sign;
	else
		return;

	crystal_cap = clamp(crystal_cap, 0, 127);
	rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "X_cap{Curr,Default}={0x%x,0x%x}\n",
		    cfo->crystal_cap, cfo->def_x_cap);
}
4726 
/* Average the accumulated per-packet CFO samples for the single-STA case;
 * returns 0 unless exactly one station is associated. Also updates the
 * DCFO average used by hardware compensation.
 */
static s32 rtw89_phy_average_cfo_calc(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s32 cfo_khz_all = 0;
	s32 cfo_cnt_all = 0;
	s32 cfo_all_avg = 0;
	u8 i;

	if (rtwdev->total_sta_assoc != 1)
		return 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "one_entry_only\n");
	for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
		if (cfo->cfo_cnt[i] == 0)
			continue;
		cfo_khz_all += cfo->cfo_tail[i];
		cfo_cnt_all += cfo->cfo_cnt[i];
		cfo_all_avg = phy_div(cfo_khz_all, cfo_cnt_all);
		cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
		/* scaled average fed to the hardware DCFO path */
		cfo->dcfo_avg = phy_div(cfo_khz_all << chip->dcfo_comp_sft,
					cfo_cnt_all);
	}
	/* NOTE(review): this runs after the loop, so i is always
	 * CFO_TRACK_MAX_USER here rather than the tracked macid — confirm
	 * whether the log was meant to print the active entry instead.
	 */
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "CFO track for macid = %d\n", i);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "Total cfo=%dK, pkt_cnt=%d, avg_cfo=%dK\n",
		    cfo_khz_all, cfo_cnt_all, cfo_all_avg);
	return cfo_all_avg;
}
4756 
/* Compute the CFO compensation target when more than one STA is
 * associated.  One of three averaging policies is applied, selected by
 * cfo->rtw89_multi_cfo_mode:
 *  - packet based:  average every CFO sample over all packets
 *  - entry based:   average the per-STA averages
 *  - TP based:      throughput-weighted average, clamped into the
 *                   intersection of the per-STA tolerance windows
 * Returns the target CFO in the same unit as cfo->cfo_tail samples.
 */
static s32 rtw89_phy_multi_sta_cfo_calc(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	s32 target_cfo = 0;
	s32 cfo_khz_all = 0;
	s32 cfo_khz_all_tp_wgt = 0;
	s32 cfo_avg = 0;
	s32 max_cfo_lb = BIT(31); /* start at most negative s32 */
	s32 min_cfo_ub = GENMASK(30, 0); /* start at most positive s32 */
	u16 cfo_cnt_all = 0;
	u8 active_entry_cnt = 0;
	u8 sta_cnt = 0;
	u32 tp_all = 0;
	u8 i;
	u8 cfo_tol = 0;

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Multi entry cfo_trk\n");
	if (cfo->rtw89_multi_cfo_mode == RTW89_PKT_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo_khz_all += cfo->cfo_tail[i];
			cfo_cnt_all += cfo->cfo_cnt[i];
			cfo_avg = phy_div(cfo_khz_all, (s32)cfo_cnt_all);
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Msta cfo=%d, pkt_cnt=%d, avg_cfo=%d\n",
				    cfo_khz_all, cfo_cnt_all, cfo_avg);
			target_cfo = cfo_avg;
		}
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_ENTRY_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Entry based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
						  (s32)cfo->cfo_cnt[i]);
			cfo_khz_all += cfo->cfo_avg[i];
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Macid=%d, cfo_avg=%d\n", i,
				    cfo->cfo_avg[i]);
		}
		sta_cnt = rtwdev->total_sta_assoc;
		cfo_avg = phy_div(cfo_khz_all, (s32)sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo_acc=%d, ent_cnt=%d, avg_cfo=%d\n",
			    cfo_khz_all, sta_cnt, cfo_avg);
		target_cfo = cfo_avg;
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_TP_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "TP based avg mode\n");
		cfo_tol = cfo->sta_cfo_tolerance;
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			sta_cnt++;
			if (cfo->cfo_cnt[i] != 0) {
				cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
							  (s32)cfo->cfo_cnt[i]);
				active_entry_cnt++;
			} else {
				/* no fresh samples: reuse last period's average */
				cfo->cfo_avg[i] = cfo->pre_cfo_avg[i];
			}
			max_cfo_lb = max(cfo->cfo_avg[i] - cfo_tol, max_cfo_lb);
			min_cfo_ub = min(cfo->cfo_avg[i] + cfo_tol, min_cfo_ub);
			cfo_khz_all += cfo->cfo_avg[i];
			/* need tp for each entry */
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "[%d] cfo_avg=%d, tp=tbd\n",
				    i, cfo->cfo_avg[i]);
			if (sta_cnt >= rtwdev->total_sta_assoc)
				break;
		}
		tp_all = stats->rx_throughput; /* need tp for each entry */
		/* NOTE(review): cfo_khz_all_tp_wgt is never accumulated, so
		 * this weighted average is always 0 until per-entry
		 * throughput is wired up (see "need tp for each entry").
		 */
		cfo_avg =  phy_div(cfo_khz_all_tp_wgt, (s32)tp_all);

		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Assoc sta cnt=%d\n",
			    sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Active sta cnt=%d\n",
			    active_entry_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo with tp_wgt=%d, avg_cfo=%d\n",
			    cfo_khz_all_tp_wgt, cfo_avg);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "cfo_lb=%d,cfo_ub=%d\n",
			    max_cfo_lb, min_cfo_ub);
		if (max_cfo_lb <= min_cfo_ub) {
			/* tolerance windows overlap: clamp the average into them */
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "cfo win_size=%d\n",
				    min_cfo_ub - max_cfo_lb);
			target_cfo = clamp(cfo_avg, max_cfo_lb, min_cfo_ub);
		} else {
			/* no common window: fall back to a plain per-STA average */
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "No intersection of cfo tolerance windows\n");
			target_cfo = phy_div(cfo_khz_all, (s32)sta_cnt);
		}
		for (i = 0; i < CFO_TRACK_MAX_USER; i++)
			cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Target cfo=%d\n", target_cfo);
	return target_cfo;
}
4856 
4857 static void rtw89_phy_cfo_statistics_reset(struct rtw89_dev *rtwdev)
4858 {
4859 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
4860 
4861 	memset(&cfo->cfo_tail, 0, sizeof(cfo->cfo_tail));
4862 	memset(&cfo->cfo_cnt, 0, sizeof(cfo->cfo_cnt));
4863 	cfo->packet_count = 0;
4864 	cfo->packet_count_pre = 0;
4865 	cfo->cfo_avg_pre = 0;
4866 }
4867 
/* Run one CFO tracking iteration: derive a fresh CFO estimate from the
 * collected samples, adjust the crystal cap accordingly, then program
 * the digital CFO compensation.  Bails out early (resetting HW state
 * where appropriate) when no STA is associated, no samples arrived, or
 * tracking is locked out due to divergence.
 */
static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s32 new_cfo = 0;
	bool x_cap_update = false;
	u8 pre_x_cap = cfo->crystal_cap;
	u8 dcfo_comp_sft = rtwdev->chip->dcfo_comp_sft;

	cfo->dcfo_avg = 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n",
		    rtwdev->total_sta_assoc);
	if (rtwdev->total_sta_assoc == 0 || rtw89_is_mlo_1_1(rtwdev)) {
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}
	if (cfo->packet_count == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt = 0\n");
		return;
	}
	if (cfo->packet_count == cfo->packet_count_pre) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt doesn't change\n");
		return;
	}
	/* single-STA and multi-STA cases use different averaging schemes */
	if (rtwdev->total_sta_assoc == 1)
		new_cfo = rtw89_phy_average_cfo_calc(rtwdev);
	else
		new_cfo = rtw89_phy_multi_sta_cfo_calc(rtwdev);
	/* While diverged, hold off tracking for CFO_PERIOD_CNT rounds and
	 * keep the hardware state reset.
	 */
	if (cfo->divergence_lock_en) {
		cfo->lock_cnt++;
		if (cfo->lock_cnt > CFO_PERIOD_CNT) {
			cfo->divergence_lock_en = false;
			cfo->lock_cnt = 0;
		} else {
			rtw89_phy_cfo_reset(rtwdev);
		}
		return;
	}
	/* Crystal cap hit its bounds: declare divergence and restart. */
	if (cfo->crystal_cap >= cfo->x_cap_ub ||
	    cfo->crystal_cap <= cfo->x_cap_lb) {
		cfo->divergence_lock_en = true;
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}

	rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo);
	cfo->cfo_avg_pre = new_cfo;
	cfo->dcfo_avg_pre = cfo->dcfo_avg;
	x_cap_update =  cfo->crystal_cap != pre_x_cap;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n",
		    cfo->def_x_cap, pre_x_cap, cfo->crystal_cap,
		    cfo->x_cap_ofst);
	/* Crystal cap moved this round: bias dcfo_avg toward zero by one
	 * fine-tune step, presumably so the SW compensation does not
	 * double-count the cap adjustment — TODO confirm.
	 */
	if (x_cap_update) {
		if (cfo->dcfo_avg > 0)
			cfo->dcfo_avg -= CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
		else
			cfo->dcfo_avg += CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
	}
	rtw89_dcfo_comp(rtwdev, cfo->dcfo_avg);
	rtw89_phy_cfo_statistics_reset(rtwdev);
}
4929 
/* Periodic CFO tracking worker.  Leaves power-save, runs one tracking
 * iteration and re-arms itself every cfo_timer_ms while timer-triggered
 * tracking is enabled.  Runs with the wiphy mutex held.
 */
void rtw89_phy_cfo_track_work(struct wiphy *wiphy, struct wiphy_work *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						cfo_track_work.work);
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	lockdep_assert_wiphy(wiphy);

	/* stop rescheduling once timer-based tracking has been disabled */
	if (!cfo->cfo_trig_by_timer_en)
		return;
	rtw89_leave_ps_mode(rtwdev);
	rtw89_phy_cfo_dm(rtwdev);
	wiphy_delayed_work_queue(wiphy, &rtwdev->cfo_track_work,
				 msecs_to_jiffies(cfo->cfo_timer_ms));
}
4945 
/* Kick off the periodic CFO tracking work after cfo_timer_ms. */
static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->cfo_track_work,
				 msecs_to_jiffies(cfo->cfo_timer_ms));
}
4953 
/* Watchdog entry for CFO tracking.  Drives the NORMAL/ENHANCE/HOLD
 * state machine based on TX throughput and UL-OFDMA activity:
 * high throughput switches to timer-driven tracking (ENHANCE), sustained
 * UL-OFDMA traffic parks tracking in HOLD, and low throughput falls back
 * to watchdog-driven tracking (NORMAL).
 */
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	bool is_ul_ofdma = false, ofdma_acc_en = false;

	/* frequent trigger frames indicate uplink OFDMA traffic */
	if (stats->rx_tf_periodic > CFO_TF_CNT_TH)
		is_ul_ofdma = true;
	if (cfo->cfo_ul_ofdma_acc_mode == RTW89_CFO_UL_OFDMA_ACC_ENABLE &&
	    is_ul_ofdma)
		ofdma_acc_en = true;

	switch (cfo->phy_cfo_status) {
	case RTW89_PHY_DCFO_STATE_NORMAL:
		if (stats->tx_throughput >= CFO_TP_UPPER) {
			/* busy link: track on a dedicated timer instead */
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_ENHANCE;
			cfo->cfo_trig_by_timer_en = true;
			cfo->cfo_timer_ms = CFO_COMP_PERIOD;
			rtw89_phy_cfo_start_work(rtwdev);
		}
		break;
	case RTW89_PHY_DCFO_STATE_ENHANCE:
		if (stats->tx_throughput <= CFO_TP_LOWER)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		else if (ofdma_acc_en &&
			 cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_HOLD;
		else
			cfo->phy_cfo_trk_cnt++;

		if (cfo->phy_cfo_status == RTW89_PHY_DCFO_STATE_NORMAL) {
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		}
		break;
	case RTW89_PHY_DCFO_STATE_HOLD:
		if (stats->tx_throughput <= CFO_TP_LOWER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		} else {
			cfo->phy_cfo_trk_cnt++;
		}
		break;
	default:
		cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		cfo->phy_cfo_trk_cnt = 0;
		break;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "[CFO]WatchDog tp=%d,state=%d,timer_en=%d,trk_cnt=%d,thermal=%ld\n",
		    stats->tx_throughput, cfo->phy_cfo_status,
		    cfo->cfo_trig_by_timer_en, cfo->phy_cfo_trk_cnt,
		    ewma_thermal_read(&rtwdev->phystat.avg_thermal[0]));
	/* when the timer drives tracking, the worker calls cfo_dm instead */
	if (cfo->cfo_trig_by_timer_en)
		return;
	rtw89_phy_cfo_dm(rtwdev);
}
5012 
5013 void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
5014 			 struct rtw89_rx_phy_ppdu *phy_ppdu)
5015 {
5016 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
5017 	u8 macid = phy_ppdu->mac_id;
5018 
5019 	if (macid >= CFO_TRACK_MAX_USER) {
5020 		rtw89_warn(rtwdev, "mac_id %d is out of range\n", macid);
5021 		return;
5022 	}
5023 
5024 	cfo->cfo_tail[macid] += cfo_val;
5025 	cfo->cfo_cnt[macid]++;
5026 	cfo->packet_count++;
5027 }
5028 
/* Snapshot UL TB (uplink trigger-based) waveform defaults for a vif link
 * at association time: record the default triangular TX shaping index
 * and decide whether dynamic band-edge control is allowed on this
 * chip/channel combination.
 */
void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!chip->ul_tb_waveform_ctrl)
		return;

	/* remember the current TX-shape index so it can be restored later */
	rtwvif_link->def_tri_idx =
		rtw89_phy_read32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG);

	/* 8852B beyond CBV: dynamic band-edge control is disabled */
	if (chip->chip_id == RTL8852B && rtwdev->hal.cv > CHIP_CBV)
		rtwvif_link->dyn_tb_bedge_en = false;
	else if (chan->band_type >= RTW89_BAND_5G &&
		 chan->band_width >= RTW89_CHANNEL_WIDTH_40)
		rtwvif_link->dyn_tb_bedge_en = true;
	else
		rtwvif_link->dyn_tb_bedge_en = false;

	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
		    "[ULTB] def_if_bandedge=%d, def_tri_idx=%d\n",
		    ul_tb_info->def_if_bandedge, rtwvif_link->def_tri_idx);
	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
		    "[ULTB] dyn_tb_begde_en=%d, dyn_tb_tri_en=%d\n",
		    rtwvif_link->dyn_tb_bedge_en, ul_tb_info->dyn_tb_tri_en);
}
5057 
/* Aggregated per-watchdog UL TB state collected over all vif links. */
struct rtw89_phy_ul_tb_check_data {
	bool valid; /* at least one STA link contributed data */
	bool high_tf_client; /* trigger-frame rate above the L2H threshold */
	bool low_tf_client; /* trigger-frame rate below the H2L threshold */
	bool dyn_tb_bedge_en; /* band-edge control allowed for this link */
	u8 def_tri_idx; /* default triangular TX-shape index to restore */
};
5065 
/* Register values programmed by rtw89_phy_ofdma_power_diff() for one
 * pwr_diff_en state (index 0 = disabled, 1 = enabled).
 */
struct rtw89_phy_power_diff {
	u32 q_00; /* Q-matrix element (0,0), real part */
	u32 q_11; /* Q-matrix element (1,1), real part */
	u32 q_matrix_en; /* customized Q-matrix enable */
	u32 ultb_1t_norm_160; /* UL TB 1T power normalization, BW160 */
	u32 ultb_2t_norm_160; /* UL TB 2T power normalization, BW160 */
	u32 com1_norm_1sts; /* PATH_COM1 normalization, 1 spatial stream */
	u32 com2_resp_1sts_path; /* PATH_COM2 response path, 1 spatial stream */
};
5075 
/* Program the OFDMA power-difference register set (Q-matrix and UL TB
 * power normalization) whenever the link's pwr_diff_en state changed
 * since the last call.
 */
static void rtw89_phy_ofdma_power_diff(struct rtw89_dev *rtwdev,
				       struct rtw89_vif_link *rtwvif_link)
{
	/* [0] = power diff disabled, [1] = power diff enabled */
	static const struct rtw89_phy_power_diff table[2] = {
		{0x0, 0x0, 0x0, 0x0, 0xf4, 0x3, 0x3},
		{0xb50, 0xb50, 0x1, 0xc, 0x0, 0x1, 0x1},
	};
	const struct rtw89_phy_power_diff *param;
	u32 reg;

	if (!rtwdev->chip->ul_tb_pwr_diff)
		return;

	/* NOTE(review): when the enable state is unchanged, pwr_diff_en is
	 * forced back to false and the registers are left untouched —
	 * confirm this latching behaviour is intentional.
	 */
	if (rtwvif_link->pwr_diff_en == rtwvif_link->pre_pwr_diff_en) {
		rtwvif_link->pwr_diff_en = false;
		return;
	}

	rtwvif_link->pre_pwr_diff_en = rtwvif_link->pwr_diff_en;
	param = &table[rtwvif_link->pwr_diff_en];

	rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_00, B_Q_MATRIX_00_REAL,
			       param->q_00);
	rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_11, B_Q_MATRIX_11_REAL,
			       param->q_11);
	rtw89_phy_write32_mask(rtwdev, R_CUSTOMIZE_Q_MATRIX,
			       B_CUSTOMIZE_Q_MATRIX_EN, param->q_matrix_en);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_NORM_BW160,
			   param->ultb_1t_norm_160);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_NORM_BW160,
			   param->ultb_2t_norm_160);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM1, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM1_NORM_1STS,
			   param->com1_norm_1sts);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM2, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM2_RESP_1STS_PATH,
			   param->com2_resp_1sts_path);
}
5120 
/* Collect UL TB tracking input from one associated STA link: classify
 * the trigger-frame rate, export the link's waveform defaults, and
 * apply the OFDMA power-difference update.
 */
static
void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev,
				struct rtw89_vif_link *rtwvif_link,
				struct rtw89_phy_ul_tb_check_data *ul_tb_data)
{
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);

	/* only associated station links contribute */
	if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
		return;

	if (!vif->cfg.assoc)
		return;

	if (rtwdev->chip->ul_tb_waveform_ctrl) {
		/* hysteresis: separate thresholds for high vs. low TF rate */
		if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH)
			ul_tb_data->high_tf_client = true;
		else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH)
			ul_tb_data->low_tf_client = true;

		ul_tb_data->valid = true;
		ul_tb_data->def_tri_idx = rtwvif_link->def_tri_idx;
		ul_tb_data->dyn_tb_bedge_en = rtwvif_link->dyn_tb_bedge_en;
	}

	rtw89_phy_ofdma_power_diff(rtwdev, rtwvif_link);
}
5148 
/* Adjust band-edge filtering and triangular TX shaping according to the
 * trigger-frame activity gathered this watchdog period: disable both for
 * heavy UL-OFDMA clients, restore the defaults when traffic subsides.
 */
static void rtw89_phy_ul_tb_waveform_ctrl(struct rtw89_dev *rtwdev,
					  struct rtw89_phy_ul_tb_check_data *ul_tb_data)
{
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!rtwdev->chip->ul_tb_waveform_ctrl)
		return;

	if (ul_tb_data->dyn_tb_bedge_en) {
		if (ul_tb_data->high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off if_bandedge\n");
		} else if (ul_tb_data->low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN,
					       ul_tb_info->def_if_bandedge);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default if_bandedge = %d\n",
				    ul_tb_info->def_if_bandedge);
		}
	}

	if (ul_tb_info->dyn_tb_tri_en) {
		if (ul_tb_data->high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off Tx triangle\n");
		} else if (ul_tb_data->low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG,
					       ul_tb_data->def_tri_idx);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default tx_shap_idx = %d\n",
				    ul_tb_data->def_tri_idx);
		}
	}
}
5187 
/* Watchdog entry for UL TB control.  Only meaningful with exactly one
 * associated STA; gathers per-link data and then applies the waveform
 * adjustments.
 */
void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_phy_ul_tb_check_data ul_tb_data = {};
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_vif *rtwvif;
	unsigned int link_id;

	if (!chip->ul_tb_waveform_ctrl && !chip->ul_tb_pwr_diff)
		return;

	if (rtwdev->total_sta_assoc != 1)
		return;

	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
			rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif_link, &ul_tb_data);

	if (!ul_tb_data.valid)
		return;

	rtw89_phy_ul_tb_waveform_ctrl(rtwdev, &ul_tb_data);
}
5211 
/* Record the hardware's default band-edge setting and enable dynamic
 * triangular TX shaping, on chips supporting UL TB waveform control.
 */
static void rtw89_phy_ul_tb_info_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!chip->ul_tb_waveform_ctrl)
		return;

	ul_tb_info->dyn_tb_tri_en = true;
	ul_tb_info->def_if_bandedge =
		rtw89_phy_read32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN);
}
5224 
5225 static
5226 void rtw89_phy_antdiv_sts_instance_reset(struct rtw89_antdiv_stats *antdiv_sts)
5227 {
5228 	ewma_rssi_init(&antdiv_sts->cck_rssi_avg);
5229 	ewma_rssi_init(&antdiv_sts->ofdm_rssi_avg);
5230 	ewma_rssi_init(&antdiv_sts->non_legacy_rssi_avg);
5231 	antdiv_sts->pkt_cnt_cck = 0;
5232 	antdiv_sts->pkt_cnt_ofdm = 0;
5233 	antdiv_sts->pkt_cnt_non_legacy = 0;
5234 	antdiv_sts->evm = 0;
5235 }
5236 
/* Classify one received PPDU (CCK / legacy OFDM / non-HT-and-above) and
 * fold its RSSI (and minimum EVM for OFDM-class rates) into the given
 * antenna-diversity statistics instance.
 */
static void rtw89_phy_antdiv_sts_instance_add(struct rtw89_dev *rtwdev,
					      struct rtw89_rx_phy_ppdu *phy_ppdu,
					      struct rtw89_antdiv_stats *stats)
{
	if (rtw89_get_data_rate_mode(rtwdev, phy_ppdu->rate) == DATA_RATE_MODE_NON_HT) {
		/* within non-HT, rates below OFDM6 are CCK */
		if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) {
			ewma_rssi_add(&stats->cck_rssi_avg, phy_ppdu->rssi_avg);
			stats->pkt_cnt_cck++;
		} else {
			ewma_rssi_add(&stats->ofdm_rssi_avg, phy_ppdu->rssi_avg);
			stats->pkt_cnt_ofdm++;
			stats->evm += phy_ppdu->ofdm.evm_min;
		}
	} else {
		ewma_rssi_add(&stats->non_legacy_rssi_avg, phy_ppdu->rssi_avg);
		stats->pkt_cnt_non_legacy++;
		stats->evm += phy_ppdu->ofdm.evm_min;
	}
}
5256 
5257 static u8 rtw89_phy_antdiv_sts_instance_get_rssi(struct rtw89_antdiv_stats *stats)
5258 {
5259 	if (stats->pkt_cnt_non_legacy >= stats->pkt_cnt_cck &&
5260 	    stats->pkt_cnt_non_legacy >= stats->pkt_cnt_ofdm)
5261 		return ewma_rssi_read(&stats->non_legacy_rssi_avg);
5262 	else if (stats->pkt_cnt_ofdm >= stats->pkt_cnt_cck &&
5263 		 stats->pkt_cnt_ofdm >= stats->pkt_cnt_non_legacy)
5264 		return ewma_rssi_read(&stats->ofdm_rssi_avg);
5265 	else
5266 		return ewma_rssi_read(&stats->cck_rssi_avg);
5267 }
5268 
/* Average the accumulated EVM over all OFDM-class packets (CCK frames
 * contribute no EVM); phy_div guards against a zero packet count.
 */
static u8 rtw89_phy_antdiv_sts_instance_get_evm(struct rtw89_antdiv_stats *stats)
{
	return phy_div(stats->evm, stats->pkt_cnt_non_legacy + stats->pkt_cnt_ofdm);
}
5273 
/* RX-path hook for antenna diversity: account each PPDU against the
 * candidate ("target") statistics, and — while a measurement is running —
 * against the statistics of whichever physical antenna is selected.
 */
void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev,
			    struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;

	/* nothing to collect without diversity, or with a fixed antenna */
	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->target_stats);

	if (!antdiv->get_stats)
		return;

	if (hal->antenna_rx == RF_A)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->main_stats);
	else if (hal->antenna_rx == RF_B)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->aux_stats);
}
5293 
/* One-time BB register setup for antenna diversity on PHY 0: hand
 * antenna selection to software control and set the antenna mapping.
 */
static void rtw89_phy_antdiv_reg_init(struct rtw89_dev *rtwdev)
{
	/* disable training mode and TX antenna auto-selection */
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_ANT_TRAIN_EN,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_TX_ANT_SEL,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_TRSW_TX_EXTEND,
			      0x0, RTW89_PHY_0);
	/* keep HW antenna switching active even when BT holds the grant */
	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_HW_ANTSW_DIS_BY_GNT_BT,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_BT_FORCE_ANTIDX_EN,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_RFSW_CTRL_ANT0_BASE, B_RFSW_CTRL_ANT_MAPPING,
			      0x0100, RTW89_PHY_0);

	/* route BTG TRX through antenna select, driven by SW not HW */
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_BTG_TRX,
			      0x1, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_HW_CTRL,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_2G,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_5G,
			      0x0, RTW89_PHY_0);
}
5321 
/* Reset all three antenna-diversity statistics instances (target, main
 * antenna, aux antenna).
 */
static void rtw89_phy_antdiv_sts_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->main_stats);
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->aux_stats);
}
5330 
/* Initialize antenna-diversity software state and program the related
 * BB registers; no-op when the chip has no diversity support.
 */
static void rtw89_phy_antdiv_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;

	if (!hal->ant_diversity)
		return;

	antdiv->get_stats = false;
	antdiv->rssi_pre = 0;
	rtw89_phy_antdiv_sts_reset(rtwdev);
	rtw89_phy_antdiv_reg_init(rtwdev);
}
5344 
/* Step the thermal-protection level up or down based on the latest
 * maximum thermal reading and push the matching TX duty cycle to FW.
 * The 2-degree gap between raise and lower thresholds gives hysteresis.
 */
static void rtw89_phy_thermal_protect(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 th_max = phystat->last_thermal_max;
	u8 lv = hal->thermal_prot_lv;

	/* feature disabled by chip config or by the debug DM bitmap */
	if (!hal->thermal_prot_th ||
	    (hal->disabled_dm_bitmap & BIT(RTW89_DM_THERMAL_PROTECT)))
		return;

	if (th_max > hal->thermal_prot_th && lv < RTW89_THERMAL_PROT_LV_MAX)
		lv++;
	else if (th_max < hal->thermal_prot_th - 2 && lv > 0)
		lv--;
	else
		return; /* level unchanged: nothing to notify FW about */

	hal->thermal_prot_lv = lv;

	rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "thermal protection lv=%d\n", lv);

	rtw89_fw_h2c_tx_duty(rtwdev, hal->thermal_prot_lv);
}
5369 
/* Sample the thermal sensor of every RF path, feed non-zero readings
 * into the per-path moving averages, and cache the current maximum for
 * thermal protection.
 */
static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	u8 th, th_max = 0;
	int i;

	for (i = 0; i < rtwdev->chip->rf_path_num; i++) {
		th = rtw89_chip_get_thermal(rtwdev, i);
		/* a zero reading is treated as invalid and not averaged */
		if (th)
			ewma_thermal_add(&phystat->avg_thermal[i], th);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "path(%d) thermal cur=%u avg=%ld", i, th,
			    ewma_thermal_read(&phystat->avg_thermal[i]));

		th_max = max(th_max, th);
	}

	phystat->last_thermal_max = th_max;
}
5390 
/* Context passed through the station iterator while updating RSSI. */
struct rtw89_phy_iter_rssi_data {
	struct rtw89_dev *rtwdev;
	bool rssi_changed; /* set when any link's RSSI moved by > 3 (scaled) */
};
5395 
/* Update per-BB minimum-RSSI bookkeeping from one STA link and detect a
 * significant RSSI change (more than 3 units at RSSI_FACTOR scaling)
 * versus the previously recorded value.
 */
static
void __rtw89_phy_stat_rssi_update_iter(struct rtw89_sta_link *rtwsta_link,
				       struct rtw89_phy_iter_rssi_data *rssi_data)
{
	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
	struct rtw89_dev *rtwdev = rssi_data->rtwdev;
	struct rtw89_phy_ch_info *ch_info;
	struct rtw89_bb_ctx *bb;
	unsigned long rssi_curr;

	rssi_curr = ewma_rssi_read(&rtwsta_link->avg_rssi);
	bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx);
	ch_info = &bb->ch_info;

	/* track the weakest link per BB and remember which macid it is */
	if (rssi_curr < ch_info->rssi_min) {
		ch_info->rssi_min = rssi_curr;
		ch_info->rssi_min_macid = rtwsta_link->mac_id;
	}

	if (rtwsta_link->prev_rssi == 0) {
		/* first sample for this link: just record it */
		rtwsta_link->prev_rssi = rssi_curr;
	} else if (abs((int)rtwsta_link->prev_rssi - (int)rssi_curr) >
		   (3 << RSSI_FACTOR)) {
		rtwsta_link->prev_rssi = rssi_curr;
		rssi_data->rssi_changed = true;
	}
}
5423 
5424 static void rtw89_phy_stat_rssi_update_iter(void *data,
5425 					    struct ieee80211_sta *sta)
5426 {
5427 	struct rtw89_phy_iter_rssi_data *rssi_data =
5428 					(struct rtw89_phy_iter_rssi_data *)data;
5429 	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
5430 	struct rtw89_sta_link *rtwsta_link;
5431 	unsigned int link_id;
5432 
5433 	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
5434 		__rtw89_phy_stat_rssi_update_iter(rtwsta_link, rssi_data);
5435 }
5436 
/* Recompute the per-BB minimum RSSI over all stations and notify the
 * BT coexistence core when any station's RSSI changed significantly.
 */
static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_iter_rssi_data rssi_data = {};
	struct rtw89_bb_ctx *bb;

	rssi_data.rtwdev = rtwdev;
	/* start from "no station seen" so the iterator can take the min */
	rtw89_for_each_active_bb(rtwdev, bb)
		bb->ch_info.rssi_min = U8_MAX;

	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_stat_rssi_update_iter,
					  &rssi_data);
	if (rssi_data.rssi_changed)
		rtw89_btc_ntfy_wl_sta(rtwdev);
}
5452 
/* Initialize PHY statistics: seed the per-path thermal averages with a
 * first reading, clear packet counters and beacon RSSI, and reset the
 * thermal-protection level.
 */
static void rtw89_phy_stat_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	int i;

	for (i = 0; i < rtwdev->chip->rf_path_num; i++)
		ewma_thermal_init(&phystat->avg_thermal[i]);

	rtw89_phy_stat_thermal_update(rtwdev);

	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
	memset(&phystat->last_pkt_stat, 0, sizeof(phystat->last_pkt_stat));

	ewma_rssi_init(&phystat->bcn_rssi);

	rtwdev->hal.thermal_prot_lv = 0;
}
5470 
/* Watchdog entry for PHY statistics: refresh thermal readings, apply
 * thermal protection, update RSSI tracking, then rotate the per-period
 * packet counters (current becomes last, current is cleared).
 */
void rtw89_phy_stat_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;

	rtw89_phy_stat_thermal_update(rtwdev);
	rtw89_phy_thermal_protect(rtwdev);
	rtw89_phy_stat_rssi_update(rtwdev);

	phystat->last_pkt_stat = phystat->cur_pkt_stat;
	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
}
5482 
5483 static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev,
5484 				   struct rtw89_bb_ctx *bb, u32 time_us)
5485 {
5486 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
5487 
5488 	return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
5489 }
5490 
5491 static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev,
5492 				   struct rtw89_bb_ctx *bb, u16 idx)
5493 {
5494 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
5495 
5496 	return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
5497 }
5498 
/* Reset CCX software state and program the common CCX control fields:
 * enable the engine, select trigger option and kick one measurement,
 * and pick the BW20 primary-channel EDCCA option.
 */
static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev,
					   struct rtw89_bb_ctx *bb)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	env->ccx_manual_ctrl = false;
	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ccx_period = 0;
	env->ccx_unit_idx = RTW89_CCX_32_US;

	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->en_mask, 1, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask,
			      RTW89_CCX_EDCCA_BW20_0, bb->phy_idx);
}
5520 
5521 static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev,
5522 				    struct rtw89_bb_ctx *bb,
5523 				    u16 report, u16 score)
5524 {
5525 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
5526 	u32 numer = 0;
5527 	u16 ret = 0;
5528 
5529 	numer = report * score + (env->ccx_period >> 1);
5530 	if (env->ccx_period)
5531 		ret = numer / env->ccx_period;
5532 
5533 	return ret >= score ? score - 1 : ret;
5534 }
5535 
5536 static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev,
5537 					    u16 time_ms, u32 *period,
5538 					    u32 *unit_idx)
5539 {
5540 	u32 idx;
5541 	u8 quotient;
5542 
5543 	if (time_ms >= CCX_MAX_PERIOD)
5544 		time_ms = CCX_MAX_PERIOD;
5545 
5546 	quotient = CCX_MAX_PERIOD_UNIT * time_ms / CCX_MAX_PERIOD;
5547 
5548 	if (quotient < 4)
5549 		idx = RTW89_CCX_4_US;
5550 	else if (quotient < 8)
5551 		idx = RTW89_CCX_8_US;
5552 	else if (quotient < 16)
5553 		idx = RTW89_CCX_16_US;
5554 	else
5555 		idx = RTW89_CCX_32_US;
5556 
5557 	*unit_idx = idx;
5558 	*period = (time_ms * MS_TO_4US_RATIO) >> idx;
5559 
5560 	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
5561 		    "[Trigger Time] period:%d, unit_idx:%d\n",
5562 		    *period, *unit_idx);
5563 }
5564 
/* Release CCX arbitration back to the lowest priority level so any
 * requester may claim the next measurement; background IFS-CLM resumes.
 */
static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev,
					 struct rtw89_bb_ctx *bb)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "lv:(%d)->(0)\n", env->ccx_rac_lv);

	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
}
5577 
/* Recompute the four IFS-CLM sampling thresholds when the requesting
 * application differs from the currently configured one.  Returns true
 * when the thresholds changed and must be written to the registers.
 */
static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev,
					      struct rtw89_bb_ctx *bb,
					      struct rtw89_ccx_para_info *para)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	bool is_update = env->ifs_clm_app != para->ifs_clm_app;
	u8 i = 0;
	u16 *ifs_th_l = env->ifs_clm_th_l;
	u16 *ifs_th_h = env->ifs_clm_th_h;
	u32 ifs_th0_us = 0, ifs_th_times = 0;
	u32 ifs_th_h_us[RTW89_IFS_CLM_NUM] = {0};

	if (!is_update)
		goto ifs_update_finished;

	/* choose base threshold and multiplier for the requesting app */
	switch (para->ifs_clm_app) {
	case RTW89_IFS_CLM_INIT:
	case RTW89_IFS_CLM_BACKGROUND:
	case RTW89_IFS_CLM_ACS:
	case RTW89_IFS_CLM_DBG:
	case RTW89_IFS_CLM_DIG:
	case RTW89_IFS_CLM_TDMA_DIG:
		ifs_th0_us = IFS_CLM_TH0_UPPER;
		ifs_th_times = IFS_CLM_TH_MUL;
		break;
	case RTW89_IFS_CLM_DBG_MANUAL:
		/* manual debug mode supplies its own base and multiplier */
		ifs_th0_us = para->ifs_clm_manual_th0;
		ifs_th_times = para->ifs_clm_manual_th_times;
		break;
	default:
		break;
	}

	/* Set sampling threshold for 4 different regions, unit in idx_cnt.
	 * low[i] = high[i-1] + 1
	 * high[i] = high[i-1] * ifs_th_times
	 */
	ifs_th_l[IFS_CLM_TH_START_IDX] = 0;
	ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us;
	ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev, bb,
								 ifs_th0_us);
	for (i = 1; i < RTW89_IFS_CLM_NUM; i++) {
		ifs_th_l[i] = ifs_th_h[i - 1] + 1;
		ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times;
		ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, bb, ifs_th_h_us[i]);
	}

ifs_update_finished:
	if (!is_update)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "No need to update IFS_TH\n");

	return is_update;
}
5632 
/* Write the four IFS-CLM low/high thresholds computed by
 * rtw89_phy_ifs_clm_th_update_check() into the T1..T4 registers.
 */
static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev,
					 struct rtw89_bb_ctx *bb)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask,
			      env->ifs_clm_th_l[0], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask,
			      env->ifs_clm_th_l[1], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask,
			      env->ifs_clm_th_l[2], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask,
			      env->ifs_clm_th_l[3], bb->phy_idx);

	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask,
			      env->ifs_clm_th_h[0], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask,
			      env->ifs_clm_th_h[1], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask,
			      env->ifs_clm_th_h[2], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask,
			      env->ifs_clm_th_h[3], bb->phy_idx);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS_T%d_th{low, high} : {%d, %d}\n",
			    i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]);
}
5664 
/* Per-BB NHM (noise histogram measurement) init: clear software state
 * and enable the NHM engine with its power measurement method.
 */
static void __rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev,
					 struct rtw89_bb_ctx *bb)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	env->nhm_include_cca = false;
	env->nhm_mntr_time = 0;
	env->nhm_sum = 0;

	rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_config, ccx->nhm_en_mask, bb->phy_idx);
	rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_method, ccx->nhm_pwr_method_msk,
				  bb->phy_idx);
}
5680 
/* Initialize NHM on every active BB; no-op on chips without noise
 * measurement support.
 */
void rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_bb_ctx *bb;

	if (!chip->support_noise)
		return;

	rtw89_for_each_active_bb(rtwdev, bb)
		__rtw89_phy_nhm_setting_init(rtwdev, bb);
}
5692 
/* Initialize IFS-CLM (IFS counter / channel load measurement) on one
 * baseband: reset software bookkeeping, program threshold registers when
 * the check reports an update is needed, and enable collection for all
 * four IFS time windows (T1..T4).
 */
static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev,
					   struct rtw89_bb_ctx *bb)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	struct rtw89_ccx_para_info para = {};

	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
	env->ifs_clm_mntr_time = 0;

	/* query threshold programming for the INIT application profile */
	para.ifs_clm_app = RTW89_IFS_CLM_INIT;
	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, bb, &para))
		rtw89_phy_ifs_clm_set_th_reg(rtwdev, bb);

	rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true,
			      bb->phy_idx);
}
5719 
5720 static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
5721 				     struct rtw89_bb_ctx *bb,
5722 				     enum rtw89_env_racing_lv level)
5723 {
5724 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
5725 	int ret = 0;
5726 
5727 	if (level >= RTW89_RAC_MAX_NUM) {
5728 		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
5729 			    "[WARNING] Wrong LV=%d\n", level);
5730 		return -EINVAL;
5731 	}
5732 
5733 	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
5734 		    "ccx_ongoing=%d, level:(%d)->(%d)\n", env->ccx_ongoing,
5735 		    env->ccx_rac_lv, level);
5736 
5737 	if (env->ccx_ongoing) {
5738 		if (level <= env->ccx_rac_lv)
5739 			ret = -EINVAL;
5740 		else
5741 			env->ccx_ongoing = false;
5742 	}
5743 
5744 	if (ret == 0)
5745 		env->ccx_rac_lv = level;
5746 
5747 	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "ccx racing success=%d\n",
5748 		    !ret);
5749 
5750 	return ret;
5751 }
5752 
/* (Re)start CCX measurements on this baseband. Each control bit is written
 * 0 then 1 so the hardware sees a rising edge; when RTW89_PHY_ENV_MON_NHM
 * is selected in @sel, the NHM enable bit is toggled the same way.
 */
static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev,
				  struct rtw89_bb_ctx *bb, u8 sel)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	/* de-assert phase */
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0,
			      bb->phy_idx);
	if (sel & RTW89_PHY_ENV_MON_NHM)
		rtw89_phy_write32_idx_clr(rtwdev, ccx->nhm_config,
					  ccx->nhm_en_mask, bb->phy_idx);

	/* assert phase: this edge starts the new measurement round */
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1,
			      bb->phy_idx);
	if (sel & RTW89_PHY_ENV_MON_NHM)
		rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_config,
					  ccx->nhm_en_mask, bb->phy_idx);

	env->ccx_ongoing = true;
}
5778 
/* Derive the utility values from the raw IFS-CLM counters captured by
 * rtw89_phy_ifs_clm_get_result(): percent/permil ratios for Tx, EDCCA,
 * false alarms and CCA, plus per-window average IFS and CCA times in us.
 */
static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev,
					  struct rtw89_bb_ctx *bb)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	u8 i = 0;
	u32 res = 0;

	env->ifs_clm_tx_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_tx, PERCENT);
	env->ifs_clm_edcca_excl_cca_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_edcca_excl_cca,
					 PERCENT);
	env->ifs_clm_cck_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckfa, PERCENT);
	env->ifs_clm_ofdm_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmfa, PERCENT);
	env->ifs_clm_cck_cca_excl_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckcca_excl_fa,
					 PERCENT);
	env->ifs_clm_ofdm_cca_excl_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmcca_excl_fa,
					 PERCENT);
	env->ifs_clm_cck_fa_permil =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckfa, PERMIL);
	env->ifs_clm_ofdm_fa_permil =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmfa, PERMIL);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++) {
		/* a histogram count above the supported maximum invalidates
		 * the averaged IFS time for that window
		 */
		if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) {
			env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD;
		} else {
			env->ifs_clm_ifs_avg[i] =
				rtw89_phy_ccx_idx_to_us(rtwdev, bb,
							env->ifs_clm_avg[i]);
		}

		/* average CCA time per window, rounded to nearest (+his/2) */
		res = rtw89_phy_ccx_idx_to_us(rtwdev, bb, env->ifs_clm_cca[i]);
		res += env->ifs_clm_his[i] >> 1;
		if (env->ifs_clm_his[i])
			res /= env->ifs_clm_his[i];
		else
			res = 0;
		env->ifs_clm_cca_avg[i] = res;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM ratio {Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx_ratio, env->ifs_clm_edcca_excl_cca_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_ratio, env->ifs_clm_ofdm_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA permil {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_cca_excl_fa_ratio,
		    env->ifs_clm_ofdm_cca_excl_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "Time:[his, ifs_avg(us), cca_avg(us)]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "T%d:[%d, %d, %d]\n",
			    i + 1, env->ifs_clm_his[i], env->ifs_clm_ifs_avg[i],
			    env->ifs_clm_cca_avg[i]);
}
5844 
5845 static u8 rtw89_nhm_weighted_avg(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
5846 {
5847 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
5848 	u8 nhm_weight[RTW89_NHM_RPT_NUM];
5849 	u32 nhm_weighted_sum = 0;
5850 	u8 weight_zero;
5851 	u8 i;
5852 
5853 	if (env->nhm_sum == 0)
5854 		return 0;
5855 
5856 	weight_zero = clamp_t(u16, env->nhm_th[0] - RTW89_NHM_WEIGHT_OFFSET, 0, U8_MAX);
5857 
5858 	for (i = 0; i < RTW89_NHM_RPT_NUM; i++) {
5859 		if (i == 0)
5860 			nhm_weight[i] = weight_zero;
5861 		else if (i == (RTW89_NHM_RPT_NUM - 1))
5862 			nhm_weight[i] = env->nhm_th[i - 1] + RTW89_NHM_WEIGHT_OFFSET;
5863 		else
5864 			nhm_weight[i] = (env->nhm_th[i - 1] + env->nhm_th[i]) / 2;
5865 	}
5866 
5867 	if (rtwdev->chip->chip_id == RTL8852A || rtwdev->chip->chip_id == RTL8852B ||
5868 	    rtwdev->chip->chip_id == RTL8852C) {
5869 		if (env->nhm_th[RTW89_NHM_TH_NUM - 1] == RTW89_NHM_WA_TH) {
5870 			nhm_weight[RTW89_NHM_RPT_NUM - 1] =
5871 				env->nhm_th[RTW89_NHM_TH_NUM - 2] +
5872 				RTW89_NHM_WEIGHT_OFFSET;
5873 			nhm_weight[RTW89_NHM_RPT_NUM - 2] =
5874 				nhm_weight[RTW89_NHM_RPT_NUM - 1];
5875 		}
5876 
5877 		env->nhm_result[0] += env->nhm_result[RTW89_NHM_RPT_NUM - 1];
5878 		env->nhm_result[RTW89_NHM_RPT_NUM - 1] = 0;
5879 	}
5880 
5881 	for (i = 0; i < RTW89_NHM_RPT_NUM; i++)
5882 		nhm_weighted_sum += env->nhm_result[i] * nhm_weight[i];
5883 
5884 	return (nhm_weighted_sum / env->nhm_sum) >> RTW89_NHM_TH_FACTOR;
5885 }
5886 
/* Read the NHM report registers for one baseband, compute the weighted
 * average noise value and, when @ch_hw_value is non-zero, record it in the
 * per-channel history and result list for that channel.
 */
static void __rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev,
				       struct rtw89_bb_ctx *bb, enum rtw89_band hw_band,
				       u16 ch_hw_value)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	struct ieee80211_supported_band *sband;
	const struct rtw89_reg_def *nhm_rpt;
	enum nl80211_band band;
	u32 sum = 0;
	u8 chan_idx;
	u8 nhm_pwr;
	u8 i;

	/* bail out if the hardware has not finished this measurement round */
	if (!rtw89_phy_read32_idx(rtwdev, ccx->nhm, ccx->nhm_ready, bb->phy_idx)) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,  "[NHM] Get NHM report Fail\n");
		return;
	}

	/* collect all histogram buckets and their total sample count */
	for (i = 0; i < RTW89_NHM_RPT_NUM; i++) {
		nhm_rpt = &(*chip->nhm_report)[i];

		env->nhm_result[i] =
			rtw89_phy_read32_idx(rtwdev, nhm_rpt->addr,
					     nhm_rpt->mask, bb->phy_idx);
		sum += env->nhm_result[i];
	}
	env->nhm_sum = sum;
	nhm_pwr = rtw89_nhm_weighted_avg(rtwdev, bb);

	/* ch_hw_value == 0 means "measure only, do not record a channel" */
	if (!ch_hw_value)
		return;

	band = rtw89_hw_to_nl80211_band(hw_band);
	sband = rtwdev->hw->wiphy->bands[band];
	if (!sband)
		return;

	/* find the matching channel and store/refresh its noise report */
	for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) {
		struct ieee80211_channel *channel;
		struct rtw89_nhm_report *rpt;
		struct list_head *nhm_list;

		channel = &sband->channels[chan_idx];
		if (channel->hw_value != ch_hw_value)
			continue;

		rpt = &env->nhm_his[hw_band][chan_idx];
		nhm_list = &env->nhm_rpt_list;

		rpt->channel = channel;
		rpt->noise = nhm_pwr;

		/* only link the entry once; later rounds just update it */
		if (list_empty(&rpt->list))
			list_add_tail(&rpt->list, nhm_list);

		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "[NHM] channel not found\n");
}
5950 
5951 void rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev, enum rtw89_band hw_band,
5952 			      u16 ch_hw_value)
5953 {
5954 	const struct rtw89_chip_info *chip = rtwdev->chip;
5955 	struct rtw89_bb_ctx *bb;
5956 
5957 	if (!chip->support_noise)
5958 		return;
5959 
5960 	rtw89_for_each_active_bb(rtwdev, bb)
5961 		__rtw89_phy_nhm_get_result(rtwdev, bb, hw_band, ch_hw_value);
5962 }
5963 
/* Fetch one completed IFS-CLM measurement round from hardware into
 * bb->env_monitor and derive the utility ratios/averages.
 *
 * Returns true when the hardware reported the round as done and the
 * counters were read, false when no result was available.
 */
static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev,
					 struct rtw89_bb_ctx *bb)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	/* measurement must be flagged done before the counters are valid */
	if (rtw89_phy_read32_idx(rtwdev, ccx->ifs_total_addr,
				 ccx->ifs_cnt_done_mask, bb->phy_idx) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Get IFS_CLM report Fail\n");
		return false;
	}

	/* Tx / EDCCA / FA / CCA event counters */
	env->ifs_clm_tx =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				     ccx->ifs_clm_tx_cnt_msk, bb->phy_idx);
	env->ifs_clm_edcca_excl_cca =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				     ccx->ifs_clm_edcca_excl_cca_fa_mask, bb->phy_idx);
	env->ifs_clm_cckcca_excl_fa =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_cca_addr,
				     ccx->ifs_clm_cckcca_excl_fa_mask, bb->phy_idx);
	env->ifs_clm_ofdmcca_excl_fa =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_cca_addr,
				     ccx->ifs_clm_ofdmcca_excl_fa_mask, bb->phy_idx);
	env->ifs_clm_cckfa =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_fa_addr,
				     ccx->ifs_clm_cck_fa_mask, bb->phy_idx);
	env->ifs_clm_ofdmfa =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_fa_addr,
				     ccx->ifs_clm_ofdm_fa_mask, bb->phy_idx);

	/* per-window (T1..T4) IFS histogram counts */
	env->ifs_clm_his[0] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
				     ccx->ifs_t1_his_mask, bb->phy_idx);
	env->ifs_clm_his[1] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
				     ccx->ifs_t2_his_mask, bb->phy_idx);
	env->ifs_clm_his[2] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
				     ccx->ifs_t3_his_mask, bb->phy_idx);
	env->ifs_clm_his[3] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
				     ccx->ifs_t4_his_mask, bb->phy_idx);

	/* per-window averaged IFS values (raw units, converted later) */
	env->ifs_clm_avg[0] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_l_addr,
				     ccx->ifs_t1_avg_mask, bb->phy_idx);
	env->ifs_clm_avg[1] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_l_addr,
				     ccx->ifs_t2_avg_mask, bb->phy_idx);
	env->ifs_clm_avg[2] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_h_addr,
				     ccx->ifs_t3_avg_mask, bb->phy_idx);
	env->ifs_clm_avg[3] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_h_addr,
				     ccx->ifs_t4_avg_mask, bb->phy_idx);

	/* per-window accumulated CCA values (raw units, converted later) */
	env->ifs_clm_cca[0] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_l_addr,
				     ccx->ifs_t1_cca_mask, bb->phy_idx);
	env->ifs_clm_cca[1] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_l_addr,
				     ccx->ifs_t2_cca_mask, bb->phy_idx);
	env->ifs_clm_cca[2] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_h_addr,
				     ccx->ifs_t3_cca_mask, bb->phy_idx);
	env->ifs_clm_cca[3] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_h_addr,
				     ccx->ifs_t4_cca_mask, bb->phy_idx);

	env->ifs_clm_total_ifs =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_total_addr,
				     ccx->ifs_total_mask, bb->phy_idx);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n",
		    env->ifs_clm_total_ifs);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "{Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx, env->ifs_clm_edcca_excl_cca);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckfa, env->ifs_clm_ofdmfa);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckcca_excl_fa, env->ifs_clm_ofdmcca_excl_fa);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Time:[his, avg, cca]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i],
			    env->ifs_clm_avg[i], env->ifs_clm_cca[i]);

	rtw89_phy_ifs_clm_get_utility(rtwdev, bb);

	return true;
}
6063 
6064 static void rtw89_phy_nhm_th_update(struct rtw89_dev *rtwdev,
6065 				    struct rtw89_bb_ctx *bb)
6066 {
6067 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
6068 	static const u8 nhm_th_11k[RTW89_NHM_RPT_NUM] = {
6069 		18, 21, 24, 27, 30, 35, 40, 45, 50, 55, 60, 0
6070 	};
6071 	const struct rtw89_chip_info *chip = rtwdev->chip;
6072 	const struct rtw89_reg_def *nhm_th;
6073 	u8 i;
6074 
6075 	for (i = 0; i < RTW89_NHM_RPT_NUM; i++)
6076 		env->nhm_th[i] = nhm_th_11k[i] << RTW89_NHM_TH_FACTOR;
6077 
6078 	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
6079 	    chip->chip_id == RTL8852C)
6080 		env->nhm_th[RTW89_NHM_TH_NUM - 1] = RTW89_NHM_WA_TH;
6081 
6082 	for (i = 0; i < RTW89_NHM_TH_NUM; i++) {
6083 		nhm_th = &(*chip->nhm_th)[i];
6084 
6085 		rtw89_phy_write32_idx(rtwdev, nhm_th->addr, nhm_th->mask,
6086 				      env->nhm_th[i], bb->phy_idx);
6087 	}
6088 }
6089 
6090 static int rtw89_phy_nhm_set(struct rtw89_dev *rtwdev,
6091 			     struct rtw89_bb_ctx *bb,
6092 			     struct rtw89_ccx_para_info *para)
6093 {
6094 	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
6095 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
6096 	const struct rtw89_ccx_regs *ccx = phy->ccx;
6097 	u32 unit_idx = 0;
6098 	u32 period = 0;
6099 
6100 	if (para->mntr_time == 0) {
6101 		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
6102 			    "[NHM] MNTR_TIME is 0\n");
6103 		return -EINVAL;
6104 	}
6105 
6106 	if (rtw89_phy_ccx_racing_ctrl(rtwdev, bb, para->rac_lv))
6107 		return -EINVAL;
6108 
6109 	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
6110 		    "[NHM]nhm_incld_cca=%d, mntr_time=%d ms\n",
6111 		    para->nhm_incld_cca, para->mntr_time);
6112 
6113 	if (para->mntr_time != env->nhm_mntr_time) {
6114 		rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
6115 						&period, &unit_idx);
6116 		rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
6117 				      ccx->nhm_period_mask, period, bb->phy_idx);
6118 		rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
6119 				      ccx->nhm_unit_mask, period, bb->phy_idx);
6120 
6121 		env->nhm_mntr_time = para->mntr_time;
6122 		env->ccx_period = period;
6123 		env->ccx_unit_idx = unit_idx;
6124 	}
6125 
6126 	if (para->nhm_incld_cca != env->nhm_include_cca) {
6127 		rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
6128 				      ccx->nhm_include_cca_mask, para->nhm_incld_cca,
6129 				      bb->phy_idx);
6130 
6131 		env->nhm_include_cca = para->nhm_incld_cca;
6132 	}
6133 
6134 	rtw89_phy_nhm_th_update(rtwdev, bb);
6135 
6136 	return 0;
6137 }
6138 
/* Kick off one NHM measurement round on this baseband with the default
 * monitor time and CCA included: release any held racing level, program
 * the parameters, then pulse the trigger.
 */
static void __rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	struct rtw89_ccx_para_info para = {
		.mntr_time = RTW89_NHM_MNTR_TIME,
		.rac_lv = RTW89_RAC_LV_1,
		.nhm_incld_cca = true,
	};

	rtw89_phy_ccx_racing_release(rtwdev, bb);

	rtw89_phy_nhm_set(rtwdev, bb, &para);
	rtw89_phy_ccx_trigger(rtwdev, bb, RTW89_PHY_ENV_MON_NHM);
}
6152 
6153 void rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev)
6154 {
6155 	const struct rtw89_chip_info *chip = rtwdev->chip;
6156 	struct rtw89_bb_ctx *bb;
6157 
6158 	if (!chip->support_noise)
6159 		return;
6160 
6161 	rtw89_for_each_active_bb(rtwdev, bb)
6162 		__rtw89_phy_nhm_trigger(rtwdev, bb);
6163 }
6164 
/* Program IFS-CLM measurement parameters (monitor time, application
 * profile) ahead of the next trigger. Hardware is only touched for values
 * that changed since the previous configuration.
 *
 * Returns 0 on success, -EINVAL when the monitor time is zero or the CCX
 * racing-level arbitration rejects the request.
 */
static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
				 struct rtw89_bb_ctx *bb,
				 struct rtw89_ccx_para_info *para)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u32 period = 0;
	u32 unit_idx = 0;

	if (para->mntr_time == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARN] MNTR_TIME is 0\n");
		return -EINVAL;
	}

	if (rtw89_phy_ccx_racing_ctrl(rtwdev, bb, para->rac_lv))
		return -EINVAL;

	/* reprogram the measurement period only when the time changed */
	if (para->mntr_time != env->ifs_clm_mntr_time) {
		rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
						&period, &unit_idx);
		rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr,
				      ccx->ifs_clm_period_mask, period, bb->phy_idx);
		rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr,
				      ccx->ifs_clm_cnt_unit_mask,
				      unit_idx, bb->phy_idx);

		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS-CLM time ((%d)) -> ((%d))\n",
			    env->ifs_clm_mntr_time, para->mntr_time);

		env->ifs_clm_mntr_time = para->mntr_time;
		env->ccx_period = (u16)period;
		env->ccx_unit_idx = (u8)unit_idx;
	}

	/* refresh threshold registers when the application profile demands */
	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, bb, para)) {
		env->ifs_clm_app = para->ifs_clm_app;
		rtw89_phy_ifs_clm_set_th_reg(rtwdev, bb);
	}

	return 0;
}
6209 
/* Periodic environment-monitor watchdog for one baseband: harvest the
 * previous IFS-CLM result, then reconfigure and retrigger the next
 * background measurement round. Skipped entirely while CCX is under
 * manual (debug) control.
 */
static void __rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev,
					  struct rtw89_bb_ctx *bb)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	struct rtw89_ccx_para_info para = {};
	u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL;

	env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL;
	if (env->ccx_manual_ctrl) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "CCX in manual ctrl\n");
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "BB-%d env_monitor track\n", bb->phy_idx);

	/* only ifs_clm for now */
	if (rtw89_phy_ifs_clm_get_result(rtwdev, bb))
		env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM;

	/* restart: release racing ownership, then set up the next round */
	rtw89_phy_ccx_racing_release(rtwdev, bb);
	para.mntr_time = 1900;
	para.rac_lv = RTW89_RAC_LV_1;
	para.ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;

	if (rtw89_phy_ifs_clm_set(rtwdev, bb, &para) == 0)
		chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
	if (chk_result)
		rtw89_phy_ccx_trigger(rtwdev, bb, chk_result);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "get_result=0x%x, chk_result:0x%x\n",
		    env->ccx_watchdog_result, chk_result);
}
6245 
6246 void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
6247 {
6248 	struct rtw89_bb_ctx *bb;
6249 
6250 	rtw89_for_each_active_bb(rtwdev, bb)
6251 		__rtw89_phy_env_monitor_track(rtwdev, bb);
6252 }
6253 
6254 static bool rtw89_physts_ie_page_valid(struct rtw89_dev *rtwdev,
6255 				       enum rtw89_phy_status_bitmap *ie_page)
6256 {
6257 	const struct rtw89_chip_info *chip = rtwdev->chip;
6258 
6259 	if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
6260 	    *ie_page == RTW89_RSVD_9)
6261 		return false;
6262 	else if (*ie_page > RTW89_RSVD_9 && *ie_page < RTW89_EHT_PKT)
6263 		*ie_page -= 1;
6264 
6265 	if (*ie_page == RTW89_EHT_PKT && chip->chip_gen == RTW89_CHIP_AX)
6266 		return false;
6267 
6268 	return true;
6269 }
6270 
6271 static u32 rtw89_phy_get_ie_bitmap_addr(enum rtw89_phy_status_bitmap ie_page)
6272 {
6273 	static const u8 ie_page_shift = 2;
6274 
6275 	if (ie_page == RTW89_EHT_PKT)
6276 		return R_PHY_STS_BITMAP_EHT;
6277 
6278 	return R_PHY_STS_BITMAP_ADDR_START + (ie_page << ie_page_shift);
6279 }
6280 
6281 static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev,
6282 				      enum rtw89_phy_status_bitmap ie_page,
6283 				      enum rtw89_phy_idx phy_idx)
6284 {
6285 	u32 addr;
6286 
6287 	if (!rtw89_physts_ie_page_valid(rtwdev, &ie_page))
6288 		return 0;
6289 
6290 	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);
6291 
6292 	return rtw89_phy_read32_idx(rtwdev, addr, MASKDWORD, phy_idx);
6293 }
6294 
6295 static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev,
6296 				       enum rtw89_phy_status_bitmap ie_page,
6297 				       u32 val, enum rtw89_phy_idx phy_idx)
6298 {
6299 	const struct rtw89_chip_info *chip = rtwdev->chip;
6300 	u32 addr;
6301 
6302 	if (!rtw89_physts_ie_page_valid(rtwdev, &ie_page))
6303 		return;
6304 
6305 	if (chip->chip_id == RTL8852A)
6306 		val &= B_PHY_STS_BITMAP_MSK_52A;
6307 
6308 	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);
6309 	rtw89_phy_write32_idx(rtwdev, addr, MASKDWORD, val, phy_idx);
6310 }
6311 
6312 static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
6313 					    bool enable,
6314 					    enum rtw89_phy_idx phy_idx)
6315 {
6316 	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
6317 	const struct rtw89_physts_regs *physts = phy->physts;
6318 
6319 	if (enable) {
6320 		rtw89_phy_write32_idx_clr(rtwdev, physts->setting_addr,
6321 					  physts->dis_trigger_fail_mask, phy_idx);
6322 		rtw89_phy_write32_idx_clr(rtwdev, physts->setting_addr,
6323 					  physts->dis_trigger_brk_mask, phy_idx);
6324 	} else {
6325 		rtw89_phy_write32_idx_set(rtwdev, physts->setting_addr,
6326 					  physts->dis_trigger_fail_mask, phy_idx);
6327 		rtw89_phy_write32_idx_set(rtwdev, physts->setting_addr,
6328 					  physts->dis_trigger_brk_mask, phy_idx);
6329 	}
6330 }
6331 
/* Configure PHY-status parsing for one PHY: disable fail reporting, then
 * adjust the IE bitmap of every valid page so each packet type carries
 * the IEs the driver needs.
 */
static void __rtw89_physts_parsing_init(struct rtw89_dev *rtwdev,
					enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 val;
	u8 i;

	rtw89_physts_enable_fail_report(rtwdev, false, phy_idx);

	for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) {
		/* skip the reserved page and, on AX chips, the EHT page */
		if (i == RTW89_RSVD_9 ||
		    (i == RTW89_EHT_PKT && chip->chip_gen == RTW89_CHIP_AX))
			continue;

		val = rtw89_physts_get_ie_bitmap(rtwdev, i, phy_idx);
		if (i == RTW89_HE_MU || i == RTW89_VHT_MU) {
			/* DL MU pages need the DL-MU definition IE */
			val |= BIT(RTW89_PHYSTS_IE13_DL_MU_DEF);
		} else if (i == RTW89_TRIG_BASE_PPDU) {
			val |= BIT(RTW89_PHYSTS_IE13_DL_MU_DEF) |
			       BIT(RTW89_PHYSTS_IE01_CMN_OFDM);
		} else if (i >= RTW89_CCK_PKT) {
			/* drop the per-path extension IEs for data pages */
			val &= ~(GENMASK(RTW89_PHYSTS_IE07_CMN_EXT_PATH_D,
					 RTW89_PHYSTS_IE04_CMN_EXT_PATH_A));

			if (i == RTW89_CCK_PKT)
				val |= BIT(RTW89_PHYSTS_IE01_CMN_OFDM);
			else if (i >= RTW89_HT_PKT)
				val |= BIT(RTW89_PHYSTS_IE20_DBG_OFDM_FD_USER_SEG_0);
		}

		rtw89_physts_set_ie_bitmap(rtwdev, i, val, phy_idx);
	}
}
6365 
6366 static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
6367 {
6368 	__rtw89_physts_parsing_init(rtwdev, RTW89_PHY_0);
6369 	if (rtwdev->dbcc_en)
6370 		__rtw89_physts_parsing_init(rtwdev, RTW89_PHY_1);
6371 }
6372 
6373 static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev,
6374 					  struct rtw89_bb_ctx *bb, int type)
6375 {
6376 	const struct rtw89_chip_info *chip = rtwdev->chip;
6377 	const struct rtw89_phy_dig_gain_cfg *cfg;
6378 	struct rtw89_dig_info *dig = &bb->dig;
6379 	const char *msg;
6380 	u8 i;
6381 	s8 gain_base;
6382 	s8 *gain_arr;
6383 	u32 tmp;
6384 
6385 	switch (type) {
6386 	case RTW89_DIG_GAIN_LNA_G:
6387 		gain_arr = dig->lna_gain_g;
6388 		gain_base = LNA0_GAIN;
6389 		cfg = chip->dig_table->cfg_lna_g;
6390 		msg = "lna_gain_g";
6391 		break;
6392 	case RTW89_DIG_GAIN_TIA_G:
6393 		gain_arr = dig->tia_gain_g;
6394 		gain_base = TIA0_GAIN_G;
6395 		cfg = chip->dig_table->cfg_tia_g;
6396 		msg = "tia_gain_g";
6397 		break;
6398 	case RTW89_DIG_GAIN_LNA_A:
6399 		gain_arr = dig->lna_gain_a;
6400 		gain_base = LNA0_GAIN;
6401 		cfg = chip->dig_table->cfg_lna_a;
6402 		msg = "lna_gain_a";
6403 		break;
6404 	case RTW89_DIG_GAIN_TIA_A:
6405 		gain_arr = dig->tia_gain_a;
6406 		gain_base = TIA0_GAIN_A;
6407 		cfg = chip->dig_table->cfg_tia_a;
6408 		msg = "tia_gain_a";
6409 		break;
6410 	default:
6411 		return;
6412 	}
6413 
6414 	for (i = 0; i < cfg->size; i++) {
6415 		tmp = rtw89_phy_read32_idx(rtwdev, cfg->table[i].addr,
6416 					   cfg->table[i].mask, bb->phy_idx);
6417 		tmp >>= DIG_GAIN_SHIFT;
6418 		gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base;
6419 		gain_base += DIG_GAIN;
6420 
6421 		rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s[%d]=%d\n",
6422 			    msg, i, gain_arr[i]);
6423 	}
6424 }
6425 
6426 static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev,
6427 					   struct rtw89_bb_ctx *bb)
6428 {
6429 	struct rtw89_dig_info *dig = &bb->dig;
6430 	u32 tmp;
6431 	u8 i;
6432 
6433 	if (!rtwdev->hal.support_igi)
6434 		return;
6435 
6436 	tmp = rtw89_phy_read32_idx(rtwdev, R_PATH0_IB_PKPW,
6437 				   B_PATH0_IB_PKPW_MSK, bb->phy_idx);
6438 	dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT);
6439 	dig->ib_pbk = rtw89_phy_read32_idx(rtwdev, R_PATH0_IB_PBK,
6440 					   B_PATH0_IB_PBK_MSK, bb->phy_idx);
6441 	rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n",
6442 		    dig->ib_pkpwr, dig->ib_pbk);
6443 
6444 	for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++)
6445 		rtw89_phy_dig_read_gain_table(rtwdev, bb, i);
6446 }
6447 
/* DIG tuning defaults: substitute RSSI when no station is associated, the
 * IGI RSSI thresholds, and per-band/link-state false-alarm thresholds
 * (compared against the summed CCK+OFDM FA permil values).
 */
static const u8 rssi_nolink = 22;
static const u8 igi_rssi_th[IGI_RSSI_TH_NUM] = {68, 84, 90, 98, 104};
static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88};
static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16};
static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528};
6453 
6454 static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev,
6455 					   struct rtw89_bb_ctx *bb)
6456 {
6457 	struct rtw89_phy_ch_info *ch_info = &bb->ch_info;
6458 	struct rtw89_dig_info *dig = &bb->dig;
6459 	bool is_linked = rtwdev->total_sta_assoc > 0;
6460 
6461 	if (is_linked) {
6462 		dig->igi_rssi = ch_info->rssi_min >> 1;
6463 	} else {
6464 		rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n");
6465 		dig->igi_rssi = rssi_nolink;
6466 	}
6467 }
6468 
6469 static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev,
6470 				      struct rtw89_bb_ctx *bb)
6471 {
6472 	const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, bb->phy_idx);
6473 	struct rtw89_dig_info *dig = &bb->dig;
6474 	bool is_linked = rtwdev->total_sta_assoc > 0;
6475 	const u16 *fa_th_src = NULL;
6476 
6477 	switch (chan->band_type) {
6478 	case RTW89_BAND_2G:
6479 		dig->lna_gain = dig->lna_gain_g;
6480 		dig->tia_gain = dig->tia_gain_g;
6481 		fa_th_src = is_linked ? fa_th_2g : fa_th_nolink;
6482 		dig->force_gaincode_idx_en = false;
6483 		dig->dyn_pd_th_en = true;
6484 		break;
6485 	case RTW89_BAND_5G:
6486 	default:
6487 		dig->lna_gain = dig->lna_gain_a;
6488 		dig->tia_gain = dig->tia_gain_a;
6489 		fa_th_src = is_linked ? fa_th_5g : fa_th_nolink;
6490 		dig->force_gaincode_idx_en = true;
6491 		dig->dyn_pd_th_en = true;
6492 		break;
6493 	}
6494 	memcpy(dig->fa_th, fa_th_src, sizeof(dig->fa_th));
6495 	memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th));
6496 }
6497 
/* DIG dynamic-range tunables; note dynamic_pd_threshold_max has no
 * initializer and is therefore zero (static storage duration).
 */
static const u8 pd_low_th_offset = 16, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
static const u8 dynamic_pd_threshold_max;
6501 
6502 static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev,
6503 				     struct rtw89_bb_ctx *bb)
6504 {
6505 	struct rtw89_dig_info *dig = &bb->dig;
6506 
6507 	dig->cur_gaincode.lna_idx = LNA_IDX_MAX;
6508 	dig->cur_gaincode.tia_idx = TIA_IDX_MAX;
6509 	dig->cur_gaincode.rxb_idx = RXB_IDX_MAX;
6510 	dig->force_gaincode.lna_idx = LNA_IDX_MAX;
6511 	dig->force_gaincode.tia_idx = TIA_IDX_MAX;
6512 	dig->force_gaincode.rxb_idx = RXB_IDX_MAX;
6513 
6514 	dig->dyn_igi_max = igi_max_performance_mode;
6515 	dig->dyn_igi_min = dynamic_igi_min;
6516 	dig->dyn_pd_th_max = dynamic_pd_threshold_max;
6517 	dig->pd_low_th_ofst = pd_low_th_offset;
6518 	dig->is_linked_pre = false;
6519 }
6520 
/* Per-BB DIG bring-up: load gain tables from hardware, then reset state. */
static void __rtw89_phy_dig_init(struct rtw89_dev *rtwdev,
				 struct rtw89_bb_ctx *bb)
{
	rtw89_debug(rtwdev, RTW89_DBG_DIG, "BB-%d dig_init\n", bb->phy_idx);

	rtw89_phy_dig_update_gain_para(rtwdev, bb);
	rtw89_phy_dig_reset(rtwdev, bb);
}
6529 
6530 static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev)
6531 {
6532 	struct rtw89_bb_ctx *bb;
6533 
6534 	rtw89_for_each_capab_bb(rtwdev, bb)
6535 		__rtw89_phy_dig_init(rtwdev, bb);
6536 }
6537 
6538 static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev,
6539 					struct rtw89_bb_ctx *bb, u8 rssi)
6540 {
6541 	struct rtw89_dig_info *dig = &bb->dig;
6542 	u8 lna_idx;
6543 
6544 	if (rssi < dig->igi_rssi_th[0])
6545 		lna_idx = RTW89_DIG_GAIN_LNA_IDX6;
6546 	else if (rssi < dig->igi_rssi_th[1])
6547 		lna_idx = RTW89_DIG_GAIN_LNA_IDX5;
6548 	else if (rssi < dig->igi_rssi_th[2])
6549 		lna_idx = RTW89_DIG_GAIN_LNA_IDX4;
6550 	else if (rssi < dig->igi_rssi_th[3])
6551 		lna_idx = RTW89_DIG_GAIN_LNA_IDX3;
6552 	else if (rssi < dig->igi_rssi_th[4])
6553 		lna_idx = RTW89_DIG_GAIN_LNA_IDX2;
6554 	else
6555 		lna_idx = RTW89_DIG_GAIN_LNA_IDX1;
6556 
6557 	return lna_idx;
6558 }
6559 
6560 static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev,
6561 					struct rtw89_bb_ctx *bb, u8 rssi)
6562 {
6563 	struct rtw89_dig_info *dig = &bb->dig;
6564 	u8 tia_idx;
6565 
6566 	if (rssi < dig->igi_rssi_th[0])
6567 		tia_idx = RTW89_DIG_GAIN_TIA_IDX1;
6568 	else
6569 		tia_idx = RTW89_DIG_GAIN_TIA_IDX0;
6570 
6571 	return tia_idx;
6572 }
6573 
6574 #define IB_PBK_BASE 110
6575 #define WB_RSSI_BASE 10
6576 static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev,
6577 					struct rtw89_bb_ctx *bb, u8 rssi,
6578 					struct rtw89_agc_gaincode_set *set)
6579 {
6580 	struct rtw89_dig_info *dig = &bb->dig;
6581 	s8 lna_gain = dig->lna_gain[set->lna_idx];
6582 	s8 tia_gain = dig->tia_gain[set->tia_idx];
6583 	s32 wb_rssi = rssi + lna_gain + tia_gain;
6584 	s32 rxb_idx_tmp = IB_PBK_BASE + WB_RSSI_BASE;
6585 	u8 rxb_idx;
6586 
6587 	rxb_idx_tmp += dig->ib_pkpwr - dig->ib_pbk - wb_rssi;
6588 	rxb_idx = clamp_t(s32, rxb_idx_tmp, RXB_IDX_MIN, RXB_IDX_MAX);
6589 
6590 	rtw89_debug(rtwdev, RTW89_DBG_DIG, "wb_rssi=%03d, rxb_idx_tmp=%03d\n",
6591 		    wb_rssi, rxb_idx_tmp);
6592 
6593 	return rxb_idx;
6594 }
6595 
/* Build a complete AGC gain-code set from an RSSI value. LNA and TIA are
 * chosen from RSSI alone; RXB is derived last since it depends on both.
 */
static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev,
					   struct rtw89_bb_ctx *bb, u8 rssi,
					   struct rtw89_agc_gaincode_set *set)
{
	set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, bb, rssi);
	set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, bb, rssi);
	set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, bb, rssi, set);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "final_rssi=%03d, (lna,tia,rab)=(%d,%d,%02d)\n",
		    rssi, set->lna_idx, set->tia_idx, set->rxb_idx);
}
6608 
#define IGI_OFFSET_MAX 25
#define IGI_OFFSET_MUL 2
/* Grade the environment noisiness from the CCK+OFDM false-alarm ratio and
 * fold it into dig->fa_rssi_ofst (capped at IGI_OFFSET_MAX).
 */
static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev,
					    struct rtw89_bb_ctx *bb)
{
	struct rtw89_dig_info *dig = &bb->dig;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	enum rtw89_dig_noisy_level noisy_lv;
	u8 igi_offset = dig->fa_rssi_ofst;
	u16 fa_ratio = 0;

	/* combined false-alarm ratio, in permil */
	fa_ratio = env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil;

	if (fa_ratio < dig->fa_th[0])
		noisy_lv = RTW89_DIG_NOISY_LEVEL0;
	else if (fa_ratio < dig->fa_th[1])
		noisy_lv = RTW89_DIG_NOISY_LEVEL1;
	else if (fa_ratio < dig->fa_th[2])
		noisy_lv = RTW89_DIG_NOISY_LEVEL2;
	else if (fa_ratio < dig->fa_th[3])
		noisy_lv = RTW89_DIG_NOISY_LEVEL3;
	else
		noisy_lv = RTW89_DIG_NOISY_LEVEL_MAX;

	/* quiet environment with a small residual offset: reset to zero */
	if (noisy_lv == RTW89_DIG_NOISY_LEVEL0 && igi_offset < 2)
		igi_offset = 0;
	else
		igi_offset += noisy_lv * IGI_OFFSET_MUL;

	igi_offset = min_t(u8, igi_offset, IGI_OFFSET_MAX);
	dig->fa_rssi_ofst = igi_offset;

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa_th: [+6 (%d) +4 (%d) +2 (%d) 0 (%d) -2 ]\n",
		    dig->fa_th[3], dig->fa_th[2], dig->fa_th[1], dig->fa_th[0]);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa(CCK,OFDM,ALL)=(%d,%d,%d)%%, noisy_lv=%d, ofst=%d\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil,
		    env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil,
		    noisy_lv, igi_offset);
}
6651 
6652 static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev,
6653 				      struct rtw89_bb_ctx *bb, u8 lna_idx)
6654 {
6655 	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
6656 
6657 	rtw89_phy_write32_idx(rtwdev, dig_regs->p0_lna_init.addr,
6658 			      dig_regs->p0_lna_init.mask, lna_idx, bb->phy_idx);
6659 	rtw89_phy_write32_idx(rtwdev, dig_regs->p1_lna_init.addr,
6660 			      dig_regs->p1_lna_init.mask, lna_idx, bb->phy_idx);
6661 }
6662 
6663 static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev,
6664 				      struct rtw89_bb_ctx *bb, u8 tia_idx)
6665 {
6666 	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
6667 
6668 	rtw89_phy_write32_idx(rtwdev, dig_regs->p0_tia_init.addr,
6669 			      dig_regs->p0_tia_init.mask, tia_idx, bb->phy_idx);
6670 	rtw89_phy_write32_idx(rtwdev, dig_regs->p1_tia_init.addr,
6671 			      dig_regs->p1_tia_init.mask, tia_idx, bb->phy_idx);
6672 }
6673 
6674 static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev,
6675 				      struct rtw89_bb_ctx *bb, u8 rxb_idx)
6676 {
6677 	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
6678 
6679 	rtw89_phy_write32_idx(rtwdev, dig_regs->p0_rxb_init.addr,
6680 			      dig_regs->p0_rxb_init.mask, rxb_idx, bb->phy_idx);
6681 	rtw89_phy_write32_idx(rtwdev, dig_regs->p1_rxb_init.addr,
6682 			      dig_regs->p1_rxb_init.mask, rxb_idx, bb->phy_idx);
6683 }
6684 
/* Apply a complete AGC gaincode set to hardware; no-op when the chip
 * does not support IGI control.
 */
static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
				     struct rtw89_bb_ctx *bb,
				     const struct rtw89_agc_gaincode_set set)
{
	if (!rtwdev->hal.support_igi)
		return;

	rtw89_phy_dig_set_lna_idx(rtwdev, bb, set.lna_idx);
	rtw89_phy_dig_set_tia_idx(rtwdev, bb, set.tia_idx);
	rtw89_phy_dig_set_rxb_idx(rtwdev, bb, set.rxb_idx);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n",
		    set.lna_idx, set.tia_idx, set.rxb_idx);
}
6699 
6700 static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
6701 						   struct rtw89_bb_ctx *bb,
6702 						   bool enable)
6703 {
6704 	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
6705 
6706 	rtw89_phy_write32_idx(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
6707 			      dig_regs->p0_p20_pagcugc_en.mask, enable, bb->phy_idx);
6708 	rtw89_phy_write32_idx(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
6709 			      dig_regs->p0_s20_pagcugc_en.mask, enable, bb->phy_idx);
6710 	rtw89_phy_write32_idx(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
6711 			      dig_regs->p1_p20_pagcugc_en.mask, enable, bb->phy_idx);
6712 	rtw89_phy_write32_idx(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
6713 			      dig_regs->p1_s20_pagcugc_en.mask, enable, bb->phy_idx);
6714 
6715 	rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
6716 }
6717 
6718 static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev,
6719 				     struct rtw89_bb_ctx *bb)
6720 {
6721 	struct rtw89_dig_info *dig = &bb->dig;
6722 
6723 	if (!rtwdev->hal.support_igi)
6724 		return;
6725 
6726 	if (dig->force_gaincode_idx_en) {
6727 		rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->force_gaincode);
6728 		rtw89_debug(rtwdev, RTW89_DBG_DIG,
6729 			    "Force gaincode index enabled.\n");
6730 	} else {
6731 		rtw89_phy_dig_gaincode_by_rssi(rtwdev, bb, dig->igi_fa_rssi,
6732 					       &dig->cur_gaincode);
6733 		rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->cur_gaincode);
6734 	}
6735 }
6736 
6737 static u8 rtw89_phy_dig_cal_under_region(struct rtw89_dev *rtwdev,
6738 					 struct rtw89_bb_ctx *bb,
6739 					 const struct rtw89_chan *chan)
6740 {
6741 	enum rtw89_bandwidth cbw = chan->band_width;
6742 	struct rtw89_dig_info *dig = &bb->dig;
6743 	u8 under_region = dig->pd_low_th_ofst;
6744 
6745 	if (rtwdev->chip->chip_gen == RTW89_CHIP_AX)
6746 		under_region += PD_TH_SB_FLTR_CMP_VAL;
6747 
6748 	switch (cbw) {
6749 	case RTW89_CHANNEL_WIDTH_40:
6750 		under_region += PD_TH_BW40_CMP_VAL;
6751 		break;
6752 	case RTW89_CHANNEL_WIDTH_80:
6753 		under_region += PD_TH_BW80_CMP_VAL;
6754 		break;
6755 	case RTW89_CHANNEL_WIDTH_160:
6756 		under_region += PD_TH_BW160_CMP_VAL;
6757 		break;
6758 	case RTW89_CHANNEL_WIDTH_20:
6759 		fallthrough;
6760 	default:
6761 		under_region += PD_TH_BW20_CMP_VAL;
6762 		break;
6763 	}
6764 
6765 	return under_region;
6766 }
6767 
6768 static u32 __rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev,
6769 				     struct rtw89_bb_ctx *bb,
6770 				     u8 rssi, bool enable,
6771 				     const struct rtw89_chan *chan)
6772 {
6773 	struct rtw89_dig_info *dig = &bb->dig;
6774 	u8 ofdm_cca_th, under_region;
6775 	u8 final_rssi;
6776 	u32 pd_val;
6777 
6778 	under_region = rtw89_phy_dig_cal_under_region(rtwdev, bb, chan);
6779 	dig->dyn_pd_th_max = dig->igi_rssi;
6780 
6781 	final_rssi = min_t(u8, rssi, dig->igi_rssi);
6782 	ofdm_cca_th = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
6783 			      PD_TH_MAX_RSSI + under_region);
6784 
6785 	if (enable) {
6786 		pd_val = (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1;
6787 		rtw89_debug(rtwdev, RTW89_DBG_DIG,
6788 			    "igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n",
6789 			    final_rssi, ofdm_cca_th, under_region, pd_val);
6790 	} else {
6791 		pd_val = 0;
6792 		rtw89_debug(rtwdev, RTW89_DBG_DIG,
6793 			    "Dynamic PD th disabled, Set PD_low_bd=0\n");
6794 	}
6795 
6796 	return pd_val;
6797 }
6798 
6799 static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev,
6800 				    struct rtw89_bb_ctx *bb,
6801 				    u8 rssi, bool enable)
6802 {
6803 	const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, bb->phy_idx);
6804 	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
6805 	struct rtw89_dig_info *dig = &bb->dig;
6806 	u8 final_rssi, under_region = dig->pd_low_th_ofst;
6807 	s8 cck_cca_th;
6808 	u32 pd_val;
6809 
6810 	pd_val = __rtw89_phy_dig_dyn_pd_th(rtwdev, bb, rssi, enable, chan);
6811 	dig->bak_dig = pd_val;
6812 
6813 	rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
6814 			      dig_regs->pd_lower_bound_mask, pd_val, bb->phy_idx);
6815 	rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
6816 			      dig_regs->pd_spatial_reuse_en, enable, bb->phy_idx);
6817 
6818 	if (!rtwdev->hal.support_cckpd)
6819 		return;
6820 
6821 	final_rssi = min_t(u8, rssi, dig->igi_rssi);
6822 	under_region = rtw89_phy_dig_cal_under_region(rtwdev, bb, chan);
6823 	cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI);
6824 	pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);
6825 
6826 	rtw89_debug(rtwdev, RTW89_DBG_DIG,
6827 		    "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
6828 		    final_rssi, cck_cca_th, under_region, pd_val);
6829 
6830 	rtw89_phy_write32_idx(rtwdev, dig_regs->bmode_pd_reg,
6831 			      dig_regs->bmode_cca_rssi_limit_en, enable, bb->phy_idx);
6832 	rtw89_phy_write32_idx(rtwdev, dig_regs->bmode_pd_lower_bound_reg,
6833 			      dig_regs->bmode_rssi_nocca_low_th_mask, pd_val, bb->phy_idx);
6834 }
6835 
/* Re-arm DIG to a clean baseline: reset its parameters and gaincode,
 * disable the dynamic PD threshold and SDAGC-follow-PAGC, then reload
 * the parameter set for the current channel.
 */
void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	struct rtw89_dig_info *dig = &bb->dig;

	dig->bypass_dig = false;
	rtw89_phy_dig_para_reset(rtwdev, bb);
	rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->force_gaincode);
	rtw89_phy_dig_dyn_pd_th(rtwdev, bb, rssi_nolink, false);
	rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, false);
	rtw89_phy_dig_update_para(rtwdev, bb);
}
6847 
#define IGI_RSSI_MIN 10
#define ABS_IGI_MIN 0xc
/* Fold the environment-derived false-alarm offset into igi_fa_rssi and
 * clamp it to the dynamic [dyn_igi_min, dyn_igi_max] window computed
 * from the current igi_rssi.
 */
static
void rtw89_phy_cal_igi_fa_rssi(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	struct rtw89_dig_info *dig = &bb->dig;
	u8 igi_min;

	rtw89_phy_dig_igi_offset_by_env(rtwdev, bb);

	/* max_t(int, ...) guards against u8 underflow when igi_rssi < 10 */
	igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0);
	dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode);
	dig->dyn_igi_min = max(igi_min, ABS_IGI_MIN);

	if (dig->dyn_igi_max >= dig->dyn_igi_min) {
		dig->igi_fa_rssi += dig->fa_rssi_ofst;
		dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
					 dig->dyn_igi_max);
	} else {
		/* degenerate window: pin to the (smaller) max bound */
		dig->igi_fa_rssi = dig->dyn_igi_max;
	}
}
6870 
/* Per-role accumulator used by rtw89_phy_set_mcc_dig_iter() */
struct rtw89_phy_iter_mcc_dig {
	struct rtw89_vif_link *rtwvif_link; /* MCC role being evaluated */
	bool has_sta; /* true once any station matched this role */
	u8 rssi_min; /* lowest average RSSI among matched stations */
};
6876 
6877 static void rtw89_phy_set_mcc_dig(struct rtw89_dev *rtwdev,
6878 				  struct rtw89_vif_link *rtwvif_link,
6879 				  struct rtw89_bb_ctx *bb,
6880 				  u8 rssi_min, u8 mcc_role_idx,
6881 				  bool is_linked)
6882 {
6883 	struct rtw89_dig_info *dig = &bb->dig;
6884 	const struct rtw89_chan *chan;
6885 	u8 pd_val;
6886 
6887 	if (is_linked) {
6888 		dig->igi_rssi = rssi_min >> 1;
6889 		dig->igi_fa_rssi = dig->igi_rssi;
6890 	} else {
6891 		rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n");
6892 		dig->igi_rssi = rssi_nolink;
6893 		dig->igi_fa_rssi = dig->igi_rssi;
6894 	}
6895 
6896 	chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
6897 	rtw89_phy_cal_igi_fa_rssi(rtwdev, bb);
6898 	pd_val = __rtw89_phy_dig_dyn_pd_th(rtwdev, bb, dig->igi_fa_rssi,
6899 					   is_linked, chan);
6900 	rtw89_fw_h2c_mcc_dig(rtwdev, rtwvif_link->chanctx_idx,
6901 			     mcc_role_idx, pd_val, true);
6902 
6903 	rtw89_debug(rtwdev, RTW89_DBG_DIG,
6904 		    "MCC chanctx_idx %d chan %d rssi %d pd_val %d",
6905 		    rtwvif_link->chanctx_idx, chan->primary_channel,
6906 		    dig->igi_rssi, pd_val);
6907 }
6908 
6909 static void rtw89_phy_set_mcc_dig_iter(void *data, struct ieee80211_sta *sta)
6910 {
6911 	struct rtw89_phy_iter_mcc_dig *mcc_dig = (struct rtw89_phy_iter_mcc_dig *)data;
6912 	unsigned int link_id = mcc_dig->rtwvif_link->link_id;
6913 	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
6914 	struct rtw89_sta_link *rtwsta_link;
6915 
6916 	if (rtwsta->rtwvif != mcc_dig->rtwvif_link->rtwvif)
6917 		return;
6918 
6919 	rtwsta_link = rtwsta->links[link_id];
6920 	if (!rtwsta_link)
6921 		return;
6922 
6923 	mcc_dig->has_sta = true;
6924 	if (ewma_rssi_read(&rtwsta_link->avg_rssi) < mcc_dig->rssi_min)
6925 		mcc_dig->rssi_min = ewma_rssi_read(&rtwsta_link->avg_rssi);
6926 }
6927 
6928 static void rtw89_phy_dig_mcc(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
6929 {
6930 	struct rtw89_phy_iter_mcc_dig mcc_dig;
6931 	struct rtw89_vif_link *rtwvif_link;
6932 	struct rtw89_mcc_links_info info;
6933 	int i;
6934 
6935 	rtw89_mcc_get_links(rtwdev, &info);
6936 	for (i = 0; i < ARRAY_SIZE(info.links); i++) {
6937 		rtwvif_link = info.links[i];
6938 		if (!rtwvif_link)
6939 			continue;
6940 
6941 		memset(&mcc_dig, 0, sizeof(mcc_dig));
6942 		mcc_dig.rtwvif_link = rtwvif_link;
6943 		mcc_dig.has_sta = false;
6944 		mcc_dig.rssi_min = U8_MAX;
6945 		ieee80211_iterate_stations_atomic(rtwdev->hw,
6946 						  rtw89_phy_set_mcc_dig_iter,
6947 						  &mcc_dig);
6948 
6949 		rtw89_phy_set_mcc_dig(rtwdev, rtwvif_link, bb,
6950 				      mcc_dig.rssi_min, i, mcc_dig.has_sta);
6951 	}
6952 }
6953 
6954 static void rtw89_phy_dig_ctrl(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb,
6955 			       bool pause_dig, bool restore)
6956 {
6957 	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
6958 	struct rtw89_dig_info *dig = &bb->dig;
6959 	bool en_dig;
6960 	u32 pd_val;
6961 
6962 	if (dig->pause_dig == pause_dig)
6963 		return;
6964 
6965 	if (pause_dig) {
6966 		en_dig = false;
6967 		pd_val = 0;
6968 	} else {
6969 		en_dig = rtwdev->total_sta_assoc > 0;
6970 		pd_val = restore ? dig->bak_dig : 0;
6971 	}
6972 
6973 	rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s <%s> PD_low=%d", __func__,
6974 		    pause_dig ? "suspend" : "resume", pd_val);
6975 
6976 	rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
6977 			      dig_regs->pd_lower_bound_mask, pd_val, bb->phy_idx);
6978 	rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
6979 			      dig_regs->pd_spatial_reuse_en, en_dig, bb->phy_idx);
6980 
6981 	dig->pause_dig = pause_dig;
6982 }
6983 
/* Pause DIG on every active BB, clearing the PD lower bound. */
void rtw89_phy_dig_suspend(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_active_bb(rtwdev, bb)
		rtw89_phy_dig_ctrl(rtwdev, bb, true, false);
}
6991 
/* Resume DIG on every active BB.
 * @restore: re-apply the PD value saved before suspension
 */
void rtw89_phy_dig_resume(struct rtw89_dev *rtwdev, bool restore)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_active_bb(rtwdev, bb)
		rtw89_phy_dig_ctrl(rtwdev, bb, false, restore);
}
6999 
7000 static void __rtw89_phy_dig(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
7001 {
7002 	struct rtw89_dig_info *dig = &bb->dig;
7003 	bool is_linked = rtwdev->total_sta_assoc > 0;
7004 	enum rtw89_entity_mode mode;
7005 
7006 	if (unlikely(dig->bypass_dig)) {
7007 		dig->bypass_dig = false;
7008 		return;
7009 	}
7010 
7011 	rtw89_debug(rtwdev, RTW89_DBG_DIG, "BB-%d dig track\n", bb->phy_idx);
7012 
7013 	rtw89_phy_dig_update_rssi_info(rtwdev, bb);
7014 
7015 	mode = rtw89_get_entity_mode(rtwdev);
7016 	if (mode == RTW89_ENTITY_MODE_MCC) {
7017 		rtw89_phy_dig_mcc(rtwdev, bb);
7018 		return;
7019 	}
7020 
7021 	if (unlikely(dig->pause_dig))
7022 		return;
7023 
7024 	if (!dig->is_linked_pre && is_linked) {
7025 		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
7026 		rtw89_phy_dig_update_para(rtwdev, bb);
7027 		dig->igi_fa_rssi = dig->igi_rssi;
7028 	} else if (dig->is_linked_pre && !is_linked) {
7029 		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
7030 		rtw89_phy_dig_update_para(rtwdev, bb);
7031 		dig->igi_fa_rssi = dig->igi_rssi;
7032 	}
7033 	dig->is_linked_pre = is_linked;
7034 
7035 	rtw89_phy_cal_igi_fa_rssi(rtwdev, bb);
7036 
7037 	rtw89_debug(rtwdev, RTW89_DBG_DIG,
7038 		    "rssi=%03d, dyn_joint(max,min)=(%d,%d), final_rssi=%d\n",
7039 		    dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
7040 		    dig->igi_fa_rssi);
7041 
7042 	rtw89_phy_dig_config_igi(rtwdev, bb);
7043 
7044 	rtw89_phy_dig_dyn_pd_th(rtwdev, bb, dig->igi_fa_rssi, dig->dyn_pd_th_en);
7045 
7046 	if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max)
7047 		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, true);
7048 	else
7049 		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, false);
7050 }
7051 
/* Run DIG tracking on every active BB. */
void rtw89_phy_dig(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_active_bb(rtwdev, bb)
		__rtw89_phy_dig(rtwdev, bb);
}
7059 
7060 static void __rtw89_phy_tx_path_div_sta_iter(struct rtw89_dev *rtwdev,
7061 					     struct rtw89_sta_link *rtwsta_link)
7062 {
7063 	struct rtw89_hal *hal = &rtwdev->hal;
7064 	u8 rssi_a, rssi_b;
7065 	u32 candidate;
7066 
7067 	rssi_a = ewma_rssi_read(&rtwsta_link->rssi[RF_PATH_A]);
7068 	rssi_b = ewma_rssi_read(&rtwsta_link->rssi[RF_PATH_B]);
7069 
7070 	if (rssi_a > rssi_b + RTW89_TX_DIV_RSSI_RAW_TH)
7071 		candidate = RF_A;
7072 	else if (rssi_b > rssi_a + RTW89_TX_DIV_RSSI_RAW_TH)
7073 		candidate = RF_B;
7074 	else
7075 		return;
7076 
7077 	if (hal->antenna_tx == candidate)
7078 		return;
7079 
7080 	hal->antenna_tx = candidate;
7081 	rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta_link);
7082 
7083 	if (hal->antenna_tx == RF_A) {
7084 		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x12);
7085 		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x11);
7086 	} else if (hal->antenna_tx == RF_B) {
7087 		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x11);
7088 		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x12);
7089 	}
7090 }
7091 
7092 static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta)
7093 {
7094 	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
7095 	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
7096 	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
7097 	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
7098 	struct rtw89_vif_link *rtwvif_link;
7099 	struct rtw89_sta_link *rtwsta_link;
7100 	unsigned int link_id;
7101 	bool *done = data;
7102 
7103 	if (WARN(ieee80211_vif_is_mld(vif), "MLD mix path_div\n"))
7104 		return;
7105 
7106 	if (sta->tdls)
7107 		return;
7108 
7109 	if (*done)
7110 		return;
7111 
7112 	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
7113 		rtwvif_link = rtwsta_link->rtwvif_link;
7114 		if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
7115 			continue;
7116 
7117 		*done = true;
7118 		__rtw89_phy_tx_path_div_sta_iter(rtwdev, rtwsta_link);
7119 		return;
7120 	}
7121 }
7122 
/* Periodic TX path diversity tracking; no-op unless the feature is
 * enabled in HAL capabilities.
 */
void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	bool done = false;

	if (!hal->tx_path_diversity)
		return;

	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_tx_path_div_sta_iter,
					  &done);
}
7135 
7136 #define ANTDIV_MAIN 0
7137 #define ANTDIV_AUX 1
7138 
7139 static void rtw89_phy_antdiv_set_ant(struct rtw89_dev *rtwdev)
7140 {
7141 	struct rtw89_hal *hal = &rtwdev->hal;
7142 	u8 default_ant, optional_ant;
7143 
7144 	if (!hal->ant_diversity || hal->antenna_tx == 0)
7145 		return;
7146 
7147 	if (hal->antenna_tx == RF_B) {
7148 		default_ant = ANTDIV_AUX;
7149 		optional_ant = ANTDIV_MAIN;
7150 	} else {
7151 		default_ant = ANTDIV_MAIN;
7152 		optional_ant = ANTDIV_AUX;
7153 	}
7154 
7155 	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_CGCS_CTRL,
7156 			      default_ant, RTW89_PHY_0);
7157 	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ORI,
7158 			      default_ant, RTW89_PHY_0);
7159 	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ALT,
7160 			      optional_ant, RTW89_PHY_0);
7161 	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_TX_ORI,
7162 			      default_ant, RTW89_PHY_0);
7163 }
7164 
7165 static void rtw89_phy_swap_hal_antenna(struct rtw89_dev *rtwdev)
7166 {
7167 	struct rtw89_hal *hal = &rtwdev->hal;
7168 
7169 	hal->antenna_rx = hal->antenna_rx == RF_A ? RF_B : RF_A;
7170 	hal->antenna_tx = hal->antenna_rx;
7171 }
7172 
7173 static void rtw89_phy_antdiv_decision_state(struct rtw89_dev *rtwdev)
7174 {
7175 	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
7176 	struct rtw89_hal *hal = &rtwdev->hal;
7177 	bool no_change = false;
7178 	u8 main_rssi, aux_rssi;
7179 	u8 main_evm, aux_evm;
7180 	u32 candidate;
7181 
7182 	antdiv->get_stats = false;
7183 	antdiv->training_count = 0;
7184 
7185 	main_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->main_stats);
7186 	main_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->main_stats);
7187 	aux_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->aux_stats);
7188 	aux_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->aux_stats);
7189 
7190 	if (main_evm > aux_evm + ANTDIV_EVM_DIFF_TH)
7191 		candidate = RF_A;
7192 	else if (aux_evm > main_evm + ANTDIV_EVM_DIFF_TH)
7193 		candidate = RF_B;
7194 	else if (main_rssi > aux_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
7195 		candidate = RF_A;
7196 	else if (aux_rssi > main_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
7197 		candidate = RF_B;
7198 	else
7199 		no_change = true;
7200 
7201 	if (no_change) {
7202 		/* swap back from training antenna to original */
7203 		rtw89_phy_swap_hal_antenna(rtwdev);
7204 		return;
7205 	}
7206 
7207 	hal->antenna_tx = candidate;
7208 	hal->antenna_rx = candidate;
7209 }
7210 
7211 static void rtw89_phy_antdiv_training_state(struct rtw89_dev *rtwdev)
7212 {
7213 	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
7214 	u64 state_period;
7215 
7216 	if (antdiv->training_count % 2 == 0) {
7217 		if (antdiv->training_count == 0)
7218 			rtw89_phy_antdiv_sts_reset(rtwdev);
7219 
7220 		antdiv->get_stats = true;
7221 		state_period = msecs_to_jiffies(ANTDIV_TRAINNING_INTVL);
7222 	} else {
7223 		antdiv->get_stats = false;
7224 		state_period = msecs_to_jiffies(ANTDIV_DELAY);
7225 
7226 		rtw89_phy_swap_hal_antenna(rtwdev);
7227 		rtw89_phy_antdiv_set_ant(rtwdev);
7228 	}
7229 
7230 	antdiv->training_count++;
7231 	wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->antdiv_work,
7232 				 state_period);
7233 }
7234 
/* Delayed-work handler for the antenna diversity state machine: run
 * training rounds until enough have completed, then decide and apply
 * the winning antenna.
 */
void rtw89_phy_antdiv_work(struct wiphy *wiphy, struct wiphy_work *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						antdiv_work.work);
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	lockdep_assert_wiphy(wiphy);

	if (antdiv->training_count <= ANTDIV_TRAINNING_CNT) {
		rtw89_phy_antdiv_training_state(rtwdev);
	} else {
		rtw89_phy_antdiv_decision_state(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}
}
7250 
7251 void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev)
7252 {
7253 	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
7254 	struct rtw89_hal *hal = &rtwdev->hal;
7255 	u8 rssi, rssi_pre;
7256 
7257 	if (!hal->ant_diversity || hal->ant_diversity_fixed)
7258 		return;
7259 
7260 	rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->target_stats);
7261 	rssi_pre = antdiv->rssi_pre;
7262 	antdiv->rssi_pre = rssi;
7263 	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);
7264 
7265 	if (abs((int)rssi - (int)rssi_pre) < ANTDIV_RSSI_DIFF_TH)
7266 		return;
7267 
7268 	antdiv->training_count = 0;
7269 	wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->antdiv_work, 0);
7270 }
7271 
/* Initialize the CCX environment monitor (top + IFS-CLM) for one BB. */
static void __rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev,
					 struct rtw89_bb_ctx *bb)
{
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "BB-%d env_monitor init\n", bb->phy_idx);

	rtw89_phy_ccx_top_setting_init(rtwdev, bb);
	rtw89_phy_ifs_clm_setting_init(rtwdev, bb);
}
7281 
/* Initialize the environment monitor on every capable BB. */
static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_capab_bb(rtwdev, bb)
		__rtw89_phy_env_monitor_init(rtwdev, bb);
}
7289 
/* Initialize EDCCA state for one BB: clear the cached backup values and
 * program the TX collision T2R setting.
 */
static void __rtw89_phy_edcca_init(struct rtw89_dev *rtwdev,
				   struct rtw89_bb_ctx *bb)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak;

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA, "BB-%d edcca init\n", bb->phy_idx);

	/* drop any stale EDCCA backup state */
	memset(edcca_bak, 0, sizeof(*edcca_bak));

	/* RTL8922A CAV-only register sequence. Presumably a clock/gating
	 * workaround (B_SEGSND_EN and B_DFS_FFT_EN are toggled off then
	 * back on) — order looks significant; do not reorder.
	 */
	if (rtwdev->chip->chip_id == RTL8922A && rtwdev->hal.cv == CHIP_CAV) {
		rtw89_phy_set_phy_regs(rtwdev, R_TXGATING, B_TXGATING_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_VAL, 2);
		rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_ON, 1);
		rtw89_phy_set_phy_regs(rtwdev, R_SPOOF_CG, B_SPOOF_CG_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_CG_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 1);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 1);
	}

	rtw89_phy_write32_idx(rtwdev, edcca_regs->tx_collision_t2r_st,
			      edcca_regs->tx_collision_t2r_st_mask, 0x29, bb->phy_idx);
}
7315 
/* Initialize EDCCA on every capable BB. */
static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_capab_bb(rtwdev, bb)
		__rtw89_phy_edcca_init(rtwdev, bb);
}
7323 
/* One-time bring-up of all PHY dynamic mechanisms and RF calibration.
 * NOTE(review): the sequence appears order-sensitive (BB hardware setup
 * before trackers, nctl init before RFK) — keep ordering as-is.
 */
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_stat_init(rtwdev);

	rtw89_chip_bb_sethw(rtwdev);

	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_phy_nhm_setting_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
	rtw89_phy_dig_init(rtwdev);
	rtw89_phy_cfo_init(rtwdev);
	rtw89_phy_bb_wrap_init(rtwdev);
	rtw89_phy_edcca_init(rtwdev);
	rtw89_phy_ch_info_init(rtwdev);
	rtw89_phy_ul_tb_info_init(rtwdev);
	rtw89_phy_antdiv_init(rtwdev);
	rtw89_chip_rfe_gpio(rtwdev);
	rtw89_phy_antdiv_set_ant(rtwdev);

	rtw89_chip_rfk_hw_init(rtwdev);
	rtw89_phy_init_rf_nctl(rtwdev);
	rtw89_chip_rfk_init(rtwdev);
	rtw89_chip_set_txpwr_ctrl(rtwdev);
	rtw89_chip_power_trim(rtwdev);
	rtw89_chip_cfg_txrx_path(rtwdev);
}
7350 
/* Partial re-init after events that reset BB state (subset of dm_init). */
void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev)
{
	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
}
7356 
7357 static void __rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
7358 {
7359 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
7360 	const struct rtw89_chip_info *chip = rtwdev->chip;
7361 	struct ieee80211_supported_band *sband;
7362 	enum rtw89_band hw_band;
7363 	enum nl80211_band band;
7364 	u8 idx;
7365 
7366 	if (!chip->support_noise)
7367 		return;
7368 
7369 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
7370 		sband = rtwdev->hw->wiphy->bands[band];
7371 		if (!sband)
7372 			continue;
7373 
7374 		hw_band = rtw89_nl80211_to_hw_band(band);
7375 		env->nhm_his[hw_band] =
7376 			devm_kcalloc(rtwdev->dev, sband->n_channels,
7377 				     sizeof(*env->nhm_his[0]), GFP_KERNEL);
7378 
7379 		for (idx = 0; idx < sband->n_channels; idx++)
7380 			INIT_LIST_HEAD(&env->nhm_his[hw_band][idx].list);
7381 
7382 		INIT_LIST_HEAD(&env->nhm_rpt_list);
7383 	}
7384 }
7385 
/* Allocate dynamic-mechanism data for every capable BB. */
void rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_capab_bb(rtwdev, bb)
		__rtw89_phy_dm_init_data(rtwdev, bb);
}
7393 
7394 void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
7395 			     struct rtw89_vif_link *rtwvif_link)
7396 {
7397 	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
7398 	const struct rtw89_chip_info *chip = rtwdev->chip;
7399 	const struct rtw89_reg_def *bss_clr_vld = &chip->bss_clr_vld;
7400 	enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
7401 	struct ieee80211_bss_conf *bss_conf;
7402 	u8 bss_color;
7403 
7404 	rcu_read_lock();
7405 
7406 	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
7407 	if (!bss_conf->he_support || !vif->cfg.assoc) {
7408 		rcu_read_unlock();
7409 		return;
7410 	}
7411 
7412 	bss_color = bss_conf->he_bss_color.color;
7413 
7414 	rcu_read_unlock();
7415 
7416 	rtw89_phy_write32_idx(rtwdev, bss_clr_vld->addr, bss_clr_vld->mask, 0x1,
7417 			      phy_idx);
7418 	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_TGT,
7419 			      bss_color, phy_idx);
7420 	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_STAID,
7421 			      vif->cfg.aid, phy_idx);
7422 }
7423 
7424 static bool rfk_chan_validate_desc(const struct rtw89_rfk_chan_desc *desc)
7425 {
7426 	return desc->ch != 0;
7427 }
7428 
7429 static bool rfk_chan_is_equivalent(const struct rtw89_rfk_chan_desc *desc,
7430 				   const struct rtw89_chan *chan)
7431 {
7432 	if (!rfk_chan_validate_desc(desc))
7433 		return false;
7434 
7435 	if (desc->ch != chan->channel)
7436 		return false;
7437 
7438 	if (desc->has_band && desc->band != chan->band_type)
7439 		return false;
7440 
7441 	if (desc->has_bw && desc->bw != chan->band_width)
7442 		return false;
7443 
7444 	return true;
7445 }
7446 
/* State for rfk_chan_iter_search(): counts active channels matching desc */
struct rfk_chan_iter_data {
	const struct rtw89_rfk_chan_desc desc; /* candidate RFK entry */
	unsigned int found; /* number of matching active channels */
};
7451 
7452 static int rfk_chan_iter_search(const struct rtw89_chan *chan, void *data)
7453 {
7454 	struct rfk_chan_iter_data *iter_data = data;
7455 
7456 	if (rfk_chan_is_equivalent(&iter_data->desc, chan))
7457 		iter_data->found++;
7458 
7459 	return 0;
7460 }
7461 
7462 u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev,
7463 			 const struct rtw89_rfk_chan_desc *desc, u8 desc_nr,
7464 			 const struct rtw89_chan *target_chan)
7465 {
7466 	int sel = -1;
7467 	u8 i;
7468 
7469 	for (i = 0; i < desc_nr; i++) {
7470 		struct rfk_chan_iter_data iter_data = {
7471 			.desc = desc[i],
7472 		};
7473 
7474 		if (rfk_chan_is_equivalent(&desc[i], target_chan))
7475 			return i;
7476 
7477 		rtw89_iterate_entity_chan(rtwdev, rfk_chan_iter_search, &iter_data);
7478 		if (!iter_data.found && sel == -1)
7479 			sel = i;
7480 	}
7481 
7482 	if (sel == -1) {
7483 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
7484 			    "no idle rfk entry; force replace the first\n");
7485 		sel = 0;
7486 	}
7487 
7488 	return sel;
7489 }
7490 EXPORT_SYMBOL(rtw89_rfk_chan_lookup);
7491 
/* RFK table op: write an RF register field on the given path. */
static void
_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
}
7497 
/* RFK table op: masked write to a BB register. */
static void
_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}
7503 
/* RFK table op: set bits in a BB register. */
static void
_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
}
7509 
/* RFK table op: clear bits in a BB register. */
static void
_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
}
7515 
/* RFK table op: busy-wait for def->data microseconds. */
static void
_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	udelay(def->data);
}
7521 
/* Dispatch table mapping RTW89_RFK_F_* op codes to their handlers. */
static void
(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
	[RTW89_RFK_F_WRF] = _rfk_write_rf,
	[RTW89_RFK_F_WM] = _rfk_write32_mask,
	[RTW89_RFK_F_WS] = _rfk_write32_set,
	[RTW89_RFK_F_WC] = _rfk_write32_clr,
	[RTW89_RFK_F_DELAY] = _rfk_delay,
};

/* every op code must have a handler */
static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
7532 
7533 void
7534 rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
7535 {
7536 	const struct rtw89_reg5_def *p = tbl->defs;
7537 	const struct rtw89_reg5_def *end = tbl->defs + tbl->size;
7538 
7539 	for (; p < end; p++)
7540 		_rfk_handler[p->flag](rtwdev, p);
7541 }
7542 EXPORT_SYMBOL(rtw89_rfk_parser);
7543 
#define RTW89_TSSI_FAST_MODE_NUM 4

/* TSSI fast-mode register fields, flat band-edge configuration. */
static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_flat[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD934, 0xff0000},
	{0xD934, 0xff000000},
	{0xD938, 0xff},
	{0xD934, 0xff00},
};
7552 
7553 static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_level[RTW89_TSSI_FAST_MODE_NUM] = {
7554 	{0xD930, 0xff0000},
7555 	{0xD930, 0xff000000},
7556 	{0xD934, 0xff},
7557 	{0xD930, 0xff00},
7558 };
7559 
7560 static
7561 void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev,
7562 					   enum rtw89_mac_idx mac_idx,
7563 					   enum rtw89_tssi_bandedge_cfg bandedge_cfg,
7564 					   u32 val)
7565 {
7566 	const struct rtw89_reg_def *regs;
7567 	u32 reg;
7568 	int i;
7569 
7570 	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
7571 		regs = rtw89_tssi_fastmode_regs_flat;
7572 	else
7573 		regs = rtw89_tssi_fastmode_regs_level;
7574 
7575 	for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) {
7576 		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
7577 		rtw89_write32_mask(rtwdev, reg, regs[i].mask, val);
7578 	}
7579 }
7580 
/* TSSI bandedge byte fields, one per sub-bandwidth (RTW89_TSSI_SBW_NUM),
 * used when the bandedge configuration is RTW89_TSSI_BANDEDGE_FLAT.
 */
static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_flat[RTW89_TSSI_SBW_NUM] = {
	{0xD91C, 0xff000000},
	{0xD920, 0xff},
	{0xD920, 0xff00},
	{0xD920, 0xff0000},
	{0xD920, 0xff000000},
	{0xD924, 0xff},
	{0xD924, 0xff00},
	{0xD914, 0xff000000},
	{0xD918, 0xff},
	{0xD918, 0xff00},
	{0xD918, 0xff0000},
	{0xD918, 0xff000000},
	{0xD91C, 0xff},
	{0xD91C, 0xff00},
	{0xD91C, 0xff0000},
};

/* TSSI bandedge byte fields used for the non-flat (level) bandedge
 * configurations.
 */
static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_level[RTW89_TSSI_SBW_NUM] = {
	{0xD910, 0xff},
	{0xD910, 0xff00},
	{0xD910, 0xff0000},
	{0xD910, 0xff000000},
	{0xD914, 0xff},
	{0xD914, 0xff00},
	{0xD914, 0xff0000},
	{0xD908, 0xff},
	{0xD908, 0xff00},
	{0xD908, 0xff0000},
	{0xD908, 0xff000000},
	{0xD90C, 0xff},
	{0xD90C, 0xff00},
	{0xD90C, 0xff0000},
	{0xD90C, 0xff000000},
};
7616 
7617 void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
7618 					  enum rtw89_mac_idx mac_idx,
7619 					  enum rtw89_tssi_bandedge_cfg bandedge_cfg)
7620 {
7621 	const struct rtw89_chip_info *chip = rtwdev->chip;
7622 	const struct rtw89_reg_def *regs;
7623 	const u32 *data;
7624 	u32 reg;
7625 	int i;
7626 
7627 	if (bandedge_cfg >= RTW89_TSSI_CFG_NUM)
7628 		return;
7629 
7630 	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
7631 		regs = rtw89_tssi_bandedge_regs_flat;
7632 	else
7633 		regs = rtw89_tssi_bandedge_regs_level;
7634 
7635 	data = chip->tssi_dbw_table->data[bandedge_cfg];
7636 
7637 	for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) {
7638 		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
7639 		rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]);
7640 	}
7641 
7642 	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BANDEDGE_CFG, mac_idx);
7643 	rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg);
7644 
7645 	rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg,
7646 					      data[RTW89_TSSI_SBW20]);
7647 }
7648 EXPORT_SYMBOL(rtw89_phy_tssi_ctrl_set_bandedge_cfg);
7649 
/* Base channel numbers used to encode a channel into a compact index.
 * Index 0 covers 2 GHz; indices 2..5 are 5 GHz sub-band starts; indices
 * 7..14 are 6 GHz sub-band starts. The 0xff entries sit outside every
 * FIRST..LAST range below and are never matched.
 */
static
const u8 rtw89_ch_base_table[16] = {1, 0xff,
				    36, 100, 132, 149, 0xff,
				    1, 33, 65, 97, 129, 161, 193, 225, 0xff};
#define RTW89_CH_BASE_IDX_2G		0
#define RTW89_CH_BASE_IDX_5G_FIRST	2
#define RTW89_CH_BASE_IDX_5G_LAST	5
#define RTW89_CH_BASE_IDX_6G_FIRST	7
#define RTW89_CH_BASE_IDX_6G_LAST	14

/* Encoded channel index: base-table index in the high nibble, half the
 * channel offset from that base in the low nibble.
 */
#define RTW89_CH_BASE_IDX_MASK		GENMASK(7, 4)
#define RTW89_CH_OFFSET_MASK		GENMASK(3, 0)
7662 
7663 u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band)
7664 {
7665 	u8 chan_idx;
7666 	u8 last, first;
7667 	u8 idx;
7668 
7669 	switch (band) {
7670 	case RTW89_BAND_2G:
7671 		chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, RTW89_CH_BASE_IDX_2G) |
7672 			   FIELD_PREP(RTW89_CH_OFFSET_MASK, central_ch);
7673 		return chan_idx;
7674 	case RTW89_BAND_5G:
7675 		first = RTW89_CH_BASE_IDX_5G_FIRST;
7676 		last = RTW89_CH_BASE_IDX_5G_LAST;
7677 		break;
7678 	case RTW89_BAND_6G:
7679 		first = RTW89_CH_BASE_IDX_6G_FIRST;
7680 		last = RTW89_CH_BASE_IDX_6G_LAST;
7681 		break;
7682 	default:
7683 		rtw89_warn(rtwdev, "Unsupported band %d\n", band);
7684 		return 0;
7685 	}
7686 
7687 	for (idx = last; idx >= first; idx--)
7688 		if (central_ch >= rtw89_ch_base_table[idx])
7689 			break;
7690 
7691 	if (idx < first) {
7692 		rtw89_warn(rtwdev, "Unknown band %d channel %d\n", band, central_ch);
7693 		return 0;
7694 	}
7695 
7696 	chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, idx) |
7697 		   FIELD_PREP(RTW89_CH_OFFSET_MASK,
7698 			      (central_ch - rtw89_ch_base_table[idx]) >> 1);
7699 	return chan_idx;
7700 }
7701 EXPORT_SYMBOL(rtw89_encode_chan_idx);
7702 
7703 void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
7704 			   u8 *ch, enum nl80211_band *band)
7705 {
7706 	u8 idx, offset;
7707 
7708 	idx = FIELD_GET(RTW89_CH_BASE_IDX_MASK, chan_idx);
7709 	offset = FIELD_GET(RTW89_CH_OFFSET_MASK, chan_idx);
7710 
7711 	if (idx == RTW89_CH_BASE_IDX_2G) {
7712 		*band = NL80211_BAND_2GHZ;
7713 		*ch = offset;
7714 		return;
7715 	}
7716 
7717 	*band = idx <= RTW89_CH_BASE_IDX_5G_LAST ? NL80211_BAND_5GHZ : NL80211_BAND_6GHZ;
7718 	*ch = rtw89_ch_base_table[idx] + (offset << 1);
7719 }
7720 EXPORT_SYMBOL(rtw89_decode_chan_idx);
7721 
7722 void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev,
7723 			    struct rtw89_bb_ctx *bb, bool scan)
7724 {
7725 	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
7726 	struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak;
7727 
7728 	if (scan) {
7729 		edcca_bak->a =
7730 			rtw89_phy_read32_idx(rtwdev, edcca_regs->edcca_level,
7731 					     edcca_regs->edcca_mask, bb->phy_idx);
7732 		edcca_bak->p =
7733 			rtw89_phy_read32_idx(rtwdev, edcca_regs->edcca_level,
7734 					     edcca_regs->edcca_p_mask, bb->phy_idx);
7735 		edcca_bak->ppdu =
7736 			rtw89_phy_read32_idx(rtwdev, edcca_regs->ppdu_level,
7737 					     edcca_regs->ppdu_mask, bb->phy_idx);
7738 
7739 		rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
7740 				      edcca_regs->edcca_mask, EDCCA_MAX, bb->phy_idx);
7741 		rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
7742 				      edcca_regs->edcca_p_mask, EDCCA_MAX, bb->phy_idx);
7743 		rtw89_phy_write32_idx(rtwdev, edcca_regs->ppdu_level,
7744 				      edcca_regs->ppdu_mask, EDCCA_MAX, bb->phy_idx);
7745 	} else {
7746 		rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
7747 				      edcca_regs->edcca_mask,
7748 				      edcca_bak->a, bb->phy_idx);
7749 		rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
7750 				      edcca_regs->edcca_p_mask,
7751 				      edcca_bak->p, bb->phy_idx);
7752 		rtw89_phy_write32_idx(rtwdev, edcca_regs->ppdu_level,
7753 				      edcca_regs->ppdu_mask,
7754 				      edcca_bak->ppdu, bb->phy_idx);
7755 	}
7756 }
7757 
/* Dump the EDCCA report registers for debugging when RTW89_DBG_EDCCA is
 * enabled: per-bandwidth busy flags, power measurements (pwdb) for the
 * full-band/primary/secondary segments, and the eight per-20MHz pwdb
 * values. The report-select register sequencing below is order
 * dependent: each rpt_sel(_be) write chooses which fields the following
 * rpt_a/rpt_b reads return.
 */
static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	const struct rtw89_edcca_p_regs *edcca_p_regs;
	bool flag_fb, flag_p20, flag_s20, flag_s40, flag_s80;
	s8 pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80;
	u8 path, per20_bitmap = 0;
	u8 pwdb[8];
	u32 tmp;

	if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_EDCCA))
		return;

	/* pick the per-PHY report register block */
	if (bb->phy_idx == RTW89_PHY_1)
		edcca_p_regs = &edcca_regs->p[RTW89_PHY_1];
	else
		edcca_p_regs = &edcca_regs->p[RTW89_PHY_0];

	/* BE-generation chips have an extra report selector */
	if (rtwdev->chip->chip_id == RTL8922A)
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 0);

	/* report page 0: busy flags and FB/p20/s20 power readings */
	rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
			       edcca_p_regs->rpt_sel_mask, 0);
	tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
	path = u32_get_bits(tmp, B_EDCCA_RPT_B_PATH_MASK);
	flag_s80 = u32_get_bits(tmp, B_EDCCA_RPT_B_S80);
	flag_s40 = u32_get_bits(tmp, B_EDCCA_RPT_B_S40);
	flag_s20 = u32_get_bits(tmp, B_EDCCA_RPT_B_S20);
	flag_p20 = u32_get_bits(tmp, B_EDCCA_RPT_B_P20);
	flag_fb = u32_get_bits(tmp, B_EDCCA_RPT_B_FB);
	pwdb_s20 = u32_get_bits(tmp, MASKBYTE1);
	pwdb_p20 = u32_get_bits(tmp, MASKBYTE2);
	pwdb_fb = u32_get_bits(tmp, MASKBYTE3);

	/* report page 5: s80/s40 power readings */
	rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
			       edcca_p_regs->rpt_sel_mask, 5);
	tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
	pwdb_s80 = u32_get_bits(tmp, MASKBYTE1);
	pwdb_s40 = u32_get_bits(tmp, MASKBYTE2);

	if (rtwdev->chip->chip_id == RTL8922A) {
		/* BE selector pages 4 and 5 each deliver four per-20MHz
		 * pwdb bytes; page 4 also exposes the per-20 bitmap.
		 */
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 4);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
		pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
		pwdb[2] = u32_get_bits(tmp, MASKBYTE1);
		pwdb[3] = u32_get_bits(tmp, MASKBYTE0);
		per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_p_regs->rpt_a,
						     MASKBYTE0);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 5);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
		pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[5] = u32_get_bits(tmp, MASKBYTE2);
		pwdb[6] = u32_get_bits(tmp, MASKBYTE1);
		pwdb[7] = u32_get_bits(tmp, MASKBYTE0);
	} else {
		/* AX chips: two per-20MHz pwdb bytes per rpt_sel page
		 * (pages 0, 5, 2, 3), read from rpt_a.
		 */
		rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
				       edcca_p_regs->rpt_sel_mask, 0);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
		pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[1] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
				       edcca_p_regs->rpt_sel_mask, 5);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
		pwdb[2] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[3] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
				       edcca_p_regs->rpt_sel_mask, 2);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
		pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[5] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
				       edcca_p_regs->rpt_sel_mask, 3);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
		pwdb[6] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[7] = u32_get_bits(tmp, MASKBYTE2);
	}

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: edcca_bitmap = %04x\n", per20_bitmap);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: pwdb per20{0,1,2,3,4,5,6,7} = {%d,%d,%d,%d,%d,%d,%d,%d}(dBm)\n",
		    pwdb[0], pwdb[1], pwdb[2], pwdb[3], pwdb[4], pwdb[5],
		    pwdb[6], pwdb[7]);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: path=%d, flag {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}\n",
		    path, flag_fb, flag_p20, flag_s20, flag_s40, flag_s80);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: pwdb {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}(dBm)\n",
		    pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80);
}
7859 
7860 static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev,
7861 					   struct rtw89_bb_ctx *bb)
7862 {
7863 	struct rtw89_phy_ch_info *ch_info = &bb->ch_info;
7864 	bool is_linked = rtwdev->total_sta_assoc > 0;
7865 	u8 rssi_min = ch_info->rssi_min >> 1;
7866 	u8 edcca_thre;
7867 
7868 	if (!is_linked) {
7869 		edcca_thre = EDCCA_MAX;
7870 	} else {
7871 		edcca_thre = rssi_min - RSSI_UNIT_CONVER + EDCCA_UNIT_CONVER -
7872 			     EDCCA_TH_REF;
7873 		edcca_thre = max_t(u8, edcca_thre, EDCCA_TH_L2H_LB);
7874 	}
7875 
7876 	return edcca_thre;
7877 }
7878 
7879 void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
7880 {
7881 	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
7882 	struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak;
7883 	u8 th;
7884 
7885 	th = rtw89_phy_edcca_get_thre_by_rssi(rtwdev, bb);
7886 	if (th == edcca_bak->th_old)
7887 		return;
7888 
7889 	edcca_bak->th_old = th;
7890 
7891 	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
7892 		    "[EDCCA]: Normal Mode, EDCCA_th = %d\n", th);
7893 
7894 	rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
7895 			      edcca_regs->edcca_mask, th, bb->phy_idx);
7896 	rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
7897 			      edcca_regs->edcca_p_mask, th, bb->phy_idx);
7898 	rtw89_phy_write32_idx(rtwdev, edcca_regs->ppdu_level,
7899 			      edcca_regs->ppdu_mask, th, bb->phy_idx);
7900 }
7901 
/* Per-BB EDCCA tracking step: update the threshold and emit debug logs. */
static
void __rtw89_phy_edcca_track(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	rtw89_debug(rtwdev, RTW89_DBG_EDCCA, "BB-%d edcca track\n", bb->phy_idx);

	rtw89_phy_edcca_thre_calc(rtwdev, bb);
	rtw89_phy_edcca_log(rtwdev, bb);
}
7910 
/* Periodic EDCCA tracking entry point: run the per-BB tracking step on
 * every active BB, unless dynamic EDCCA has been disabled via the
 * disabled_dm_bitmap debug knob.
 */
void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	struct rtw89_bb_ctx *bb;

	if (hal->disabled_dm_bitmap & BIT(RTW89_DM_DYNAMIC_EDCCA))
		return;

	rtw89_for_each_active_bb(rtwdev, bb)
		__rtw89_phy_edcca_track(rtwdev, bb);
}
7922 
7923 enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
7924 					   enum rtw89_phy_idx phy_idx)
7925 {
7926 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
7927 		    "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
7928 		    rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
7929 
7930 	switch (rtwdev->mlo_dbcc_mode) {
7931 	case MLO_1_PLUS_1_1RF:
7932 		if (phy_idx == RTW89_PHY_0)
7933 			return RF_A;
7934 		else
7935 			return RF_B;
7936 	case MLO_1_PLUS_1_2RF:
7937 		if (phy_idx == RTW89_PHY_0)
7938 			return RF_A;
7939 		else
7940 			return RF_D;
7941 	case MLO_0_PLUS_2_1RF:
7942 	case MLO_2_PLUS_0_1RF:
7943 		/* for both PHY 0/1 */
7944 		return RF_AB;
7945 	case MLO_0_PLUS_2_2RF:
7946 	case MLO_2_PLUS_0_2RF:
7947 	case MLO_2_PLUS_2_2RF:
7948 	default:
7949 		if (phy_idx == RTW89_PHY_0)
7950 			return RF_AB;
7951 		else
7952 			return RF_CD;
7953 	}
7954 }
7955 EXPORT_SYMBOL(rtw89_phy_get_kpath);
7956 
7957 enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
7958 					 enum rtw89_phy_idx phy_idx)
7959 {
7960 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
7961 		    "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
7962 		    rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
7963 
7964 	switch (rtwdev->mlo_dbcc_mode) {
7965 	case MLO_1_PLUS_1_1RF:
7966 		if (phy_idx == RTW89_PHY_0)
7967 			return RF_PATH_A;
7968 		else
7969 			return RF_PATH_B;
7970 	case MLO_1_PLUS_1_2RF:
7971 		if (phy_idx == RTW89_PHY_0)
7972 			return RF_PATH_A;
7973 		else
7974 			return RF_PATH_D;
7975 	case MLO_0_PLUS_2_1RF:
7976 	case MLO_2_PLUS_0_1RF:
7977 		if (phy_idx == RTW89_PHY_0)
7978 			return RF_PATH_A;
7979 		else
7980 			return RF_PATH_B;
7981 	case MLO_0_PLUS_2_2RF:
7982 	case MLO_2_PLUS_0_2RF:
7983 	case MLO_2_PLUS_2_2RF:
7984 	default:
7985 		if (phy_idx == RTW89_PHY_0)
7986 			return RF_PATH_A;
7987 		else
7988 			return RF_PATH_C;
7989 	}
7990 }
7991 EXPORT_SYMBOL(rtw89_phy_get_syn_sel);
7992 
/* CCX (channel/IFS/NHM measurement) register map for AX-generation
 * chips, consumed through rtw89_phy_gen_ax below.
 */
static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
	.setting_addr = R_CCX,
	.edcca_opt_mask = B_CCX_EDCCA_OPT_MSK,
	.measurement_trig_mask = B_MEASUREMENT_TRIG_MSK,
	.trig_opt_mask = B_CCX_TRIG_OPT_MSK,
	.en_mask = B_CCX_EN_MSK,
	.ifs_cnt_addr = R_IFS_COUNTER,
	.ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK,
	.ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK,
	.ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK,
	.ifs_collect_en_mask = B_IFS_COLLECT_EN,
	.ifs_t1_addr = R_IFS_T1,
	.ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK,
	.ifs_t1_en_mask = B_IFS_T1_EN_MSK,
	.ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK,
	.ifs_t2_addr = R_IFS_T2,
	.ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK,
	.ifs_t2_en_mask = B_IFS_T2_EN_MSK,
	.ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK,
	.ifs_t3_addr = R_IFS_T3,
	.ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK,
	.ifs_t3_en_mask = B_IFS_T3_EN_MSK,
	.ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK,
	.ifs_t4_addr = R_IFS_T4,
	.ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK,
	.ifs_t4_en_mask = B_IFS_T4_EN_MSK,
	.ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK,
	.ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT,
	.ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK,
	.ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK,
	.ifs_clm_cca_addr = R_IFS_CLM_CCA,
	.ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK,
	.ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK,
	.ifs_clm_fa_addr = R_IFS_CLM_FA,
	.ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK,
	.ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK,
	.ifs_his_addr = R_IFS_HIS,
	.ifs_t4_his_mask = B_IFS_T4_HIS_MSK,
	.ifs_t3_his_mask = B_IFS_T3_HIS_MSK,
	.ifs_t2_his_mask = B_IFS_T2_HIS_MSK,
	.ifs_t1_his_mask = B_IFS_T1_HIS_MSK,
	.ifs_avg_l_addr = R_IFS_AVG_L,
	.ifs_t2_avg_mask = B_IFS_T2_AVG_MSK,
	.ifs_t1_avg_mask = B_IFS_T1_AVG_MSK,
	.ifs_avg_h_addr = R_IFS_AVG_H,
	.ifs_t4_avg_mask = B_IFS_T4_AVG_MSK,
	.ifs_t3_avg_mask = B_IFS_T3_AVG_MSK,
	.ifs_cca_l_addr = R_IFS_CCA_L,
	.ifs_t2_cca_mask = B_IFS_T2_CCA_MSK,
	.ifs_t1_cca_mask = B_IFS_T1_CCA_MSK,
	.ifs_cca_h_addr = R_IFS_CCA_H,
	.ifs_t4_cca_mask = B_IFS_T4_CCA_MSK,
	.ifs_t3_cca_mask = B_IFS_T3_CCA_MSK,
	.ifs_total_addr = R_IFSCNT,
	.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
	.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
	.nhm = R_NHM_AX,
	.nhm_ready = B_NHM_READY_MSK,
	.nhm_config = R_NHM_CFG,
	.nhm_period_mask = B_NHM_PERIOD_MSK,
	.nhm_unit_mask = B_NHM_COUNTER_MSK,
	.nhm_include_cca_mask = B_NHM_INCLUDE_CCA_MSK,
	.nhm_en_mask = B_NHM_EN_MSK,
	.nhm_method = R_NHM_TH9,
	.nhm_pwr_method_msk = B_NHM_PWDB_METHOD_MSK,
};
8059 
/* PHY-status (PLCP histogram) register map for AX-generation chips. */
static const struct rtw89_physts_regs rtw89_physts_regs_ax = {
	.setting_addr = R_PLCP_HISTOGRAM,
	.dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL,
	.dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK,
};
8065 
/* CFO (carrier frequency offset) compensation register map for
 * AX-generation chips.
 */
static const struct rtw89_cfo_regs rtw89_cfo_regs_ax = {
	.comp = R_DCFO_WEIGHT,
	.weighting_mask = B_DCFO_WEIGHT_MSK,
	.comp_seg0 = R_DCFO_OPT,
	.valid_0_mask = B_DCFO_OPT_EN,
};
8072 
/* PHY generation descriptor for AX-generation chips: register maps and
 * generation-specific operation callbacks. NULL entries are operations
 * this generation does not need.
 */
const struct rtw89_phy_gen_def rtw89_phy_gen_ax = {
	.cr_base = 0x10000,
	.ccx = &rtw89_ccx_regs_ax,
	.physts = &rtw89_physts_regs_ax,
	.cfo = &rtw89_cfo_regs_ax,
	.phy0_phy1_offset = rtw89_phy0_phy1_offset_ax,
	.config_bb_gain = rtw89_phy_config_bb_gain_ax,
	.preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_ax,
	.bb_wrap_init = NULL,
	.ch_info_init = NULL,

	.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_ax,
	.set_txpwr_offset = rtw89_phy_set_txpwr_offset_ax,
	.set_txpwr_limit = rtw89_phy_set_txpwr_limit_ax,
	.set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_ax,
};
EXPORT_SYMBOL(rtw89_phy_gen_ax);
8090